/***********************license start***************
 * Copyright (c) 2003-2009 Cavium Networks (support@cavium.com). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *
 *     * Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials provided
 *       with the distribution.
 *
 *     * Neither the name of Cavium Networks nor the names of
 *       its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written
 *       permission.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS
 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
 * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
 * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET
 * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT
 * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 *
 *
 * For any questions regarding licensing please contact marketing@caviumnetworks.com
 *
 ***********************license end**************************************/

/**
 * @file
 *
 * Configuration and status register (CSR) address and type definitions for
 * Octeon. Include cvmx-csr.h instead of this file directly.
 *
 * This file is auto generated. Do not edit.
 *
 * $Revision: 41586 $
 *
 */
#ifndef __CVMX_CSR_TYPEDEFS_H__
#define __CVMX_CSR_TYPEDEFS_H__

/**
 * cvmx_agl_gmx_bad_reg
 *
 * AGL_GMX_BAD_REG = A collection of things that have gone very, very wrong
 *
 *
 * Notes:
 * OUT_OVR[0], OVRFLW, TXPOP, TXPSH will be reset when MIX0_CTL[RESET] is set to 1.
 * OUT_OVR[1], OVRFLW1, TXPOP1, TXPSH1 will be reset when MIX1_CTL[RESET] is set to 1.
 * LOSTSTAT and STATOVR will be reset when both MIX0/1_CTL[RESET] are set to 1.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_bad_reg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_38_63 : 26;
        uint64_t txpsh1         : 1;  /**< TX FIFO overflow (MII1) */
        uint64_t txpop1         : 1;  /**< TX FIFO underflow (MII1) */
        uint64_t ovrflw1        : 1;  /**< RX FIFO overflow (MII1) */
        uint64_t txpsh          : 1;  /**< TX FIFO overflow */
        uint64_t txpop          : 1;  /**< TX FIFO underflow */
        uint64_t ovrflw         : 1;  /**< RX FIFO overflow */
        uint64_t reserved_27_31 : 5;
        uint64_t statovr        : 1;  /**< TX Statistics overflow */
        uint64_t reserved_23_25 : 3;
        uint64_t loststat       : 1;  /**< TX Statistics data was over-written
                                           TX Stats are corrupted */
        uint64_t reserved_4_21  : 18;
        uint64_t out_ovr        : 2;  /**< Outbound data FIFO overflow */
        uint64_t reserved_0_1   : 2;
#else
        uint64_t reserved_0_1   : 2;
        uint64_t out_ovr        : 2;
        uint64_t reserved_4_21  : 18;
        uint64_t loststat       : 1;
        uint64_t reserved_23_25 : 3;
        uint64_t statovr        : 1;
        uint64_t reserved_27_31 : 5;
        uint64_t ovrflw         : 1;
        uint64_t txpop          : 1;
        uint64_t txpsh          : 1;
        uint64_t ovrflw1        : 1;
        uint64_t txpop1         : 1;
        uint64_t txpsh1         : 1;
        uint64_t reserved_38_63 : 26;
#endif
    } s;
    struct cvmx_agl_gmx_bad_reg_s      cn52xx;
    struct cvmx_agl_gmx_bad_reg_s      cn52xxp1;
    struct cvmx_agl_gmx_bad_reg_cn56xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_35_63 : 29;
        uint64_t txpsh          : 1;  /**< TX FIFO overflow */
        uint64_t txpop          : 1;  /**< TX FIFO underflow */
        uint64_t ovrflw         : 1;  /**< RX FIFO overflow */
        uint64_t reserved_27_31 : 5;
        uint64_t statovr        : 1;  /**< TX Statistics overflow */
        uint64_t reserved_23_25 : 3;
        uint64_t loststat       : 1;  /**< TX Statistics data was over-written
                                           TX Stats are corrupted */
        uint64_t reserved_3_21  : 19;
        uint64_t out_ovr        : 1;  /**< Outbound data FIFO overflow */
        uint64_t reserved_0_1   : 2;
#else
        uint64_t reserved_0_1   : 2;
        uint64_t out_ovr        : 1;
        uint64_t reserved_3_21  : 19;
        uint64_t loststat       : 1;
        uint64_t reserved_23_25 : 3;
        uint64_t statovr        : 1;
        uint64_t reserved_27_31 : 5;
        uint64_t ovrflw         : 1;
        uint64_t txpop          : 1;
        uint64_t txpsh          : 1;
        uint64_t reserved_35_63 : 29;
#endif
    } cn56xx;
    struct cvmx_agl_gmx_bad_reg_cn56xx cn56xxp1;
} cvmx_agl_gmx_bad_reg_t;

/**
 * cvmx_agl_gmx_bist
 *
 * AGL_GMX_BIST = GMX BIST Results
 *
 *
 * Notes:
 * Not reset when MIX*_CTL[RESET] is set to 1.
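 *
 * As an illustrative check (not part of the generated description), software
 * can confirm that no memory failed BIST, assuming the cvmx_read_csr()
 * accessor and the CVMX_AGL_GMX_BIST address macro from the companion cvmx
 * headers:
 *
 * @verbatim
 * cvmx_agl_gmx_bist_t bist;
 * bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
 * if (bist.s.status != 0)
 *     ;  // at least one RAM failed BIST - see the STATUS bit map below
 * @endverbatim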
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_bist_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_10_63 : 54;
        uint64_t status         : 10; /**< BIST Results.
                                           HW sets a bit in BIST for memory that fails
                                           - 0: gmx#.inb.drf64x78m1_bist
                                           - 1: gmx#.outb.fif.drf64x71m1_bist
                                           - 2: gmx#.csr.gmi0.srf8x64m1_bist
                                           - 3: 0
                                           - 4: 0
                                           - 5: 0
                                           - 6: gmx#.csr.drf20x80m1_bist
                                           - 7: gmx#.outb.stat.drf16x27m1_bist
                                           - 8: gmx#.outb.stat.drf40x64m1_bist
                                           - 9: 0 */
#else
        uint64_t status         : 10;
        uint64_t reserved_10_63 : 54;
#endif
    } s;
    struct cvmx_agl_gmx_bist_s cn52xx;
    struct cvmx_agl_gmx_bist_s cn52xxp1;
    struct cvmx_agl_gmx_bist_s cn56xx;
    struct cvmx_agl_gmx_bist_s cn56xxp1;
} cvmx_agl_gmx_bist_t;

/**
 * cvmx_agl_gmx_drv_ctl
 *
 * AGL_GMX_DRV_CTL = GMX Drive Control
 *
 *
 * Notes:
 * NCTL, PCTL, BYP_EN will be reset when MIX0_CTL[RESET] is set to 1.
 * NCTL1, PCTL1, BYP_EN1 will be reset when MIX1_CTL[RESET] is set to 1.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_drv_ctl_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_49_63 : 15;
        uint64_t byp_en1        : 1;  /**< Compensation Controller Bypass Enable (MII1) */
        uint64_t reserved_45_47 : 3;
        uint64_t pctl1          : 5;  /**< AGL PCTL (MII1) */
        uint64_t reserved_37_39 : 3;
        uint64_t nctl1          : 5;  /**< AGL NCTL (MII1) */
        uint64_t reserved_17_31 : 15;
        uint64_t byp_en         : 1;  /**< Compensation Controller Bypass Enable */
        uint64_t reserved_13_15 : 3;
        uint64_t pctl           : 5;  /**< AGL PCTL */
        uint64_t reserved_5_7   : 3;
        uint64_t nctl           : 5;  /**< AGL NCTL */
#else
        uint64_t nctl           : 5;
        uint64_t reserved_5_7   : 3;
        uint64_t pctl           : 5;
        uint64_t reserved_13_15 : 3;
        uint64_t byp_en         : 1;
        uint64_t reserved_17_31 : 15;
        uint64_t nctl1          : 5;
        uint64_t reserved_37_39 : 3;
        uint64_t pctl1          : 5;
        uint64_t reserved_45_47 : 3;
        uint64_t byp_en1        : 1;
        uint64_t reserved_49_63 : 15;
#endif
    } s;
    struct cvmx_agl_gmx_drv_ctl_s      cn52xx;
    struct cvmx_agl_gmx_drv_ctl_s      cn52xxp1;
    struct cvmx_agl_gmx_drv_ctl_cn56xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_17_63 : 47;
        uint64_t byp_en         : 1;  /**< Compensation Controller Bypass Enable */
        uint64_t reserved_13_15 : 3;
        uint64_t pctl           : 5;  /**< AGL PCTL */
        uint64_t reserved_5_7   : 3;
        uint64_t nctl           : 5;  /**< AGL NCTL */
#else
        uint64_t nctl           : 5;
        uint64_t reserved_5_7   : 3;
        uint64_t pctl           : 5;
        uint64_t reserved_13_15 : 3;
        uint64_t byp_en         : 1;
        uint64_t reserved_17_63 : 47;
#endif
    } cn56xx;
    struct cvmx_agl_gmx_drv_ctl_cn56xx cn56xxp1;
} cvmx_agl_gmx_drv_ctl_t;

/**
 * cvmx_agl_gmx_inf_mode
 *
 * AGL_GMX_INF_MODE = Interface Mode
 *
 *
 * Notes:
 * Not reset when MIX*_CTL[RESET] is set to 1.
 *
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_inf_mode_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_2_63 : 62;
        uint64_t en            : 1;  /**< Interface Enable */
        uint64_t reserved_0_0  : 1;
#else
        uint64_t reserved_0_0  : 1;
        uint64_t en            : 1;
        uint64_t reserved_2_63 : 62;
#endif
    } s;
    struct cvmx_agl_gmx_inf_mode_s cn52xx;
    struct cvmx_agl_gmx_inf_mode_s cn52xxp1;
    struct cvmx_agl_gmx_inf_mode_s cn56xx;
    struct cvmx_agl_gmx_inf_mode_s cn56xxp1;
} cvmx_agl_gmx_inf_mode_t;

/**
 * cvmx_agl_gmx_prt#_cfg
 *
 * AGL_GMX_PRT_CFG = Port description
 *
 *
 * Notes:
 * Additionally reset when MIX_CTL[RESET] is set to 1.
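 *
 * As a usage sketch (illustrative, not part of the generated description),
 * bringing a port up for 10/100 full-duplex operation could look like the
 * following, assuming the cvmx_read_csr()/cvmx_write_csr() accessors and the
 * CVMX_AGL_GMX_PRTX_CFG(port) address macro from the companion cvmx headers:
 *
 * @verbatim
 * cvmx_agl_gmx_prtx_cfg_t cfg;
 * cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
 * cfg.s.speed    = 0;  // 10/100Mbs operation (1 is reserved)
 * cfg.s.slottime = 0;  // 512 bit-times (1 is reserved)
 * cfg.s.duplex   = 1;  // full duplex
 * cfg.s.tx_en    = 1;  // allow TX MII cycles
 * cfg.s.rx_en    = 1;  // allow RX MII cycles
 * cfg.s.en       = 1;  // link enable
 * cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), cfg.u64);
 * @endverbatim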
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_prtx_cfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_6_63 : 58;
        uint64_t tx_en         : 1;  /**< Port enable. Must be set for Octane to send
                                          MII traffic. When this bit is clear on a
                                          given port, all MII cycles will appear as
                                          inter-frame cycles. */
        uint64_t rx_en         : 1;  /**< Port enable. Must be set for Octane to receive
                                          MII traffic. When this bit is clear on a
                                          given port, all MII cycles will appear as
                                          inter-frame cycles. */
        uint64_t slottime      : 1;  /**< Slot Time for Half-Duplex operation
                                          0 = 512 bit-times (10/100Mbs operation)
                                          1 = Reserved */
        uint64_t duplex        : 1;  /**< Duplex
                                          0 = Half Duplex (collisions/extensions/bursts)
                                          1 = Full Duplex */
        uint64_t speed         : 1;  /**< Link Speed
                                          0 = 10/100Mbs operation
                                          1 = Reserved */
        uint64_t en            : 1;  /**< Link Enable
                                          When EN is clear, packets will not be
                                          received or transmitted (including PAUSE
                                          and JAM packets). If EN is cleared while a
                                          packet is currently being received or
                                          transmitted, the packet will be allowed to
                                          complete before the bus is idled. On the
                                          RX side, subsequent packets in a burst
                                          will be ignored. */
#else
        uint64_t en            : 1;
        uint64_t speed         : 1;
        uint64_t duplex        : 1;
        uint64_t slottime      : 1;
        uint64_t rx_en         : 1;
        uint64_t tx_en         : 1;
        uint64_t reserved_6_63 : 58;
#endif
    } s;
    struct cvmx_agl_gmx_prtx_cfg_s cn52xx;
    struct cvmx_agl_gmx_prtx_cfg_s cn52xxp1;
    struct cvmx_agl_gmx_prtx_cfg_s cn56xx;
    struct cvmx_agl_gmx_prtx_cfg_s cn56xxp1;
} cvmx_agl_gmx_prtx_cfg_t;

/**
 * cvmx_agl_gmx_rx#_adr_cam0
 *
 * AGL_GMX_RX_ADR_CAM = Address Filtering Control
 *
 *
 * Notes:
 * Not reset when MIX*_CTL[RESET] is set to 1.
 *
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rxx_adr_cam0_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t adr : 64; /**< The DMAC address to match on
                                Each entry contributes 8 bits to one of 8 matchers
                                Write transactions to AGL_GMX_RX_ADR_CAM will not
                                change the CSR when AGL_GMX_PRT_CFG[EN] is enabled
                                The CAM matches against unicast or multicast DMAC
                                addresses. */
#else
        uint64_t adr : 64;
#endif
    } s;
    struct cvmx_agl_gmx_rxx_adr_cam0_s cn52xx;
    struct cvmx_agl_gmx_rxx_adr_cam0_s cn52xxp1;
    struct cvmx_agl_gmx_rxx_adr_cam0_s cn56xx;
    struct cvmx_agl_gmx_rxx_adr_cam0_s cn56xxp1;
} cvmx_agl_gmx_rxx_adr_cam0_t;

/**
 * cvmx_agl_gmx_rx#_adr_cam1
 *
 * AGL_GMX_RX_ADR_CAM = Address Filtering Control
 *
 *
 * Notes:
 * Not reset when MIX*_CTL[RESET] is set to 1.
 *
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rxx_adr_cam1_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t adr : 64; /**< The DMAC address to match on
                                Each entry contributes 8 bits to one of 8 matchers
                                Write transactions to AGL_GMX_RX_ADR_CAM will not
                                change the CSR when AGL_GMX_PRT_CFG[EN] is enabled
                                The CAM matches against unicast or multicast DMAC
                                addresses. */
#else
        uint64_t adr : 64;
#endif
    } s;
    struct cvmx_agl_gmx_rxx_adr_cam1_s cn52xx;
    struct cvmx_agl_gmx_rxx_adr_cam1_s cn52xxp1;
    struct cvmx_agl_gmx_rxx_adr_cam1_s cn56xx;
    struct cvmx_agl_gmx_rxx_adr_cam1_s cn56xxp1;
} cvmx_agl_gmx_rxx_adr_cam1_t;

/**
 * cvmx_agl_gmx_rx#_adr_cam2
 *
 * AGL_GMX_RX_ADR_CAM = Address Filtering Control
 *
 *
 * Notes:
 * Not reset when MIX*_CTL[RESET] is set to 1.
 *
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rxx_adr_cam2_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t adr : 64; /**< The DMAC address to match on
                                Each entry contributes 8 bits to one of 8 matchers
                                Write transactions to AGL_GMX_RX_ADR_CAM will not
                                change the CSR when AGL_GMX_PRT_CFG[EN] is enabled
                                The CAM matches against unicast or multicast DMAC
                                addresses. */
#else
        uint64_t adr : 64;
#endif
    } s;
    struct cvmx_agl_gmx_rxx_adr_cam2_s cn52xx;
    struct cvmx_agl_gmx_rxx_adr_cam2_s cn52xxp1;
    struct cvmx_agl_gmx_rxx_adr_cam2_s cn56xx;
    struct cvmx_agl_gmx_rxx_adr_cam2_s cn56xxp1;
} cvmx_agl_gmx_rxx_adr_cam2_t;

/**
 * cvmx_agl_gmx_rx#_adr_cam3
 *
 * AGL_GMX_RX_ADR_CAM = Address Filtering Control
 *
 *
 * Notes:
 * Not reset when MIX*_CTL[RESET] is set to 1.
 *
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rxx_adr_cam3_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t adr : 64; /**< The DMAC address to match on
                                Each entry contributes 8 bits to one of 8 matchers
                                Write transactions to AGL_GMX_RX_ADR_CAM will not
                                change the CSR when AGL_GMX_PRT_CFG[EN] is enabled
                                The CAM matches against unicast or multicast DMAC
                                addresses. */
#else
        uint64_t adr : 64;
#endif
    } s;
    struct cvmx_agl_gmx_rxx_adr_cam3_s cn52xx;
    struct cvmx_agl_gmx_rxx_adr_cam3_s cn52xxp1;
    struct cvmx_agl_gmx_rxx_adr_cam3_s cn56xx;
    struct cvmx_agl_gmx_rxx_adr_cam3_s cn56xxp1;
} cvmx_agl_gmx_rxx_adr_cam3_t;

/**
 * cvmx_agl_gmx_rx#_adr_cam4
 *
 * AGL_GMX_RX_ADR_CAM = Address Filtering Control
 *
 *
 * Notes:
 * Not reset when MIX*_CTL[RESET] is set to 1.
 *
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rxx_adr_cam4_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t adr : 64; /**< The DMAC address to match on
                                Each entry contributes 8 bits to one of 8 matchers
                                Write transactions to AGL_GMX_RX_ADR_CAM will not
                                change the CSR when AGL_GMX_PRT_CFG[EN] is enabled
                                The CAM matches against unicast or multicast DMAC
                                addresses. */
#else
        uint64_t adr : 64;
#endif
    } s;
    struct cvmx_agl_gmx_rxx_adr_cam4_s cn52xx;
    struct cvmx_agl_gmx_rxx_adr_cam4_s cn52xxp1;
    struct cvmx_agl_gmx_rxx_adr_cam4_s cn56xx;
    struct cvmx_agl_gmx_rxx_adr_cam4_s cn56xxp1;
} cvmx_agl_gmx_rxx_adr_cam4_t;

/**
 * cvmx_agl_gmx_rx#_adr_cam5
 *
 * AGL_GMX_RX_ADR_CAM = Address Filtering Control
 *
 *
 * Notes:
 * Not reset when MIX*_CTL[RESET] is set to 1.
 *
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rxx_adr_cam5_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t adr : 64; /**< The DMAC address to match on
                                Each entry contributes 8 bits to one of 8 matchers
                                Write transactions to AGL_GMX_RX_ADR_CAM will not
                                change the CSR when AGL_GMX_PRT_CFG[EN] is enabled
                                The CAM matches against unicast or multicast DMAC
                                addresses. */
#else
        uint64_t adr : 64;
#endif
    } s;
    struct cvmx_agl_gmx_rxx_adr_cam5_s cn52xx;
    struct cvmx_agl_gmx_rxx_adr_cam5_s cn52xxp1;
    struct cvmx_agl_gmx_rxx_adr_cam5_s cn56xx;
    struct cvmx_agl_gmx_rxx_adr_cam5_s cn56xxp1;
} cvmx_agl_gmx_rxx_adr_cam5_t;

/**
 * cvmx_agl_gmx_rx#_adr_cam_en
 *
 * AGL_GMX_RX_ADR_CAM_EN = Address Filtering Control Enable
 *
 *
 * Notes:
 * Additionally reset when MIX_CTL[RESET] is set to 1.
 *
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rxx_adr_cam_en_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_8_63 : 56;
        uint64_t en            : 8;  /**< CAM Entry Enables */
#else
        uint64_t en            : 8;
        uint64_t reserved_8_63 : 56;
#endif
    } s;
    struct cvmx_agl_gmx_rxx_adr_cam_en_s cn52xx;
    struct cvmx_agl_gmx_rxx_adr_cam_en_s cn52xxp1;
    struct cvmx_agl_gmx_rxx_adr_cam_en_s cn56xx;
    struct cvmx_agl_gmx_rxx_adr_cam_en_s cn56xxp1;
} cvmx_agl_gmx_rxx_adr_cam_en_t;

/**
 * cvmx_agl_gmx_rx#_adr_ctl
 *
 * AGL_GMX_RX_ADR_CTL = Address Filtering Control
 *
 *
 * Notes:
 *
 * ALGORITHM
 *   Here is some pseudo code that represents the address filter behavior.
 *
 * @verbatim
 * bool dmac_addr_filter(uint8 prt, uint48 dmac) [
 *   ASSERT(prt >= 0 && prt <= 3);
 *   if (is_bcst(dmac))                                       // broadcast accept
 *     return (AGL_GMX_RX[prt]_ADR_CTL[BCST] ? ACCEPT : REJECT);
 *   if (is_mcst(dmac) & AGL_GMX_RX[prt]_ADR_CTL[MCST] == 1)  // multicast reject
 *     return REJECT;
 *   if (is_mcst(dmac) & AGL_GMX_RX[prt]_ADR_CTL[MCST] == 2)  // multicast accept
 *     return ACCEPT;
 *
 *   cam_hit = 0;
 *
 *   for (i=0; i<8; i++) [
 *     if (AGL_GMX_RX[prt]_ADR_CAM_EN[EN<i>] == 0)
 *       continue;
 *     uint48 unswizzled_mac_adr = 0x0;
 *     for (j=5; j>=0; j--) [
 *       unswizzled_mac_adr = (unswizzled_mac_adr << 8) | AGL_GMX_RX[prt]_ADR_CAM[j][ADR<i>];
 *     ]
 *     if (unswizzled_mac_adr == dmac) [
 *       cam_hit = 1;
 *       break;
 *     ]
 *   ]
 *
 *   if (cam_hit)
 *     return (AGL_GMX_RX[prt]_ADR_CTL[CAM_MODE] ? ACCEPT : REJECT);
 *   else
 *     return (AGL_GMX_RX[prt]_ADR_CTL[CAM_MODE] ? REJECT : ACCEPT);
 * ]
 * @endverbatim
 *
 * Additionally reset when MIX_CTL[RESET] is set to 1.
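 *
 * For illustration (not part of the generated description), a sketch that
 * loads CAM entry <e> with a DMAC and enables it, following the byte ordering
 * of the pseudo code above (byte 0 of the DMAC is its least significant octet
 * and lives in CAM0). The cvmx_read_csr()/cvmx_write_csr() accessors and the
 * CVMX_AGL_GMX_RXX_ADR_CAM0(port)..CAM5(port) and
 * CVMX_AGL_GMX_RXX_ADR_CAM_EN(port) address macros are assumed to come from
 * the companion cvmx headers. AGL_GMX_PRT_CFG[EN] must be clear for the CAM
 * writes to take effect (see the CAM description above):
 *
 * @verbatim
 * static void agl_cam_program(int port, int e, const uint8_t mac[6])
 * {
 *     // mac[0] is the first octet on the wire (DMAC bits 47:40)
 *     const uint64_t cam_csr[6] = {
 *         CVMX_AGL_GMX_RXX_ADR_CAM0(port), CVMX_AGL_GMX_RXX_ADR_CAM1(port),
 *         CVMX_AGL_GMX_RXX_ADR_CAM2(port), CVMX_AGL_GMX_RXX_ADR_CAM3(port),
 *         CVMX_AGL_GMX_RXX_ADR_CAM4(port), CVMX_AGL_GMX_RXX_ADR_CAM5(port)
 *     };
 *     cvmx_agl_gmx_rxx_adr_cam_en_t en;
 *     int j;
 *
 *     for (j = 0; j < 6; j++)
 *     {
 *         cvmx_agl_gmx_rxx_adr_cam0_t cam;  // all six CAM regs share this layout
 *         cam.u64 = cvmx_read_csr(cam_csr[j]);
 *         cam.s.adr &= ~(0xffull << (8 * e));            // clear byte lane e
 *         cam.s.adr |= (uint64_t)mac[5 - j] << (8 * e);  // DMAC byte j of entry e
 *         cvmx_write_csr(cam_csr[j], cam.u64);
 *     }
 *     en.u64 = cvmx_read_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port));
 *     en.s.en |= 1ull << e;
 *     cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), en.u64);
 * }
 * @endverbatim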
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rxx_adr_ctl_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_4_63 : 60;
        uint64_t cam_mode      : 1;  /**< Allow or deny DMAC address filter
                                          0 = reject the packet on DMAC address match
                                          1 = accept the packet on DMAC address match */
        uint64_t mcst          : 2;  /**< Multicast Mode
                                          0 = Use the Address Filter CAM
                                          1 = Force reject all multicast packets
                                          2 = Force accept all multicast packets
                                          3 = Reserved */
        uint64_t bcst          : 1;  /**< Accept All Broadcast Packets */
#else
        uint64_t bcst          : 1;
        uint64_t mcst          : 2;
        uint64_t cam_mode      : 1;
        uint64_t reserved_4_63 : 60;
#endif
    } s;
    struct cvmx_agl_gmx_rxx_adr_ctl_s cn52xx;
    struct cvmx_agl_gmx_rxx_adr_ctl_s cn52xxp1;
    struct cvmx_agl_gmx_rxx_adr_ctl_s cn56xx;
    struct cvmx_agl_gmx_rxx_adr_ctl_s cn56xxp1;
} cvmx_agl_gmx_rxx_adr_ctl_t;

/**
 * cvmx_agl_gmx_rx#_decision
 *
 * AGL_GMX_RX_DECISION = The byte count to decide when to accept or filter a packet
 *
 *
 * Notes:
 * As each byte in a packet is received by GMX, the L2 byte count is compared
 * against the AGL_GMX_RX_DECISION[CNT]. The L2 byte count is the number of bytes
 * from the beginning of the L2 header (DMAC). In normal operation, the L2
 * header begins after the PREAMBLE+SFD (AGL_GMX_RX_FRM_CTL[PRE_CHK]=1) and any
 * optional UDD skip data (AGL_GMX_RX_UDD_SKP[LEN]).
 *
 * When AGL_GMX_RX_FRM_CTL[PRE_CHK] is clear, PREAMBLE+SFD are prepended to the
 * packet and would require UDD skip length to account for them.
 *
 *                                            L2 Size
 * Port Mode        <=AGL_GMX_RX_DECISION bytes (default=24)  >AGL_GMX_RX_DECISION bytes (default=24)
 *
 * MII/Full Duplex  accept packet                             apply filters
 *                  no filtering is applied                   accept packet based on DMAC and PAUSE packet filters
 *
 * MII/Half Duplex  drop packet                               apply filters
 *                  packet is unconditionally dropped         accept packet based on DMAC
 *
 * where l2_size = MAX(0, total_packet_size - AGL_GMX_RX_UDD_SKP[LEN] - ((AGL_GMX_RX_FRM_CTL[PRE_CHK]==1)*8))
 *
 * Additionally reset when MIX_CTL[RESET] is set to 1.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rxx_decision_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_5_63 : 59;
        uint64_t cnt           : 5;  /**< The byte count to decide when to accept or
                                          filter a packet. */
#else
        uint64_t cnt           : 5;
        uint64_t reserved_5_63 : 59;
#endif
    } s;
    struct cvmx_agl_gmx_rxx_decision_s cn52xx;
    struct cvmx_agl_gmx_rxx_decision_s cn52xxp1;
    struct cvmx_agl_gmx_rxx_decision_s cn56xx;
    struct cvmx_agl_gmx_rxx_decision_s cn56xxp1;
} cvmx_agl_gmx_rxx_decision_t;

/**
 * cvmx_agl_gmx_rx#_frm_chk
 *
 * AGL_GMX_RX_FRM_CHK = Which frame errors will set the ERR bit of the frame
 *
 *
 * Notes:
 * If AGL_GMX_RX_UDD_SKP[LEN] != 0, then LENERR will be forced to zero in HW.
 *
 * Additionally reset when MIX_CTL[RESET] is set to 1.
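 *
 * A configuration sketch (illustrative, not part of the generated
 * description): enable every frame check, but honor the note above that HW
 * forces LENERR to zero when AGL_GMX_RX_UDD_SKP[LEN] != 0. The accessors and
 * the CVMX_AGL_GMX_RXX_FRM_CHK(port)/CVMX_AGL_GMX_RXX_UDD_SKP(port) address
 * macros are assumed from the companion cvmx headers:
 *
 * @verbatim
 * cvmx_agl_gmx_rxx_udd_skp_t skp;
 * cvmx_agl_gmx_rxx_frm_chk_t chk;
 * skp.u64 = cvmx_read_csr(CVMX_AGL_GMX_RXX_UDD_SKP(port));
 * chk.u64 = 0;
 * chk.s.minerr = 1;  chk.s.maxerr = 1;  chk.s.jabber = 1;
 * chk.s.fcserr = 1;  chk.s.alnerr = 1;  chk.s.rcverr = 1;
 * chk.s.skperr = 1;
 * chk.s.lenerr = (skp.s.len == 0);  // forced to zero by HW otherwise
 * cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CHK(port), chk.u64);
 * @endverbatim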
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rxx_frm_chk_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_9_63 : 55;
        uint64_t skperr        : 1;  /**< Skipper error */
        uint64_t rcverr        : 1;  /**< Frame was received with MII Data reception error */
        uint64_t lenerr        : 1;  /**< Frame was received with length error */
        uint64_t alnerr        : 1;  /**< Frame was received with an alignment error */
        uint64_t fcserr        : 1;  /**< Frame was received with FCS/CRC error */
        uint64_t jabber        : 1;  /**< Frame was received with length > sys_length */
        uint64_t maxerr        : 1;  /**< Frame was received with length > max_length */
        uint64_t reserved_1_1  : 1;
        uint64_t minerr        : 1;  /**< Frame was received with length < min_length */
#else
        uint64_t minerr        : 1;
        uint64_t reserved_1_1  : 1;
        uint64_t maxerr        : 1;
        uint64_t jabber        : 1;
        uint64_t fcserr        : 1;
        uint64_t alnerr        : 1;
        uint64_t lenerr        : 1;
        uint64_t rcverr        : 1;
        uint64_t skperr        : 1;
        uint64_t reserved_9_63 : 55;
#endif
    } s;
    struct cvmx_agl_gmx_rxx_frm_chk_s cn52xx;
    struct cvmx_agl_gmx_rxx_frm_chk_s cn52xxp1;
    struct cvmx_agl_gmx_rxx_frm_chk_s cn56xx;
    struct cvmx_agl_gmx_rxx_frm_chk_s cn56xxp1;
} cvmx_agl_gmx_rxx_frm_chk_t;

/**
 * cvmx_agl_gmx_rx#_frm_ctl
 *
 * AGL_GMX_RX_FRM_CTL = Frame Control
 *
 *
 * Notes:
 * * PRE_CHK
 *   When set, the MII state expects a typical frame consisting of
 *   INTER_FRAME=>PREAMBLE(x7)=>SFD(x1)=>DAT. The state machine watches for
 *   this exact sequence in order to recognize a valid frame and push frame
 *   data into the Octane. There must be exactly 7 PREAMBLE cycles followed by
 *   the single SFD cycle for the frame to be accepted.
 *
 *   When a problem does occur within the PREAMBLE sequence, the frame is
 *   marked as bad and not sent into the core. The AGL_GMX_RX_INT_REG[PCTERR]
 *   interrupt is also raised.
 *
 * * PRE_STRP
 *   When PRE_CHK is set (indicating that the PREAMBLE will be sent), PRE_STRP
 *   determines if the PREAMBLE+SFD bytes are thrown away or sent to the Octane
 *   core as part of the packet.
 *
 *   In either mode, the PREAMBLE+SFD bytes are not counted toward the packet
 *   size when checking against the MIN and MAX bounds. Furthermore, the bytes
 *   are skipped when locating the start of the L2 header for DMAC and Control
 *   frame recognition.
 *
 * * CTL_BCK/CTL_DRP
 *   These bits control how the HW handles incoming PAUSE packets. Here are
 *   the most common modes of operation:
 *     CTL_BCK=1,CTL_DRP=1 - HW does it all
 *     CTL_BCK=0,CTL_DRP=0 - SW sees all pause frames
 *     CTL_BCK=0,CTL_DRP=1 - all pause frames are completely ignored
 *
 *   These control bits should be set to CTL_BCK=0,CTL_DRP=0 in halfdup mode.
 *   Since PAUSE packets only apply to fulldup operation, any PAUSE packet
 *   would constitute an exception which should be handled by the processing
 *   cores. PAUSE packets should not be forwarded.
 *
 * Additionally reset when MIX_CTL[RESET] is set to 1.
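 *
 * To make the PAUSE-handling modes above concrete, an illustrative sketch
 * (full_duplex is a hypothetical flag; the accessors and the
 * CVMX_AGL_GMX_RXX_FRM_CTL(port) address macro are assumed from the
 * companion cvmx headers):
 *
 * @verbatim
 * cvmx_agl_gmx_rxx_frm_ctl_t ctl;
 * ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port));
 * if (full_duplex)
 * {
 *     ctl.s.ctl_bck = 1;  // HW does it all:
 *     ctl.s.ctl_drp = 1;  // honor PAUSE frames, then discard them
 * }
 * else
 * {
 *     ctl.s.ctl_bck = 0;  // halfdup: PAUSE does not apply, so pass
 *     ctl.s.ctl_drp = 0;  // any PAUSE frame up to the cores
 * }
 * cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), ctl.u64);
 * @endverbatim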
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rxx_frm_ctl_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_10_63 : 54;
        uint64_t pre_align      : 1;  /**< When set, PREAMBLE parser aligns the SFD byte
                                           regardless of the number of previous PREAMBLE
                                           nibbles. In this mode, PREAMBLE can be
                                           consumed by the HW so when PRE_ALIGN is set,
                                           PRE_FREE, PRE_STRP must be set for correct
                                           operation.
                                           PRE_CHK must be set to enable this and all
                                           PREAMBLE features. */
        uint64_t pad_len        : 1;  /**< When set, disables the length check for
                                           non-min sized pkts with padding in the
                                           client data */
        uint64_t vlan_len       : 1;  /**< When set, disables the length check for
                                           VLAN pkts */
        uint64_t pre_free       : 1;  /**< When set, PREAMBLE checking is less strict.
                                           0 - 254 cycles of PREAMBLE followed by SFD
                                           PRE_FREE must be set if PRE_ALIGN is set.
                                           PRE_CHK must be set to enable this and all
                                           PREAMBLE features. */
        uint64_t ctl_smac       : 1;  /**< Control Pause Frames can match station SMAC */
        uint64_t ctl_mcst       : 1;  /**< Control Pause Frames can match globally
                                           assign Multicast address */
        uint64_t ctl_bck        : 1;  /**< Forward pause information to TX block */
        uint64_t ctl_drp        : 1;  /**< Drop Control Pause Frames */
        uint64_t pre_strp       : 1;  /**< Strip off the preamble (when present)
                                           0=PREAMBLE+SFD is sent to core as part of frame
                                           1=PREAMBLE+SFD is dropped
                                           PRE_STRP must be set if PRE_ALIGN is set.
                                           PRE_CHK must be set to enable this and all
                                           PREAMBLE features. */
        uint64_t pre_chk        : 1;  /**< This port is configured to send PREAMBLE+SFD
                                           to begin every frame. GMX checks that the
                                           PREAMBLE is sent correctly */
#else
        uint64_t pre_chk        : 1;
        uint64_t pre_strp       : 1;
        uint64_t ctl_drp        : 1;
        uint64_t ctl_bck        : 1;
        uint64_t ctl_mcst       : 1;
        uint64_t ctl_smac       : 1;
        uint64_t pre_free       : 1;
        uint64_t vlan_len       : 1;
        uint64_t pad_len        : 1;
        uint64_t pre_align      : 1;
        uint64_t reserved_10_63 : 54;
#endif
    } s;
    struct cvmx_agl_gmx_rxx_frm_ctl_s cn52xx;
    struct cvmx_agl_gmx_rxx_frm_ctl_s cn52xxp1;
    struct cvmx_agl_gmx_rxx_frm_ctl_s cn56xx;
    struct cvmx_agl_gmx_rxx_frm_ctl_s cn56xxp1;
} cvmx_agl_gmx_rxx_frm_ctl_t;

/**
 * cvmx_agl_gmx_rx#_frm_max
 *
 * AGL_GMX_RX_FRM_MAX = Frame Max length
 *
 *
 * Notes:
 * When changing the LEN field, be sure that LEN does not exceed
 * AGL_GMX_RX_JABBER[CNT]. Failure to meet this constraint will cause packets
 * that are within the maximum length parameter to be rejected because they
 * exceed the AGL_GMX_RX_JABBER[CNT] limit.
 *
 * Additionally reset when MIX_CTL[RESET] is set to 1.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rxx_frm_max_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_16_63 : 48;
        uint64_t len            : 16; /**< Byte count for Max-sized frame check
                                           Failing packets set the MAXERR interrupt
                                           and are optionally sent with opcode==MAXERR
                                           LEN <= AGL_GMX_RX_JABBER[CNT] */
#else
        uint64_t len            : 16;
        uint64_t reserved_16_63 : 48;
#endif
    } s;
    struct cvmx_agl_gmx_rxx_frm_max_s cn52xx;
    struct cvmx_agl_gmx_rxx_frm_max_s cn52xxp1;
    struct cvmx_agl_gmx_rxx_frm_max_s cn56xx;
    struct cvmx_agl_gmx_rxx_frm_max_s cn56xxp1;
} cvmx_agl_gmx_rxx_frm_max_t;

/**
 * cvmx_agl_gmx_rx#_frm_min
 *
 * AGL_GMX_RX_FRM_MIN = Frame Min length
 *
 *
 * Notes:
 * Additionally reset when MIX_CTL[RESET] is set to 1.
 *
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rxx_frm_min_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_16_63 : 48;
        uint64_t len            : 16; /**< Byte count for Min-sized frame check
                                           Failing packets set the MINERR interrupt
                                           and are optionally sent with opcode==MINERR */
#else
        uint64_t len            : 16;
        uint64_t reserved_16_63 : 48;
#endif
    } s;
    struct cvmx_agl_gmx_rxx_frm_min_s cn52xx;
    struct cvmx_agl_gmx_rxx_frm_min_s cn52xxp1;
    struct cvmx_agl_gmx_rxx_frm_min_s cn56xx;
    struct cvmx_agl_gmx_rxx_frm_min_s cn56xxp1;
} cvmx_agl_gmx_rxx_frm_min_t;

/**
 * cvmx_agl_gmx_rx#_ifg
 *
 * AGL_GMX_RX_IFG = RX Min IFG
 *
 *
 * Notes:
 * Additionally reset when MIX_CTL[RESET] is set to 1.
 *
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rxx_ifg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_4_63 : 60;
        uint64_t ifg           : 4;  /**< Min IFG between packets used to determine
                                          IFGERR */
#else
        uint64_t ifg           : 4;
        uint64_t reserved_4_63 : 60;
#endif
    } s;
    struct cvmx_agl_gmx_rxx_ifg_s cn52xx;
    struct cvmx_agl_gmx_rxx_ifg_s cn52xxp1;
    struct cvmx_agl_gmx_rxx_ifg_s cn56xx;
    struct cvmx_agl_gmx_rxx_ifg_s cn56xxp1;
} cvmx_agl_gmx_rxx_ifg_t;

/**
 * cvmx_agl_gmx_rx#_int_en
 *
 * AGL_GMX_RX_INT_EN = Interrupt Enable
 *
 *
 * Notes:
 * Additionally reset when MIX_CTL[RESET] is set to 1.
 *
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rxx_int_en_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_20_63 : 44;
        uint64_t pause_drp      : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
        uint64_t reserved_16_18 : 3;
        uint64_t ifgerr         : 1;  /**< Interframe Gap Violation */
        uint64_t coldet         : 1;  /**< Collision Detection */
        uint64_t falerr         : 1;  /**< False carrier error or extend error after
                                           slottime */
        uint64_t rsverr         : 1;  /**< MII reserved opcodes */
        uint64_t pcterr         : 1;  /**< Bad Preamble / Protocol */
        uint64_t ovrerr         : 1;  /**< Internal Data Aggregation Overflow */
        uint64_t reserved_9_9   : 1;
        uint64_t skperr         : 1;  /**< Skipper error */
        uint64_t rcverr         : 1;  /**< Frame was received with MII Data reception
                                           error */
        uint64_t lenerr         : 1;  /**< Frame was received with length error */
        uint64_t alnerr         : 1;  /**< Frame was received with an alignment error */
        uint64_t fcserr         : 1;  /**< Frame was received with FCS/CRC error */
        uint64_t jabber         : 1;  /**< Frame was received with length > sys_length */
        uint64_t maxerr         : 1;  /**< Frame was received with length > max_length */
        uint64_t reserved_1_1   : 1;
        uint64_t minerr         : 1;  /**< Frame was received with length < min_length */
#else
        uint64_t minerr         : 1;
        uint64_t reserved_1_1   : 1;
        uint64_t maxerr         : 1;
        uint64_t jabber         : 1;
        uint64_t fcserr         : 1;
        uint64_t alnerr         : 1;
        uint64_t lenerr         : 1;
        uint64_t rcverr         : 1;
        uint64_t skperr         : 1;
        uint64_t reserved_9_9   : 1;
        uint64_t ovrerr         : 1;
        uint64_t pcterr         : 1;
        uint64_t rsverr         : 1;
        uint64_t falerr         : 1;
        uint64_t coldet         : 1;
        uint64_t ifgerr         : 1;
        uint64_t reserved_16_18 : 3;
        uint64_t pause_drp      : 1;
        uint64_t reserved_20_63 : 44;
#endif
    } s;
    struct cvmx_agl_gmx_rxx_int_en_s cn52xx;
    struct cvmx_agl_gmx_rxx_int_en_s cn52xxp1;
    struct cvmx_agl_gmx_rxx_int_en_s cn56xx;
    struct cvmx_agl_gmx_rxx_int_en_s cn56xxp1;
} cvmx_agl_gmx_rxx_int_en_t;

/**
 * cvmx_agl_gmx_rx#_int_reg
 *
 * AGL_GMX_RX_INT_REG = Interrupt Register
 *
 *
 * Notes:
 * (1) exceptions will only be raised to the control processor if the
 *     corresponding bit in the AGL_GMX_RX_INT_EN register is set.
 *
 * (2) exception conditions 10:0 can also set the rcv/opcode in the received
 *     packet's workQ entry. The AGL_GMX_RX_FRM_CHK register provides a bit mask
 *     for configuring which conditions set the error.
 *
 * (3) in half duplex operation, the expectation is that collisions will appear
 *     as MINERRs.
 *
 * (4) JABBER - An RX Jabber error indicates that a packet was received which
 *              is longer than the maximum allowed packet as defined by the
 *              system. GMX will truncate the packet at the JABBER count.
 *              Failure to do so could lead to system instability.
 *
 * (6) MAXERR - for untagged frames, the total frame DA+SA+TL+DATA+PAD+FCS >
 *              AGL_GMX_RX_FRM_MAX. For tagged frames, DA+SA+VLAN+TL+DATA+PAD+FCS
 *              > AGL_GMX_RX_FRM_MAX + 4*VLAN_VAL + 4*VLAN_STACKED.
 *
 * (7) MINERR - total frame DA+SA+TL+DATA+PAD+FCS < AGL_GMX_RX_FRM_MIN.
 *
 * (8) ALNERR - Indicates that the packet received was not an integer number of
 *              bytes. If FCS checking is enabled, ALNERR will only assert if
 *              the FCS is bad. If FCS checking is disabled, ALNERR will
 *              assert in all non-integer frame cases.
 *
 * (9) Collisions - Collisions can only occur in half-duplex mode. A collision
 *                  is assumed by the receiver when the received
 *                  frame < AGL_GMX_RX_FRM_MIN - this is normally a MINERR
 *
 * (A) LENERR - Length errors occur when the received packet does not match the
 *              length field. LENERR is only checked for packets between 64
 *              and 1500 bytes. For untagged frames, the length must match
 *              exactly. For tagged frames the length or length+4 must match.
 *
 * (B) PCTERR - checks that the frame transitions from PREAMBLE=>SFD=>DATA.
 *              Does not check the number of PREAMBLE cycles.
 *
 * (C) OVRERR - Not to be included in the HRM
 *
 *              OVRERR is an architectural assertion check internal to GMX to
 *              make sure no assumption was violated. In a correctly operating
 *              system, this interrupt can never fire.
 *
 *              GMX has an internal arbiter which selects which of 4 ports to
 *              buffer in the main RX FIFO. If we normally buffer 8 bytes,
 *              then each port will typically push a tick every 8 cycles - if
 *              the packet interface is going as fast as possible. If there
 *              are four ports, they push every two cycles. So that's the
 *              assumption. That the inbound module will always be able to
 *              consume the tick before another is produced. If that doesn't
 *              happen - that's when OVRERR will assert.
 *
 * Additionally reset when MIX_CTL[RESET] is set to 1.
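 *
 * An illustrative service routine (not part of the generated description),
 * assuming the usual Octeon write-1-to-clear behavior for interrupt
 * registers - an assumption, since this comment does not restate it - and
 * the CVMX_AGL_GMX_RXX_INT_REG(port) address macro from the companion cvmx
 * headers:
 *
 * @verbatim
 * cvmx_agl_gmx_rxx_int_reg_t isr;
 * isr.u64 = cvmx_read_csr(CVMX_AGL_GMX_RXX_INT_REG(port));
 * if (isr.s.ovrerr)
 *     ;  // architectural assertion check - should never fire (see (C) above)
 * if (isr.s.pause_drp)
 *     ;  // a PAUSE packet was lost to a full GMX RX FIFO
 * cvmx_write_csr(CVMX_AGL_GMX_RXX_INT_REG(port), isr.u64);  // ack what we saw
 * @endverbatim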
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rxx_int_reg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_20_63 : 44;
        uint64_t pause_drp      : 1;  /**< Pause packet was dropped due to full GMX RX FIFO */
        uint64_t reserved_16_18 : 3;
        uint64_t ifgerr         : 1;  /**< Interframe Gap Violation
                                           Does not necessarily indicate a failure */
        uint64_t coldet         : 1;  /**< Collision Detection */
        uint64_t falerr         : 1;  /**< False carrier error or extend error after
                                           slottime */
        uint64_t rsverr         : 1;  /**< MII reserved opcodes */
        uint64_t pcterr         : 1;  /**< Bad Preamble / Protocol */
        uint64_t ovrerr         : 1;  /**< Internal Data Aggregation Overflow
                                           This interrupt should never assert */
        uint64_t reserved_9_9   : 1;
        uint64_t skperr         : 1;  /**< Skipper error */
        uint64_t rcverr         : 1;  /**< Frame was received with MII Data reception
                                           error */
        uint64_t lenerr         : 1;  /**< Frame was received with length error */
        uint64_t alnerr         : 1;  /**< Frame was received with an alignment error */
        uint64_t fcserr         : 1;  /**< Frame was received with FCS/CRC error */
        uint64_t jabber         : 1;  /**< Frame was received with length > sys_length */
        uint64_t maxerr         : 1;  /**< Frame was received with length > max_length */
        uint64_t reserved_1_1   : 1;
        uint64_t minerr         : 1;  /**< Frame was received with length < min_length */
#else
        uint64_t minerr         : 1;
        uint64_t reserved_1_1   : 1;
        uint64_t maxerr         : 1;
        uint64_t jabber         : 1;
        uint64_t fcserr         : 1;
        uint64_t alnerr         : 1;
        uint64_t lenerr         : 1;
        uint64_t rcverr         : 1;
        uint64_t skperr         : 1;
        uint64_t reserved_9_9   : 1;
        uint64_t ovrerr         : 1;
        uint64_t pcterr         : 1;
        uint64_t rsverr         : 1;
        uint64_t falerr         : 1;
        uint64_t coldet         : 1;
        uint64_t ifgerr         : 1;
        uint64_t reserved_16_18 : 3;
        uint64_t pause_drp      : 1;
        uint64_t reserved_20_63 : 44;
#endif
    } s;
    struct cvmx_agl_gmx_rxx_int_reg_s cn52xx;
    struct cvmx_agl_gmx_rxx_int_reg_s cn52xxp1;
    struct cvmx_agl_gmx_rxx_int_reg_s cn56xx;
    struct cvmx_agl_gmx_rxx_int_reg_s cn56xxp1;
} cvmx_agl_gmx_rxx_int_reg_t;

/**
 * cvmx_agl_gmx_rx#_jabber
 *
 * AGL_GMX_RX_JABBER = The max size packet after which GMX will truncate
 *
 *
 * Notes:
 * CNT must be 8-byte aligned such that CNT[2:0] == 0
 *
 * The packet that will be sent to the packet input logic will have an
 * additional 8 bytes if AGL_GMX_RX_FRM_CTL[PRE_CHK] is set and
 * AGL_GMX_RX_FRM_CTL[PRE_STRP] is clear. The max packet that will be sent is
 * defined as...
 *
 *     max_sized_packet = AGL_GMX_RX_JABBER[CNT]+((AGL_GMX_RX_FRM_CTL[PRE_CHK] & !AGL_GMX_RX_FRM_CTL[PRE_STRP])*8)
 *
 * Be sure the CNT field value is at least as large as the
 * AGL_GMX_RX_FRM_MAX[LEN] value. Failure to meet this constraint will cause
 * packets that are within the AGL_GMX_RX_FRM_MAX[LEN] length to be rejected
 * because they exceed the CNT limit.
 *
 * Additionally reset when MIX_CTL[RESET] is set to 1.
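 *
 * A sizing sketch that ties the two constraints together (illustrative; the
 * accessors and the CVMX_AGL_GMX_RXX_FRM_MAX(port)/CVMX_AGL_GMX_RXX_JABBER(port)
 * address macros are assumed from the companion cvmx headers):
 *
 * @verbatim
 * cvmx_agl_gmx_rxx_frm_max_t max;
 * cvmx_agl_gmx_rxx_jabber_t  jab;
 * max.u64 = 0;
 * jab.u64 = 0;
 * max.s.len = 1518;                     // largest frame we intend to accept
 * jab.s.cnt = (max.s.len + 7) & ~7ull;  // 8-byte aligned and >= FRM_MAX[LEN]
 * cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), max.u64);
 * cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port), jab.u64);
 * @endverbatim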
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rxx_jabber_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_16_63 : 48;
        uint64_t cnt            : 16; /**< Byte count for jabber check
                                           Failing packets set the JABBER interrupt
                                           and are optionally sent with opcode==JABBER
                                           GMX will truncate the packet to CNT bytes
                                           CNT >= AGL_GMX_RX_FRM_MAX[LEN] */
#else
        uint64_t cnt            : 16;
        uint64_t reserved_16_63 : 48;
#endif
    } s;
    struct cvmx_agl_gmx_rxx_jabber_s cn52xx;
    struct cvmx_agl_gmx_rxx_jabber_s cn52xxp1;
    struct cvmx_agl_gmx_rxx_jabber_s cn56xx;
    struct cvmx_agl_gmx_rxx_jabber_s cn56xxp1;
} cvmx_agl_gmx_rxx_jabber_t;

/**
 * cvmx_agl_gmx_rx#_pause_drop_time
 *
 * AGL_GMX_RX_PAUSE_DROP_TIME = The TIME field in a PAUSE Packet which was dropped due to GMX RX FIFO full condition
 *
 *
 * Notes:
 * Additionally reset when MIX_CTL[RESET] is set to 1.
 *
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rxx_pause_drop_time_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_16_63 : 48;
        uint64_t status         : 16; /**< Time extracted from the dropped PAUSE packet */
#else
        uint64_t status         : 16;
        uint64_t reserved_16_63 : 48;
#endif
    } s;
    struct cvmx_agl_gmx_rxx_pause_drop_time_s cn52xx;
    struct cvmx_agl_gmx_rxx_pause_drop_time_s cn52xxp1;
    struct cvmx_agl_gmx_rxx_pause_drop_time_s cn56xx;
    struct cvmx_agl_gmx_rxx_pause_drop_time_s cn56xxp1;
} cvmx_agl_gmx_rxx_pause_drop_time_t;

/**
 * cvmx_agl_gmx_rx#_stats_ctl
 *
 * AGL_GMX_RX_STATS_CTL = RX Stats Control register
 *
 *
 * Notes:
 * Additionally reset when MIX_CTL[RESET] is set to 1.
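 *
 * A read-to-clear usage sketch (illustrative; the accessors and the
 * CVMX_AGL_GMX_RXX_STATS_CTL(port)/CVMX_AGL_GMX_RXX_STATS_OCTS(port) address
 * macros are assumed from the companion cvmx headers). With RD_CLR set, each
 * read returns the count accumulated since the previous read:
 *
 * @verbatim
 * cvmx_agl_gmx_rxx_stats_ctl_t  sctl;
 * cvmx_agl_gmx_rxx_stats_octs_t octs;
 * sctl.u64 = 0;
 * sctl.s.rd_clr = 1;  // make every stats read clear the counter
 * cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port), sctl.u64);
 * octs.u64 = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_OCTS(port));
 * // octs.s.cnt now holds the good octets received since the previous read
 * @endverbatim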
 *
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rxx_stats_ctl_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_1_63 : 63;
        uint64_t rd_clr        : 1;  /**< RX Stats registers will clear on reads */
#else
        uint64_t rd_clr        : 1;
        uint64_t reserved_1_63 : 63;
#endif
    } s;
    struct cvmx_agl_gmx_rxx_stats_ctl_s cn52xx;
    struct cvmx_agl_gmx_rxx_stats_ctl_s cn52xxp1;
    struct cvmx_agl_gmx_rxx_stats_ctl_s cn56xx;
    struct cvmx_agl_gmx_rxx_stats_ctl_s cn56xxp1;
} cvmx_agl_gmx_rxx_stats_ctl_t;

/**
 * cvmx_agl_gmx_rx#_stats_octs
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rxx_stats_octs_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_48_63 : 16;
        uint64_t cnt            : 48; /**< Octet count of received good packets */
#else
        uint64_t cnt            : 48;
        uint64_t reserved_48_63 : 16;
#endif
    } s;
    struct cvmx_agl_gmx_rxx_stats_octs_s cn52xx;
    struct cvmx_agl_gmx_rxx_stats_octs_s cn52xxp1;
    struct cvmx_agl_gmx_rxx_stats_octs_s cn56xx;
    struct cvmx_agl_gmx_rxx_stats_octs_s cn56xxp1;
} cvmx_agl_gmx_rxx_stats_octs_t;

/**
 * cvmx_agl_gmx_rx#_stats_octs_ctl
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rxx_stats_octs_ctl_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_48_63 : 16;
        uint64_t cnt            : 48; /**< Octet count of received pause packets */
#else
        uint64_t cnt            : 48;
        uint64_t reserved_48_63 : 16;
#endif
    } s;
    struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn52xx;
    struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn52xxp1;
    struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn56xx;
    struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn56xxp1;
} cvmx_agl_gmx_rxx_stats_octs_ctl_t;

/**
 * cvmx_agl_gmx_rx#_stats_octs_dmac
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rxx_stats_octs_dmac_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_48_63 : 16;
        uint64_t cnt            : 48; /**< Octet count of filtered dmac packets */
#else
        uint64_t cnt            : 48;
        uint64_t reserved_48_63 : 16;
#endif
    } s;
    struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn52xx;
    struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn52xxp1;
    struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn56xx;
    struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn56xxp1;
} cvmx_agl_gmx_rxx_stats_octs_dmac_t;

/**
 * cvmx_agl_gmx_rx#_stats_octs_drp
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rxx_stats_octs_drp_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_48_63 : 16;
        uint64_t cnt            : 48; /**< Octet count of dropped packets */
#else
        uint64_t cnt            : 48;
        uint64_t reserved_48_63 : 16;
#endif
    } s;
    struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn52xx;
    struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn52xxp1;
    struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn56xx;
    struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn56xxp1;
} cvmx_agl_gmx_rxx_stats_octs_drp_t;

/**
 * cvmx_agl_gmx_rx#_stats_pkts
 *
 * AGL_GMX_RX_STATS_PKTS
 *
 * Count of good received packets - packets that are not recognized as PAUSE
 * packets, dropped due to the DMAC filter, dropped due to FIFO full status,
 * or have any other OPCODE (FCS, Length, etc.).
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rxx_stats_pkts_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_32_63 : 32;
        uint64_t cnt            : 32; /**< Count of received good packets */
#else
        uint64_t cnt            : 32;
        uint64_t reserved_32_63 : 32;
#endif
    } s;
    struct cvmx_agl_gmx_rxx_stats_pkts_s cn52xx;
    struct cvmx_agl_gmx_rxx_stats_pkts_s cn52xxp1;
    struct cvmx_agl_gmx_rxx_stats_pkts_s cn56xx;
    struct cvmx_agl_gmx_rxx_stats_pkts_s cn56xxp1;
} cvmx_agl_gmx_rxx_stats_pkts_t;
/**
 * cvmx_agl_gmx_rx#_stats_pkts_bad
 *
 * AGL_GMX_RX_STATS_PKTS_BAD
 *
 * Count of all packets received with some error that were not dropped
 * either due to the dmac filter or lack of room in the receive FIFO.
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rxx_stats_pkts_bad_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_32_63 : 32;
        uint64_t cnt            : 32; /**< Count of bad packets */
#else
        uint64_t cnt            : 32;
        uint64_t reserved_32_63 : 32;
#endif
    } s;
    struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn52xx;
    struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn52xxp1;
    struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn56xx;
    struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn56xxp1;
} cvmx_agl_gmx_rxx_stats_pkts_bad_t;

/**
 * cvmx_agl_gmx_rx#_stats_pkts_ctl
 *
 * AGL_GMX_RX_STATS_PKTS_CTL
 *
 * Count of all packets received that were recognized as Flow Control or
 * PAUSE packets. PAUSE packets with any kind of error are counted in
 * AGL_GMX_RX_STATS_PKTS_BAD. Pause packets can be optionally dropped or
 * forwarded based on the AGL_GMX_RX_FRM_CTL[CTL_DRP] bit. This count
 * increments regardless of whether the packet is dropped. Pause packets
 * will never be counted in AGL_GMX_RX_STATS_PKTS. Packets dropped due to the
 * dmac filter will be counted in AGL_GMX_RX_STATS_PKTS_DMAC and not here.
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_32_63 : 32;
        uint64_t cnt            : 32; /**< Count of received pause packets */
#else
        uint64_t cnt            : 32;
        uint64_t reserved_32_63 : 32;
#endif
    } s;
    struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn52xx;
    struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn52xxp1;
    struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn56xx;
    struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn56xxp1;
} cvmx_agl_gmx_rxx_stats_pkts_ctl_t;

/**
 * cvmx_agl_gmx_rx#_stats_pkts_dmac
 *
 * AGL_GMX_RX_STATS_PKTS_DMAC
 *
 * Count of all packets received that were dropped by the dmac filter.
 * Packets that match the DMAC will be dropped and counted here regardless
 * of whether they were bad packets. These packets will never be counted in
 * AGL_GMX_RX_STATS_PKTS.
 *
 * Some packets that were not able to satisfy the DECISION_CNT may not
 * actually be dropped by Octeon, but they will be counted here as if they
 * were dropped.
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_32_63 : 32;
        uint64_t cnt            : 32; /**< Count of filtered dmac packets */
#else
        uint64_t cnt            : 32;
        uint64_t reserved_32_63 : 32;
#endif
    } s;
    struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn52xx;
    struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn52xxp1;
    struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn56xx;
    struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn56xxp1;
} cvmx_agl_gmx_rxx_stats_pkts_dmac_t;

/**
 * cvmx_agl_gmx_rx#_stats_pkts_drp
 *
 * AGL_GMX_RX_STATS_PKTS_DRP
 *
 * Count of all packets received that were dropped due to a full receive
 * FIFO. This counts good and bad packets received - all packets dropped by
 * the FIFO. It does not count packets dropped by the dmac or pause packet
 * filters.
 *
 * Notes:
 * - Cleared either by a write (of any value) or a read when AGL_GMX_RX_STATS_CTL[RD_CLR] is set
 * - Counters will wrap
 * - Not reset when MIX*_CTL[RESET] is set to 1.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rxx_stats_pkts_drp_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_32_63 : 32;
        uint64_t cnt            : 32; /**< Count of dropped packets */
#else
        uint64_t cnt            : 32;
        uint64_t reserved_32_63 : 32;
#endif
    } s;
    struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn52xx;
    struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn52xxp1;
    struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn56xx;
    struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn56xxp1;
} cvmx_agl_gmx_rxx_stats_pkts_drp_t;

/**
 * cvmx_agl_gmx_rx#_udd_skp
 *
 * AGL_GMX_RX_UDD_SKP = Amount of User-defined data before the start of the L2 data
 *
 *
 * Notes:
 * (1) The skip bytes are part of the packet and will be sent down the NCB
 *     packet interface and will be handled by PKI.
 *
 * (2) The system can determine if the UDD bytes are included in the FCS check
 *     by using the FCSSEL field - if the FCS check is enabled.
 *
 * (3) Assume that the preamble/sfd is always at the start of the frame - even
 *     before UDD bytes. In most cases, there will be no preamble since it
 *     will be MII to MII communication without a PHY involved.
 *
 * (4) We can still do address filtering and control packet filtering if the
 *     user desires.
 *
 * (5) UDD_SKP must be 0 in half-duplex operation unless
 *     AGL_GMX_RX_FRM_CTL[PRE_CHK] is clear. If AGL_GMX_RX_FRM_CTL[PRE_CHK] is set,
 *     then UDD_SKP will normally be 8.
 *
 * (6) In all cases, the UDD bytes will be sent down the packet interface as
 *     part of the packet. The UDD bytes are never stripped from the actual
 *     packet.
 *
 * (7) If LEN != 0, then AGL_GMX_RX_FRM_CHK[LENERR] will be disabled and
 *     AGL_GMX_RX_INT_REG[LENERR] will be zero
 *
 * Additionally reset when MIX_CTL[RESET] is set to 1.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rxx_udd_skp_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_9_63 : 55;
        uint64_t fcssel        : 1;  /**< Include the skip bytes in the FCS calculation
                                          0 = all skip bytes are included in FCS
                                          1 = the skip bytes are not included in FCS */
        uint64_t reserved_7_7  : 1;
        uint64_t len           : 7;  /**< Amount of User-defined data before the start
                                          of the L2 data. Zero means L2 comes first.
                                          Max value is 64. */
#else
        uint64_t len           : 7;
        uint64_t reserved_7_7  : 1;
        uint64_t fcssel        : 1;
        uint64_t reserved_9_63 : 55;
#endif
    } s;
    struct cvmx_agl_gmx_rxx_udd_skp_s cn52xx;
    struct cvmx_agl_gmx_rxx_udd_skp_s cn52xxp1;
    struct cvmx_agl_gmx_rxx_udd_skp_s cn56xx;
    struct cvmx_agl_gmx_rxx_udd_skp_s cn56xxp1;
} cvmx_agl_gmx_rxx_udd_skp_t;
/**
 * cvmx_agl_gmx_rx_bp_drop#
 *
 * AGL_GMX_RX_BP_DROP = FIFO mark for packet drop
 *
 *
 * Notes:
 * Additionally reset when MIX_CTL[RESET] is set to 1.
 *
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rx_bp_dropx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_6_63 : 58;
        uint64_t mark          : 6;  /**< Number of 8B ticks to reserve in the RX FIFO.
                                          When the FIFO exceeds this count, packets will
                                          be dropped and not buffered.
                                          MARK should typically be programmed to 2.
                                          Failure to program correctly can lead to
                                          system instability. */
#else
        uint64_t mark          : 6;
        uint64_t reserved_6_63 : 58;
#endif
    } s;
    struct cvmx_agl_gmx_rx_bp_dropx_s cn52xx;
    struct cvmx_agl_gmx_rx_bp_dropx_s cn52xxp1;
    struct cvmx_agl_gmx_rx_bp_dropx_s cn56xx;
    struct cvmx_agl_gmx_rx_bp_dropx_s cn56xxp1;
} cvmx_agl_gmx_rx_bp_dropx_t;

/**
 * cvmx_agl_gmx_rx_bp_off#
 *
 * AGL_GMX_RX_BP_OFF = Lowater mark for packet drop
 *
 *
 * Notes:
 * Additionally reset when MIX_CTL[RESET] is set to 1.
 *
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rx_bp_offx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_6_63 : 58;
        uint64_t mark          : 6;  /**< Water mark (8B ticks) to deassert backpressure */
#else
        uint64_t mark          : 6;
        uint64_t reserved_6_63 : 58;
#endif
    } s;
    struct cvmx_agl_gmx_rx_bp_offx_s cn52xx;
    struct cvmx_agl_gmx_rx_bp_offx_s cn52xxp1;
    struct cvmx_agl_gmx_rx_bp_offx_s cn56xx;
    struct cvmx_agl_gmx_rx_bp_offx_s cn56xxp1;
} cvmx_agl_gmx_rx_bp_offx_t;

/**
 * cvmx_agl_gmx_rx_bp_on#
 *
 * AGL_GMX_RX_BP_ON = Hiwater mark for port/interface backpressure
 *
 *
 * Notes:
 * Additionally reset when MIX_CTL[RESET] is set to 1.
 *
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rx_bp_onx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_9_63 : 55;
        uint64_t mark          : 9;  /**< Hiwater mark (8B ticks) for backpressure. */
#else
        uint64_t mark          : 9;
        uint64_t reserved_9_63 : 55;
#endif
    } s;
    struct cvmx_agl_gmx_rx_bp_onx_s cn52xx;
    struct cvmx_agl_gmx_rx_bp_onx_s cn52xxp1;
    struct cvmx_agl_gmx_rx_bp_onx_s cn56xx;
    struct cvmx_agl_gmx_rx_bp_onx_s cn56xxp1;
} cvmx_agl_gmx_rx_bp_onx_t;

/**
 * cvmx_agl_gmx_rx_prt_info
 *
 * AGL_GMX_RX_PRT_INFO = state information for the ports
 *
 *
 * Notes:
 * COMMIT[0], DROP[0] will be reset when MIX0_CTL[RESET] is set to 1.
 * COMMIT[1], DROP[1] will be reset when MIX1_CTL[RESET] is set to 1.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rx_prt_info_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_18_63 : 46;
        uint64_t drop           : 2;  /**< Port indication that data was dropped */
        uint64_t reserved_2_15  : 14;
        uint64_t commit         : 2;  /**< Port indication that SOP was accepted */
#else
        uint64_t commit         : 2;
        uint64_t reserved_2_15  : 14;
        uint64_t drop           : 2;
        uint64_t reserved_18_63 : 46;
#endif
    } s;
    struct cvmx_agl_gmx_rx_prt_info_s      cn52xx;
    struct cvmx_agl_gmx_rx_prt_info_s      cn52xxp1;
    struct cvmx_agl_gmx_rx_prt_info_cn56xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_17_63 : 47;
        uint64_t drop           : 1;  /**< Port indication that data was dropped */
        uint64_t reserved_1_15  : 15;
        uint64_t commit         : 1;  /**< Port indication that SOP was accepted */
#else
        uint64_t commit         : 1;
        uint64_t reserved_1_15  : 15;
        uint64_t drop           : 1;
        uint64_t reserved_17_63 : 47;
#endif
    } cn56xx;
    struct cvmx_agl_gmx_rx_prt_info_cn56xx cn56xxp1;
} cvmx_agl_gmx_rx_prt_info_t;

/**
 * cvmx_agl_gmx_rx_tx_status
 *
 * AGL_GMX_RX_TX_STATUS = GMX RX/TX Status
 *
 *
 * Notes:
 * RX[0], TX[0] will be reset when MIX0_CTL[RESET] is set to 1.
 * RX[1], TX[1] will be reset when MIX1_CTL[RESET] is set to 1.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_rx_tx_status_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_6_63 : 58;
        uint64_t tx            : 2;  /**< Transmit data since last read */
        uint64_t reserved_2_3  : 2;
        uint64_t rx            : 2;  /**< Receive data since last read */
#else
        uint64_t rx            : 2;
        uint64_t reserved_2_3  : 2;
        uint64_t tx            : 2;
        uint64_t reserved_6_63 : 58;
#endif
    } s;
    struct cvmx_agl_gmx_rx_tx_status_s      cn52xx;
    struct cvmx_agl_gmx_rx_tx_status_s      cn52xxp1;
    struct cvmx_agl_gmx_rx_tx_status_cn56xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_5_63 : 59;
        uint64_t tx            : 1;  /**< Transmit data since last read */
        uint64_t reserved_1_3  : 3;
        uint64_t rx            : 1;  /**< Receive data since last read */
#else
        uint64_t rx            : 1;
        uint64_t reserved_1_3  : 3;
        uint64_t tx            : 1;
        uint64_t reserved_5_63 : 59;
#endif
    } cn56xx;
    struct cvmx_agl_gmx_rx_tx_status_cn56xx cn56xxp1;
} cvmx_agl_gmx_rx_tx_status_t;

/**
 * cvmx_agl_gmx_smac#
 *
 * AGL_GMX_SMAC = MII SMAC
 *
 *
 * Notes:
 * Additionally reset when MIX_CTL[RESET] is set to 1.
 *
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_smacx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_48_63 : 16;
        uint64_t smac           : 48; /**< The SMAC field is used for generating and
                                           accepting Control Pause packets */
#else
        uint64_t smac           : 48;
        uint64_t reserved_48_63 : 16;
#endif
    } s;
    struct cvmx_agl_gmx_smacx_s cn52xx;
    struct cvmx_agl_gmx_smacx_s cn52xxp1;
    struct cvmx_agl_gmx_smacx_s cn56xx;
    struct cvmx_agl_gmx_smacx_s cn56xxp1;
} cvmx_agl_gmx_smacx_t;

/**
 * cvmx_agl_gmx_stat_bp
 *
 * AGL_GMX_STAT_BP = Number of cycles that the TX/Stats block has held up operation
 *
 *
 * Notes:
 * Additionally reset when both MIX0/1_CTL[RESET] are set to 1.
 *
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_stat_bp_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_17_63 : 47;
        uint64_t bp             : 1;  /**< Current BP state */
        uint64_t cnt            : 16; /**< Number of cycles that BP has been asserted
                                           Saturating counter */
#else
        uint64_t cnt            : 16;
        uint64_t bp             : 1;
        uint64_t reserved_17_63 : 47;
#endif
    } s;
    struct cvmx_agl_gmx_stat_bp_s cn52xx;
    struct cvmx_agl_gmx_stat_bp_s cn52xxp1;
    struct cvmx_agl_gmx_stat_bp_s cn56xx;
    struct cvmx_agl_gmx_stat_bp_s cn56xxp1;
} cvmx_agl_gmx_stat_bp_t;

/**
 * cvmx_agl_gmx_tx#_append
 *
 * AGL_GMX_TX_APPEND = MII TX Append Control
 *
 *
 * Notes:
 * Additionally reset when MIX_CTL[RESET] is set to 1.
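 *
 * For a standard Ethernet configuration, software typically enables all
 * three append stages; an illustrative sketch (the accessors and the
 * CVMX_AGL_GMX_TXX_APPEND(port) address macro are assumed from the companion
 * cvmx headers):
 *
 * @verbatim
 * cvmx_agl_gmx_txx_append_t app;
 * app.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_APPEND(port));
 * app.s.preamble = 1;  // prepend the Ethernet preamble
 * app.s.pad      = 1;  // pad to the AGL_GMX_TX_MIN_PKT minimum
 * app.s.fcs      = 1;  // append the Ethernet FCS
 * cvmx_write_csr(CVMX_AGL_GMX_TXX_APPEND(port), app.u64);
 * @endverbatim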
 *
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_txx_append_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_4_63 : 60;
        uint64_t force_fcs     : 1;  /**< Append the Ethernet FCS on each pause packet
                                          when FCS is clear. Pause packets are normally
                                          padded to 60 bytes. If
                                          AGL_GMX_TX_MIN_PKT[MIN_SIZE] exceeds 59, then
                                          FORCE_FCS will not be used. */
        uint64_t fcs           : 1;  /**< Append the Ethernet FCS on each packet */
        uint64_t pad           : 1;  /**< Append PAD bytes such that min sized */
        uint64_t preamble      : 1;  /**< Prepend the Ethernet preamble on each transfer */
#else
        uint64_t preamble      : 1;
        uint64_t pad           : 1;
        uint64_t fcs           : 1;
        uint64_t force_fcs     : 1;
        uint64_t reserved_4_63 : 60;
#endif
    } s;
    struct cvmx_agl_gmx_txx_append_s cn52xx;
    struct cvmx_agl_gmx_txx_append_s cn52xxp1;
    struct cvmx_agl_gmx_txx_append_s cn56xx;
    struct cvmx_agl_gmx_txx_append_s cn56xxp1;
} cvmx_agl_gmx_txx_append_t;

/**
 * cvmx_agl_gmx_tx#_ctl
 *
 * AGL_GMX_TX_CTL = TX Control register
 *
 *
 * Notes:
 * Additionally reset when MIX_CTL[RESET] is set to 1.
 *
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_txx_ctl_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_2_63 : 62;
        uint64_t xsdef_en      : 1;  /**< Enables the excessive deferral check for stats
                                          and interrupts */
        uint64_t xscol_en      : 1;  /**< Enables the excessive collision check for stats
                                          and interrupts */
#else
        uint64_t xscol_en      : 1;
        uint64_t xsdef_en      : 1;
        uint64_t reserved_2_63 : 62;
#endif
    } s;
    struct cvmx_agl_gmx_txx_ctl_s cn52xx;
    struct cvmx_agl_gmx_txx_ctl_s cn52xxp1;
    struct cvmx_agl_gmx_txx_ctl_s cn56xx;
    struct cvmx_agl_gmx_txx_ctl_s cn56xxp1;
} cvmx_agl_gmx_txx_ctl_t;

/**
 * cvmx_agl_gmx_tx#_min_pkt
 *
 * AGL_GMX_TX_MIN_PKT = MII TX Min Size Packet (PAD up to min size)
 *
 *
 * Notes:
 * Additionally reset when MIX_CTL[RESET] is set to 1.
 *
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_txx_min_pkt_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_8_63 : 56;
        uint64_t min_size      : 8;  /**< Min frame in bytes before the FCS is applied
                                          Padding is only appended when
                                          AGL_GMX_TX_APPEND[PAD] for the corresponding
                                          MII port is set.
                                          Packets will be padded to MIN_SIZE+1
                                          The reset value will pad to 60 bytes. */
#else
        uint64_t min_size      : 8;
        uint64_t reserved_8_63 : 56;
#endif
    } s;
    struct cvmx_agl_gmx_txx_min_pkt_s cn52xx;
    struct cvmx_agl_gmx_txx_min_pkt_s cn52xxp1;
    struct cvmx_agl_gmx_txx_min_pkt_s cn56xx;
    struct cvmx_agl_gmx_txx_min_pkt_s cn56xxp1;
} cvmx_agl_gmx_txx_min_pkt_t;

/**
 * cvmx_agl_gmx_tx#_pause_pkt_interval
 *
 * AGL_GMX_TX_PAUSE_PKT_INTERVAL = MII TX Pause Packet transmission interval - how often PAUSE packets will be sent
 *
 *
 * Notes:
 * Choosing proper values of AGL_GMX_TX_PAUSE_PKT_TIME[TIME] and
 * AGL_GMX_TX_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to the system
 * designer. It is suggested that TIME be much greater than INTERVAL and
 * AGL_GMX_TX_PAUSE_ZERO[SEND] be set. This allows a periodic refresh of the PAUSE
 * count and then when the backpressure condition is lifted, a PAUSE packet
 * with TIME==0 will be sent indicating that Octane is ready for additional
 * data.
 *
 * If the system chooses to not set AGL_GMX_TX_PAUSE_ZERO[SEND], then it is
 * suggested that TIME and INTERVAL are programmed such that they satisfy the
 * following rule...
 *
 *     INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size)
 *
 * where largest_pkt_size is the largest packet that the system can send
 * (normally 1518B), IFG is the interframe gap and pause_pkt_size is the size
 * of the PAUSE packet (normally 64B).
 *
 * Additionally reset when MIX_CTL[RESET] is set to 1.
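 *
 * As a worked example of the rule above (illustrative arithmetic, with the
 * byte counts converted to the 512 bit-time units in which TIME and INTERVAL
 * are expressed): largest_pkt_size = 1518B = 12144 bits, IFG = 12B = 96 bits
 * and pause_pkt_size = 64B = 512 bits give
 *
 *     (12144 + 96 + 512) / 512 = 24.9  ->  25 units
 *
 * so for TIME = 256, any INTERVAL <= 256 - 25 = 231 satisfies
 * INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size).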
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_txx_pause_pkt_interval_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_16_63 : 48;
        uint64_t interval       : 16; /**< Arbitrate for a pause packet every
                                           (INTERVAL*512) bit-times.
                                           Normally, 0 < INTERVAL < AGL_GMX_TX_PAUSE_PKT_TIME
                                           INTERVAL=0 will only send a single PAUSE
                                           packet for each backpressure event */
#else
        uint64_t interval       : 16;
        uint64_t reserved_16_63 : 48;
#endif
    } s;
    struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn52xx;
    struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn52xxp1;
    struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn56xx;
    struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn56xxp1;
} cvmx_agl_gmx_txx_pause_pkt_interval_t;

/**
 * cvmx_agl_gmx_tx#_pause_pkt_time
 *
 * AGL_GMX_TX_PAUSE_PKT_TIME = MII TX Pause Packet pause_time field
 *
 *
 * Notes:
 * Choosing proper values of AGL_GMX_TX_PAUSE_PKT_TIME[TIME] and
 * AGL_GMX_TX_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to the system
 * designer. It is suggested that TIME be much greater than INTERVAL and
 * AGL_GMX_TX_PAUSE_ZERO[SEND] be set. This allows a periodic refresh of the PAUSE
 * count and then when the backpressure condition is lifted, a PAUSE packet
 * with TIME==0 will be sent indicating that Octane is ready for additional
 * data.
 *
 * If the system chooses to not set AGL_GMX_TX_PAUSE_ZERO[SEND], then it is
 * suggested that TIME and INTERVAL are programmed such that they satisfy the
 * following rule...
 *
 *     INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size)
 *
 * where largest_pkt_size is the largest packet that the system can send
 * (normally 1518B), IFG is the interframe gap and pause_pkt_size is the size
 * of the PAUSE packet (normally 64B).
 *
 * Additionally reset when MIX_CTL[RESET] is set to 1.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_txx_pause_pkt_time_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_16_63 : 48;
        uint64_t time           : 16; /**< The pause_time field placed in outbound
                                           pause pkts
                                           pause_time is in 512 bit-times
                                           Normally, TIME > AGL_GMX_TX_PAUSE_PKT_INTERVAL */
#else
        uint64_t time           : 16;
        uint64_t reserved_16_63 : 48;
#endif
    } s;
    struct cvmx_agl_gmx_txx_pause_pkt_time_s cn52xx;
    struct cvmx_agl_gmx_txx_pause_pkt_time_s cn52xxp1;
    struct cvmx_agl_gmx_txx_pause_pkt_time_s cn56xx;
    struct cvmx_agl_gmx_txx_pause_pkt_time_s cn56xxp1;
} cvmx_agl_gmx_txx_pause_pkt_time_t;

/**
 * cvmx_agl_gmx_tx#_pause_togo
 *
 * AGL_GMX_TX_PAUSE_TOGO = MII TX Amount of time remaining to backpressure
 *
 *
 * Notes:
 * Additionally reset when MIX_CTL[RESET] is set to 1.
 *
 */
typedef union
{
    uint64_t u64;
    struct cvmx_agl_gmx_txx_pause_togo_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_16_63 : 48;
        uint64_t time           : 16; /**< Amount of time remaining to backpressure */
#else
        uint64_t time           : 16;
        uint64_t reserved_16_63 : 48;
#endif
    } s;
    struct cvmx_agl_gmx_txx_pause_togo_s cn52xx;
    struct cvmx_agl_gmx_txx_pause_togo_s cn52xxp1;
    struct cvmx_agl_gmx_txx_pause_togo_s cn56xx;
    struct cvmx_agl_gmx_txx_pause_togo_s cn56xxp1;
} cvmx_agl_gmx_txx_pause_togo_t;

/**
 * cvmx_agl_gmx_tx#_pause_zero
 *
 * AGL_GMX_TX_PAUSE_ZERO = MII TX Amount of time remaining to backpressure
 *
 *
 * Notes:
 * Additionally reset when MIX_CTL[RESET] is set to 1.
* */ typedef union { uint64_t u64; struct cvmx_agl_gmx_txx_pause_zero_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t send : 1; /**< When the backpressure condition clears, send PAUSE packet with pause_time of zero to enable the channel */ #else uint64_t send : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_agl_gmx_txx_pause_zero_s cn52xx; struct cvmx_agl_gmx_txx_pause_zero_s cn52xxp1; struct cvmx_agl_gmx_txx_pause_zero_s cn56xx; struct cvmx_agl_gmx_txx_pause_zero_s cn56xxp1; } cvmx_agl_gmx_txx_pause_zero_t; /** * cvmx_agl_gmx_tx#_soft_pause * * AGL_GMX_TX_SOFT_PAUSE = MII TX Software Pause * * * Notes: * Additionally reset when MIX_CTL[RESET] is set to 1. * */ typedef union { uint64_t u64; struct cvmx_agl_gmx_txx_soft_pause_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t time : 16; /**< Back off the TX bus for (TIME*512) bit-times for full-duplex operation only */ #else uint64_t time : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_agl_gmx_txx_soft_pause_s cn52xx; struct cvmx_agl_gmx_txx_soft_pause_s cn52xxp1; struct cvmx_agl_gmx_txx_soft_pause_s cn56xx; struct cvmx_agl_gmx_txx_soft_pause_s cn56xxp1; } cvmx_agl_gmx_txx_soft_pause_t; /** * cvmx_agl_gmx_tx#_stat0 * * AGL_GMX_TX_STAT0 = AGL_GMX_TX_STATS_XSDEF / AGL_GMX_TX_STATS_XSCOL * * * Notes: * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set * - Counters will wrap * - Not reset when MIX*_CTL[RESET] is set to 1. */ typedef union { uint64_t u64; struct cvmx_agl_gmx_txx_stat0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t xsdef : 32; /**< Number of packets dropped (never successfully sent) due to excessive deferral */ uint64_t xscol : 32; /**< Number of packets dropped (never successfully sent) due to excessive collision. Defined by AGL_GMX_TX_COL_ATTEMPT[LIMIT]. */ #else uint64_t xscol : 32; uint64_t xsdef : 32; #endif } s; struct cvmx_agl_gmx_txx_stat0_s cn52xx; struct cvmx_agl_gmx_txx_stat0_s cn52xxp1; struct cvmx_agl_gmx_txx_stat0_s cn56xx; struct cvmx_agl_gmx_txx_stat0_s cn56xxp1; } cvmx_agl_gmx_txx_stat0_t; /** * cvmx_agl_gmx_tx#_stat1 * * AGL_GMX_TX_STAT1 = AGL_GMX_TX_STATS_SCOL / AGL_GMX_TX_STATS_MCOL * * * Notes: * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set * - Counters will wrap * - Not reset when MIX*_CTL[RESET] is set to 1. */ typedef union { uint64_t u64; struct cvmx_agl_gmx_txx_stat1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t scol : 32; /**< Number of packets sent with a single collision */ uint64_t mcol : 32; /**< Number of packets sent with multiple collisions but < AGL_GMX_TX_COL_ATTEMPT[LIMIT]. */ #else uint64_t mcol : 32; uint64_t scol : 32; #endif } s; struct cvmx_agl_gmx_txx_stat1_s cn52xx; struct cvmx_agl_gmx_txx_stat1_s cn52xxp1; struct cvmx_agl_gmx_txx_stat1_s cn56xx; struct cvmx_agl_gmx_txx_stat1_s cn56xxp1; } cvmx_agl_gmx_txx_stat1_t; /** * cvmx_agl_gmx_tx#_stat2 * * AGL_GMX_TX_STAT2 = AGL_GMX_TX_STATS_OCTS * * * Notes: * - Octet counts are the sum of all data transmitted on the wire including * packet data, pad bytes, fcs bytes, pause bytes, and jam bytes. The octet * counts do not include PREAMBLE byte or EXTEND cycles. * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set * - Counters will wrap * - Not reset when MIX*_CTL[RESET] is set to 1.
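*
* Read sketch (illustrative; cvmx_read_csr() and the address macro are
* assumed from the Octeon executive). With AGL_GMX_TX_STATS_CTL[RD_CLR]
* set, one 64-bit read both returns and clears the counter:
*
*   cvmx_agl_gmx_txx_stat2_t stat2;
*   stat2.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT2(port));
*   uint64_t octets = stat2.s.octs;  // 48-bit running octet count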
*/ typedef union { uint64_t u64; struct cvmx_agl_gmx_txx_stat2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_48_63 : 16; uint64_t octs : 48; /**< Number of total octets sent on the interface. Does not count octets from frames that were truncated due to collisions in halfdup mode. */ #else uint64_t octs : 48; uint64_t reserved_48_63 : 16; #endif } s; struct cvmx_agl_gmx_txx_stat2_s cn52xx; struct cvmx_agl_gmx_txx_stat2_s cn52xxp1; struct cvmx_agl_gmx_txx_stat2_s cn56xx; struct cvmx_agl_gmx_txx_stat2_s cn56xxp1; } cvmx_agl_gmx_txx_stat2_t; /** * cvmx_agl_gmx_tx#_stat3 * * AGL_GMX_TX_STAT3 = AGL_GMX_TX_STATS_PKTS * * * Notes: * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set * - Counters will wrap * - Not reset when MIX*_CTL[RESET] is set to 1. */ typedef union { uint64_t u64; struct cvmx_agl_gmx_txx_stat3_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t pkts : 32; /**< Number of total frames sent on the interface. Does not count frames that were truncated due to collisions in halfdup mode. */ #else uint64_t pkts : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_agl_gmx_txx_stat3_s cn52xx; struct cvmx_agl_gmx_txx_stat3_s cn52xxp1; struct cvmx_agl_gmx_txx_stat3_s cn56xx; struct cvmx_agl_gmx_txx_stat3_s cn56xxp1; } cvmx_agl_gmx_txx_stat3_t; /** * cvmx_agl_gmx_tx#_stat4 * * AGL_GMX_TX_STAT4 = AGL_GMX_TX_STATS_HIST1 (64) / AGL_GMX_TX_STATS_HIST0 (<64) * * * Notes: * - Packet length is the sum of all data transmitted on the wire for the given * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam * bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles. * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set * - Counters will wrap * - Not reset when MIX*_CTL[RESET] is set to 1. */ typedef union { uint64_t u64; struct cvmx_agl_gmx_txx_stat4_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t hist1 : 32; /**< Number of packets sent with an octet count of 64. */ uint64_t hist0 : 32; /**< Number of packets sent with an octet count of < 64. */ #else uint64_t hist0 : 32; uint64_t hist1 : 32; #endif } s; struct cvmx_agl_gmx_txx_stat4_s cn52xx; struct cvmx_agl_gmx_txx_stat4_s cn52xxp1; struct cvmx_agl_gmx_txx_stat4_s cn56xx; struct cvmx_agl_gmx_txx_stat4_s cn56xxp1; } cvmx_agl_gmx_txx_stat4_t; /** * cvmx_agl_gmx_tx#_stat5 * * AGL_GMX_TX_STAT5 = AGL_GMX_TX_STATS_HIST3 (128- 255) / AGL_GMX_TX_STATS_HIST2 (65- 127) * * * Notes: * - Packet length is the sum of all data transmitted on the wire for the given * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam * bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles. * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set * - Counters will wrap * - Not reset when MIX*_CTL[RESET] is set to 1. */ typedef union { uint64_t u64; struct cvmx_agl_gmx_txx_stat5_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t hist3 : 32; /**< Number of packets sent with an octet count of 128 - 255. */ uint64_t hist2 : 32; /**< Number of packets sent with an octet count of 65 - 127.
*/ #else uint64_t hist2 : 32; uint64_t hist3 : 32; #endif } s; struct cvmx_agl_gmx_txx_stat5_s cn52xx; struct cvmx_agl_gmx_txx_stat5_s cn52xxp1; struct cvmx_agl_gmx_txx_stat5_s cn56xx; struct cvmx_agl_gmx_txx_stat5_s cn56xxp1; } cvmx_agl_gmx_txx_stat5_t; /** * cvmx_agl_gmx_tx#_stat6 * * AGL_GMX_TX_STAT6 = AGL_GMX_TX_STATS_HIST5 (512-1023) / AGL_GMX_TX_STATS_HIST4 (256-511) * * * Notes: * - Packet length is the sum of all data transmitted on the wire for the given * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam * bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles. * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set * - Counters will wrap * - Not reset when MIX*_CTL[RESET] is set to 1. */ typedef union { uint64_t u64; struct cvmx_agl_gmx_txx_stat6_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t hist5 : 32; /**< Number of packets sent with an octet count of 512 - 1023. */ uint64_t hist4 : 32; /**< Number of packets sent with an octet count of 256 - 511. */ #else uint64_t hist4 : 32; uint64_t hist5 : 32; #endif } s; struct cvmx_agl_gmx_txx_stat6_s cn52xx; struct cvmx_agl_gmx_txx_stat6_s cn52xxp1; struct cvmx_agl_gmx_txx_stat6_s cn56xx; struct cvmx_agl_gmx_txx_stat6_s cn56xxp1; } cvmx_agl_gmx_txx_stat6_t; /** * cvmx_agl_gmx_tx#_stat7 * * AGL_GMX_TX_STAT7 = AGL_GMX_TX_STATS_HIST7 (1024-1518) / AGL_GMX_TX_STATS_HIST6 (>1518) * * * Notes: * - Packet length is the sum of all data transmitted on the wire for the given * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam * bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles. * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set * - Counters will wrap * - Not reset when MIX*_CTL[RESET] is set to 1. */ typedef union { uint64_t u64; struct cvmx_agl_gmx_txx_stat7_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t hist7 : 32; /**< Number of packets sent with an octet count of > 1518. */ uint64_t hist6 : 32; /**< Number of packets sent with an octet count of 1024 - 1518. */ #else uint64_t hist6 : 32; uint64_t hist7 : 32; #endif } s; struct cvmx_agl_gmx_txx_stat7_s cn52xx; struct cvmx_agl_gmx_txx_stat7_s cn52xxp1; struct cvmx_agl_gmx_txx_stat7_s cn56xx; struct cvmx_agl_gmx_txx_stat7_s cn56xxp1; } cvmx_agl_gmx_txx_stat7_t; /** * cvmx_agl_gmx_tx#_stat8 * * AGL_GMX_TX_STAT8 = AGL_GMX_TX_STATS_MCST / AGL_GMX_TX_STATS_BCST * * * Notes: * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set * - Counters will wrap * - Note, GMX determines if the packet is MCST or BCST from the DMAC of the * packet. GMX assumes that the DMAC lies in the first 6 bytes of the packet * as per the 802.3 frame definition. If the system requires additional data * before the L2 header, then the MCST and BCST counters may not reflect * reality and should be ignored by software. * - Not reset when MIX*_CTL[RESET] is set to 1. */ typedef union { uint64_t u64; struct cvmx_agl_gmx_txx_stat8_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t mcst : 32; /**< Number of packets sent to multicast DMAC. Does not include BCST packets. */ uint64_t bcst : 32; /**< Number of packets sent to broadcast DMAC. Does not include MCST packets.
*/ #else uint64_t bcst : 32; uint64_t mcst : 32; #endif } s; struct cvmx_agl_gmx_txx_stat8_s cn52xx; struct cvmx_agl_gmx_txx_stat8_s cn52xxp1; struct cvmx_agl_gmx_txx_stat8_s cn56xx; struct cvmx_agl_gmx_txx_stat8_s cn56xxp1; } cvmx_agl_gmx_txx_stat8_t; /** * cvmx_agl_gmx_tx#_stat9 * * AGL_GMX_TX_STAT9 = AGL_GMX_TX_STATS_UNDFLW / AGL_GMX_TX_STATS_CTL * * * Notes: * - Cleared either by a write (of any value) or a read when AGL_GMX_TX_STATS_CTL[RD_CLR] is set * - Counters will wrap * - Not reset when MIX*_CTL[RESET] is set to 1. */ typedef union { uint64_t u64; struct cvmx_agl_gmx_txx_stat9_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t undflw : 32; /**< Number of underflow packets */ uint64_t ctl : 32; /**< Number of Control packets (PAUSE flow control) generated by GMX. It does not include control packets forwarded or generated by the PP's. */ #else uint64_t ctl : 32; uint64_t undflw : 32; #endif } s; struct cvmx_agl_gmx_txx_stat9_s cn52xx; struct cvmx_agl_gmx_txx_stat9_s cn52xxp1; struct cvmx_agl_gmx_txx_stat9_s cn56xx; struct cvmx_agl_gmx_txx_stat9_s cn56xxp1; } cvmx_agl_gmx_txx_stat9_t; /** * cvmx_agl_gmx_tx#_stats_ctl * * AGL_GMX_TX_STATS_CTL = TX Stats Control register * * * Notes: * Additionally reset when MIX_CTL[RESET] is set to 1. * */ typedef union { uint64_t u64; struct cvmx_agl_gmx_txx_stats_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t rd_clr : 1; /**< Stats registers will clear on reads */ #else uint64_t rd_clr : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_agl_gmx_txx_stats_ctl_s cn52xx; struct cvmx_agl_gmx_txx_stats_ctl_s cn52xxp1; struct cvmx_agl_gmx_txx_stats_ctl_s cn56xx; struct cvmx_agl_gmx_txx_stats_ctl_s cn56xxp1; } cvmx_agl_gmx_txx_stats_ctl_t; /** * cvmx_agl_gmx_tx#_thresh * * AGL_GMX_TX_THRESH = MII TX Threshold * * * Notes: * Additionally reset when MIX_CTL[RESET] is set to 1. * */ typedef union { uint64_t u64; struct cvmx_agl_gmx_txx_thresh_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_6_63 : 58; uint64_t cnt : 6; /**< Number of 16B ticks to accumulate in the TX FIFO before sending on the MII interface. This register should be large enough to prevent underflow on the MII interface and must never be set below 4. This register cannot exceed the TX FIFO depth, which is 32 16B entries. */ #else uint64_t cnt : 6; uint64_t reserved_6_63 : 58; #endif } s; struct cvmx_agl_gmx_txx_thresh_s cn52xx; struct cvmx_agl_gmx_txx_thresh_s cn52xxp1; struct cvmx_agl_gmx_txx_thresh_s cn56xx; struct cvmx_agl_gmx_txx_thresh_s cn56xxp1; } cvmx_agl_gmx_txx_thresh_t; /** * cvmx_agl_gmx_tx_bp * * AGL_GMX_TX_BP = MII TX BackPressure Register * * * Notes: * BP[0] will be reset when MIX0_CTL[RESET] is set to 1. * BP[1] will be reset when MIX1_CTL[RESET] is set to 1.
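*
* Decode sketch (illustrative; cvmx_read_csr() and the address macro are
* assumed from the Octeon executive). Each MII port maps to one BP status
* bit:
*
*   cvmx_agl_gmx_tx_bp_t bp;
*   bp.u64 = cvmx_read_csr(CVMX_AGL_GMX_TX_BP);
*   int port0_bp = bp.s.bp & 1;         // 1 = port 0 back pressured
*   int port1_bp = (bp.s.bp >> 1) & 1;  // second bit exists on CN52XX only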
*/ typedef union { uint64_t u64; struct cvmx_agl_gmx_tx_bp_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t bp : 2; /**< Port BackPressure status 0=Port is available 1=Port should be back pressured */ #else uint64_t bp : 2; uint64_t reserved_2_63 : 62; #endif } s; struct cvmx_agl_gmx_tx_bp_s cn52xx; struct cvmx_agl_gmx_tx_bp_s cn52xxp1; struct cvmx_agl_gmx_tx_bp_cn56xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t bp : 1; /**< Port BackPressure status 0=Port is available 1=Port should be back pressured */ #else uint64_t bp : 1; uint64_t reserved_1_63 : 63; #endif } cn56xx; struct cvmx_agl_gmx_tx_bp_cn56xx cn56xxp1; } cvmx_agl_gmx_tx_bp_t; /** * cvmx_agl_gmx_tx_col_attempt * * AGL_GMX_TX_COL_ATTEMPT = MII TX collision attempts before dropping frame * * * Notes: * Additionally reset when both MIX0/1_CTL[RESET] are set to 1. * */ typedef union { uint64_t u64; struct cvmx_agl_gmx_tx_col_attempt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t limit : 5; /**< Collision Attempts */ #else uint64_t limit : 5; uint64_t reserved_5_63 : 59; #endif } s; struct cvmx_agl_gmx_tx_col_attempt_s cn52xx; struct cvmx_agl_gmx_tx_col_attempt_s cn52xxp1; struct cvmx_agl_gmx_tx_col_attempt_s cn56xx; struct cvmx_agl_gmx_tx_col_attempt_s cn56xxp1; } cvmx_agl_gmx_tx_col_attempt_t; /** * cvmx_agl_gmx_tx_ifg * * Common * * * AGL_GMX_TX_IFG = MII TX Interframe Gap * * Notes: * * Programming IFG1 and IFG2. * * For half-duplex systems that require IEEE 802.3 compatibility, IFG1 must * be in the range of 1-8, IFG2 must be in the range of 4-12, and the * IFG1+IFG2 sum must be 12. * * For full-duplex systems that require IEEE 802.3 compatibility, IFG1 must * be in the range of 1-11, IFG2 must be in the range of 1-11, and the * IFG1+IFG2 sum must be 12. * * For all other systems, IFG1 and IFG2 can be any value in the range of * 1-15, allowing for a total possible IFG sum of 2-30. * * Additionally reset when both MIX0/1_CTL[RESET] are set to 1. */ typedef union { uint64_t u64; struct cvmx_agl_gmx_tx_ifg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t ifg2 : 4; /**< 1/3 of the interframe gap timing. If CRS is detected during IFG2, then the interFrameSpacing timer is not reset and a frame is transmitted once the timer expires. */ uint64_t ifg1 : 4; /**< 2/3 of the interframe gap timing. If CRS is detected during IFG1, then the interFrameSpacing timer is reset and a frame is not transmitted. */ #else uint64_t ifg1 : 4; uint64_t ifg2 : 4; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_agl_gmx_tx_ifg_s cn52xx; struct cvmx_agl_gmx_tx_ifg_s cn52xxp1; struct cvmx_agl_gmx_tx_ifg_s cn56xx; struct cvmx_agl_gmx_tx_ifg_s cn56xxp1; } cvmx_agl_gmx_tx_ifg_t; /** * cvmx_agl_gmx_tx_int_en * * AGL_GMX_TX_INT_EN = Interrupt Enable * * * Notes: * UNDFLW[0], XSCOL[0], XSDEF[0], LATE_COL[0] will be reset when MIX0_CTL[RESET] is set to 1. * UNDFLW[1], XSCOL[1], XSDEF[1], LATE_COL[1] will be reset when MIX1_CTL[RESET] is set to 1. * PKO_NXA will be reset when both MIX0/1_CTL[RESET] are set to 1.
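*
* Enable sketch (illustrative; cvmx_write_csr() and the address macro are
* assumed from the Octeon executive):
*
*   cvmx_agl_gmx_tx_int_en_t en;
*   en.u64 = 0;
*   en.s.pko_nxa = 1;    // out-of-range port address from PKO
*   en.s.undflw  = 0x3;  // TX underflow on both MII ports (CN52XX layout)
*   cvmx_write_csr(CVMX_AGL_GMX_TX_INT_EN, en.u64);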
*/ typedef union { uint64_t u64; struct cvmx_agl_gmx_tx_int_en_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_18_63 : 46; uint64_t late_col : 2; /**< TX Late Collision */ uint64_t reserved_14_15 : 2; uint64_t xsdef : 2; /**< TX Excessive deferral (MII/halfdup mode only) */ uint64_t reserved_10_11 : 2; uint64_t xscol : 2; /**< TX Excessive collisions (MII/halfdup mode only) */ uint64_t reserved_4_7 : 4; uint64_t undflw : 2; /**< TX Underflow (MII mode only) */ uint64_t reserved_1_1 : 1; uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */ #else uint64_t pko_nxa : 1; uint64_t reserved_1_1 : 1; uint64_t undflw : 2; uint64_t reserved_4_7 : 4; uint64_t xscol : 2; uint64_t reserved_10_11 : 2; uint64_t xsdef : 2; uint64_t reserved_14_15 : 2; uint64_t late_col : 2; uint64_t reserved_18_63 : 46; #endif } s; struct cvmx_agl_gmx_tx_int_en_s cn52xx; struct cvmx_agl_gmx_tx_int_en_s cn52xxp1; struct cvmx_agl_gmx_tx_int_en_cn56xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_17_63 : 47; uint64_t late_col : 1; /**< TX Late Collision */ uint64_t reserved_13_15 : 3; uint64_t xsdef : 1; /**< TX Excessive deferral (MII/halfdup mode only) */ uint64_t reserved_9_11 : 3; uint64_t xscol : 1; /**< TX Excessive collisions (MII/halfdup mode only) */ uint64_t reserved_3_7 : 5; uint64_t undflw : 1; /**< TX Underflow (MII mode only) */ uint64_t reserved_1_1 : 1; uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */ #else uint64_t pko_nxa : 1; uint64_t reserved_1_1 : 1; uint64_t undflw : 1; uint64_t reserved_3_7 : 5; uint64_t xscol : 1; uint64_t reserved_9_11 : 3; uint64_t xsdef : 1; uint64_t reserved_13_15 : 3; uint64_t late_col : 1; uint64_t reserved_17_63 : 47; #endif } cn56xx; struct cvmx_agl_gmx_tx_int_en_cn56xx cn56xxp1; } cvmx_agl_gmx_tx_int_en_t; /** * cvmx_agl_gmx_tx_int_reg * * AGL_GMX_TX_INT_REG = Interrupt Register * * * Notes: * UNDFLW[0], XSCOL[0], XSDEF[0], LATE_COL[0] will be reset when MIX0_CTL[RESET] is set to 1. * UNDFLW[1], XSCOL[1], XSDEF[1], LATE_COL[1] will be reset when MIX1_CTL[RESET] is set to 1. * PKO_NXA will be reset when both MIX0/1_CTL[RESET] are set to 1.
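*
* Decode sketch (illustrative; whether a bit is cleared by writing 1 back
* is not stated in this file, so the acknowledge below is an assumption to
* check against the hardware manual):
*
*   cvmx_agl_gmx_tx_int_reg_t isr;
*   isr.u64 = cvmx_read_csr(CVMX_AGL_GMX_TX_INT_REG);
*   if (isr.s.undflw & 1)
*       handle_tx_underflow(0);              // hypothetical handler, MII0
*   cvmx_write_csr(CVMX_AGL_GMX_TX_INT_REG, isr.u64);  // ack what was seen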
*/ typedef union { uint64_t u64; struct cvmx_agl_gmx_tx_int_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_18_63 : 46; uint64_t late_col : 2; /**< TX Late Collision */ uint64_t reserved_14_15 : 2; uint64_t xsdef : 2; /**< TX Excessive deferral (MII/halfdup mode only) */ uint64_t reserved_10_11 : 2; uint64_t xscol : 2; /**< TX Excessive collisions (MII/halfdup mode only) */ uint64_t reserved_4_7 : 4; uint64_t undflw : 2; /**< TX Underflow (MII mode only) */ uint64_t reserved_1_1 : 1; uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */ #else uint64_t pko_nxa : 1; uint64_t reserved_1_1 : 1; uint64_t undflw : 2; uint64_t reserved_4_7 : 4; uint64_t xscol : 2; uint64_t reserved_10_11 : 2; uint64_t xsdef : 2; uint64_t reserved_14_15 : 2; uint64_t late_col : 2; uint64_t reserved_18_63 : 46; #endif } s; struct cvmx_agl_gmx_tx_int_reg_s cn52xx; struct cvmx_agl_gmx_tx_int_reg_s cn52xxp1; struct cvmx_agl_gmx_tx_int_reg_cn56xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_17_63 : 47; uint64_t late_col : 1; /**< TX Late Collision */ uint64_t reserved_13_15 : 3; uint64_t xsdef : 1; /**< TX Excessive deferral (MII/halfdup mode only) */ uint64_t reserved_9_11 : 3; uint64_t xscol : 1; /**< TX Excessive collisions (MII/halfdup mode only) */ uint64_t reserved_3_7 : 5; uint64_t undflw : 1; /**< TX Underflow (MII mode only) */ uint64_t reserved_1_1 : 1; uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */ #else uint64_t pko_nxa : 1; uint64_t reserved_1_1 : 1; uint64_t undflw : 1; uint64_t reserved_3_7 : 5; uint64_t xscol : 1; uint64_t reserved_9_11 : 3; uint64_t xsdef : 1; uint64_t reserved_13_15 : 3; uint64_t late_col : 1; uint64_t reserved_17_63 : 47; #endif } cn56xx; struct cvmx_agl_gmx_tx_int_reg_cn56xx cn56xxp1; } cvmx_agl_gmx_tx_int_reg_t; /** * cvmx_agl_gmx_tx_jam * * AGL_GMX_TX_JAM = MII TX Jam Pattern * * * Notes: * Additionally reset when both MIX0/1_CTL[RESET] are set to 1. * */ typedef union { uint64_t u64; struct cvmx_agl_gmx_tx_jam_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t jam : 8; /**< Jam pattern */ #else uint64_t jam : 8; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_agl_gmx_tx_jam_s cn52xx; struct cvmx_agl_gmx_tx_jam_s cn52xxp1; struct cvmx_agl_gmx_tx_jam_s cn56xx; struct cvmx_agl_gmx_tx_jam_s cn56xxp1; } cvmx_agl_gmx_tx_jam_t; /** * cvmx_agl_gmx_tx_lfsr * * AGL_GMX_TX_LFSR = LFSR used to implement truncated binary exponential backoff * * * Notes: * Additionally reset when both MIX0/1_CTL[RESET] are set to 1. * */ typedef union { uint64_t u64; struct cvmx_agl_gmx_tx_lfsr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t lfsr : 16; /**< The current state of the LFSR used to feed random numbers to compute truncated binary exponential backoff. */ #else uint64_t lfsr : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_agl_gmx_tx_lfsr_s cn52xx; struct cvmx_agl_gmx_tx_lfsr_s cn52xxp1; struct cvmx_agl_gmx_tx_lfsr_s cn56xx; struct cvmx_agl_gmx_tx_lfsr_s cn56xxp1; } cvmx_agl_gmx_tx_lfsr_t; /** * cvmx_agl_gmx_tx_ovr_bp * * AGL_GMX_TX_OVR_BP = MII TX Override BackPressure * * * Notes: * IGN_FULL[0], BP[0], EN[0] will be reset when MIX0_CTL[RESET] is set to 1. * IGN_FULL[1], BP[1], EN[1] will be reset when MIX1_CTL[RESET] is set to 1. 
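*
* Override sketch (illustrative; cvmx_write_csr() and the address macro are
* assumed from the Octeon executive): force backpressure on MII port 0
* while leaving port 1 under hardware control:
*
*   cvmx_agl_gmx_tx_ovr_bp_t ovr;
*   ovr.u64 = 0;
*   ovr.s.en       = 0x1;  // override port 0 only
*   ovr.s.bp       = 0x1;  // assert backpressure on port 0
*   ovr.s.ign_full = 0x0;  // still honor RX FIFO full
*   cvmx_write_csr(CVMX_AGL_GMX_TX_OVR_BP, ovr.u64);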
*/ typedef union { uint64_t u64; struct cvmx_agl_gmx_tx_ovr_bp_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_10_63 : 54; uint64_t en : 2; /**< Per port Enable back pressure override */ uint64_t reserved_6_7 : 2; uint64_t bp : 2; /**< Port BackPressure status to use 0=Port is available 1=Port should be back pressured */ uint64_t reserved_2_3 : 2; uint64_t ign_full : 2; /**< Ignore the RX FIFO full when computing BP */ #else uint64_t ign_full : 2; uint64_t reserved_2_3 : 2; uint64_t bp : 2; uint64_t reserved_6_7 : 2; uint64_t en : 2; uint64_t reserved_10_63 : 54; #endif } s; struct cvmx_agl_gmx_tx_ovr_bp_s cn52xx; struct cvmx_agl_gmx_tx_ovr_bp_s cn52xxp1; struct cvmx_agl_gmx_tx_ovr_bp_cn56xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_9_63 : 55; uint64_t en : 1; /**< Per port Enable back pressure override */ uint64_t reserved_5_7 : 3; uint64_t bp : 1; /**< Port BackPressure status to use 0=Port is available 1=Port should be back pressured */ uint64_t reserved_1_3 : 3; uint64_t ign_full : 1; /**< Ignore the RX FIFO full when computing BP */ #else uint64_t ign_full : 1; uint64_t reserved_1_3 : 3; uint64_t bp : 1; uint64_t reserved_5_7 : 3; uint64_t en : 1; uint64_t reserved_9_63 : 55; #endif } cn56xx; struct cvmx_agl_gmx_tx_ovr_bp_cn56xx cn56xxp1; } cvmx_agl_gmx_tx_ovr_bp_t; /** * cvmx_agl_gmx_tx_pause_pkt_dmac * * AGL_GMX_TX_PAUSE_PKT_DMAC = MII TX Pause Packet DMAC field * * * Notes: * Additionally reset when both MIX0/1_CTL[RESET] are set to 1. * */ typedef union { uint64_t u64; struct cvmx_agl_gmx_tx_pause_pkt_dmac_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_48_63 : 16; uint64_t dmac : 48; /**< The DMAC field placed in outbound pause pkts */ #else uint64_t dmac : 48; uint64_t reserved_48_63 : 16; #endif } s; struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn52xx; struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn52xxp1; struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn56xx; struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn56xxp1; } cvmx_agl_gmx_tx_pause_pkt_dmac_t; /** * cvmx_agl_gmx_tx_pause_pkt_type * * AGL_GMX_TX_PAUSE_PKT_TYPE = MII TX Pause Packet TYPE field * * * Notes: * Additionally reset when both MIX0/1_CTL[RESET] are set to 1. * */ typedef union { uint64_t u64; struct cvmx_agl_gmx_tx_pause_pkt_type_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t type : 16; /**< The TYPE field placed in outbound pause pkts */ #else uint64_t type : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_agl_gmx_tx_pause_pkt_type_s cn52xx; struct cvmx_agl_gmx_tx_pause_pkt_type_s cn52xxp1; struct cvmx_agl_gmx_tx_pause_pkt_type_s cn56xx; struct cvmx_agl_gmx_tx_pause_pkt_type_s cn56xxp1; } cvmx_agl_gmx_tx_pause_pkt_type_t; /** * cvmx_asx#_gmii_rx_clk_set * * ASX_GMII_RX_CLK_SET = GMII Clock delay setting * */ typedef union { uint64_t u64; struct cvmx_asxx_gmii_rx_clk_set_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t setting : 5; /**< Setting to place on the RXCLK (GMII receive clk) delay line. The intrinsic delay can range from 50ps to 80ps per tap.
*/ #else uint64_t setting : 5; uint64_t reserved_5_63 : 59; #endif } s; struct cvmx_asxx_gmii_rx_clk_set_s cn30xx; struct cvmx_asxx_gmii_rx_clk_set_s cn31xx; struct cvmx_asxx_gmii_rx_clk_set_s cn50xx; } cvmx_asxx_gmii_rx_clk_set_t; /** * cvmx_asx#_gmii_rx_dat_set * * ASX_GMII_RX_DAT_SET = GMII Data delay setting * */ typedef union { uint64_t u64; struct cvmx_asxx_gmii_rx_dat_set_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t setting : 5; /**< Setting to place on the RXD (GMII receive data) delay lines. The intrinsic delay can range from 50ps to 80ps per tap. */ #else uint64_t setting : 5; uint64_t reserved_5_63 : 59; #endif } s; struct cvmx_asxx_gmii_rx_dat_set_s cn30xx; struct cvmx_asxx_gmii_rx_dat_set_s cn31xx; struct cvmx_asxx_gmii_rx_dat_set_s cn50xx; } cvmx_asxx_gmii_rx_dat_set_t; /** * cvmx_asx#_int_en * * ASX_INT_EN = Interrupt Enable * */ typedef union { uint64_t u64; struct cvmx_asxx_int_en_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t txpsh : 4; /**< TX FIFO overflow on RGMII port */ uint64_t txpop : 4; /**< TX FIFO underflow on RGMII port */ uint64_t ovrflw : 4; /**< RX FIFO overflow on RGMII port */ #else uint64_t ovrflw : 4; uint64_t txpop : 4; uint64_t txpsh : 4; uint64_t reserved_12_63 : 52; #endif } s; struct cvmx_asxx_int_en_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_11_63 : 53; uint64_t txpsh : 3; /**< TX FIFO overflow on RGMII port */ uint64_t reserved_7_7 : 1; uint64_t txpop : 3; /**< TX FIFO underflow on RGMII port */ uint64_t reserved_3_3 : 1; uint64_t ovrflw : 3; /**< RX FIFO overflow on RGMII port */ #else uint64_t ovrflw : 3; uint64_t reserved_3_3 : 1; uint64_t txpop : 3; uint64_t reserved_7_7 : 1; uint64_t txpsh : 3; uint64_t reserved_11_63 : 53; #endif } cn30xx; struct cvmx_asxx_int_en_cn30xx cn31xx; struct cvmx_asxx_int_en_s cn38xx; struct cvmx_asxx_int_en_s cn38xxp2; struct cvmx_asxx_int_en_cn30xx cn50xx; struct cvmx_asxx_int_en_s cn58xx; struct cvmx_asxx_int_en_s cn58xxp1; } cvmx_asxx_int_en_t; /** * cvmx_asx#_int_reg * * ASX_INT_REG = Interrupt Register * */ typedef union { uint64_t u64; struct cvmx_asxx_int_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t txpsh : 4; /**< TX FIFO overflow on RGMII port */ uint64_t txpop : 4; /**< TX FIFO underflow on RGMII port */ uint64_t ovrflw : 4; /**< RX FIFO overflow on RGMII port */ #else uint64_t ovrflw : 4; uint64_t txpop : 4; uint64_t txpsh : 4; uint64_t reserved_12_63 : 52; #endif } s; struct cvmx_asxx_int_reg_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_11_63 : 53; uint64_t txpsh : 3; /**< TX FIFO overflow on RGMII port */ uint64_t reserved_7_7 : 1; uint64_t txpop : 3; /**< TX FIFO underflow on RGMII port */ uint64_t reserved_3_3 : 1; uint64_t ovrflw : 3; /**< RX FIFO overflow on RGMII port */ #else uint64_t ovrflw : 3; uint64_t reserved_3_3 : 1; uint64_t txpop : 3; uint64_t reserved_7_7 : 1; uint64_t txpsh : 3; uint64_t reserved_11_63 : 53; #endif } cn30xx; struct cvmx_asxx_int_reg_cn30xx cn31xx; struct cvmx_asxx_int_reg_s cn38xx; struct cvmx_asxx_int_reg_s cn38xxp2; struct cvmx_asxx_int_reg_cn30xx cn50xx; struct cvmx_asxx_int_reg_s cn58xx; struct cvmx_asxx_int_reg_s cn58xxp1; } cvmx_asxx_int_reg_t; /** * cvmx_asx#_mii_rx_dat_set * * ASX_MII_RX_DAT_SET = MII Data delay setting * */ typedef union { uint64_t u64; struct cvmx_asxx_mii_rx_dat_set_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t setting : 5; /**< Setting to place on the RXD (MII receive data) delay
lines. The intrinsic delay can range from 50ps to 80ps per tap. */ #else uint64_t setting : 5; uint64_t reserved_5_63 : 59; #endif } s; struct cvmx_asxx_mii_rx_dat_set_s cn30xx; struct cvmx_asxx_mii_rx_dat_set_s cn50xx; } cvmx_asxx_mii_rx_dat_set_t; /** * cvmx_asx#_prt_loop * * ASX_PRT_LOOP = Internal Loopback mode - TX FIFO output goes into RX FIFO (and maybe pins) * */ typedef union { uint64_t u64; struct cvmx_asxx_prt_loop_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t ext_loop : 4; /**< External Loopback Enable 0 = No Loopback (TX FIFO is filled by RGMII) 1 = RX FIFO drives the TX FIFO - GMX_PRT_CFG[DUPLEX] must be 1 (FullDuplex) - GMX_PRT_CFG[SPEED] must be 1 (GigE speed) - core clock > 250MHZ - rxc must not deviate from the +-50ppm - if txc>rxc, idle cycle may drop over time */ uint64_t int_loop : 4; /**< Internal Loopback Enable 0 = No Loopback (RX FIFO is filled by RGMII pins) 1 = TX FIFO drives the RX FIFO Note, in internal loop-back mode, the RGMII link status is not used (since there is no real PHY). Software cannot use the inband status. */ #else uint64_t int_loop : 4; uint64_t ext_loop : 4; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_asxx_prt_loop_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_7_63 : 57; uint64_t ext_loop : 3; /**< External Loopback Enable 0 = No Loopback (TX FIFO is filled by RGMII) 1 = RX FIFO drives the TX FIFO - GMX_PRT_CFG[DUPLEX] must be 1 (FullDuplex) - GMX_PRT_CFG[SPEED] must be 1 (GigE speed) - core clock > 250MHZ - rxc must not deviate from the +-50ppm - if txc>rxc, idle cycle may drop over time */ uint64_t reserved_3_3 : 1; uint64_t int_loop : 3; /**< Internal Loopback Enable 0 = No Loopback (RX FIFO is filled by RGMII pins) 1 = TX FIFO drives the RX FIFO - GMX_PRT_CFG[DUPLEX] must be 1 (FullDuplex) - GMX_PRT_CFG[SPEED] must be 1 (GigE speed) - GMX_TX_CLK[CLK_CNT] must be 1 Note, in internal loop-back mode, the RGMII link status is not used (since there is no real PHY). Software cannot use the inband status.
*/ #else uint64_t int_loop : 3; uint64_t reserved_3_3 : 1; uint64_t ext_loop : 3; uint64_t reserved_7_63 : 57; #endif } cn30xx; struct cvmx_asxx_prt_loop_cn30xx cn31xx; struct cvmx_asxx_prt_loop_s cn38xx; struct cvmx_asxx_prt_loop_s cn38xxp2; struct cvmx_asxx_prt_loop_cn30xx cn50xx; struct cvmx_asxx_prt_loop_s cn58xx; struct cvmx_asxx_prt_loop_s cn58xxp1; } cvmx_asxx_prt_loop_t; /** * cvmx_asx#_rld_bypass * * ASX_RLD_BYPASS * */ typedef union { uint64_t u64; struct cvmx_asxx_rld_bypass_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t bypass : 1; /**< When set, the rld_dll setting is bypassed with ASX_RLD_BYPASS_SETTING */ #else uint64_t bypass : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_asxx_rld_bypass_s cn38xx; struct cvmx_asxx_rld_bypass_s cn38xxp2; struct cvmx_asxx_rld_bypass_s cn58xx; struct cvmx_asxx_rld_bypass_s cn58xxp1; } cvmx_asxx_rld_bypass_t; /** * cvmx_asx#_rld_bypass_setting * * ASX_RLD_BYPASS_SETTING * */ typedef union { uint64_t u64; struct cvmx_asxx_rld_bypass_setting_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t setting : 5; /**< The rld_dll setting bypass value */ #else uint64_t setting : 5; uint64_t reserved_5_63 : 59; #endif } s; struct cvmx_asxx_rld_bypass_setting_s cn38xx; struct cvmx_asxx_rld_bypass_setting_s cn38xxp2; struct cvmx_asxx_rld_bypass_setting_s cn58xx; struct cvmx_asxx_rld_bypass_setting_s cn58xxp1; } cvmx_asxx_rld_bypass_setting_t; /** * cvmx_asx#_rld_comp * * ASX_RLD_COMP * */ typedef union { uint64_t u64; struct cvmx_asxx_rld_comp_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_9_63 : 55; uint64_t pctl : 5; /**< PCTL Compensation Value These bits reflect the computed compensation values from the built-in compensation circuit. */ uint64_t nctl : 4; /**< These bits reflect the computed compensation values from the built-in compensation circuit. */ #else uint64_t nctl : 4; uint64_t pctl : 5; uint64_t reserved_9_63 : 55; #endif } s; struct cvmx_asxx_rld_comp_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t pctl : 4; /**< These bits reflect the computed compensation values from the built-in compensation circuit. */ uint64_t nctl : 4; /**< These bits reflect the computed compensation values from the built-in compensation circuit. */ #else uint64_t nctl : 4; uint64_t pctl : 4; uint64_t reserved_8_63 : 56; #endif } cn38xx; struct cvmx_asxx_rld_comp_cn38xx cn38xxp2; struct cvmx_asxx_rld_comp_s cn58xx; struct cvmx_asxx_rld_comp_s cn58xxp1; } cvmx_asxx_rld_comp_t; /** * cvmx_asx#_rld_data_drv * * ASX_RLD_DATA_DRV * */ typedef union { uint64_t u64; struct cvmx_asxx_rld_data_drv_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t pctl : 4; /**< These bits specify a driving strength (positive integer) for the RLD I/Os when the built-in compensation circuit is bypassed. */ uint64_t nctl : 4; /**< These bits specify a driving strength (positive integer) for the RLD I/Os when the built-in compensation circuit is bypassed. 
*/ #else uint64_t nctl : 4; uint64_t pctl : 4; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_asxx_rld_data_drv_s cn38xx; struct cvmx_asxx_rld_data_drv_s cn38xxp2; struct cvmx_asxx_rld_data_drv_s cn58xx; struct cvmx_asxx_rld_data_drv_s cn58xxp1; } cvmx_asxx_rld_data_drv_t; /** * cvmx_asx#_rld_fcram_mode * * ASX_RLD_FCRAM_MODE * */ typedef union { uint64_t u64; struct cvmx_asxx_rld_fcram_mode_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t mode : 1; /**< Memory Mode - 0: RLDRAM - 1: FCRAM */ #else uint64_t mode : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_asxx_rld_fcram_mode_s cn38xx; struct cvmx_asxx_rld_fcram_mode_s cn38xxp2; } cvmx_asxx_rld_fcram_mode_t; /** * cvmx_asx#_rld_nctl_strong * * ASX_RLD_NCTL_STRONG * */ typedef union { uint64_t u64; struct cvmx_asxx_rld_nctl_strong_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t nctl : 5; /**< Duke's drive control */ #else uint64_t nctl : 5; uint64_t reserved_5_63 : 59; #endif } s; struct cvmx_asxx_rld_nctl_strong_s cn38xx; struct cvmx_asxx_rld_nctl_strong_s cn38xxp2; struct cvmx_asxx_rld_nctl_strong_s cn58xx; struct cvmx_asxx_rld_nctl_strong_s cn58xxp1; } cvmx_asxx_rld_nctl_strong_t; /** * cvmx_asx#_rld_nctl_weak * * ASX_RLD_NCTL_WEAK * */ typedef union { uint64_t u64; struct cvmx_asxx_rld_nctl_weak_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t nctl : 5; /**< UNUSED (not needed for O9N) */ #else uint64_t nctl : 5; uint64_t reserved_5_63 : 59; #endif } s; struct cvmx_asxx_rld_nctl_weak_s cn38xx; struct cvmx_asxx_rld_nctl_weak_s cn38xxp2; struct cvmx_asxx_rld_nctl_weak_s cn58xx; struct cvmx_asxx_rld_nctl_weak_s cn58xxp1; } cvmx_asxx_rld_nctl_weak_t; /** * cvmx_asx#_rld_pctl_strong * * ASX_RLD_PCTL_STRONG * */ typedef union { uint64_t u64; struct cvmx_asxx_rld_pctl_strong_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t pctl : 5; /**< Duke's drive control */ #else uint64_t pctl : 5; uint64_t reserved_5_63 : 59; #endif } s; struct cvmx_asxx_rld_pctl_strong_s cn38xx; struct cvmx_asxx_rld_pctl_strong_s cn38xxp2; struct cvmx_asxx_rld_pctl_strong_s cn58xx; struct cvmx_asxx_rld_pctl_strong_s cn58xxp1; } cvmx_asxx_rld_pctl_strong_t; /** * cvmx_asx#_rld_pctl_weak * * ASX_RLD_PCTL_WEAK * */ typedef union { uint64_t u64; struct cvmx_asxx_rld_pctl_weak_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t pctl : 5; /**< UNUSED (not needed for O9N) */ #else uint64_t pctl : 5; uint64_t reserved_5_63 : 59; #endif } s; struct cvmx_asxx_rld_pctl_weak_s cn38xx; struct cvmx_asxx_rld_pctl_weak_s cn38xxp2; struct cvmx_asxx_rld_pctl_weak_s cn58xx; struct cvmx_asxx_rld_pctl_weak_s cn58xxp1; } cvmx_asxx_rld_pctl_weak_t; /** * cvmx_asx#_rld_setting * * ASX_RLD_SETTING * */ typedef union { uint64_t u64; struct cvmx_asxx_rld_setting_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_13_63 : 51; uint64_t dfaset : 5; /**< RLD ClkGen DLL Setting(debug) ** NEW O9N ** */ uint64_t dfalag : 1; /**< RLD ClkGen DLL Lag Error(debug) ** NEW O9N ** */ uint64_t dfalead : 1; /**< RLD ClkGen DLL Lead Error(debug) ** NEW O9N ** */ uint64_t dfalock : 1; /**< RLD ClkGen DLL Lock acquisition(debug) ** NEW O9N ** */ uint64_t setting : 5; /**< RLDCK90 DLL Setting(debug) */ #else uint64_t setting : 5; uint64_t dfalock : 1; uint64_t dfalead : 1; uint64_t dfalag : 1; uint64_t dfaset : 5; uint64_t reserved_13_63 : 51; #endif } s; struct cvmx_asxx_rld_setting_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t setting 
: 5; /**< This is the read-only true rld dll_setting. */ #else uint64_t setting : 5; uint64_t reserved_5_63 : 59; #endif } cn38xx; struct cvmx_asxx_rld_setting_cn38xx cn38xxp2; struct cvmx_asxx_rld_setting_s cn58xx; struct cvmx_asxx_rld_setting_s cn58xxp1; } cvmx_asxx_rld_setting_t; /** * cvmx_asx#_rx_clk_set# * * ASX_RX_CLK_SET = RGMII Clock delay setting * * * Notes: * Setting to place on the open-loop RXC (RGMII receive clk) * delay line, which can delay the received clock. This * can be used if the board and/or transmitting device * has not otherwise delayed the clock. * * A value of SETTING=0 disables the delay line. The delay * line should be disabled unless the transmitter or board * does not delay the clock. * * Note that this delay line provides only a coarse control * over the delay. Generally, it can only reliably provide * a delay in the range 1.25-2.5ns, which may not be adequate * for some system applications. * * The open loop delay line selects * from among a series of tap positions. Each incremental * tap position adds a delay of 50ps to 135ps per tap, depending * on the chip, its temperature, and the voltage. * To achieve from 1.25-2.5ns of delay on the received * clock, a fixed value of SETTING=24 may work. * For more precision, we recommend the following settings * based on the chip voltage: * * VDD SETTING * ----------------------------- * 1.0 18 * 1.05 19 * 1.1 21 * 1.15 22 * 1.2 23 * 1.25 24 * 1.3 25 */ typedef union { uint64_t u64; struct cvmx_asxx_rx_clk_setx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t setting : 5; /**< Setting to place on the open-loop RXC delay line */ #else uint64_t setting : 5; uint64_t reserved_5_63 : 59; #endif } s; struct cvmx_asxx_rx_clk_setx_s cn30xx; struct cvmx_asxx_rx_clk_setx_s cn31xx; struct cvmx_asxx_rx_clk_setx_s cn38xx; struct cvmx_asxx_rx_clk_setx_s cn38xxp2; struct cvmx_asxx_rx_clk_setx_s cn50xx; struct cvmx_asxx_rx_clk_setx_s cn58xx; struct cvmx_asxx_rx_clk_setx_s cn58xxp1; } cvmx_asxx_rx_clk_setx_t; /** * cvmx_asx#_rx_prt_en * * ASX_RX_PRT_EN = RGMII Port Enable * */ typedef union { uint64_t u64; struct cvmx_asxx_rx_prt_en_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t prt_en : 4; /**< Port enable. Must be set for Octeon to receive RGMII traffic. When this bit is clear on a given port, all RGMII cycles will appear as inter-frame cycles. */ #else uint64_t prt_en : 4; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_asxx_rx_prt_en_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_3_63 : 61; uint64_t prt_en : 3; /**< Port enable. Must be set for Octeon to receive RGMII traffic. When this bit is clear on a given port, all RGMII cycles will appear as inter-frame cycles.
*/ #else uint64_t prt_en : 3; uint64_t reserved_3_63 : 61; #endif } cn30xx; struct cvmx_asxx_rx_prt_en_cn30xx cn31xx; struct cvmx_asxx_rx_prt_en_s cn38xx; struct cvmx_asxx_rx_prt_en_s cn38xxp2; struct cvmx_asxx_rx_prt_en_cn30xx cn50xx; struct cvmx_asxx_rx_prt_en_s cn58xx; struct cvmx_asxx_rx_prt_en_s cn58xxp1; } cvmx_asxx_rx_prt_en_t; /** * cvmx_asx#_rx_wol * * ASX_RX_WOL = RGMII RX Wake on LAN status register * */ typedef union { uint64_t u64; struct cvmx_asxx_rx_wol_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t status : 1; /**< Copy of PMCSR[15] - PME_status */ uint64_t enable : 1; /**< Copy of PMCSR[8] - PME_enable */ #else uint64_t enable : 1; uint64_t status : 1; uint64_t reserved_2_63 : 62; #endif } s; struct cvmx_asxx_rx_wol_s cn38xx; struct cvmx_asxx_rx_wol_s cn38xxp2; } cvmx_asxx_rx_wol_t; /** * cvmx_asx#_rx_wol_msk * * ASX_RX_WOL_MSK = RGMII RX Wake on LAN byte mask * */ typedef union { uint64_t u64; struct cvmx_asxx_rx_wol_msk_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t msk : 64; /**< Bytes to include in the CRC signature */ #else uint64_t msk : 64; #endif } s; struct cvmx_asxx_rx_wol_msk_s cn38xx; struct cvmx_asxx_rx_wol_msk_s cn38xxp2; } cvmx_asxx_rx_wol_msk_t; /** * cvmx_asx#_rx_wol_powok * * ASX_RX_WOL_POWOK = RGMII RX Wake on LAN Power OK * */ typedef union { uint64_t u64; struct cvmx_asxx_rx_wol_powok_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t powerok : 1; /**< Power OK */ #else uint64_t powerok : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_asxx_rx_wol_powok_s cn38xx; struct cvmx_asxx_rx_wol_powok_s cn38xxp2; } cvmx_asxx_rx_wol_powok_t; /** * cvmx_asx#_rx_wol_sig * * ASX_RX_WOL_SIG = RGMII RX Wake on LAN CRC signature * */ typedef union { uint64_t u64; struct cvmx_asxx_rx_wol_sig_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t sig : 32; /**< CRC signature */ #else uint64_t sig : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_asxx_rx_wol_sig_s cn38xx; struct cvmx_asxx_rx_wol_sig_s cn38xxp2; } cvmx_asxx_rx_wol_sig_t; /** * cvmx_asx#_tx_clk_set# * * ASX_TX_CLK_SET = RGMII Clock delay setting * * * Notes: * Setting to place on the open-loop TXC (RGMII transmit clk) * delay line, which can delay the transmitted clock. This * can be used if the board and/or transmitting device * has not otherwise delayed the clock. * * A value of SETTING=0 disables the delay line. The delay * line should be disabled unless the transmitter or board * does not delay the clock. * * Note that this delay line provides only a coarse control * over the delay. Generally, it can only reliably provide * a delay in the range 1.25-2.5ns, which may not be adequate * for some system applications. * * The open loop delay line selects * from among a series of tap positions. Each incremental * tap position adds a delay of 50ps to 135ps per tap, depending * on the chip, its temperature, and the voltage. * To achieve from 1.25-2.5ns of delay on the transmitted * clock, a fixed value of SETTING=24 may work.
* For more precision, we recommend the following settings * based on the chip voltage: * * VDD SETTING * ----------------------------- * 1.0 18 * 1.05 19 * 1.1 21 * 1.15 22 * 1.2 23 * 1.25 24 * 1.3 25 */ typedef union { uint64_t u64; struct cvmx_asxx_tx_clk_setx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t setting : 5; /**< Setting to place on the open-loop TXC delay line */ #else uint64_t setting : 5; uint64_t reserved_5_63 : 59; #endif } s; struct cvmx_asxx_tx_clk_setx_s cn30xx; struct cvmx_asxx_tx_clk_setx_s cn31xx; struct cvmx_asxx_tx_clk_setx_s cn38xx; struct cvmx_asxx_tx_clk_setx_s cn38xxp2; struct cvmx_asxx_tx_clk_setx_s cn50xx; struct cvmx_asxx_tx_clk_setx_s cn58xx; struct cvmx_asxx_tx_clk_setx_s cn58xxp1; } cvmx_asxx_tx_clk_setx_t; /** * cvmx_asx#_tx_comp_byp * * ASX_TX_COMP_BYP = RGMII TX Compensation Bypass * */ typedef union { uint64_t u64; struct cvmx_asxx_tx_comp_byp_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_0_63 : 64; #else uint64_t reserved_0_63 : 64; #endif } s; struct cvmx_asxx_tx_comp_byp_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_9_63 : 55; uint64_t bypass : 1; /**< Compensation bypass */ uint64_t pctl : 4; /**< PCTL Compensation Value (see Duke) */ uint64_t nctl : 4; /**< NCTL Compensation Value (see Duke) */ #else uint64_t nctl : 4; uint64_t pctl : 4; uint64_t bypass : 1; uint64_t reserved_9_63 : 55; #endif } cn30xx; struct cvmx_asxx_tx_comp_byp_cn30xx cn31xx; struct cvmx_asxx_tx_comp_byp_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t pctl : 4; /**< PCTL Compensation Value (see Duke) */ uint64_t nctl : 4; /**< NCTL Compensation Value (see Duke) */ #else uint64_t nctl : 4; uint64_t pctl : 4; uint64_t reserved_8_63 : 56; #endif } cn38xx; struct cvmx_asxx_tx_comp_byp_cn38xx cn38xxp2; struct cvmx_asxx_tx_comp_byp_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_17_63 : 47; uint64_t bypass : 1; /**< Compensation bypass */ uint64_t reserved_13_15 : 3; uint64_t pctl : 5; /**< PCTL Compensation Value (see Duke) */ uint64_t reserved_5_7 : 3; uint64_t nctl : 5; /**< NCTL Compensation Value (see Duke) */ #else uint64_t nctl : 5; uint64_t reserved_5_7 : 3; uint64_t pctl : 5; uint64_t reserved_13_15 : 3; uint64_t bypass : 1; uint64_t reserved_17_63 : 47; #endif } cn50xx; struct cvmx_asxx_tx_comp_byp_cn58xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_13_63 : 51; uint64_t pctl : 5; /**< PCTL Compensation Value (see Duke) */ uint64_t reserved_5_7 : 3; uint64_t nctl : 5; /**< NCTL Compensation Value (see Duke) */ #else uint64_t nctl : 5; uint64_t reserved_5_7 : 3; uint64_t pctl : 5; uint64_t reserved_13_63 : 51; #endif } cn58xx; struct cvmx_asxx_tx_comp_byp_cn58xx cn58xxp1; } cvmx_asxx_tx_comp_byp_t; /** * cvmx_asx#_tx_hi_water# * * ASX_TX_HI_WATER = RGMII TX FIFO Hi WaterMark * */ typedef union { uint64_t u64; struct cvmx_asxx_tx_hi_waterx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t mark : 4; /**< TX FIFO HiWatermark to stall GMX. Value of 0 maps to 16. Reset value changed from 10 in pass1. Pass1 settings (assuming 125 tclk) - 325-375: 12 - 375-437: 11 - 437-550: 10 - 550-687: 9 */ #else uint64_t mark : 4; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_asxx_tx_hi_waterx_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_3_63 : 61; uint64_t mark : 3; /**< TX FIFO HiWatermark to stall GMX. Value 0 maps to 8.
*/ #else uint64_t mark : 3; uint64_t reserved_3_63 : 61; #endif } cn30xx; struct cvmx_asxx_tx_hi_waterx_cn30xx cn31xx; struct cvmx_asxx_tx_hi_waterx_s cn38xx; struct cvmx_asxx_tx_hi_waterx_s cn38xxp2; struct cvmx_asxx_tx_hi_waterx_cn30xx cn50xx; struct cvmx_asxx_tx_hi_waterx_s cn58xx; struct cvmx_asxx_tx_hi_waterx_s cn58xxp1; } cvmx_asxx_tx_hi_waterx_t; /** * cvmx_asx#_tx_prt_en * * ASX_TX_PRT_EN = RGMII Port Enable * */ typedef union { uint64_t u64; struct cvmx_asxx_tx_prt_en_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t prt_en : 4; /**< Port enable. Must be set for Octeon to send RGMII traffic. When this bit is clear on a given port, then all RGMII cycles will appear as inter-frame cycles. */ #else uint64_t prt_en : 4; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_asxx_tx_prt_en_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_3_63 : 61; uint64_t prt_en : 3; /**< Port enable. Must be set for Octeon to send RGMII traffic. When this bit is clear on a given port, then all RGMII cycles will appear as inter-frame cycles. */ #else uint64_t prt_en : 3; uint64_t reserved_3_63 : 61; #endif } cn30xx; struct cvmx_asxx_tx_prt_en_cn30xx cn31xx; struct cvmx_asxx_tx_prt_en_s cn38xx; struct cvmx_asxx_tx_prt_en_s cn38xxp2; struct cvmx_asxx_tx_prt_en_cn30xx cn50xx; struct cvmx_asxx_tx_prt_en_s cn58xx; struct cvmx_asxx_tx_prt_en_s cn58xxp1; } cvmx_asxx_tx_prt_en_t; /** * cvmx_asx0_dbg_data_drv * * ASX_DBG_DATA_DRV * */ typedef union { uint64_t u64; struct cvmx_asx0_dbg_data_drv_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_9_63 : 55; uint64_t pctl : 5; /**< These bits control the driving strength of the dbg interface. */ uint64_t nctl : 4; /**< These bits control the driving strength of the dbg interface. */ #else uint64_t nctl : 4; uint64_t pctl : 5; uint64_t reserved_9_63 : 55; #endif } s; struct cvmx_asx0_dbg_data_drv_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t pctl : 4; /**< These bits control the driving strength of the dbg interface. */ uint64_t nctl : 4; /**< These bits control the driving strength of the dbg interface. */ #else uint64_t nctl : 4; uint64_t pctl : 4; uint64_t reserved_8_63 : 56; #endif } cn38xx; struct cvmx_asx0_dbg_data_drv_cn38xx cn38xxp2; struct cvmx_asx0_dbg_data_drv_s cn58xx; struct cvmx_asx0_dbg_data_drv_s cn58xxp1; } cvmx_asx0_dbg_data_drv_t; /** * cvmx_asx0_dbg_data_enable * * ASX_DBG_DATA_ENABLE * */ typedef union { uint64_t u64; struct cvmx_asx0_dbg_data_enable_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t en : 1; /**< A 1->0 transition turns the dbg interface OFF. */ #else uint64_t en : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_asx0_dbg_data_enable_s cn38xx; struct cvmx_asx0_dbg_data_enable_s cn38xxp2; struct cvmx_asx0_dbg_data_enable_s cn58xx; struct cvmx_asx0_dbg_data_enable_s cn58xxp1; } cvmx_asx0_dbg_data_enable_t; /** * cvmx_ciu_bist */ typedef union { uint64_t u64; struct cvmx_ciu_bist_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t bist : 4; /**< BIST Results. HW sets a bit in BIST for memory that fails BIST. */ #else uint64_t bist : 4; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_ciu_bist_s cn30xx; struct cvmx_ciu_bist_s cn31xx; struct cvmx_ciu_bist_s cn38xx; struct cvmx_ciu_bist_s cn38xxp2; struct cvmx_ciu_bist_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t bist : 2; /**< BIST Results. HW sets a bit in BIST for memory that fails BIST.
*/ #else uint64_t bist : 2; uint64_t reserved_2_63 : 62; #endif } cn50xx; struct cvmx_ciu_bist_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_3_63 : 61; uint64_t bist : 3; /**< BIST Results. HW sets a bit in BIST for memory that fails BIST. */ #else uint64_t bist : 3; uint64_t reserved_3_63 : 61; #endif } cn52xx; struct cvmx_ciu_bist_cn52xx cn52xxp1; struct cvmx_ciu_bist_s cn56xx; struct cvmx_ciu_bist_s cn56xxp1; struct cvmx_ciu_bist_s cn58xx; struct cvmx_ciu_bist_s cn58xxp1; } cvmx_ciu_bist_t; /** * cvmx_ciu_dint */ typedef union { uint64_t u64; struct cvmx_ciu_dint_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t dint : 16; /**< Send DINT pulse to PP vector */ #else uint64_t dint : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_ciu_dint_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t dint : 1; /**< Send DINT pulse to PP vector */ #else uint64_t dint : 1; uint64_t reserved_1_63 : 63; #endif } cn30xx; struct cvmx_ciu_dint_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t dint : 2; /**< Send DINT pulse to PP vector */ #else uint64_t dint : 2; uint64_t reserved_2_63 : 62; #endif } cn31xx; struct cvmx_ciu_dint_s cn38xx; struct cvmx_ciu_dint_s cn38xxp2; struct cvmx_ciu_dint_cn31xx cn50xx; struct cvmx_ciu_dint_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t dint : 4; /**< Send DINT pulse to PP vector */ #else uint64_t dint : 4; uint64_t reserved_4_63 : 60; #endif } cn52xx; struct cvmx_ciu_dint_cn52xx cn52xxp1; struct cvmx_ciu_dint_cn56xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t dint : 12; /**< Send DINT pulse to PP vector */ #else uint64_t dint : 12; uint64_t reserved_12_63 : 52; #endif } cn56xx; struct cvmx_ciu_dint_cn56xx cn56xxp1; struct cvmx_ciu_dint_s cn58xx; struct cvmx_ciu_dint_s cn58xxp1; } cvmx_ciu_dint_t; /** * cvmx_ciu_fuse */ typedef union { uint64_t u64; struct cvmx_ciu_fuse_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t fuse : 16; /**< Physical PP is present */ #else uint64_t fuse : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_ciu_fuse_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t fuse : 1; /**< Physical PP is present */ #else uint64_t fuse : 1; uint64_t reserved_1_63 : 63; #endif } cn30xx; struct cvmx_ciu_fuse_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t fuse : 2; /**< Physical PP is present */ #else uint64_t fuse : 2; uint64_t reserved_2_63 : 62; #endif } cn31xx; struct cvmx_ciu_fuse_s cn38xx; struct cvmx_ciu_fuse_s cn38xxp2; struct cvmx_ciu_fuse_cn31xx cn50xx; struct cvmx_ciu_fuse_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t fuse : 4; /**< Physical PP is present */ #else uint64_t fuse : 4; uint64_t reserved_4_63 : 60; #endif } cn52xx; struct cvmx_ciu_fuse_cn52xx cn52xxp1; struct cvmx_ciu_fuse_cn56xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t fuse : 12; /**< Physical PP is present */ #else uint64_t fuse : 12; uint64_t reserved_12_63 : 52; #endif } cn56xx; struct cvmx_ciu_fuse_cn56xx cn56xxp1; struct cvmx_ciu_fuse_s cn58xx; struct cvmx_ciu_fuse_s cn58xxp1; } cvmx_ciu_fuse_t; /** * cvmx_ciu_gstop */ typedef union { uint64_t u64; struct cvmx_ciu_gstop_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t gstop : 1; /**< GSTOP bit */ #else uint64_t gstop : 1; uint64_t reserved_1_63 : 63; #endif } s; struct
cvmx_ciu_gstop_s cn30xx; struct cvmx_ciu_gstop_s cn31xx; struct cvmx_ciu_gstop_s cn38xx; struct cvmx_ciu_gstop_s cn38xxp2; struct cvmx_ciu_gstop_s cn50xx; struct cvmx_ciu_gstop_s cn52xx; struct cvmx_ciu_gstop_s cn52xxp1; struct cvmx_ciu_gstop_s cn56xx; struct cvmx_ciu_gstop_s cn56xxp1; struct cvmx_ciu_gstop_s cn58xx; struct cvmx_ciu_gstop_s cn58xxp1; } cvmx_ciu_gstop_t; /** * cvmx_ciu_int#_en0 * * Notes: * CIU_INT0_EN0: PP0 /IP2 * CIU_INT1_EN0: PP0 /IP3 * ... * CIU_INT6_EN0: PP3/IP2 * CIU_INT7_EN0: PP3/IP3 * (hole) * CIU_INT32_EN0: PCI /IP */ typedef union { uint64_t u64; struct cvmx_ciu_intx_en0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */ uint64_t mii : 1; /**< MII Interface Interrupt */ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */ uint64_t powiq : 1; /**< POW IQ interrupt */ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */ uint64_t mpi : 1; /**< MPI/SPI interrupt */ uint64_t pcm : 1; /**< PCM/TDM interrupt */ uint64_t usb : 1; /**< USB Interrupt */ uint64_t timer : 4; /**< General timer interrupts */ uint64_t key_zero : 1; /**< Key Zeroization interrupt */ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t gmx_drp : 2; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t reserved_44_44 : 1; uint64_t pci_msi : 4; /**< PCI MSI */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t reserved_44_44 : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 2; uint64_t ipd_drp : 1; uint64_t key_zero : 1; uint64_t timer : 4; uint64_t usb : 1; uint64_t pcm : 1; uint64_t mpi : 1; uint64_t twsi2 : 1; uint64_t powiq : 1; uint64_t ipdppthr : 1; uint64_t mii : 1; uint64_t bootdma : 1; #endif } s; struct cvmx_ciu_intx_en0_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_59_63 : 5; uint64_t mpi : 1; /**< MPI/SPI interrupt */ uint64_t pcm : 1; /**< PCM/TDM interrupt */ uint64_t usb : 1; /**< USB interrupt */ uint64_t timer : 4; /**< General timer interrupts */ uint64_t reserved_51_51 : 1; uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t reserved_49_49 : 1; uint64_t gmx_drp : 1; /**< GMX packet drop */ uint64_t reserved_47_47 : 1; uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t reserved_44_44 : 1; uint64_t pci_msi : 4; /**< PCI MSI */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t reserved_44_44 : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t reserved_47_47 : 1; uint64_t gmx_drp : 1; uint64_t reserved_49_49 : 1; uint64_t ipd_drp : 1; uint64_t reserved_51_51 : 1; uint64_t timer : 4; uint64_t usb : 1; uint64_t pcm : 1; uint64_t mpi : 1; uint64_t reserved_59_63 : 5; #endif } cn30xx; struct cvmx_ciu_intx_en0_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t 
reserved_59_63 : 5; uint64_t mpi : 1; /**< MPI/SPI interrupt */ uint64_t pcm : 1; /**< PCM/TDM interrupt */ uint64_t usb : 1; /**< USB interrupt */ uint64_t timer : 4; /**< General timer interrupts */ uint64_t reserved_51_51 : 1; uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t reserved_49_49 : 1; uint64_t gmx_drp : 1; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t reserved_44_44 : 1; uint64_t pci_msi : 4; /**< PCI MSI */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t reserved_44_44 : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 1; uint64_t reserved_49_49 : 1; uint64_t ipd_drp : 1; uint64_t reserved_51_51 : 1; uint64_t timer : 4; uint64_t usb : 1; uint64_t pcm : 1; uint64_t mpi : 1; uint64_t reserved_59_63 : 5; #endif } cn31xx; struct cvmx_ciu_intx_en0_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_56_63 : 8; uint64_t timer : 4; /**< General timer interrupts */ uint64_t key_zero : 1; /**< Key Zeroization interrupt */ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t gmx_drp : 2; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t reserved_44_44 : 1; uint64_t pci_msi : 4; /**< PCI MSI */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t reserved_44_44 : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 2; uint64_t ipd_drp : 1; uint64_t key_zero : 1; uint64_t timer : 4; uint64_t reserved_56_63 : 8; #endif } cn38xx; struct cvmx_ciu_intx_en0_cn38xx cn38xxp2; struct cvmx_ciu_intx_en0_cn30xx cn50xx; struct cvmx_ciu_intx_en0_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */ uint64_t mii : 1; /**< MII Interface Interrupt */ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */ uint64_t powiq : 1; /**< POW IQ interrupt */ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */ uint64_t reserved_57_58 : 2; uint64_t usb : 1; /**< USB Interrupt */ uint64_t timer : 4; /**< General timer interrupts */ uint64_t reserved_51_51 : 1; uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t reserved_49_49 : 1; uint64_t gmx_drp : 1; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t reserved_44_44 : 1; uint64_t pci_msi : 4; /**< PCI MSI */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts */ #else uint64_t workq : 16; uint64_t gpio : 16; 
uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t reserved_44_44 : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 1; uint64_t reserved_49_49 : 1; uint64_t ipd_drp : 1; uint64_t reserved_51_51 : 1; uint64_t timer : 4; uint64_t usb : 1; uint64_t reserved_57_58 : 2; uint64_t twsi2 : 1; uint64_t powiq : 1; uint64_t ipdppthr : 1; uint64_t mii : 1; uint64_t bootdma : 1; #endif } cn52xx; struct cvmx_ciu_intx_en0_cn52xx cn52xxp1; struct cvmx_ciu_intx_en0_cn56xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */ uint64_t mii : 1; /**< MII Interface Interrupt */ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */ uint64_t powiq : 1; /**< POW IQ interrupt */ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */ uint64_t reserved_57_58 : 2; uint64_t usb : 1; /**< USB Interrupt */ uint64_t timer : 4; /**< General timer interrupts */ uint64_t key_zero : 1; /**< Key Zeroization interrupt */ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t gmx_drp : 2; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t reserved_44_44 : 1; uint64_t pci_msi : 4; /**< PCI MSI */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t reserved_44_44 : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 2; uint64_t ipd_drp : 1; uint64_t key_zero : 1; uint64_t timer : 4; uint64_t usb : 1; uint64_t reserved_57_58 : 2; uint64_t twsi2 : 1; uint64_t powiq : 1; uint64_t ipdppthr : 1; uint64_t mii : 1; uint64_t bootdma : 1; #endif } cn56xx; struct cvmx_ciu_intx_en0_cn56xx cn56xxp1; struct cvmx_ciu_intx_en0_cn38xx cn58xx; struct cvmx_ciu_intx_en0_cn38xx cn58xxp1; } cvmx_ciu_intx_en0_t; /** * cvmx_ciu_int#_en0_w1c * * Notes: * Write-1-to-clear version of the CIU_INTx_EN0 register * (Pass2 ONLY) */ typedef union { uint64_t u64; struct cvmx_ciu_intx_en0_w1c_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */ uint64_t mii : 1; /**< MII Interface Interrupt */ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */ uint64_t powiq : 1; /**< POW IQ interrupt */ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */ uint64_t reserved_57_58 : 2; uint64_t usb : 1; /**< USB Interrupt */ uint64_t timer : 4; /**< General timer interrupts */ uint64_t key_zero : 1; /**< Key Zeroization interrupt */ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t gmx_drp : 2; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t reserved_44_44 : 1; uint64_t pci_msi : 4; /**< PCI MSI */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t reserved_44_44 : 1; 
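/* Usage note (illustrative; not part of the auto-generated definitions):
 * the _W1C/_W1S aliases clear or set individual EN0 bits without a
 * read-modify-write of the whole enable word, so two cores (or a handler
 * and mainline code) can safely adjust different bits of the same entry.
 * A hedged sketch, assuming the CVMX_CIU_INTX_EN0_W1C()/_W1S() address
 * macros from cvmx-csr-addresses.h:
 * @verbatim
 * int idx = 0;                       // EN0 entry to adjust
 * cvmx_ciu_intx_en0_w1c_t clr = { .u64 = 0 };
 * clr.s.timer = 0x1;                 // only timer 0's bit
 * cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(idx), clr.u64);  // disable
 * cvmx_ciu_intx_en0_w1s_t set = { .u64 = 0 };
 * set.s.timer = 0x1;
 * cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(idx), set.u64);  // re-enable
 * @endverbatim
 */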
uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 2; uint64_t ipd_drp : 1; uint64_t key_zero : 1; uint64_t timer : 4; uint64_t usb : 1; uint64_t reserved_57_58 : 2; uint64_t twsi2 : 1; uint64_t powiq : 1; uint64_t ipdppthr : 1; uint64_t mii : 1; uint64_t bootdma : 1; #endif } s; struct cvmx_ciu_intx_en0_w1c_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */ uint64_t mii : 1; /**< MII Interface Interrupt */ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */ uint64_t powiq : 1; /**< POW IQ interrupt */ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */ uint64_t reserved_57_58 : 2; uint64_t usb : 1; /**< USB Interrupt */ uint64_t timer : 4; /**< General timer interrupts */ uint64_t reserved_51_51 : 1; uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t reserved_49_49 : 1; uint64_t gmx_drp : 1; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t reserved_44_44 : 1; uint64_t pci_msi : 4; /**< PCI MSI */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t reserved_44_44 : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 1; uint64_t reserved_49_49 : 1; uint64_t ipd_drp : 1; uint64_t reserved_51_51 : 1; uint64_t timer : 4; uint64_t usb : 1; uint64_t reserved_57_58 : 2; uint64_t twsi2 : 1; uint64_t powiq : 1; uint64_t ipdppthr : 1; uint64_t mii : 1; uint64_t bootdma : 1; #endif } cn52xx; struct cvmx_ciu_intx_en0_w1c_s cn56xx; struct cvmx_ciu_intx_en0_w1c_cn58xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_56_63 : 8; uint64_t timer : 4; /**< General timer interrupts */ uint64_t key_zero : 1; /**< Key Zeroization interrupt */ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t gmx_drp : 2; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t reserved_44_44 : 1; uint64_t pci_msi : 4; /**< PCI MSI */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t reserved_44_44 : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 2; uint64_t ipd_drp : 1; uint64_t key_zero : 1; uint64_t timer : 4; uint64_t reserved_56_63 : 8; #endif } cn58xx; } cvmx_ciu_intx_en0_w1c_t; /** * cvmx_ciu_int#_en0_w1s * * Notes: * Write-1-to-set version of the CIU_INTx_EN0 register * (Pass2 ONLY) */ typedef union { uint64_t u64; struct cvmx_ciu_intx_en0_w1s_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */ uint64_t mii : 1; /**< MII Interface Interrupt */ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */ uint64_t powiq : 1; /**< POW IQ interrupt */ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */ uint64_t 
reserved_57_58 : 2; uint64_t usb : 1; /**< USB Interrupt */ uint64_t timer : 4; /**< General timer interrupts */ uint64_t key_zero : 1; /**< Key Zeroization interrupt */ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t gmx_drp : 2; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t reserved_44_44 : 1; uint64_t pci_msi : 4; /**< PCI MSI */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t reserved_44_44 : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 2; uint64_t ipd_drp : 1; uint64_t key_zero : 1; uint64_t timer : 4; uint64_t usb : 1; uint64_t reserved_57_58 : 2; uint64_t twsi2 : 1; uint64_t powiq : 1; uint64_t ipdppthr : 1; uint64_t mii : 1; uint64_t bootdma : 1; #endif } s; struct cvmx_ciu_intx_en0_w1s_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */ uint64_t mii : 1; /**< MII Interface Interrupt */ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */ uint64_t powiq : 1; /**< POW IQ interrupt */ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */ uint64_t reserved_57_58 : 2; uint64_t usb : 1; /**< USB Interrupt */ uint64_t timer : 4; /**< General timer interrupts */ uint64_t reserved_51_51 : 1; uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t reserved_49_49 : 1; uint64_t gmx_drp : 1; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t reserved_44_44 : 1; uint64_t pci_msi : 4; /**< PCI MSI */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t reserved_44_44 : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 1; uint64_t reserved_49_49 : 1; uint64_t ipd_drp : 1; uint64_t reserved_51_51 : 1; uint64_t timer : 4; uint64_t usb : 1; uint64_t reserved_57_58 : 2; uint64_t twsi2 : 1; uint64_t powiq : 1; uint64_t ipdppthr : 1; uint64_t mii : 1; uint64_t bootdma : 1; #endif } cn52xx; struct cvmx_ciu_intx_en0_w1s_s cn56xx; struct cvmx_ciu_intx_en0_w1s_cn58xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_56_63 : 8; uint64_t timer : 4; /**< General timer interrupts */ uint64_t key_zero : 1; /**< Key Zeroization interrupt */ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t gmx_drp : 2; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t reserved_44_44 : 1; uint64_t pci_msi : 4; /**< PCI MSI */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue 
interrupts */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t reserved_44_44 : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 2; uint64_t ipd_drp : 1; uint64_t key_zero : 1; uint64_t timer : 4; uint64_t reserved_56_63 : 8; #endif } cn58xx; } cvmx_ciu_intx_en4_0_w1s_t; /** * cvmx_ciu_int#_en1 * * Notes: * @verbatim * PPx/IP2 will be raised when... * * n = x*2 * PPx/IP2 = |([CIU_INT_SUM1, CIU_INTn_SUM0] & [CIU_INTn_EN1, CIU_INTn_EN0]) * * PPx/IP3 will be raised when... * * n = x*2 + 1 * PPx/IP3 = |([CIU_INT_SUM1, CIU_INTn_SUM0] & [CIU_INTn_EN1, CIU_INTn_EN0]) * * PCI/INT will be raised when... * * PCI/INT = |([CIU_INT_SUM1, CIU_INT32_SUM0] & [CIU_INT32_EN1, CIU_INT32_EN0]) * @endverbatim */ typedef union { uint64_t u64; struct cvmx_ciu_intx_en1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t nand : 1; /**< NAND Flash Controller */ uint64_t mii1 : 1; /**< Second MII Interrupt */ uint64_t usb1 : 1; /**< Second USB Interrupt */ uint64_t uart2 : 1; /**< Third UART interrupt */ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */ #else uint64_t wdog : 16; uint64_t uart2 : 1; uint64_t usb1 : 1; uint64_t mii1 : 1; uint64_t nand : 1; uint64_t reserved_20_63 : 44; #endif } s; struct cvmx_ciu_intx_en1_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t wdog : 1; /**< Watchdog summary interrupt enable vector */ #else uint64_t wdog : 1; uint64_t reserved_1_63 : 63; #endif } cn30xx; struct cvmx_ciu_intx_en1_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t wdog : 2; /**< Watchdog summary interrupt enable vector */ #else uint64_t wdog : 2; uint64_t reserved_2_63 : 62; #endif } cn31xx; struct cvmx_ciu_intx_en1_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */ #else uint64_t wdog : 16; uint64_t reserved_16_63 : 48; #endif } cn38xx; struct cvmx_ciu_intx_en1_cn38xx cn38xxp2; struct cvmx_ciu_intx_en1_cn31xx cn50xx; struct cvmx_ciu_intx_en1_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t nand : 1; /**< NAND Flash Controller */ uint64_t mii1 : 1; /**< Second MII Interrupt */ uint64_t usb1 : 1; /**< Second USB Interrupt */ uint64_t uart2 : 1; /**< Third UART interrupt */ uint64_t reserved_4_15 : 12; uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */ #else uint64_t wdog : 4; uint64_t reserved_4_15 : 12; uint64_t uart2 : 1; uint64_t usb1 : 1; uint64_t mii1 : 1; uint64_t nand : 1; uint64_t reserved_20_63 : 44; #endif } cn52xx; struct cvmx_ciu_intx_en1_cn52xxp1 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_19_63 : 45; uint64_t mii1 : 1; /**< Second MII Interrupt */ uint64_t usb1 : 1; /**< Second USB Interrupt */ uint64_t uart2 : 1; /**< Third UART interrupt */ uint64_t reserved_4_15 : 12; uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */ #else uint64_t wdog : 4; uint64_t reserved_4_15 : 12; uint64_t uart2 : 1; uint64_t usb1 : 1; uint64_t mii1 : 1; uint64_t reserved_19_63 : 45; #endif } cn52xxp1; struct cvmx_ciu_intx_en1_cn56xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t wdog : 12; /**< Watchdog summary interrupt enable vector */ #else uint64_t wdog : 12; uint64_t reserved_12_63 : 52; #endif } cn56xx; struct cvmx_ciu_intx_en1_cn56xx cn56xxp1; struct cvmx_ciu_intx_en1_cn38xx cn58xx; struct 
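/* Usage note (illustrative; not part of the auto-generated definitions):
 * the equations in the notes above say an IP2/IP3 line is asserted while
 * any bit is set in both the SUM and EN registers of the same entry.  A
 * hedged software restatement, assuming the CVMX_CIU_INTX_SUM0(),
 * CVMX_CIU_INTX_EN0(), CVMX_CIU_INTX_EN1() and CVMX_CIU_INT_SUM1
 * addresses from cvmx-csr-addresses.h:
 * @verbatim
 * // Would core x see IP2 asserted right now?  (n = x*2 per the notes;
 * // use n = x*2 + 1 for IP3.)
 * static inline int ciu_ip2_pending(int x)
 * {
 *     int n = x * 2;
 *     uint64_t sum0 = cvmx_read_csr(CVMX_CIU_INTX_SUM0(n));
 *     uint64_t sum1 = cvmx_read_csr(CVMX_CIU_INT_SUM1);
 *     uint64_t en0  = cvmx_read_csr(CVMX_CIU_INTX_EN0(n));
 *     uint64_t en1  = cvmx_read_csr(CVMX_CIU_INTX_EN1(n));
 *     return ((sum0 & en0) | (sum1 & en1)) != 0;
 * }
 * @endverbatim
 */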
cvmx_ciu_intx_en1_cn38xx cn58xxp1; } cvmx_ciu_intx_en1_t; /** * cvmx_ciu_int#_en1_w1c * * Notes: * Write-1-to-clear version of the CIU_INTx_EN1 register * (Pass2 ONLY) */ typedef union { uint64_t u64; struct cvmx_ciu_intx_en1_w1c_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t nand : 1; /**< NAND Flash Controller */ uint64_t mii1 : 1; /**< Second MII Interrupt */ uint64_t usb1 : 1; /**< Second USB Interrupt */ uint64_t uart2 : 1; /**< Third UART interrupt */ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */ #else uint64_t wdog : 16; uint64_t uart2 : 1; uint64_t usb1 : 1; uint64_t mii1 : 1; uint64_t nand : 1; uint64_t reserved_20_63 : 44; #endif } s; struct cvmx_ciu_intx_en1_w1c_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t nand : 1; /**< NAND Flash Controller */ uint64_t mii1 : 1; /**< Second MII Interrupt */ uint64_t usb1 : 1; /**< Second USB Interrupt */ uint64_t uart2 : 1; /**< Third UART interrupt */ uint64_t reserved_4_15 : 12; uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */ #else uint64_t wdog : 4; uint64_t reserved_4_15 : 12; uint64_t uart2 : 1; uint64_t usb1 : 1; uint64_t mii1 : 1; uint64_t nand : 1; uint64_t reserved_20_63 : 44; #endif } cn52xx; struct cvmx_ciu_intx_en1_w1c_cn56xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t wdog : 12; /**< Watchdog summary interrupt enable vector */ #else uint64_t wdog : 12; uint64_t reserved_12_63 : 52; #endif } cn56xx; struct cvmx_ciu_intx_en1_w1c_cn58xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */ #else uint64_t wdog : 16; uint64_t reserved_16_63 : 48; #endif } cn58xx; } cvmx_ciu_intx_en1_w1c_t; /** * cvmx_ciu_int#_en1_w1s * * Notes: * Write-1-to-set version of the CIU_INTx_EN1 register * (Pass2 ONLY) */ typedef union { uint64_t u64; struct cvmx_ciu_intx_en1_w1s_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t nand : 1; /**< NAND Flash Controller */ uint64_t mii1 : 1; /**< Second MII Interrupt */ uint64_t usb1 : 1; /**< Second USB Interrupt */ uint64_t uart2 : 1; /**< Third UART interrupt */ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */ #else uint64_t wdog : 16; uint64_t uart2 : 1; uint64_t usb1 : 1; uint64_t mii1 : 1; uint64_t nand : 1; uint64_t reserved_20_63 : 44; #endif } s; struct cvmx_ciu_intx_en1_w1s_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t nand : 1; /**< NAND Flash Controller */ uint64_t mii1 : 1; /**< Second MII Interrupt */ uint64_t usb1 : 1; /**< Second USB Interrupt */ uint64_t uart2 : 1; /**< Third UART interrupt */ uint64_t reserved_4_15 : 12; uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */ #else uint64_t wdog : 4; uint64_t reserved_4_15 : 12; uint64_t uart2 : 1; uint64_t usb1 : 1; uint64_t mii1 : 1; uint64_t nand : 1; uint64_t reserved_20_63 : 44; #endif } cn52xx; struct cvmx_ciu_intx_en1_w1s_cn56xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t wdog : 12; /**< Watchdog summary interrupt enable vector */ #else uint64_t wdog : 12; uint64_t reserved_12_63 : 52; #endif } cn56xx; struct cvmx_ciu_intx_en1_w1s_cn58xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */ #else uint64_t wdog : 16; uint64_t reserved_16_63 : 48; #endif } cn58xx; } cvmx_ciu_intx_en1_w1s_t; /** * cvmx_ciu_int#_en4_0 * * 
Notes: * CIU_INT0_EN4_0: PP0 /IP4 * CIU_INT1_EN4_0: PP1 /IP4 * ... * CIU_INT11_EN4_0: PP11 /IP4 */ typedef union { uint64_t u64; struct cvmx_ciu_intx_en4_0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */ uint64_t mii : 1; /**< MII Interface Interrupt */ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */ uint64_t powiq : 1; /**< POW IQ interrupt */ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */ uint64_t mpi : 1; /**< MPI/SPI interrupt */ uint64_t pcm : 1; /**< PCM/TDM interrupt */ uint64_t usb : 1; /**< USB Interrupt */ uint64_t timer : 4; /**< General timer interrupts */ uint64_t key_zero : 1; /**< Key Zeroization interrupt */ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t gmx_drp : 2; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t reserved_44_44 : 1; uint64_t pci_msi : 4; /**< PCI MSI */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t reserved_44_44 : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 2; uint64_t ipd_drp : 1; uint64_t key_zero : 1; uint64_t timer : 4; uint64_t usb : 1; uint64_t pcm : 1; uint64_t mpi : 1; uint64_t twsi2 : 1; uint64_t powiq : 1; uint64_t ipdppthr : 1; uint64_t mii : 1; uint64_t bootdma : 1; #endif } s; struct cvmx_ciu_intx_en4_0_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_59_63 : 5; uint64_t mpi : 1; /**< MPI/SPI interrupt */ uint64_t pcm : 1; /**< PCM/TDM interrupt */ uint64_t usb : 1; /**< USB interrupt */ uint64_t timer : 4; /**< General timer interrupts */ uint64_t reserved_51_51 : 1; uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t reserved_49_49 : 1; uint64_t gmx_drp : 1; /**< GMX packet drop */ uint64_t reserved_47_47 : 1; uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t reserved_44_44 : 1; uint64_t pci_msi : 4; /**< PCI MSI */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t reserved_44_44 : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t reserved_47_47 : 1; uint64_t gmx_drp : 1; uint64_t reserved_49_49 : 1; uint64_t ipd_drp : 1; uint64_t reserved_51_51 : 1; uint64_t timer : 4; uint64_t usb : 1; uint64_t pcm : 1; uint64_t mpi : 1; uint64_t reserved_59_63 : 5; #endif } cn50xx; struct cvmx_ciu_intx_en4_0_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */ uint64_t mii : 1; /**< MII Interface Interrupt */ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */ uint64_t powiq : 1; /**< POW IQ interrupt */ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */ uint64_t reserved_57_58 : 2; uint64_t usb : 1; /**< USB Interrupt */ uint64_t timer : 4; /**< General timer interrupts */ uint64_t reserved_51_51 : 1; uint64_t ipd_drp : 1; /**< 
IPD QOS packet drop */ uint64_t reserved_49_49 : 1; uint64_t gmx_drp : 1; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t reserved_44_44 : 1; uint64_t pci_msi : 4; /**< PCI MSI */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t reserved_44_44 : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 1; uint64_t reserved_49_49 : 1; uint64_t ipd_drp : 1; uint64_t reserved_51_51 : 1; uint64_t timer : 4; uint64_t usb : 1; uint64_t reserved_57_58 : 2; uint64_t twsi2 : 1; uint64_t powiq : 1; uint64_t ipdppthr : 1; uint64_t mii : 1; uint64_t bootdma : 1; #endif } cn52xx; struct cvmx_ciu_intx_en4_0_cn52xx cn52xxp1; struct cvmx_ciu_intx_en4_0_cn56xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */ uint64_t mii : 1; /**< MII Interface Interrupt */ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */ uint64_t powiq : 1; /**< POW IQ interrupt */ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */ uint64_t reserved_57_58 : 2; uint64_t usb : 1; /**< USB Interrupt */ uint64_t timer : 4; /**< General timer interrupts */ uint64_t key_zero : 1; /**< Key Zeroization interrupt */ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t gmx_drp : 2; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t reserved_44_44 : 1; uint64_t pci_msi : 4; /**< PCI MSI */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t reserved_44_44 : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 2; uint64_t ipd_drp : 1; uint64_t key_zero : 1; uint64_t timer : 4; uint64_t usb : 1; uint64_t reserved_57_58 : 2; uint64_t twsi2 : 1; uint64_t powiq : 1; uint64_t ipdppthr : 1; uint64_t mii : 1; uint64_t bootdma : 1; #endif } cn56xx; struct cvmx_ciu_intx_en4_0_cn56xx cn56xxp1; struct cvmx_ciu_intx_en4_0_cn58xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_56_63 : 8; uint64_t timer : 4; /**< General timer interrupts */ uint64_t key_zero : 1; /**< Key Zeroization interrupt */ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t gmx_drp : 2; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t reserved_44_44 : 1; uint64_t pci_msi : 4; /**< PCI MSI */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; 
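/* Usage note (illustrative; not part of the auto-generated definitions):
 * unlike EN0, which has two entries per core (IP2 and IP3), EN4_0 is
 * indexed directly by core number and gates that core's IP4 line (see
 * the notes above: CIU_INTx_EN4_0 serves PPx/IP4).  A hedged sketch,
 * assuming cvmx_get_core_num() from cvmx.h and the CVMX_CIU_INTX_EN4_0()
 * address macro from cvmx-csr-addresses.h:
 * @verbatim
 * int core = cvmx_get_core_num();
 * cvmx_ciu_intx_en4_0_t en4;
 * en4.u64 = cvmx_read_csr(CVMX_CIU_INTX_EN4_0(core));
 * en4.s.timer |= 0x2;  // route timer 1 to this core's IP4
 * cvmx_write_csr(CVMX_CIU_INTX_EN4_0(core), en4.u64);
 * @endverbatim
 */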
uint64_t pci_msi : 4; uint64_t reserved_44_44 : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 2; uint64_t ipd_drp : 1; uint64_t key_zero : 1; uint64_t timer : 4; uint64_t reserved_56_63 : 8; #endif } cn58xx; struct cvmx_ciu_intx_en4_0_cn58xx cn58xxp1; } cvmx_ciu_intx_en4_0_t; /** * cvmx_ciu_int#_en4_0_w1c * * Notes: * Write-1-to-clear version of the CIU_INTx_EN4_0 register * (Pass2 ONLY) */ typedef union { uint64_t u64; struct cvmx_ciu_intx_en4_0_w1c_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */ uint64_t mii : 1; /**< MII Interface Interrupt */ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */ uint64_t powiq : 1; /**< POW IQ interrupt */ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */ uint64_t reserved_57_58 : 2; uint64_t usb : 1; /**< USB Interrupt */ uint64_t timer : 4; /**< General timer interrupts */ uint64_t key_zero : 1; /**< Key Zeroization interrupt */ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t gmx_drp : 2; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t reserved_44_44 : 1; uint64_t pci_msi : 4; /**< PCI MSI */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t reserved_44_44 : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 2; uint64_t ipd_drp : 1; uint64_t key_zero : 1; uint64_t timer : 4; uint64_t usb : 1; uint64_t reserved_57_58 : 2; uint64_t twsi2 : 1; uint64_t powiq : 1; uint64_t ipdppthr : 1; uint64_t mii : 1; uint64_t bootdma : 1; #endif } s; struct cvmx_ciu_intx_en4_0_w1c_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */ uint64_t mii : 1; /**< MII Interface Interrupt */ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */ uint64_t powiq : 1; /**< POW IQ interrupt */ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */ uint64_t reserved_57_58 : 2; uint64_t usb : 1; /**< USB Interrupt */ uint64_t timer : 4; /**< General timer interrupts */ uint64_t reserved_51_51 : 1; uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t reserved_49_49 : 1; uint64_t gmx_drp : 1; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t reserved_44_44 : 1; uint64_t pci_msi : 4; /**< PCI MSI */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t reserved_44_44 : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 1; uint64_t reserved_49_49 : 1; uint64_t ipd_drp : 1; uint64_t reserved_51_51 : 1; uint64_t timer : 4; uint64_t usb : 1; uint64_t reserved_57_58 : 2; uint64_t twsi2 : 1; uint64_t powiq : 1; uint64_t ipdppthr : 1; uint64_t mii : 1; uint64_t bootdma : 
1; #endif } cn52xx; struct cvmx_ciu_intx_en4_0_w1c_s cn56xx; struct cvmx_ciu_intx_en4_0_w1c_cn58xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_56_63 : 8; uint64_t timer : 4; /**< General timer interrupts */ uint64_t key_zero : 1; /**< Key Zeroization interrupt */ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t gmx_drp : 2; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t reserved_44_44 : 1; uint64_t pci_msi : 4; /**< PCI MSI */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t reserved_44_44 : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 2; uint64_t ipd_drp : 1; uint64_t key_zero : 1; uint64_t timer : 4; uint64_t reserved_56_63 : 8; #endif } cn58xx; } cvmx_ciu_intx_en4_0_w1c_t; /** * cvmx_ciu_int#_en4_0_w1s * * Notes: * Write-1-to-set version of the CIU_INTx_EN4_0 register * (Pass2 ONLY) */ typedef union { uint64_t u64; struct cvmx_ciu_intx_en4_0_w1s_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */ uint64_t mii : 1; /**< MII Interface Interrupt */ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */ uint64_t powiq : 1; /**< POW IQ interrupt */ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */ uint64_t reserved_57_58 : 2; uint64_t usb : 1; /**< USB Interrupt */ uint64_t timer : 4; /**< General timer interrupts */ uint64_t key_zero : 1; /**< Key Zeroization interrupt */ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t gmx_drp : 2; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t reserved_44_44 : 1; uint64_t pci_msi : 4; /**< PCI MSI */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t reserved_44_44 : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 2; uint64_t ipd_drp : 1; uint64_t key_zero : 1; uint64_t timer : 4; uint64_t usb : 1; uint64_t reserved_57_58 : 2; uint64_t twsi2 : 1; uint64_t powiq : 1; uint64_t ipdppthr : 1; uint64_t mii : 1; uint64_t bootdma : 1; #endif } s; struct cvmx_ciu_intx_en4_0_w1s_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */ uint64_t mii : 1; /**< MII Interface Interrupt */ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */ uint64_t powiq : 1; /**< POW IQ interrupt */ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */ uint64_t reserved_57_58 : 2; uint64_t usb : 1; /**< USB Interrupt */ uint64_t timer : 4; /**< General timer interrupts */ uint64_t reserved_51_51 : 1; uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t reserved_49_49 : 1; uint64_t gmx_drp : 1; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace 
buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t reserved_44_44 : 1; uint64_t pci_msi : 4; /**< PCI MSI */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t reserved_44_44 : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 1; uint64_t reserved_49_49 : 1; uint64_t ipd_drp : 1; uint64_t reserved_51_51 : 1; uint64_t timer : 4; uint64_t usb : 1; uint64_t reserved_57_58 : 2; uint64_t twsi2 : 1; uint64_t powiq : 1; uint64_t ipdppthr : 1; uint64_t mii : 1; uint64_t bootdma : 1; #endif } cn52xx; struct cvmx_ciu_intx_en4_0_w1s_s cn56xx; struct cvmx_ciu_intx_en4_0_w1s_cn58xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_56_63 : 8; uint64_t timer : 4; /**< General timer interrupts */ uint64_t key_zero : 1; /**< Key Zeroization interrupt */ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t gmx_drp : 2; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t reserved_44_44 : 1; uint64_t pci_msi : 4; /**< PCI MSI */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox/PCI interrupts */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t reserved_44_44 : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 2; uint64_t ipd_drp : 1; uint64_t key_zero : 1; uint64_t timer : 4; uint64_t reserved_56_63 : 8; #endif } cn58xx; } cvmx_ciu_intx_en4_0_w1s_t; /** * cvmx_ciu_int#_en4_1 * * Notes: * PPx/IP4 will be raised when... 
* PPx/IP4 = |([CIU_INT_SUM1, CIU_INTx_SUM4] & [CIU_INTx_EN4_1, CIU_INTx_EN4_0]) */ typedef union { uint64_t u64; struct cvmx_ciu_intx_en4_1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t nand : 1; /**< NAND Flash Controller */ uint64_t mii1 : 1; /**< Second MII Interrupt */ uint64_t usb1 : 1; /**< Second USB Interrupt */ uint64_t uart2 : 1; /**< Third UART interrupt */ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */ #else uint64_t wdog : 16; uint64_t uart2 : 1; uint64_t usb1 : 1; uint64_t mii1 : 1; uint64_t nand : 1; uint64_t reserved_20_63 : 44; #endif } s; struct cvmx_ciu_intx_en4_1_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t wdog : 2; /**< Watchdog summary interrupt enable vector */ #else uint64_t wdog : 2; uint64_t reserved_2_63 : 62; #endif } cn50xx; struct cvmx_ciu_intx_en4_1_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t nand : 1; /**< NAND Flash Controller */ uint64_t mii1 : 1; /**< Second MII Interrupt */ uint64_t usb1 : 1; /**< Second USB Interrupt */ uint64_t uart2 : 1; /**< Third UART interrupt */ uint64_t reserved_4_15 : 12; uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */ #else uint64_t wdog : 4; uint64_t reserved_4_15 : 12; uint64_t uart2 : 1; uint64_t usb1 : 1; uint64_t mii1 : 1; uint64_t nand : 1; uint64_t reserved_20_63 : 44; #endif } cn52xx; struct cvmx_ciu_intx_en4_1_cn52xxp1 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_19_63 : 45; uint64_t mii1 : 1; /**< Second MII Interrupt */ uint64_t usb1 : 1; /**< Second USB Interrupt */ uint64_t uart2 : 1; /**< Third UART interrupt */ uint64_t reserved_4_15 : 12; uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */ #else uint64_t wdog : 4; uint64_t reserved_4_15 : 12; uint64_t uart2 : 1; uint64_t usb1 : 1; uint64_t mii1 : 1; uint64_t reserved_19_63 : 45; #endif } cn52xxp1; struct cvmx_ciu_intx_en4_1_cn56xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t wdog : 12; /**< Watchdog summary interrupt enable vector */ #else uint64_t wdog : 12; uint64_t reserved_12_63 : 52; #endif } cn56xx; struct cvmx_ciu_intx_en4_1_cn56xx cn56xxp1; struct cvmx_ciu_intx_en4_1_cn58xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */ #else uint64_t wdog : 16; uint64_t reserved_16_63 : 48; #endif } cn58xx; struct cvmx_ciu_intx_en4_1_cn58xx cn58xxp1; } cvmx_ciu_intx_en4_1_t; /** * cvmx_ciu_int#_en4_1_w1c * * Notes: * Write-1-to-clear version of the CIU_INTx_EN4_1 register * (Pass2 ONLY) */ typedef union { uint64_t u64; struct cvmx_ciu_intx_en4_1_w1c_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t nand : 1; /**< NAND Flash Controller */ uint64_t mii1 : 1; /**< Second MII Interrupt */ uint64_t usb1 : 1; /**< Second USB Interrupt */ uint64_t uart2 : 1; /**< Third UART interrupt */ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */ #else uint64_t wdog : 16; uint64_t uart2 : 1; uint64_t usb1 : 1; uint64_t mii1 : 1; uint64_t nand : 1; uint64_t reserved_20_63 : 44; #endif } s; struct cvmx_ciu_intx_en4_1_w1c_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t nand : 1; /**< NAND Flash Controller */ uint64_t mii1 : 1; /**< Second MII Interrupt */ uint64_t usb1 : 1; /**< Second USB Interrupt */ uint64_t uart2 : 1; /**< Third UART interrupt */ uint64_t reserved_4_15 : 12; uint64_t wdog : 4; /**< Watchdog summary interrupt 
enable vector */ #else uint64_t wdog : 4; uint64_t reserved_4_15 : 12; uint64_t uart2 : 1; uint64_t usb1 : 1; uint64_t mii1 : 1; uint64_t nand : 1; uint64_t reserved_20_63 : 44; #endif } cn52xx; struct cvmx_ciu_intx_en4_1_w1c_cn56xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t wdog : 12; /**< Watchdog summary interrupt enable vector */ #else uint64_t wdog : 12; uint64_t reserved_12_63 : 52; #endif } cn56xx; struct cvmx_ciu_intx_en4_1_w1c_cn58xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */ #else uint64_t wdog : 16; uint64_t reserved_16_63 : 48; #endif } cn58xx; } cvmx_ciu_intx_en4_1_w1c_t; /** * cvmx_ciu_int#_en4_1_w1s * * Notes: * Write-1-to-set version of the CIU_INTx_EN4_1 register * (Pass2 ONLY) */ typedef union { uint64_t u64; struct cvmx_ciu_intx_en4_1_w1s_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t nand : 1; /**< NAND Flash Controller */ uint64_t mii1 : 1; /**< Second MII Interrupt */ uint64_t usb1 : 1; /**< Second USB Interrupt */ uint64_t uart2 : 1; /**< Third UART interrupt */ uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */ #else uint64_t wdog : 16; uint64_t uart2 : 1; uint64_t usb1 : 1; uint64_t mii1 : 1; uint64_t nand : 1; uint64_t reserved_20_63 : 44; #endif } s; struct cvmx_ciu_intx_en4_1_w1s_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t nand : 1; /**< NAND Flash Controller */ uint64_t mii1 : 1; /**< Second MII Interrupt */ uint64_t usb1 : 1; /**< Second USB Interrupt */ uint64_t uart2 : 1; /**< Third UART interrupt */ uint64_t reserved_4_15 : 12; uint64_t wdog : 4; /**< Watchdog summary interrupt enable vector */ #else uint64_t wdog : 4; uint64_t reserved_4_15 : 12; uint64_t uart2 : 1; uint64_t usb1 : 1; uint64_t mii1 : 1; uint64_t nand : 1; uint64_t reserved_20_63 : 44; #endif } cn52xx; struct cvmx_ciu_intx_en4_1_w1s_cn56xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t wdog : 12; /**< Watchdog summary interrupt enable vector */ #else uint64_t wdog : 12; uint64_t reserved_12_63 : 52; #endif } cn56xx; struct cvmx_ciu_intx_en4_1_w1s_cn58xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t wdog : 16; /**< Watchdog summary interrupt enable vector */ #else uint64_t wdog : 16; uint64_t reserved_16_63 : 48; #endif } cn58xx; } cvmx_ciu_intx_en4_1_w1s_t; /** * cvmx_ciu_int#_sum0 */ typedef union { uint64_t u64; struct cvmx_ciu_intx_sum0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */ uint64_t mii : 1; /**< MII Interface Interrupt */ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */ uint64_t powiq : 1; /**< POW IQ interrupt */ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */ uint64_t mpi : 1; /**< MPI/SPI interrupt */ uint64_t pcm : 1; /**< PCM/TDM interrupt */ uint64_t usb : 1; /**< USB Interrupt */ uint64_t timer : 4; /**< General timer interrupts */ uint64_t key_zero : 1; /**< Key Zeroization interrupt KEY_ZERO will be set when the external ZERO_KEYS pin is sampled high. KEY_ZERO is cleared by SW */ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t gmx_drp : 2; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t wdog_sum : 1; /**< Watchdog summary PPs use CIU_INTx_SUM0 where x=0-31. PCI uses the CIU_INTx_SUM0 where x=32. 
Even INTx registers report WDOG to IP2 Odd INTx registers report WDOG to IP3 */ uint64_t pci_msi : 4; /**< PCI MSI [43] is the or of <63:48> [42] is the or of <47:32> [41] is the or of <31:16> [40] is the or of <15:0> */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-31 [33] is the or of <31:16> [32] is the or of <15:0> Two PCI internal interrupts for entry 32 CIU_PCI_INTA */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts 1 bit/group. A copy of the R/W1C bit in the POW. */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t wdog_sum : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 2; uint64_t ipd_drp : 1; uint64_t key_zero : 1; uint64_t timer : 4; uint64_t usb : 1; uint64_t pcm : 1; uint64_t mpi : 1; uint64_t twsi2 : 1; uint64_t powiq : 1; uint64_t ipdppthr : 1; uint64_t mii : 1; uint64_t bootdma : 1; #endif } s; struct cvmx_ciu_intx_sum0_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_59_63 : 5; uint64_t mpi : 1; /**< MPI/SPI interrupt */ uint64_t pcm : 1; /**< PCM/TDM interrupt */ uint64_t usb : 1; /**< USB interrupt */ uint64_t timer : 4; /**< General timer interrupts */ uint64_t reserved_51_51 : 1; uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t reserved_49_49 : 1; uint64_t gmx_drp : 1; /**< GMX packet drop */ uint64_t reserved_47_47 : 1; uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t wdog_sum : 1; /**< Watchdog summary PPs use CIU_INTx_SUM0 where x=0-1. PCI uses the CIU_INTx_SUM0 where x=32. Even INTx registers report WDOG to IP2 Odd INTx registers report WDOG to IP3 */ uint64_t pci_msi : 4; /**< PCI MSI [43] is the or of <63:48> [42] is the or of <47:32> [41] is the or of <31:16> [40] is the or of <15:0> */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-31 [33] is the or of <31:16> [32] is the or of <15:0> Two PCI internal interrupts for entry 32 CIU_PCI_INTA */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts 1 bit/group. A copy of the R/W1C bit in the POW. */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t wdog_sum : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t reserved_47_47 : 1; uint64_t gmx_drp : 1; uint64_t reserved_49_49 : 1; uint64_t ipd_drp : 1; uint64_t reserved_51_51 : 1; uint64_t timer : 4; uint64_t usb : 1; uint64_t pcm : 1; uint64_t mpi : 1; uint64_t reserved_59_63 : 5; #endif } cn30xx; struct cvmx_ciu_intx_sum0_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_59_63 : 5; uint64_t mpi : 1; /**< MPI/SPI interrupt */ uint64_t pcm : 1; /**< PCM/TDM interrupt */ uint64_t usb : 1; /**< USB interrupt */ uint64_t timer : 4; /**< General timer interrupts */ uint64_t reserved_51_51 : 1; uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t reserved_49_49 : 1; uint64_t gmx_drp : 1; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t wdog_sum : 1; /**< Watchdog summary PPs use CIU_INTx_SUM0 where x=0-3. PCI uses the CIU_INTx_SUM0 where x=32. 
Even INTx registers report WDOG to IP2 Odd INTx registers report WDOG to IP3 */ uint64_t pci_msi : 4; /**< PCI MSI [43] is the or of <63:48> [42] is the or of <47:32> [41] is the or of <31:16> [40] is the or of <15:0> */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-31 [33] is the or of <31:16> [32] is the or of <15:0> Two PCI internal interrupts for entry 32 CIU_PCI_INTA */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts 1 bit/group. A copy of the R/W1C bit in the POW. */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t wdog_sum : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 1; uint64_t reserved_49_49 : 1; uint64_t ipd_drp : 1; uint64_t reserved_51_51 : 1; uint64_t timer : 4; uint64_t usb : 1; uint64_t pcm : 1; uint64_t mpi : 1; uint64_t reserved_59_63 : 5; #endif } cn31xx; struct cvmx_ciu_intx_sum0_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_56_63 : 8; uint64_t timer : 4; /**< General timer interrupts */ uint64_t key_zero : 1; /**< Key Zeroization interrupt KEY_ZERO will be set when the external ZERO_KEYS pin is sampled high. KEY_ZERO is cleared by SW */ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t gmx_drp : 2; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t wdog_sum : 1; /**< Watchdog summary PPs use CIU_INTx_SUM0 where x=0-31. PCI uses the CIU_INTx_SUM0 where x=32. Even INTx registers report WDOG to IP2 Odd INTx registers report WDOG to IP3 */ uint64_t pci_msi : 4; /**< PCI MSI [43] is the or of <63:48> [42] is the or of <47:32> [41] is the or of <31:16> [40] is the or of <15:0> */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-31 [33] is the or of <31:16> [32] is the or of <15:0> Two PCI internal interrupts for entry 32 CIU_PCI_INTA */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts 1 bit/group. A copy of the R/W1C bit in the POW. 
*/ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t wdog_sum : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 2; uint64_t ipd_drp : 1; uint64_t key_zero : 1; uint64_t timer : 4; uint64_t reserved_56_63 : 8; #endif } cn38xx; struct cvmx_ciu_intx_sum0_cn38xx cn38xxp2; struct cvmx_ciu_intx_sum0_cn30xx cn50xx; struct cvmx_ciu_intx_sum0_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */ uint64_t mii : 1; /**< MII Interface Interrupt */ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */ uint64_t powiq : 1; /**< POW IQ interrupt */ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */ uint64_t reserved_57_58 : 2; uint64_t usb : 1; /**< USB Interrupt */ uint64_t timer : 4; /**< General timer interrupts */ uint64_t reserved_51_51 : 1; uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t reserved_49_49 : 1; uint64_t gmx_drp : 1; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t wdog_sum : 1; /**< SUM1&EN1 summary bit This read-only bit reads as a one whenever any CIU_INT_SUM1 bit is set and corresponding enable bit in CIU_INTx_EN is set, where x is the same as x in this CIU_INTx_SUM0. PPs use CIU_INTx_SUM0 where x=0-7. PCI uses the CIU_INTx_SUM0 where x=32. Even INTx registers report WDOG to IP2 Odd INTx registers report WDOG to IP3 Note that WDOG_SUM only summarizes the SUM/EN1 result and does not have a corresponding enable bit, so does not directly contribute to interrupts. */ uint64_t pci_msi : 4; /**< PCI MSI Refer to "Receiving Message-Signalled Interrupts" in the PCIe chapter of the spec */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D Refer to "Receiving Emulated INTA/INTB/ INTC/INTD" in the PCIe chapter of the spec */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-7 [33] is the or of <31:16> [32] is the or of <15:0> Two PCI internal interrupts for entry 32 CIU_PCI_INTA */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts 1 bit/group. A copy of the R/W1C bit in the POW. */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t wdog_sum : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 1; uint64_t reserved_49_49 : 1; uint64_t ipd_drp : 1; uint64_t reserved_51_51 : 1; uint64_t timer : 4; uint64_t usb : 1; uint64_t reserved_57_58 : 2; uint64_t twsi2 : 1; uint64_t powiq : 1; uint64_t ipdppthr : 1; uint64_t mii : 1; uint64_t bootdma : 1; #endif } cn52xx; struct cvmx_ciu_intx_sum0_cn52xx cn52xxp1; struct cvmx_ciu_intx_sum0_cn56xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */ uint64_t mii : 1; /**< MII Interface Interrupt */ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */ uint64_t powiq : 1; /**< POW IQ interrupt */ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */ uint64_t reserved_57_58 : 2; uint64_t usb : 1; /**< USB Interrupt */ uint64_t timer : 4; /**< General timer interrupts */ uint64_t key_zero : 1; /**< Key Zeroization interrupt KEY_ZERO will be set when the external ZERO_KEYS pin is sampled high. 
KEY_ZERO is cleared by SW */ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t gmx_drp : 2; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t wdog_sum : 1; /**< Watchdog summary PPs use CIU_INTx_SUM0 where x=0-23. PCI uses the CIU_INTx_SUM0 where x=32. Even INTx registers report WDOG to IP2 Odd INTx registers report WDOG to IP3 */ uint64_t pci_msi : 4; /**< PCI MSI Refer to "Receiving Message-Signalled Interrupts" in the PCIe chapter of the spec */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D Refer to "Receiving Emulated INTA/INTB/ INTC/INTD" in the PCIe chapter of the spec */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-23 [33] is the or of <31:16> [32] is the or of <15:0> Two PCI internal interrupts for entry 32 CIU_PCI_INTA */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts 1 bit/group. A copy of the R/W1C bit in the POW. */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t wdog_sum : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 2; uint64_t ipd_drp : 1; uint64_t key_zero : 1; uint64_t timer : 4; uint64_t usb : 1; uint64_t reserved_57_58 : 2; uint64_t twsi2 : 1; uint64_t powiq : 1; uint64_t ipdppthr : 1; uint64_t mii : 1; uint64_t bootdma : 1; #endif } cn56xx; struct cvmx_ciu_intx_sum0_cn56xx cn56xxp1; struct cvmx_ciu_intx_sum0_cn38xx cn58xx; struct cvmx_ciu_intx_sum0_cn38xx cn58xxp1; } cvmx_ciu_intx_sum0_t; /** * cvmx_ciu_int#_sum4 */ typedef union { uint64_t u64; struct cvmx_ciu_intx_sum4_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */ uint64_t mii : 1; /**< MII Interface Interrupt */ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */ uint64_t powiq : 1; /**< POW IQ interrupt */ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */ uint64_t mpi : 1; /**< MPI/SPI interrupt */ uint64_t pcm : 1; /**< PCM/TDM interrupt */ uint64_t usb : 1; /**< USB Interrupt */ uint64_t timer : 4; /**< General timer interrupts */ uint64_t key_zero : 1; /**< Key Zeroization interrupt KEY_ZERO will be set when the external ZERO_KEYS pin is sampled high. KEY_ZERO is cleared by SW */ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t gmx_drp : 2; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t wdog_sum : 1; /**< Watchdog summary These registers report WDOG to IP4 */ uint64_t pci_msi : 4; /**< PCI MSI [43] is the or of <63:48> [42] is the or of <47:32> [41] is the or of <31:16> [40] is the or of <15:0> */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-31 [33] is the or of <31:16> [32] is the or of <15:0> Two PCI internal interrupts for entry 32 CIU_PCI_INTA */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts 1 bit/group. A copy of the R/W1C bit in the POW. 
*/ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t wdog_sum : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 2; uint64_t ipd_drp : 1; uint64_t key_zero : 1; uint64_t timer : 4; uint64_t usb : 1; uint64_t pcm : 1; uint64_t mpi : 1; uint64_t twsi2 : 1; uint64_t powiq : 1; uint64_t ipdppthr : 1; uint64_t mii : 1; uint64_t bootdma : 1; #endif } s; struct cvmx_ciu_intx_sum4_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_59_63 : 5; uint64_t mpi : 1; /**< MPI/SPI interrupt */ uint64_t pcm : 1; /**< PCM/TDM interrupt */ uint64_t usb : 1; /**< USB interrupt */ uint64_t timer : 4; /**< General timer interrupts */ uint64_t reserved_51_51 : 1; uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t reserved_49_49 : 1; uint64_t gmx_drp : 1; /**< GMX packet drop */ uint64_t reserved_47_47 : 1; uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t wdog_sum : 1; /**< Watchdog summary PPs use CIU_INTx_SUM4 where x=0-1. */ uint64_t pci_msi : 4; /**< PCI MSI [43] is the or of <63:48> [42] is the or of <47:32> [41] is the or of <31:16> [40] is the or of <15:0> */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-31 [33] is the or of <31:16> [32] is the or of <15:0> Two PCI internal interrupts for entry 32 CIU_PCI_INTA */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts 1 bit/group. A copy of the R/W1C bit in the POW. */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t wdog_sum : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t reserved_47_47 : 1; uint64_t gmx_drp : 1; uint64_t reserved_49_49 : 1; uint64_t ipd_drp : 1; uint64_t reserved_51_51 : 1; uint64_t timer : 4; uint64_t usb : 1; uint64_t pcm : 1; uint64_t mpi : 1; uint64_t reserved_59_63 : 5; #endif } cn50xx; struct cvmx_ciu_intx_sum4_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */ uint64_t mii : 1; /**< MII Interface Interrupt */ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */ uint64_t powiq : 1; /**< POW IQ interrupt */ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */ uint64_t reserved_57_58 : 2; uint64_t usb : 1; /**< USB Interrupt */ uint64_t timer : 4; /**< General timer interrupts */ uint64_t reserved_51_51 : 1; uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t reserved_49_49 : 1; uint64_t gmx_drp : 1; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t wdog_sum : 1; /**< SUM1&EN4_1 summary bit This read-only bit reads as a one whenever any CIU_INT_SUM1 bit is set and corresponding enable bit in CIU_INTx_EN4_1 is set, where x is the same as x in this CIU_INTx_SUM4. PPs use CIU_INTx_SUM4 for IP4, where x=PPid. Note that WDOG_SUM only summarizes the SUM/EN4_1 result and does not have a corresponding enable bit, so does not directly contribute to interrupts. 
*/ uint64_t pci_msi : 4; /**< PCI MSI Refer to "Receiving Message-Signalled Interrupts" in the PCIe chapter of the spec */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D Refer to "Receiving Emulated INTA/INTB/ INTC/INTD" in the PCIe chapter of the spec */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-3 [33] is the or of <31:16> [32] is the or of <15:0> */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts 1 bit/group. A copy of the R/W1C bit in the POW. */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t wdog_sum : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 1; uint64_t reserved_49_49 : 1; uint64_t ipd_drp : 1; uint64_t reserved_51_51 : 1; uint64_t timer : 4; uint64_t usb : 1; uint64_t reserved_57_58 : 2; uint64_t twsi2 : 1; uint64_t powiq : 1; uint64_t ipdppthr : 1; uint64_t mii : 1; uint64_t bootdma : 1; #endif } cn52xx; struct cvmx_ciu_intx_sum4_cn52xx cn52xxp1; struct cvmx_ciu_intx_sum4_cn56xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t bootdma : 1; /**< Boot bus DMA engines Interrupt */ uint64_t mii : 1; /**< MII Interface Interrupt */ uint64_t ipdppthr : 1; /**< IPD per-port counter threshold interrupt */ uint64_t powiq : 1; /**< POW IQ interrupt */ uint64_t twsi2 : 1; /**< 2nd TWSI Interrupt */ uint64_t reserved_57_58 : 2; uint64_t usb : 1; /**< USB Interrupt */ uint64_t timer : 4; /**< General timer interrupts */ uint64_t key_zero : 1; /**< Key Zeroization interrupt KEY_ZERO will be set when the external ZERO_KEYS pin is sampled high. KEY_ZERO is cleared by SW */ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t gmx_drp : 2; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t wdog_sum : 1; /**< Watchdog summary These registers report WDOG to IP4 */ uint64_t pci_msi : 4; /**< PCI MSI Refer to "Receiving Message-Signalled Interrupts" in the PCIe chapter of the spec */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D Refer to "Receiving Emulated INTA/INTB/ INTC/INTD" in the PCIe chapter of the spec */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-11 [33] is the or of <31:16> [32] is the or of <15:0> */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts 1 bit/group. A copy of the R/W1C bit in the POW. */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t wdog_sum : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 2; uint64_t ipd_drp : 1; uint64_t key_zero : 1; uint64_t timer : 4; uint64_t usb : 1; uint64_t reserved_57_58 : 2; uint64_t twsi2 : 1; uint64_t powiq : 1; uint64_t ipdppthr : 1; uint64_t mii : 1; uint64_t bootdma : 1; #endif } cn56xx; struct cvmx_ciu_intx_sum4_cn56xx cn56xxp1; struct cvmx_ciu_intx_sum4_cn58xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_56_63 : 8; uint64_t timer : 4; /**< General timer interrupts */ uint64_t key_zero : 1; /**< Key Zeroization interrupt KEY_ZERO will be set when the external ZERO_KEYS pin is sampled high. 
KEY_ZERO is cleared by SW */ uint64_t ipd_drp : 1; /**< IPD QOS packet drop */ uint64_t gmx_drp : 2; /**< GMX packet drop */ uint64_t trace : 1; /**< L2C has the CMB trace buffer */ uint64_t rml : 1; /**< RML Interrupt */ uint64_t twsi : 1; /**< TWSI Interrupt */ uint64_t wdog_sum : 1; /**< Watchdog summary These registers report WDOG to IP4 */ uint64_t pci_msi : 4; /**< PCI MSI [43] is the or of <63:48> [42] is the or of <47:32> [41] is the or of <31:16> [40] is the or of <15:0> */ uint64_t pci_int : 4; /**< PCI INTA/B/C/D */ uint64_t uart : 2; /**< Two UART interrupts */ uint64_t mbox : 2; /**< Two mailbox interrupts for entries 0-31 [33] is the or of <31:16> [32] is the or of <15:0> Two PCI internal interrupts for entry 32 CIU_PCI_INTA */ uint64_t gpio : 16; /**< 16 GPIO interrupts */ uint64_t workq : 16; /**< 16 work queue interrupts 1 bit/group. A copy of the R/W1C bit in the POW. */ #else uint64_t workq : 16; uint64_t gpio : 16; uint64_t mbox : 2; uint64_t uart : 2; uint64_t pci_int : 4; uint64_t pci_msi : 4; uint64_t wdog_sum : 1; uint64_t twsi : 1; uint64_t rml : 1; uint64_t trace : 1; uint64_t gmx_drp : 2; uint64_t ipd_drp : 1; uint64_t key_zero : 1; uint64_t timer : 4; uint64_t reserved_56_63 : 8; #endif } cn58xx; struct cvmx_ciu_intx_sum4_cn58xx cn58xxp1; } cvmx_ciu_intx_sum4_t; /** * cvmx_ciu_int_sum1 */ typedef union { uint64_t u64; struct cvmx_ciu_int_sum1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t nand : 1; /**< NAND Flash Controller */ uint64_t mii1 : 1; /**< Second MII Interrupt */ uint64_t usb1 : 1; /**< Second USB Interrupt */ uint64_t uart2 : 1; /**< Third UART interrupt */ uint64_t wdog : 16; /**< 16 watchdog interrupts */ #else uint64_t wdog : 16; uint64_t uart2 : 1; uint64_t usb1 : 1; uint64_t mii1 : 1; uint64_t nand : 1; uint64_t reserved_20_63 : 44; #endif } s; struct cvmx_ciu_int_sum1_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t wdog : 1; /**< 1 watchdog interrupt */ #else uint64_t wdog : 1; uint64_t reserved_1_63 : 63; #endif } cn30xx; struct cvmx_ciu_int_sum1_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t wdog : 2; /**< 2 watchdog interrupts */ #else uint64_t wdog : 2; uint64_t reserved_2_63 : 62; #endif } cn31xx; struct cvmx_ciu_int_sum1_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t wdog : 16; /**< 16 watchdog interrupts */ #else uint64_t wdog : 16; uint64_t reserved_16_63 : 48; #endif } cn38xx; struct cvmx_ciu_int_sum1_cn38xx cn38xxp2; struct cvmx_ciu_int_sum1_cn31xx cn50xx; struct cvmx_ciu_int_sum1_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t nand : 1; /**< NAND Flash Controller */ uint64_t mii1 : 1; /**< Second MII Interrupt */ uint64_t usb1 : 1; /**< Second USB Interrupt */ uint64_t uart2 : 1; /**< Third UART interrupt */ uint64_t reserved_4_15 : 12; uint64_t wdog : 4; /**< 4 watchdog interrupts */ #else uint64_t wdog : 4; uint64_t reserved_4_15 : 12; uint64_t uart2 : 1; uint64_t usb1 : 1; uint64_t mii1 : 1; uint64_t nand : 1; uint64_t reserved_20_63 : 44; #endif } cn52xx; struct cvmx_ciu_int_sum1_cn52xxp1 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_19_63 : 45; uint64_t mii1 : 1; /**< Second MII Interrupt */ uint64_t usb1 : 1; /**< Second USB Interrupt */ uint64_t uart2 : 1; /**< Third UART interrupt */ uint64_t reserved_4_15 : 12; uint64_t wdog : 4; /**< 4 watchdog interrupts */ #else uint64_t wdog : 4; uint64_t reserved_4_15 : 12; uint64_t uart2 : 1; uint64_t usb1 : 1; uint64_t 
mii1 : 1; uint64_t reserved_19_63 : 45; #endif } cn52xxp1; struct cvmx_ciu_int_sum1_cn56xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t wdog : 12; /**< 12 watchdog interrupts */ #else uint64_t wdog : 12; uint64_t reserved_12_63 : 52; #endif } cn56xx; struct cvmx_ciu_int_sum1_cn56xx cn56xxp1; struct cvmx_ciu_int_sum1_cn38xx cn58xx; struct cvmx_ciu_int_sum1_cn38xx cn58xxp1; } cvmx_ciu_int_sum1_t; /** * cvmx_ciu_mbox_clr# */ typedef union { uint64_t u64; struct cvmx_ciu_mbox_clrx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t bits : 32; /**< On writes, clear the corresponding bit in the MBOX register; on reads, return the MBOX register */ #else uint64_t bits : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_ciu_mbox_clrx_s cn30xx; struct cvmx_ciu_mbox_clrx_s cn31xx; struct cvmx_ciu_mbox_clrx_s cn38xx; struct cvmx_ciu_mbox_clrx_s cn38xxp2; struct cvmx_ciu_mbox_clrx_s cn50xx; struct cvmx_ciu_mbox_clrx_s cn52xx; struct cvmx_ciu_mbox_clrx_s cn52xxp1; struct cvmx_ciu_mbox_clrx_s cn56xx; struct cvmx_ciu_mbox_clrx_s cn56xxp1; struct cvmx_ciu_mbox_clrx_s cn58xx; struct cvmx_ciu_mbox_clrx_s cn58xxp1; } cvmx_ciu_mbox_clrx_t; /** * cvmx_ciu_mbox_set# */ typedef union { uint64_t u64; struct cvmx_ciu_mbox_setx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t bits : 32; /**< On writes, set the corresponding bit in the MBOX register; on reads, return the MBOX register */ #else uint64_t bits : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_ciu_mbox_setx_s cn30xx; struct cvmx_ciu_mbox_setx_s cn31xx; struct cvmx_ciu_mbox_setx_s cn38xx; struct cvmx_ciu_mbox_setx_s cn38xxp2; struct cvmx_ciu_mbox_setx_s cn50xx; struct cvmx_ciu_mbox_setx_s cn52xx; struct cvmx_ciu_mbox_setx_s cn52xxp1; struct cvmx_ciu_mbox_setx_s cn56xx; struct cvmx_ciu_mbox_setx_s cn56xxp1; struct cvmx_ciu_mbox_setx_s cn58xx; struct cvmx_ciu_mbox_setx_s cn58xxp1; } cvmx_ciu_mbox_setx_t; /** * cvmx_ciu_nmi */ typedef union { uint64_t u64; struct cvmx_ciu_nmi_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t nmi : 16; /**< Send NMI pulse to PP vector */ #else uint64_t nmi : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_ciu_nmi_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t nmi : 1; /**< Send NMI pulse to PP vector */ #else uint64_t nmi : 1; uint64_t reserved_1_63 : 63; #endif } cn30xx; struct cvmx_ciu_nmi_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t nmi : 2; /**< Send NMI pulse to PP vector */ #else uint64_t nmi : 2; uint64_t reserved_2_63 : 62; #endif } cn31xx; struct cvmx_ciu_nmi_s cn38xx; struct cvmx_ciu_nmi_s cn38xxp2; struct cvmx_ciu_nmi_cn31xx cn50xx; struct cvmx_ciu_nmi_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t nmi : 4; /**< Send NMI pulse to PP vector */ #else uint64_t nmi : 4; uint64_t reserved_4_63 : 60; #endif } cn52xx; struct cvmx_ciu_nmi_cn52xx cn52xxp1; struct cvmx_ciu_nmi_cn56xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t nmi : 12; /**< Send NMI pulse to PP vector */ #else uint64_t nmi : 12; uint64_t reserved_12_63 : 52; #endif } cn56xx; struct cvmx_ciu_nmi_cn56xx cn56xxp1; struct cvmx_ciu_nmi_s cn58xx; struct cvmx_ciu_nmi_s cn58xxp1; } cvmx_ciu_nmi_t; /** * cvmx_ciu_pci_inta */ typedef union { uint64_t u64; struct cvmx_ciu_pci_inta_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t intr : 2; /**< PCI interrupt These bits are observed in
CIU_INT32_SUM0<33:32> */ #else uint64_t intr : 2; uint64_t reserved_2_63 : 62; #endif } s; struct cvmx_ciu_pci_inta_s cn30xx; struct cvmx_ciu_pci_inta_s cn31xx; struct cvmx_ciu_pci_inta_s cn38xx; struct cvmx_ciu_pci_inta_s cn38xxp2; struct cvmx_ciu_pci_inta_s cn50xx; struct cvmx_ciu_pci_inta_s cn52xx; struct cvmx_ciu_pci_inta_s cn52xxp1; struct cvmx_ciu_pci_inta_s cn56xx; struct cvmx_ciu_pci_inta_s cn56xxp1; struct cvmx_ciu_pci_inta_s cn58xx; struct cvmx_ciu_pci_inta_s cn58xxp1; } cvmx_ciu_pci_inta_t; /** * cvmx_ciu_pp_dbg */ typedef union { uint64_t u64; struct cvmx_ciu_pp_dbg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t ppdbg : 16; /**< Debug[DM] value for each PP, indicating whether the PPs are in debug mode or not */ #else uint64_t ppdbg : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_ciu_pp_dbg_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t ppdbg : 1; /**< Debug[DM] value for each PP, indicating whether the PPs are in debug mode or not */ #else uint64_t ppdbg : 1; uint64_t reserved_1_63 : 63; #endif } cn30xx; struct cvmx_ciu_pp_dbg_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t ppdbg : 2; /**< Debug[DM] value for each PP, indicating whether the PPs are in debug mode or not */ #else uint64_t ppdbg : 2; uint64_t reserved_2_63 : 62; #endif } cn31xx; struct cvmx_ciu_pp_dbg_s cn38xx; struct cvmx_ciu_pp_dbg_s cn38xxp2; struct cvmx_ciu_pp_dbg_cn31xx cn50xx; struct cvmx_ciu_pp_dbg_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t ppdbg : 4; /**< Debug[DM] value for each PP, indicating whether the PPs are in debug mode or not */ #else uint64_t ppdbg : 4; uint64_t reserved_4_63 : 60; #endif } cn52xx; struct cvmx_ciu_pp_dbg_cn52xx cn52xxp1; struct cvmx_ciu_pp_dbg_cn56xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t ppdbg : 12; /**< Debug[DM] value for each PP, indicating whether the PPs are in debug mode or not */ #else uint64_t ppdbg : 12; uint64_t reserved_12_63 : 52; #endif } cn56xx; struct cvmx_ciu_pp_dbg_cn56xx cn56xxp1; struct cvmx_ciu_pp_dbg_s cn58xx; struct cvmx_ciu_pp_dbg_s cn58xxp1; } cvmx_ciu_pp_dbg_t; /** * cvmx_ciu_pp_poke# * * Notes: * Any write to a CIU_PP_POKE register clears any pending interrupt generated * by the associated watchdog, resets the CIU_WDOG[STATE] field, and sets * CIU_WDOG[CNT] to be (CIU_WDOG[LEN] << 8). * * Reads of this register return the associated CIU_WDOG register. */ typedef union { uint64_t u64; struct cvmx_ciu_pp_pokex_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t poke : 64; /**< Reserved */ #else uint64_t poke : 64; #endif } s; struct cvmx_ciu_pp_pokex_s cn30xx; struct cvmx_ciu_pp_pokex_s cn31xx; struct cvmx_ciu_pp_pokex_s cn38xx; struct cvmx_ciu_pp_pokex_s cn38xxp2; struct cvmx_ciu_pp_pokex_s cn50xx; struct cvmx_ciu_pp_pokex_s cn52xx; struct cvmx_ciu_pp_pokex_s cn52xxp1; struct cvmx_ciu_pp_pokex_s cn56xx; struct cvmx_ciu_pp_pokex_s cn56xxp1; struct cvmx_ciu_pp_pokex_s cn58xx; struct cvmx_ciu_pp_pokex_s cn58xxp1; } cvmx_ciu_pp_pokex_t; /** * cvmx_ciu_pp_rst * * Contains the reset control for each PP. Value of '1' will hold a PP in reset, '0' will release. * Resets to 0xffff when PCI boot is enabled, 0xfffe otherwise.
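 * As an illustrative sketch only (not SDK-canonical code; assumes the cvmx_read_csr()/cvmx_write_csr()
 * accessors and the CVMX_CIU_PP_RST address macro from the companion cvmx headers), releasing PP1
 * from reset could look like:
 *   cvmx_ciu_pp_rst_t pp_rst;
 *   pp_rst.u64 = cvmx_read_csr(CVMX_CIU_PP_RST);
 *   pp_rst.s.rst &= ~1ull;   (bit 0 of the RST field maps to PP1)
 *   cvmx_write_csr(CVMX_CIU_PP_RST, pp_rst.u64);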
*/ typedef union { uint64_t u64; struct cvmx_ciu_pp_rst_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t rst : 15; /**< PP Rst for PPs 15-1 */ uint64_t rst0 : 1; /**< PP Rst for PP0 depends on standalone mode */ #else uint64_t rst0 : 1; uint64_t rst : 15; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_ciu_pp_rst_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t rst0 : 1; /**< PP Rst for PP0 depends on standalone mode */ #else uint64_t rst0 : 1; uint64_t reserved_1_63 : 63; #endif } cn30xx; struct cvmx_ciu_pp_rst_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t rst : 1; /**< PP Rst for PP1 */ uint64_t rst0 : 1; /**< PP Rst for PP0 depends on standalone mode */ #else uint64_t rst0 : 1; uint64_t rst : 1; uint64_t reserved_2_63 : 62; #endif } cn31xx; struct cvmx_ciu_pp_rst_s cn38xx; struct cvmx_ciu_pp_rst_s cn38xxp2; struct cvmx_ciu_pp_rst_cn31xx cn50xx; struct cvmx_ciu_pp_rst_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t rst : 3; /**< PP Rst for PPs 3-1 */ uint64_t rst0 : 1; /**< PP Rst for PP0 depends on standalone mode */ #else uint64_t rst0 : 1; uint64_t rst : 3; uint64_t reserved_4_63 : 60; #endif } cn52xx; struct cvmx_ciu_pp_rst_cn52xx cn52xxp1; struct cvmx_ciu_pp_rst_cn56xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t rst : 11; /**< PP Rst for PPs 11-1 */ uint64_t rst0 : 1; /**< PP Rst for PP0 depends on standalone mode */ #else uint64_t rst0 : 1; uint64_t rst : 11; uint64_t reserved_12_63 : 52; #endif } cn56xx; struct cvmx_ciu_pp_rst_cn56xx cn56xxp1; struct cvmx_ciu_pp_rst_s cn58xx; struct cvmx_ciu_pp_rst_s cn58xxp1; } cvmx_ciu_pp_rst_t; /** * cvmx_ciu_qlm_dcok */ typedef union { uint64_t u64; struct cvmx_ciu_qlm_dcok_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t qlm_dcok : 4; /**< Re-assert dcok for each QLM. The value in this field is "anded" with the pll_dcok pin and then sent to each QLM (0..3). */ #else uint64_t qlm_dcok : 4; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_ciu_qlm_dcok_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t qlm_dcok : 2; /**< Re-assert dcok for each QLM. The value in this field is "anded" with the pll_dcok pin and then sent to each QLM (0..1). */ #else uint64_t qlm_dcok : 2; uint64_t reserved_2_63 : 62; #endif } cn52xx; struct cvmx_ciu_qlm_dcok_cn52xx cn52xxp1; struct cvmx_ciu_qlm_dcok_s cn56xx; struct cvmx_ciu_qlm_dcok_s cn56xxp1; } cvmx_ciu_qlm_dcok_t; /** * cvmx_ciu_qlm_jtgc */ typedef union { uint64_t u64; struct cvmx_ciu_qlm_jtgc_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_11_63 : 53; uint64_t clk_div : 3; /**< Clock divider for QLM JTAG operations. eclk is divided by 2^(CLK_DIV + 2) */ uint64_t reserved_6_7 : 2; uint64_t mux_sel : 2; /**< Selects which QLM JTAG shift out is shifted into the QLM JTAG shift register: CIU_QLM_JTGD[SHFT_REG] */ uint64_t bypass : 4; /**< Selects which QLM JTAG shift chains are bypassed by the QLM JTAG data register (CIU_QLM_JTGD) (one bit per QLM) */ #else uint64_t bypass : 4; uint64_t mux_sel : 2; uint64_t reserved_6_7 : 2; uint64_t clk_div : 3; uint64_t reserved_11_63 : 53; #endif } s; struct cvmx_ciu_qlm_jtgc_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_11_63 : 53; uint64_t clk_div : 3; /**< Clock divider for QLM JTAG operations.
eclk is divided by 2^(CLK_DIV + 2) */ uint64_t reserved_5_7 : 3; uint64_t mux_sel : 1; /**< Selects which QLM JTAG shift out is shifted into the QLM JTAG shift register: CIU_QLM_JTGD[SHFT_REG] */ uint64_t reserved_2_3 : 2; uint64_t bypass : 2; /**< Selects which QLM JTAG shift chains are bypassed by the QLM JTAG data register (CIU_QLM_JTGD) (one bit per QLM) */ #else uint64_t bypass : 2; uint64_t reserved_2_3 : 2; uint64_t mux_sel : 1; uint64_t reserved_5_7 : 3; uint64_t clk_div : 3; uint64_t reserved_11_63 : 53; #endif } cn52xx; struct cvmx_ciu_qlm_jtgc_cn52xx cn52xxp1; struct cvmx_ciu_qlm_jtgc_s cn56xx; struct cvmx_ciu_qlm_jtgc_s cn56xxp1; } cvmx_ciu_qlm_jtgc_t; /** * cvmx_ciu_qlm_jtgd */ typedef union { uint64_t u64; struct cvmx_ciu_qlm_jtgd_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t capture : 1; /**< Perform JTAG capture operation (self-clearing when op completes) */ uint64_t shift : 1; /**< Perform JTAG shift operation (self-clearing when op completes) */ uint64_t update : 1; /**< Perform JTAG update operation (self-clearing when op completes) */ uint64_t reserved_44_60 : 17; uint64_t select : 4; /**< Selects which QLM JTAG shift chains the JTAG operations are performed on */ uint64_t reserved_37_39 : 3; uint64_t shft_cnt : 5; /**< QLM JTAG shift count (encoded in -1 notation) */ uint64_t shft_reg : 32; /**< QLM JTAG shift register */ #else uint64_t shft_reg : 32; uint64_t shft_cnt : 5; uint64_t reserved_37_39 : 3; uint64_t select : 4; uint64_t reserved_44_60 : 17; uint64_t update : 1; uint64_t shift : 1; uint64_t capture : 1; #endif } s; struct cvmx_ciu_qlm_jtgd_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t capture : 1; /**< Perform JTAG capture operation (self-clearing when op completes) */ uint64_t shift : 1; /**< Perform JTAG shift operation (self-clearing when op completes) */ uint64_t update : 1; /**< Perform JTAG update operation (self-clearing when op completes) */ uint64_t reserved_42_60 : 19; uint64_t select : 2; /**< Selects which QLM JTAG shift chains the JTAG operations are performed on */ uint64_t reserved_37_39 : 3; uint64_t shft_cnt : 5; /**< QLM JTAG shift count (encoded in -1 notation) */ uint64_t shft_reg : 32; /**< QLM JTAG shift register */ #else uint64_t shft_reg : 32; uint64_t shft_cnt : 5; uint64_t reserved_37_39 : 3; uint64_t select : 2; uint64_t reserved_42_60 : 19; uint64_t update : 1; uint64_t shift : 1; uint64_t capture : 1; #endif } cn52xx; struct cvmx_ciu_qlm_jtgd_cn52xx cn52xxp1; struct cvmx_ciu_qlm_jtgd_s cn56xx; struct cvmx_ciu_qlm_jtgd_cn56xxp1 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t capture : 1; /**< Perform JTAG capture operation (self-clearing when op completes) */ uint64_t shift : 1; /**< Perform JTAG shift operation (self-clearing when op completes) */ uint64_t update : 1; /**< Perform JTAG update operation (self-clearing when op completes) */ uint64_t reserved_37_60 : 24; uint64_t shft_cnt : 5; /**< QLM JTAG shift count (encoded in -1 notation) */ uint64_t shft_reg : 32; /**< QLM JTAG shift register */ #else uint64_t shft_reg : 32; uint64_t shft_cnt : 5; uint64_t reserved_37_60 : 24; uint64_t update : 1; uint64_t shift : 1; uint64_t capture : 1; #endif } cn56xxp1; } cvmx_ciu_qlm_jtgd_t; /** * cvmx_ciu_soft_bist */ typedef union { uint64_t u64; struct cvmx_ciu_soft_bist_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t soft_bist : 1; /**< Run BIST on soft reset. 
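                                                         To request BIST on the next soft reset, SW sets this
                                                         bit before triggering CIU_SOFT_RST. A minimal sketch
                                                         (assuming the cvmx_write_csr() accessor and the
                                                         CVMX_CIU_SOFT_BIST address macro from the companion
                                                         cvmx headers):
                                                           cvmx_ciu_soft_bist_t bist;
                                                           bist.u64 = 0;
                                                           bist.s.soft_bist = 1;
                                                           cvmx_write_csr(CVMX_CIU_SOFT_BIST, bist.u64);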
*/ #else uint64_t soft_bist : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_ciu_soft_bist_s cn30xx; struct cvmx_ciu_soft_bist_s cn31xx; struct cvmx_ciu_soft_bist_s cn38xx; struct cvmx_ciu_soft_bist_s cn38xxp2; struct cvmx_ciu_soft_bist_s cn50xx; struct cvmx_ciu_soft_bist_s cn52xx; struct cvmx_ciu_soft_bist_s cn52xxp1; struct cvmx_ciu_soft_bist_s cn56xx; struct cvmx_ciu_soft_bist_s cn56xxp1; struct cvmx_ciu_soft_bist_s cn58xx; struct cvmx_ciu_soft_bist_s cn58xxp1; } cvmx_ciu_soft_bist_t; /** * cvmx_ciu_soft_prst */ typedef union { uint64_t u64; struct cvmx_ciu_soft_prst_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_3_63 : 61; uint64_t host64 : 1; /**< PCX Host Mode Device Capability (0=32b/1=64b) */ uint64_t npi : 1; /**< When PCI soft reset is asserted, also reset the NPI and PNI logic */ uint64_t soft_prst : 1; /**< Reset the PCI bus. Only works when OCTEON is configured as a HOST. When OCTEON is a PCI host (i.e. when PCI_HOST_MODE = 1), this controls PCI_RST_L. Refer to section 10.11.1. */ #else uint64_t soft_prst : 1; uint64_t npi : 1; uint64_t host64 : 1; uint64_t reserved_3_63 : 61; #endif } s; struct cvmx_ciu_soft_prst_s cn30xx; struct cvmx_ciu_soft_prst_s cn31xx; struct cvmx_ciu_soft_prst_s cn38xx; struct cvmx_ciu_soft_prst_s cn38xxp2; struct cvmx_ciu_soft_prst_s cn50xx; struct cvmx_ciu_soft_prst_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t soft_prst : 1; /**< Reset the PCI bus. Only works when OCTEON is configured as a HOST. When OCTEON is a PCI host (i.e. when PCI_HOST_MODE = 1), this controls PCI_RST_L. Refer to section 10.11.1. */ #else uint64_t soft_prst : 1; uint64_t reserved_1_63 : 63; #endif } cn52xx; struct cvmx_ciu_soft_prst_cn52xx cn52xxp1; struct cvmx_ciu_soft_prst_cn52xx cn56xx; struct cvmx_ciu_soft_prst_cn52xx cn56xxp1; struct cvmx_ciu_soft_prst_s cn58xx; struct cvmx_ciu_soft_prst_s cn58xxp1; } cvmx_ciu_soft_prst_t; /** * cvmx_ciu_soft_prst1 */ typedef union { uint64_t u64; struct cvmx_ciu_soft_prst1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t soft_prst : 1; /**< Reset the PCI bus. Only works when OCTEON is configured as a HOST. When OCTEON is a PCI host (i.e. when PCI_HOST_MODE = 1), this controls PCI_RST_L. Refer to section 10.11.1. */ #else uint64_t soft_prst : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_ciu_soft_prst1_s cn52xx; struct cvmx_ciu_soft_prst1_s cn52xxp1; struct cvmx_ciu_soft_prst1_s cn56xx; struct cvmx_ciu_soft_prst1_s cn56xxp1; } cvmx_ciu_soft_prst1_t; /** * cvmx_ciu_soft_rst */ typedef union { uint64_t u64; struct cvmx_ciu_soft_rst_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t soft_rst : 1; /**< Resets Octeon. When soft resetting Octeon from a remote PCI host, always read CIU_SOFT_RST (and wait for result) before writing SOFT_RST to '1'.
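                                                         A minimal sketch of that sequence (assumes the
                                                         cvmx_read_csr()/cvmx_write_csr() accessors and the
                                                         CVMX_CIU_SOFT_RST address macro from the companion
                                                         cvmx headers; the read is commonly done to flush
                                                         posted PCI writes):
                                                           cvmx_ciu_soft_rst_t rst;
                                                           rst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_RST);   (read first, wait for the result)
                                                           rst.s.soft_rst = 1;
                                                           cvmx_write_csr(CVMX_CIU_SOFT_RST, rst.u64);   (the chip resets here)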
*/ #else uint64_t soft_rst : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_ciu_soft_rst_s cn30xx; struct cvmx_ciu_soft_rst_s cn31xx; struct cvmx_ciu_soft_rst_s cn38xx; struct cvmx_ciu_soft_rst_s cn38xxp2; struct cvmx_ciu_soft_rst_s cn50xx; struct cvmx_ciu_soft_rst_s cn52xx; struct cvmx_ciu_soft_rst_s cn52xxp1; struct cvmx_ciu_soft_rst_s cn56xx; struct cvmx_ciu_soft_rst_s cn56xxp1; struct cvmx_ciu_soft_rst_s cn58xx; struct cvmx_ciu_soft_rst_s cn58xxp1; } cvmx_ciu_soft_rst_t; /** * cvmx_ciu_tim# */ typedef union { uint64_t u64; struct cvmx_ciu_timx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_37_63 : 27; uint64_t one_shot : 1; /**< One-shot mode */ uint64_t len : 36; /**< Timeout length in core clock cycles Periodic interrupts will occur every LEN+1 core clock cycles when ONE_SHOT==0 Timer disabled when LEN==0 */ #else uint64_t len : 36; uint64_t one_shot : 1; uint64_t reserved_37_63 : 27; #endif } s; struct cvmx_ciu_timx_s cn30xx; struct cvmx_ciu_timx_s cn31xx; struct cvmx_ciu_timx_s cn38xx; struct cvmx_ciu_timx_s cn38xxp2; struct cvmx_ciu_timx_s cn50xx; struct cvmx_ciu_timx_s cn52xx; struct cvmx_ciu_timx_s cn52xxp1; struct cvmx_ciu_timx_s cn56xx; struct cvmx_ciu_timx_s cn56xxp1; struct cvmx_ciu_timx_s cn58xx; struct cvmx_ciu_timx_s cn58xxp1; } cvmx_ciu_timx_t; /** * cvmx_ciu_wdog# */ typedef union { uint64_t u64; struct cvmx_ciu_wdogx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_46_63 : 18; uint64_t gstopen : 1; /**< GSTOPEN */ uint64_t dstop : 1; /**< DSTOP */ uint64_t cnt : 24; /**< Number of 256-cycle intervals until next watchdog expiration. Cleared on write to associated CIU_PP_POKE register. */ uint64_t len : 16; /**< Watchdog time expiration length The 16 bits of LEN represent the most significant bits of a 24 bit decrementer that decrements every 256 cycles. LEN must be set > 0 */ uint64_t state : 2; /**< Watchdog state number of watchdog time expirations since last PP poke. Cleared on write to associated CIU_PP_POKE register. */ uint64_t mode : 2; /**< Watchdog mode 0 = Off 1 = Interrupt Only 2 = Interrupt + NMI 3 = Interrupt + NMI + Soft-Reset */ #else uint64_t mode : 2; uint64_t state : 2; uint64_t len : 16; uint64_t cnt : 24; uint64_t dstop : 1; uint64_t gstopen : 1; uint64_t reserved_46_63 : 18; #endif } s; struct cvmx_ciu_wdogx_s cn30xx; struct cvmx_ciu_wdogx_s cn31xx; struct cvmx_ciu_wdogx_s cn38xx; struct cvmx_ciu_wdogx_s cn38xxp2; struct cvmx_ciu_wdogx_s cn50xx; struct cvmx_ciu_wdogx_s cn52xx; struct cvmx_ciu_wdogx_s cn52xxp1; struct cvmx_ciu_wdogx_s cn56xx; struct cvmx_ciu_wdogx_s cn56xxp1; struct cvmx_ciu_wdogx_s cn58xx; struct cvmx_ciu_wdogx_s cn58xxp1; } cvmx_ciu_wdogx_t; /** * cvmx_dbg_data * * DBG_DATA = Debug Data Register * * Value returned on the debug-data lines from the RSLs */ typedef union { uint64_t u64; struct cvmx_dbg_data_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_23_63 : 41; uint64_t c_mul : 5; /**< C_MUL pins sampled at DCOK assertion */ uint64_t dsel_ext : 1; /**< Allows changes in the external pins to set the debug select value. */ uint64_t data : 17; /**< Value on the debug data lines. 
*/ #else uint64_t data : 17; uint64_t dsel_ext : 1; uint64_t c_mul : 5; uint64_t reserved_23_63 : 41; #endif } s; struct cvmx_dbg_data_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_31_63 : 33; uint64_t pll_mul : 3; /**< pll_mul pins sampled at DCOK assertion */ uint64_t reserved_23_27 : 5; uint64_t c_mul : 5; /**< Core PLL multiplier sampled at DCOK assertion */ uint64_t dsel_ext : 1; /**< Allows changes in the external pins to set the debug select value. */ uint64_t data : 17; /**< Value on the debug data lines. */ #else uint64_t data : 17; uint64_t dsel_ext : 1; uint64_t c_mul : 5; uint64_t reserved_23_27 : 5; uint64_t pll_mul : 3; uint64_t reserved_31_63 : 33; #endif } cn30xx; struct cvmx_dbg_data_cn30xx cn31xx; struct cvmx_dbg_data_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_29_63 : 35; uint64_t d_mul : 4; /**< D_MUL pins sampled on DCOK assertion */ uint64_t dclk_mul2 : 1; /**< Should always be set for fast DDR-II operation */ uint64_t cclk_div2 : 1; /**< Should always be clear for fast core clock */ uint64_t c_mul : 5; /**< C_MUL pins sampled at DCOK assertion */ uint64_t dsel_ext : 1; /**< Allows changes in the external pins to set the debug select value. */ uint64_t data : 17; /**< Value on the debug data lines. */ #else uint64_t data : 17; uint64_t dsel_ext : 1; uint64_t c_mul : 5; uint64_t cclk_div2 : 1; uint64_t dclk_mul2 : 1; uint64_t d_mul : 4; uint64_t reserved_29_63 : 35; #endif } cn38xx; struct cvmx_dbg_data_cn38xx cn38xxp2; struct cvmx_dbg_data_cn30xx cn50xx; struct cvmx_dbg_data_cn58xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_29_63 : 35; uint64_t rem : 6; /**< Remaining debug_select pins sampled at DCOK */ uint64_t c_mul : 5; /**< C_MUL pins sampled at DCOK assertion */ uint64_t dsel_ext : 1; /**< Allows changes in the external pins to set the debug select value. */ uint64_t data : 17; /**< Value on the debug data lines. 
*/ #else uint64_t data : 17; uint64_t dsel_ext : 1; uint64_t c_mul : 5; uint64_t rem : 6; uint64_t reserved_29_63 : 35; #endif } cn58xx; struct cvmx_dbg_data_cn58xx cn58xxp1; } cvmx_dbg_data_t; /** * cvmx_dfa_bst0 * * DFA_BST0 = DFA Bist Status * * Description: */ typedef union { uint64_t u64; struct cvmx_dfa_bst0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t rdf : 16; /**< Bist Results for RDF[3:0] RAM(s) - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t pdf : 16; /**< Bist Results for PDF[3:0] RAM(s) - 0: GOOD (or bist in progress/never run) - 1: BAD */ #else uint64_t pdf : 16; uint64_t rdf : 16; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_dfa_bst0_s cn31xx; struct cvmx_dfa_bst0_s cn38xx; struct cvmx_dfa_bst0_s cn38xxp2; struct cvmx_dfa_bst0_cn58xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t rdf : 4; /**< Bist Results for RDF[3:0] RAM(s) - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t reserved_4_15 : 12; uint64_t pdf : 4; /**< Bist Results for PDF[3:0] RAM(s) - 0: GOOD (or bist in progress/never run) - 1: BAD */ #else uint64_t pdf : 4; uint64_t reserved_4_15 : 12; uint64_t rdf : 4; uint64_t reserved_20_63 : 44; #endif } cn58xx; struct cvmx_dfa_bst0_cn58xx cn58xxp1; } cvmx_dfa_bst0_t; /** * cvmx_dfa_bst1 * * DFA_BST1 = DFA Bist Status * * Description: */ typedef union { uint64_t u64; struct cvmx_dfa_bst1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_23_63 : 41; uint64_t crq : 1; /**< Bist Results for CRQ RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t ifu : 1; /**< Bist Results for IFU RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t gfu : 1; /**< Bist Results for GFU RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t drf : 1; /**< Bist Results for DRF RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t crf : 1; /**< Bist Results for CRF RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t p0_bwb : 1; /**< Bist Results for P0_BWB RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t p1_bwb : 1; /**< Bist Results for P1_BWB RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t p0_brf : 8; /**< Bist Results for P0_BRF RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t p1_brf : 8; /**< Bist Results for P1_BRF RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ #else uint64_t p1_brf : 8; uint64_t p0_brf : 8; uint64_t p1_bwb : 1; uint64_t p0_bwb : 1; uint64_t crf : 1; uint64_t drf : 1; uint64_t gfu : 1; uint64_t ifu : 1; uint64_t crq : 1; uint64_t reserved_23_63 : 41; #endif } s; struct cvmx_dfa_bst1_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_23_63 : 41; uint64_t crq : 1; /**< Bist Results for CRQ RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t ifu : 1; /**< Bist Results for IFU RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t gfu : 1; /**< Bist Results for GFU RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t drf : 1; /**< Bist Results for DRF RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t crf : 1; /**< Bist Results for CRF RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t reserved_0_17 : 18; #else uint64_t reserved_0_17 : 18; uint64_t crf : 1; uint64_t drf : 1; uint64_t gfu : 1; uint64_t ifu : 1; uint64_t crq : 1; uint64_t reserved_23_63 : 41; #endif } cn31xx; struct cvmx_dfa_bst1_s cn38xx; struct cvmx_dfa_bst1_s cn38xxp2; struct cvmx_dfa_bst1_cn58xx { 
#if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_23_63 : 41; uint64_t crq : 1; /**< Bist Results for CRQ RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t ifu : 1; /**< Bist Results for IFU RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t gfu : 1; /**< Bist Results for GFU RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t reserved_19_19 : 1; uint64_t crf : 1; /**< Bist Results for CRF RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t p0_bwb : 1; /**< Bist Results for P0_BWB RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t p1_bwb : 1; /**< Bist Results for P1_BWB RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t p0_brf : 8; /**< Bist Results for P0_BRF RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t p1_brf : 8; /**< Bist Results for P1_BRF RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ #else uint64_t p1_brf : 8; uint64_t p0_brf : 8; uint64_t p1_bwb : 1; uint64_t p0_bwb : 1; uint64_t crf : 1; uint64_t reserved_19_19 : 1; uint64_t gfu : 1; uint64_t ifu : 1; uint64_t crq : 1; uint64_t reserved_23_63 : 41; #endif } cn58xx; struct cvmx_dfa_bst1_cn58xx cn58xxp1; } cvmx_dfa_bst1_t; /** * cvmx_dfa_cfg * * Specify the RSL base addresses for the block * * DFA_CFG = DFA Configuration * * Description: */ typedef union { uint64_t u64; struct cvmx_dfa_cfg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t nrpl_ena : 1; /**< When set, allows the per-node replication feature to be enabled. In 36-bit mode: The IWORD0[31:30]=SNREPL field AND bits [21:20] of the Next Node ptr are used in generating the next node address (see OCTEON HRM - DFA Chapter for pseudo-code of DTE next node address generation). NOTE: When NRPL_ENA=1 and IWORD0[TY]=1(36b mode), (regardless of IWORD0[NRPLEN]), the Resultant Word1+ [[47:44],[23:20]] = Next Node's [27:20] bits. This allows SW to use the RESERVED bits of the final node for SW caching. Also, if required, SW will use [22:21]=Node Replication to re-start the same graph walk (if the graph walk prematurely terminated (ie: DATA_GONE)). In 18-bit mode: The IWORD0[31:30]=SNREPL field AND bits [16:14] of the Next Node ptr are used in generating the next node address (see OCTEON HRM - DFA Chapter for pseudo-code of DTE next node address generation). If (IWORD0[NREPLEN]=1 and DFA_CFG[NRPL_ENA]=1) [ If next node ptr[16] is set [ next node ptr[15:14] indicates the next node repl next node ptr[13:0] indicates the position of the node relative to the first normal node (i.e. IWORD3[Msize] must be added to get the final node) ] else If next node ptr[16] is not set [ next node ptr[15:0] indicates the next node id next node repl = 0 ] ] NOTE: For 18b node replication, MAX node space=64KB(2^16) is used in detecting terminal node space(see HRM for full description). NOTE: The DFA graphs MUST BE built/written to DFA LLM memory aware of the "per-node" replication. */ uint64_t nxor_ena : 1; /**< When set, allows the DTE Instruction IWORD0[NXOREN] to be used to enable/disable the per-node address 'scramble' of the LLM address to lessen the effects of bank conflicts. If IWORD0[NXOREN] is also set, then: In 36-bit mode: The node_Id[7:0] 8-bit value is XORed against the LLM address addr[9:2]. In 18-bit mode: The node_id[6:0] 7-bit value is XORed against the LLM address addr[8:2]. (note: we don't address scramble outside the mode's node space). NOTE: The DFA graphs MUST BE built/written to DFA LLM memory aware of the "per-node" address scramble.
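                                                         As a worked illustration of the 36-bit case: a node
                                                         with node_id[7:0] = 0xA5 whose unscrambled LLM address
                                                         has addr[9:2] = 0x8C is accessed with
                                                         addr[9:2] XOR 0xA5 = 0x29 when the scramble is in
                                                         effect.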
NOTE: The address 'scramble' occurs for BOTH DFA LLM graph read/write operations. */ uint64_t gxor_ena : 1; /**< When set, the DTE Instruction IWORD0[GXOR] field is used to 'scramble' the LLM address to lessen the effects of bank conflicts. In 36-bit mode: The GXOR[7:0] 8-bit value is XORed against the LLM address addr[9:2]. In 18-bit mode: GXOR[6:0] 7-bit value is XORed against the LLM address addr[8:2]. (note: we don't address scramble outside the mode's node space) NOTE: The DFA graphs MUST BE built/written to DFA LLM memory aware of the "per-graph" address scramble. NOTE: The address 'scramble' occurs for BOTH DFA LLM graph read/write operations. */ uint64_t sarb : 1; /**< DFA Source Arbiter Mode Selects the arbitration mode used to select DFA requests issued from either CP2 or the DTE (NCB-CSR or DFA HW engine). - 0: Fixed Priority [Highest=CP2, Lowest=DTE] - 1: Round-Robin NOTE: This should only be written to a different value during power-on SW initialization. */ #else uint64_t sarb : 1; uint64_t gxor_ena : 1; uint64_t nxor_ena : 1; uint64_t nrpl_ena : 1; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_dfa_cfg_s cn38xx; struct cvmx_dfa_cfg_cn38xxp2 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t sarb : 1; /**< DFA Source Arbiter Mode Selects the arbitration mode used to select DFA requests issued from either CP2 or the DTE (NCB-CSR or DFA HW engine). - 0: Fixed Priority [Highest=CP2, Lowest=DTE] - 1: Round-Robin NOTE: This should only be written to a different value during power-on SW initialization. */ #else uint64_t sarb : 1; uint64_t reserved_1_63 : 63; #endif } cn38xxp2; struct cvmx_dfa_cfg_s cn58xx; struct cvmx_dfa_cfg_s cn58xxp1; } cvmx_dfa_cfg_t; /** * cvmx_dfa_dbell * * DFA_DBELL = DFA Doorbell Register * * Description: * NOTE: To write to the DFA_DBELL register, a device would issue an IOBST directed at the DFA with addr[34:33]=2'b00. * To read the DFA_DBELL register, a device would issue an IOBLD64 directed at the DFA with addr[34:33]=2'b00. * * NOTE: If DFA_CFG[DTECLKDIS]=1 (DFA-DTE clocks disabled), reads/writes to the DFA_DBELL register do not take effect. * NOTE: If FUSE[120]="DFA DTE disable" is blown, reads/writes to the DFA_DBELL register do not take effect. */ typedef union { uint64_t u64; struct cvmx_dfa_dbell_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t dbell : 20; /**< Represents the cumulative total of pending DFA instructions which SW has previously written into the DFA Instruction FIFO (DIF) in main memory. Each DFA instruction contains a fixed size 32B instruction word which is executed by the DFA HW. The DBL register can hold up to 1M-1 (2^20-1) pending DFA instruction requests. During a read (by SW), the 'most recent' contents of the DFA_DBELL register are returned at the time the NCB-INB bus is driven. NOTE: Since DFA HW updates this register, its contents are unpredictable in SW. */ #else uint64_t dbell : 20; uint64_t reserved_20_63 : 44; #endif } s; struct cvmx_dfa_dbell_s cn31xx; struct cvmx_dfa_dbell_s cn38xx; struct cvmx_dfa_dbell_s cn38xxp2; struct cvmx_dfa_dbell_s cn58xx; struct cvmx_dfa_dbell_s cn58xxp1; } cvmx_dfa_dbell_t; /** * cvmx_dfa_ddr2_addr * * DFA_DDR2_ADDR = DFA DDR2 fclk-domain Memory Address Config Register * * * Description: The following registers are used to compose the DFA's DDR2 address into ROW/COL/BNK * etc.
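 * As a worked example of the composition: a part with 10 column bits and 13 row bits gives
 * \#COLs + \#ROWs = 23, so SW would program NUM_COLS=01 and NUM_COLROWS=001 (placing the rank
 * LSB at address bit 23 when not in RNK_LO mode), per the field encodings below.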
*/ typedef union { uint64_t u64; struct cvmx_dfa_ddr2_addr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_9_63 : 55; uint64_t rdimm_ena : 1; /**< If there is a need to insert a register chip on the system (the equivalent of a registered DIMM) to provide better setup for the command and control bits, turn this mode on. RDIMM_ENA 0 Registered Mode OFF 1 Registered Mode ON */ uint64_t num_rnks : 2; /**< NUM_RNKS is programmed based on how many ranks there are in the system. This needs to be programmed correctly regardless of whether we are in RNK_LO mode or not. NUM_RNKS \# of Ranks 0 1 1 2 2 4 3 RESERVED */ uint64_t rnk_lo : 1; /**< When this mode is turned on, consecutive addresses outside the bank boundary are programmed to go to different ranks in order to minimize bank conflicts. It is useful with memory built from 4-bank DDR2 parts to extend the \#physical banks available and minimize bank conflicts. On 8-bank DDR2 parts, this mode is not very useful because it comes with a penalty: every successive read that crosses a rank boundary needs a 1-cycle bubble inserted to prevent bus turnaround conflicts. RNK_LO 0 - OFF 1 - ON */ uint64_t num_colrows : 3; /**< NUM_COLROWS is used to set the MSB of the ROW_ADDR and the LSB of RANK address when not in RNK_LO mode. Calculate the sum of \#COL and \#ROW and program the controller appropriately RANK_LSB \#COLs + \#ROWs ------------------------------ - 000: 22 - 001: 23 - 010: 24 - 011: 25 - 100-111: RESERVED */ uint64_t num_cols : 2; /**< The Long word address that the controller receives needs to be converted to Row, Col, Rank and Bank addresses depending on the memory part's microarchitecture. NUM_COLS tells the controller how many column bits there are and the controller uses this info to map the LSB of the row address - 00: num_cols = 9 - 01: num_cols = 10 - 10: num_cols = 11 - 11: RESERVED */ #else uint64_t num_cols : 2; uint64_t num_colrows : 3; uint64_t rnk_lo : 1; uint64_t num_rnks : 2; uint64_t rdimm_ena : 1; uint64_t reserved_9_63 : 55; #endif } s; struct cvmx_dfa_ddr2_addr_s cn31xx; } cvmx_dfa_ddr2_addr_t; /** * cvmx_dfa_ddr2_bus * * DFA_DDR2_BUS = DFA DDR Bus Activity Counter * * * Description: This counter counts \# cycles that the memory bus is doing a read/write/command * Useful to benchmark the bus utilization as a ratio of * \#Cycles of Data Transfer/\#Cycles since init or * \#Cycles of Data Transfer/\#Cycles that memory controller is active */ typedef union { uint64_t u64; struct cvmx_dfa_ddr2_bus_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_47_63 : 17; uint64_t bus_cnt : 47; /**< Counter counts the \# cycles of Data transfer */ #else uint64_t bus_cnt : 47; uint64_t reserved_47_63 : 17; #endif } s; struct cvmx_dfa_ddr2_bus_s cn31xx; } cvmx_dfa_ddr2_bus_t; /** * cvmx_dfa_ddr2_cfg * * DFA_DDR2_CFG = DFA DDR2 fclk-domain Memory Configuration \#0 Register * * Description: */ typedef union { uint64_t u64; struct cvmx_dfa_ddr2_cfg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_41_63 : 23; uint64_t trfc : 5; /**< Establishes tRFC(from DDR2 data sheets) in \# of 4 fclk intervals. General Equation: TRFC(csr) = ROUNDUP[tRFC(data-sheet-ns)/(4 * fclk(ns))] Example: tRFC(data-sheet-ns) = 127.5ns Operational Frequency: 533MHz DDR rate [fclk=266MHz(3.75ns)] Then: TRFC(csr) = ROUNDUP[127.5ns/(4 * 3.75ns)] = 9 */ uint64_t mrs_pgm : 1; /**< When clear, the HW initialization sequence fixes some of the *MRS register bit definitions.
EMRS: A[14:13] = 0 RESERVED A[12] = 0 Output Buffers Enabled (FIXED) A[11] = 0 RDQS Disabled (FIXED) A[10] = 0 DQSn Enabled (FIXED) A[9:7] = 0 OCD Not supported (FIXED) A[6] = 0 RTT Disabled (FIXED) A[5:3]=DFA_DDR2_TMG[ADDLAT] (if DFA_DDR2_TMG[POCAS]=1) Additive LATENCY (Programmable) A[2]=0 RTT Disabled (FIXED) A[1]=DFA_DDR2_TMG[DIC] (Programmable) A[0] = 0 DLL Enabled (FIXED) MRS: A[14:13] = 0 RESERVED A[12] = 0 Fast Active Power Down Mode (FIXED) A[11:9] = DFA_DDR2_TMG[TWR](Programmable) A[8] = 1 DLL Reset (FIXED) A[7] = 0 Test Mode (FIXED) A[6:4]=DFA_DDR2_TMG[CASLAT] CAS LATENCY (Programmable) A[3] = 0 Burst Type(must be 0:Sequential) (FIXED) A[2:0] = 2 Burst Length=4 (FIXED) When set, the HW initialization sequence sources the DFA_DDR2_MRS, DFA_DDR2_EMRS registers which are driven onto the DFA_A[] pins. (this allows the MRS/EMRS fields to be completely programmable - however care must be taken by software). This mode is useful for customers who wish to: 1) override the FIXED definitions(above), or 2) Use a "clamshell mode" of operation where the address bits(per rank) are swizzled on the board to reduce stub lengths for optimal frequency operation. Use this in combination with DFA_DDR2_CFG[RNK_MSK] to specify the INIT sequence for each of the 4 supported ranks. */ uint64_t fpip : 3; /**< Early Fill Programmable Pipe [\#fclks] This field dictates the \#fclks prior to the arrival of fill data(in fclk domain), to start the 'early' fill command pipe (in the eclk domain) so as to minimize the overall fill latency. The programmable early fill command signal is synchronized into the eclk domain, where it is used to pull data out of asynchronous RAM as fast as possible. NOTE: A value of FPIP=0 is the 'safest' setting and will result in the early fill command pipe starting in the same cycle as the fill data. General Equation: (for FPIP) FPIP <= MIN[6, (ROUND_DOWN[6/EF_RATIO] + 1)] where: EF_RATIO = ECLK/FCLK Ratio [eclk(MHz)/fclk(MHz)] Example: FCLK=200MHz/ECLK=600MHz FPIP = MIN[6, (ROUND_DOWN[6/(600/200)] + 1)] FPIP <= 3 */ uint64_t reserved_29_31 : 3; uint64_t ref_int : 13; /**< Refresh Interval (represented in \#of fclk increments). Each refresh interval will generate a single auto-refresh command sequence which implicitly targets all banks within the device: Example: For fclk=200MHz(5ns)/400MHz(DDR): trefint(ns) = tREFI(max) = 3.9us = 3900ns [datasheet] REF_INT = ROUND_DOWN[(trefint/fclk)] = ROUND_DOWN[(3900ns/5ns)] = 780 fclks (0x30c) NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t reserved_14_15 : 2; uint64_t tskw : 2; /**< Board Skew (represented in \#fclks) Represents additional board skew of DQ/DQS. - 00: board-skew = 0 fclk - 01: board-skew = 1 fclk - 10: board-skew = 2 fclk - 11: board-skew = 3 fclk NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t rnk_msk : 4; /**< Controls the CS_N[3:0] during a) a HW Initialization sequence (triggered by DFA_DDR2_CFG[INIT]) or b) during a normal refresh sequence. If the RNK_MSK[x]=1, the corresponding CS_N[x] is driven. NOTE: This is required for DRAM used in a clamshell configuration, since the address lines carry Mode Register write data that is unique per rank(or clam). In a clamshell configuration, the N3K DFA_A[x] pin may be tied into Clam#0's A[x] and also into Clam#1's 'mirrored' address bit A[y] (eg: Clam0 sees A[5] and Clam1 sees A[15]).
To support clamshell designs, SW must initiate separate HW init sequences for each unique rank address mapping. Before each HW init sequence is triggered, SW must preload the DFA_DDR2_MRS/EMRS registers with the data that will be driven onto the A[14:0] wires during the EMRS/MRS mode register write(s). NOTE: After the final HW initialization sequence has been triggered, SW must wait 64K eclks before writing the RNK_MSK[3:0] field = 4'b1111 (so that CS_N[3:0] is driven during refresh sequences in normal operation). NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t silo_qc : 1; /**< Enables Quarter Cycle move of the Rd sampling window */ uint64_t silo_hc : 1; /**< A combination of SILO_HC, SILO_QC and TSKW specifies the positioning of the sampling strobe when receiving read data back from DDR2. This is done to offset any board trace induced delay on the DQ and DQS which inherently makes these asynchronous with respect to the internal clk of the controller. TSKW moves this sampling window by integer cycles. SILO_QC and HC move it by a quarter and half a cycle, respectively. */ uint64_t sil_lat : 2; /**< Silo Latency (\#fclks): On reads, determines how many additional fclks to wait (on top of CASLAT+1) before pulling data out of the padring silos used for time domain boundary crossing. NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t bprch : 1; /**< Tristate Enable (back porch) (\#fclks) On reads, allows user to control the shape of the tristate disable back porch for the DQ data bus. This parameter is also very dependent on the RW_DLY and WR_DLY parameters and care must be taken when programming these parameters to avoid data bus contention. Valid range [0..2] NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t fprch : 1; /**< Tristate Enable (front porch) (\#fclks) On reads, allows user to control the shape of the tristate disable front porch for the DQ data bus. This parameter is also very dependent on the RW_DLY and WR_DLY parameters and care must be taken when programming these parameters to avoid data bus contention. Valid range [0..2] NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t init : 1; /**< When a '1' is written (and the previous value was '0'), the HW init sequence(s) for the LLM Memory Port is initiated. NOTE: To initialize memory, SW must: 1) Enable memory port a) PRTENA=1 2) Wait 200us (to ensure a stable clock to the DDR2) - as per DDR2 spec. 3) Write a '1' to the INIT which will initiate a hardware initialization sequence. NOTE: After writing a '1', SW must wait 64K eclk cycles to ensure the HW init sequence has completed before writing to ANY of the DFA_DDR2* registers. NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t prtena : 1; /**< Enable DFA Memory When enabled, this bit lets N3K be the default driver for the DFA-LLM memory port.
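                                                         The INIT flow above, as a hedged sketch (assumes the
                                                         cvmx_read_csr()/cvmx_write_csr() accessors, the
                                                         CVMX_DFA_DDR2_CFG address macro and a cvmx_wait()-style
                                                         cycle-delay helper from the companion cvmx headers;
                                                         cycles_per_usec is a hypothetical constant for the
                                                         current clock):
                                                           cvmx_dfa_ddr2_cfg_t cfg;
                                                           cfg.u64 = cvmx_read_csr(CVMX_DFA_DDR2_CFG);
                                                           cfg.s.prtena = 1;                    (step 1: enable the memory port)
                                                           cvmx_write_csr(CVMX_DFA_DDR2_CFG, cfg.u64);
                                                           cvmx_wait(200 * cycles_per_usec);    (step 2: 200us for a stable DDR2 clock)
                                                           cfg.s.init = 1;                      (step 3: trigger the HW init sequence)
                                                           cvmx_write_csr(CVMX_DFA_DDR2_CFG, cfg.u64);
                                                           cvmx_wait(65536);                    (then 64K eclks before touching DFA_DDR2*)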
*/ #else uint64_t prtena : 1; uint64_t init : 1; uint64_t fprch : 1; uint64_t bprch : 1; uint64_t sil_lat : 2; uint64_t silo_hc : 1; uint64_t silo_qc : 1; uint64_t rnk_msk : 4; uint64_t tskw : 2; uint64_t reserved_14_15 : 2; uint64_t ref_int : 13; uint64_t reserved_29_31 : 3; uint64_t fpip : 3; uint64_t mrs_pgm : 1; uint64_t trfc : 5; uint64_t reserved_41_63 : 23; #endif } s; struct cvmx_dfa_ddr2_cfg_s cn31xx; } cvmx_dfa_ddr2_cfg_t; /** * cvmx_dfa_ddr2_comp * * DFA_DDR2_COMP = DFA DDR2 I/O PVT Compensation Configuration * * * Description: The following are registers to program the DDR2 PLL and DLL */ typedef union { uint64_t u64; struct cvmx_dfa_ddr2_comp_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t dfa__pctl : 4; /**< DFA DDR pctl from compensation circuit Internal DBG only */ uint64_t dfa__nctl : 4; /**< DFA DDR nctl from compensation circuit Internal DBG only */ uint64_t reserved_9_55 : 47; uint64_t pctl_csr : 4; /**< Compensation control bits */ uint64_t nctl_csr : 4; /**< Compensation control bits */ uint64_t comp_bypass : 1; /**< Compensation Bypass */ #else uint64_t comp_bypass : 1; uint64_t nctl_csr : 4; uint64_t pctl_csr : 4; uint64_t reserved_9_55 : 47; uint64_t dfa__nctl : 4; uint64_t dfa__pctl : 4; #endif } s; struct cvmx_dfa_ddr2_comp_s cn31xx; } cvmx_dfa_ddr2_comp_t; /** * cvmx_dfa_ddr2_emrs * * DFA_DDR2_EMRS = DDR2 EMRS Register(s) EMRS1[14:0], EMRS1_OCD[14:0] * Description: This register contains the data driven onto the Address[14:0] lines during DDR INIT * To support Clamshelling (where N3K DFA_A[] pins are not 1:1 mapped to each clam(or rank), a HW init * sequence is allowed on a "per-rank" basis. Care must be taken in the values programmed into these * registers during the HW initialization sequence (see N3K specific restrictions in notes below). * DFA_DDR2_CFG[MRS_PGM] must be 1 to support this feature. * * Notes: * For DDR-II please consult your device's data sheet for further details: * */ typedef union { uint64_t u64; struct cvmx_dfa_ddr2_emrs_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_31_63 : 33; uint64_t emrs1_ocd : 15; /**< Memory Address[14:0] during "EMRS1 (OCD Calibration)" step \#12a "EMRS OCD Default Command" A[9:7]=111 of DDR2 HW initialization sequence. (See JEDEC DDR2 specification (JESD79-2): Power Up and initialization sequence). A[14:13] = 0, RESERVED A[12] = 0, Output Buffers Enabled A[11] = 0, RDQS Disabled (we do not support RDQS) A[10] = 0, DQSn Enabled A[9:7] = 7, OCD Calibration Mode Default A[6] = 0, ODT Disabled A[5:3]=DFA_DDR2_TMG[ADDLAT] Additive LATENCY (Default 0) A[2]=0 Termination Res RTT (ODT off Default) [A6,A2] = 0 -> ODT Disabled 1 -> 75 ohm; 2 -> 150 ohm; 3 - Reserved A[1]=0 Normal Output Driver Imp mode (1 - weak ie., 60% of normal drive strength) A[0] = 0 DLL Enabled */ uint64_t reserved_15_15 : 1; uint64_t emrs1 : 15; /**< Memory Address[14:0] during: a) Step \#7 "EMRS1 to enable DLL (A[0]=0)" b) Step \#12b "EMRS OCD Calibration Mode Exit" steps of DDR2 HW initialization sequence. (See JEDEC DDR2 specification (JESD79-2): Power Up and initialization sequence). 
A[14:13] = 0, RESERVED A[12] = 0, Output Buffers Enabled A[11] = 0, RDQS Disabled (we do not support RDQS) A[10] = 0, DQSn Enabled A[9:7] = 0, OCD Calibration Mode exit/maintain A[6] = 0, ODT Disabled A[5:3]=DFA_DDR2_TMG[ADDLAT] Additive LATENCY (Default 0) A[2]=0 Termination Res RTT (ODT off Default) [A6,A2] = 0 -> ODT Disabled 1 -> 75 ohm; 2 -> 150 ohm; 3 - Reserved A[1]=0 Normal Output Driver Imp mode (1 - weak ie., 60% of normal drive strength) A[0] = 0 DLL Enabled */ #else uint64_t emrs1 : 15; uint64_t reserved_15_15 : 1; uint64_t emrs1_ocd : 15; uint64_t reserved_31_63 : 33; #endif } s; struct cvmx_dfa_ddr2_emrs_s cn31xx; } cvmx_dfa_ddr2_emrs_t; /** * cvmx_dfa_ddr2_fcnt * * DFA_DDR2_FCNT = DFA FCLK Counter * * * Description: This FCLK cycle counter gets going after memory has been initialized */ typedef union { uint64_t u64; struct cvmx_dfa_ddr2_fcnt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_47_63 : 17; uint64_t fcyc_cnt : 47; /**< Counter counts FCLK cycles or \# cycles that the memory controller has requests queued up depending on FCNT_MODE If FCNT_MODE = 0, this counter counts the \# FCLK cycles If FCNT_MODE = 1, this counter counts the \# cycles the controller is active with memory requests. */ #else uint64_t fcyc_cnt : 47; uint64_t reserved_47_63 : 17; #endif } s; struct cvmx_dfa_ddr2_fcnt_s cn31xx; } cvmx_dfa_ddr2_fcnt_t; /** * cvmx_dfa_ddr2_mrs * * DFA_DDR2_MRS = DDR2 MRS Register(s) MRS_DLL[14:0], MRS[14:0] * Description: This register contains the data driven onto the Address[14:0] lines during DDR INIT * To support Clamshelling (where N3K DFA_A[] pins are not 1:1 mapped to each clam(or rank), a HW init * sequence is allowed on a "per-rank" basis. Care must be taken in the values programmed into these * registers during the HW initialization sequence (see N3K specific restrictions in notes below). * DFA_DDR2_CFG[MRS_PGM] must be 1 to support this feature. * * Notes: * For DDR-II please consult your device's data sheet for further details: * */ typedef union { uint64_t u64; struct cvmx_dfa_ddr2_mrs_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_31_63 : 33; uint64_t mrs : 15; /**< Memory Address[14:0] during "MRS without resetting DLL A[8]=0" step of HW initialization sequence. (See JEDEC DDR2 specification (JESD79-2): Power Up and initialization sequence - Step \#11). A[14:13] = 0, RESERVED A[12] = 0, Fast Active Power Down Mode A[11:9] = DFA_DDR2_TMG[TWR] A[8] = 0, for DLL Reset A[7] =0 Test Mode (must be 0 for normal operation) A[6:4]=DFA_DDR2_TMG[CASLAT] CAS LATENCY (default 4) A[3]=0 Burst Type(must be 0:Sequential) A[2:0]=2 Burst Length=4(default) */ uint64_t reserved_15_15 : 1; uint64_t mrs_dll : 15; /**< Memory Address[14:0] during "MRS for DLL_RESET A[8]=1" step of HW initialization sequence. (See JEDEC DDR2 specification (JESD79-2): Power Up and initialization sequence - Step \#8). 
A[14:13] = 0, RESERVED A[12] = 0, Fast Active Power Down Mode A[11:9] = DFA_DDR2_TMG[TWR] A[8] = 1, for DLL Reset A[7] = 0 Test Mode (must be 0 for normal operation) A[6:4]=DFA_DDR2_TMG[CASLAT] CAS LATENCY (default 4) A[3] = 0 Burst Type(must be 0:Sequential) A[2:0] = 2 Burst Length=4(default) */ #else uint64_t mrs_dll : 15; uint64_t reserved_15_15 : 1; uint64_t mrs : 15; uint64_t reserved_31_63 : 33; #endif } s; struct cvmx_dfa_ddr2_mrs_s cn31xx; } cvmx_dfa_ddr2_mrs_t; /** * cvmx_dfa_ddr2_opt * * DFA_DDR2_OPT = DFA DDR2 Optimization Registers * * * Description: The following are registers to tweak certain parameters to boost performance */ typedef union { uint64_t u64; struct cvmx_dfa_ddr2_opt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_10_63 : 54; uint64_t max_read_batch : 5; /**< Maximum number of consecutive read to service before allowing write to interrupt. */ uint64_t max_write_batch : 5; /**< Maximum number of consecutive writes to service before allowing reads to interrupt. */ #else uint64_t max_write_batch : 5; uint64_t max_read_batch : 5; uint64_t reserved_10_63 : 54; #endif } s; struct cvmx_dfa_ddr2_opt_s cn31xx; } cvmx_dfa_ddr2_opt_t; /** * cvmx_dfa_ddr2_pll * * DFA_DDR2_PLL = DFA DDR2 PLL and DLL Configuration * * * Description: The following are registers to program the DDR2 PLL and DLL */ typedef union { uint64_t u64; struct cvmx_dfa_ddr2_pll_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t pll_setting : 17; /**< Internal Debug Use Only */ uint64_t reserved_32_46 : 15; uint64_t setting90 : 5; /**< Contains the setting of DDR DLL; Internal DBG only */ uint64_t reserved_21_26 : 6; uint64_t dll_setting : 5; /**< Contains the open loop setting value for the DDR90 delay line. */ uint64_t dll_byp : 1; /**< DLL Bypass. When set, the DDR90 DLL is bypassed and the DLL behaves in Open Loop giving a fixed delay set by DLL_SETTING */ uint64_t qdll_ena : 1; /**< DDR Quad DLL Enable: A 0->1 transition on this bit after erst deassertion will reset the DDR 90 DLL. Allow 200 micro seconds for Lock before DDR Init. */ uint64_t bw_ctl : 4; /**< Internal Use Only - for Debug */ uint64_t bw_upd : 1; /**< Internal Use Only - for Debug */ uint64_t pll_div2 : 1; /**< PLL Output is further divided by 2. Useful for slow fclk frequencies where the PLL may be out of range. */ uint64_t reserved_7_7 : 1; uint64_t pll_ratio : 5; /**< Bits <6:2> sets the clk multiplication ratio If the fclk frequency desired is less than 260MHz (lower end saturation point of the pll), write 2x the ratio desired in this register and set PLL_DIV2 */ uint64_t pll_bypass : 1; /**< PLL Bypass. Uses the ref_clk without multiplication. */ uint64_t pll_init : 1; /**< Need a 0 to 1 pulse on this CSR to get the DFA Clk Generator Started. Write this register before starting anything. Allow 200 uS for PLL Lock before doing anything. */ #else uint64_t pll_init : 1; uint64_t pll_bypass : 1; uint64_t pll_ratio : 5; uint64_t reserved_7_7 : 1; uint64_t pll_div2 : 1; uint64_t bw_upd : 1; uint64_t bw_ctl : 4; uint64_t qdll_ena : 1; uint64_t dll_byp : 1; uint64_t dll_setting : 5; uint64_t reserved_21_26 : 6; uint64_t setting90 : 5; uint64_t reserved_32_46 : 15; uint64_t pll_setting : 17; #endif } s; struct cvmx_dfa_ddr2_pll_s cn31xx; } cvmx_dfa_ddr2_pll_t; /** * cvmx_dfa_ddr2_tmg * * DFA_DDR2_TMG = DFA DDR2 Memory Timing Config Register * * * Description: The following are registers to program the DDR2 memory timing parameters. 
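 * As a worked example (data-sheet values are illustrative; always consult the actual part):
 * at fclk=266MHz (tcyc=3.75ns), tRP=15ns gives TRP = RNDUP[15/3.75] = 4, and tRAS=45ns gives
 * TRAS = RNDUP[45/3.75] = 12.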
*/ typedef union { uint64_t u64; struct cvmx_dfa_ddr2_tmg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_47_63 : 17; uint64_t fcnt_mode : 1; /**< If FCNT_MODE = 0, this counter counts the \# FCLK cycles If FCNT_MODE = 1, this counter counts the \# cycles the controller is active with memory requests. */ uint64_t cnt_clr : 1; /**< Clears the FCLK Cyc & Bus Util counter */ uint64_t cavmipo : 1; /**< RESERVED */ uint64_t ctr_rst : 1; /**< Reset oneshot pulse for the refresh counter & perf counters. SW should first write this field to a one to clear, & then write a zero for normal operation */ uint64_t odt_rtt : 2; /**< DDR2 Termination Resistor Setting These two bits are loaded into the RTT portion of the EMRS register bits A6 & A2. If DDR2's termination (for the memory's DQ/DQS/DM pads) is not desired, set it to 00. If it is, choose between 01 for 75 ohm and 10 for 150 ohm termination. 00 = ODT Disabled 01 = 75 ohm Termination 10 = 150 ohm Termination 11 = 50 ohm Termination */ uint64_t dqsn_ena : 1; /**< For DDR-II Mode, DIC[1] is used to load into EMRS bit 10 - DQSN Enable/Disable field. By default, we program the DDRs to drive the DQSN also. Set it to 1 if DQSN should be Hi-Z. 0 - DQSN Enable 1 - DQSN Disable */ uint64_t dic : 1; /**< Drive Strength Control: For DDR-I/II Mode, DIC[0] is loaded into the Extended Mode Register (EMRS) A1 bit during initialization. (see DDR-I data sheet EMRS description) 0 = Normal 1 = Reduced */ uint64_t r2r_slot : 1; /**< A 1 on this register will force the controller to slot a bubble between consecutive reads */ uint64_t tfaw : 5; /**< tFAW - Cycles = RNDUP[tFAW(ns)/tcyc(ns)] - 1 Four Access Window time. Relevant only in 8-bank parts. TFAW = 5'b0 for DDR2-4bank TFAW = RNDUP[tFAW(ns)/tcyc(ns)] - 1 in DDR2-8bank */ uint64_t twtr : 4; /**< tWTR Cycles = RNDUP[tWTR(ns)/tcyc(ns)] Last Wr Data to Rd Command time. (Represented in fclk cycles) TYP=15ns - 0000: RESERVED - 0001: 1 - ... - 0111: 7 - 1000-1111: RESERVED */ uint64_t twr : 3; /**< DDR Write Recovery time (tWR). Last Wr Brst to Prech This is not a direct encoding of the value. It is programmed as below, per the DDR2 spec. The decimal number on the right is RNDUP[tWR(ns)/tcyc(ns)] TYP=15ns - 000: RESERVED - 001: 2 - 010: 3 - 011: 4 - 100: 5 - 101: 6 - 110-111: RESERVED */ uint64_t trp : 4; /**< tRP Cycles = RNDUP[tRP(ns)/tcyc(ns)] (Represented in fclk cycles) TYP=15ns - 0000: RESERVED - 0001: 1 - ... - 0111: 7 - 1000-1111: RESERVED When using parts with 8 banks (DFA_CFG->MAX_BNK is 1), load tRP cycles + 1 into this register. */ uint64_t tras : 5; /**< tRAS Cycles = RNDUP[tRAS(ns)/tcyc(ns)] (Represented in fclk cycles) TYP=45ns - 00000-00001: RESERVED - 00010: 2 - ... - 10100: 20 - 10101-11111: RESERVED */ uint64_t trrd : 3; /**< tRRD cycles: ACT-ACT timing parameter for different banks. (Represented in fclk cycles) For DDR2, TYP=7.5ns - 000: RESERVED - 001: 1 tCYC - 010: 2 tCYC - 011: 3 tCYC - 100: 4 tCYC - 101: 5 tCYC - 110-111: RESERVED */ uint64_t trcd : 4; /**< tRCD Cycles = RNDUP[tRCD(ns)/tcyc(ns)] (Represented in fclk cycles) TYP=15ns - 0000: RESERVED - 0001: 2 (2 is the smallest value allowed) - 0010: 2 - ... - 0111: 7 - 1000-1111: RESERVED */ uint64_t addlat : 3; /**< When in Posted CAS mode ADDLAT needs to be programmed to tRCD-1 ADDLAT \#additional latency cycles 000 0 001 1 (tRCD = 2 fclk's) 010 2 (tRCD = 3 fclk's) 011 3 (tRCD = 4 fclk's) 100 4 (tRCD = 5 fclk's) 101 5 (tRCD = 6 fclk's) 110 6 (tRCD = 7 fclk's) 111 7 (tRCD = 8 fclk's) */ uint64_t pocas : 1; /**< Posted CAS mode.
When 1, we use DDR2's Posted CAS feature. When using this mode, ADDLAT needs to be programmed as well */ uint64_t caslat : 3; /**< CAS Latency in \# fclk Cycles CASLAT \# CAS latency cycles 000 - 010 RESERVED 011 3 100 4 101 5 110 6 111 7 */ uint64_t tmrd : 2; /**< tMRD Cycles (Represented in fclk tCYC) For DDR2, it is typically 2*tCYC - 00: RESERVED - 01: 1 - 10: 2 - 11: 3 */ uint64_t ddr2t : 1; /**< When 2T mode is turned on, command signals are set up a cycle ahead of when the CS is enabled and kept for a total of 2 cycles. This mode is enabled in higher speeds when there is difficulty meeting setup. Performance could be negatively affected in 2T mode */ #else uint64_t ddr2t : 1; uint64_t tmrd : 2; uint64_t caslat : 3; uint64_t pocas : 1; uint64_t addlat : 3; uint64_t trcd : 4; uint64_t trrd : 3; uint64_t tras : 5; uint64_t trp : 4; uint64_t twr : 3; uint64_t twtr : 4; uint64_t tfaw : 5; uint64_t r2r_slot : 1; uint64_t dic : 1; uint64_t dqsn_ena : 1; uint64_t odt_rtt : 2; uint64_t ctr_rst : 1; uint64_t cavmipo : 1; uint64_t cnt_clr : 1; uint64_t fcnt_mode : 1; uint64_t reserved_47_63 : 17; #endif } s; struct cvmx_dfa_ddr2_tmg_s cn31xx; } cvmx_dfa_ddr2_tmg_t; /** * cvmx_dfa_difctl * * DFA_DIFCTL = DFA Instruction FIFO (DIF) Control Register * * Description: * NOTE: To write to the DFA_DIFCTL register, a device would issue an IOBST directed at the DFA with addr[34:33]=2'b11. * To read the DFA_DIFCTL register, a device would issue an IOBLD64 directed at the DFA with addr[34:33]=2'b11. * * NOTE: This register is intended to ONLY be written once (at power-up). Any future writes could * cause the DFA and FPA HW to become unpredictable. * * NOTE: If DFA_CFG[DTECLKDIS]=1 (DFA-DTE clocks disabled), reads/writes to the DFA_DIFCTL register do not take effect. * NOTE: If FUSE[120]="DFA DTE disable" is blown, reads/writes to the DFA_DIFCTL register do not take effect. */ typedef union { uint64_t u64; struct cvmx_dfa_difctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t dwbcnt : 8; /**< Represents the \# of cache lines in the instruction buffer that may be dirty and should not be written-back to memory when the instruction chunk is returned to the Free Page list. NOTE: Typically SW will want to mark all DFA Instruction memory returned to the Free Page list as DWB (Don't WriteBack), therefore SW should seed this register as: DFA_DIFCTL[DWBCNT] = (DFA_DIFCTL[SIZE] + 4)/4 */ uint64_t pool : 3; /**< Represents the 3bit buffer pool-id used by DFA HW when the DFA instruction chunk is recycled back to the Free Page List maintained by the FPA HW (once the DFA instruction has been issued). */ uint64_t size : 9; /**< Represents the \# of 32B instructions contained within each DFA instruction chunk. At Power-on, SW will seed the SIZE register with a fixed chunk-size. (Must be at least 3) DFA HW uses this field to determine the size of each DFA instruction chunk, in order to: a) determine when to read the next DFA instruction chunk pointer which is written by SW at the end of the current DFA instruction chunk (see DFA description of next chunk buffer Ptr for format). b) determine when a DFA instruction chunk can be returned to the Free Page List maintained by the FPA HW.
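A minimal power-up sketch (assumption: the SDK's generic cvmx_write_csr() accessor and a CVMX_DFA_DIFCTL address macro are available; neither is defined in this file) that seeds SIZE/POOL and derives DWBCNT per the formula above:

    cvmx_dfa_difctl_t difctl;
    difctl.u64 = 0;
    difctl.s.size = 31;                           /* 32B instructions per chunk (must be >= 3) */
    difctl.s.pool = 0;                            /* FPA pool-id holding instruction chunks */
    difctl.s.dwbcnt = (difctl.s.size + 4) / 4;    /* mark all returned lines Don't-WriteBack */
    cvmx_write_csr(CVMX_DFA_DIFCTL, difctl.u64);  /* written once, at power-up only */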
*/ #else uint64_t size : 9; uint64_t pool : 3; uint64_t dwbcnt : 8; uint64_t reserved_20_63 : 44; #endif } s; struct cvmx_dfa_difctl_s cn31xx; struct cvmx_dfa_difctl_s cn38xx; struct cvmx_dfa_difctl_s cn38xxp2; struct cvmx_dfa_difctl_s cn58xx; struct cvmx_dfa_difctl_s cn58xxp1; } cvmx_dfa_difctl_t; /** * cvmx_dfa_difrdptr * * DFA_DIFRDPTR = DFA Instruction FIFO (DIF) RDPTR Register * * Description: * NOTE: To write to the DFA_DIFRDPTR register, a device would issue an IOBST directed at the DFA with addr[34:33]=2'b01. * To read the DFA_DIFRDPTR register, a device would issue an IOBLD64 directed at the DFA with addr[34:33]=2'b01. * * NOTE: If DFA_CFG[DTECLKDIS]=1 (DFA-DTE clocks disabled), reads/writes to the DFA_DIFRDPTR register do not take effect. * NOTE: If FUSE[120]="DFA DTE disable" is blown, reads/writes to the DFA_DIFRDPTR register do not take effect. */ typedef union { uint64_t u64; struct cvmx_dfa_difrdptr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t rdptr : 31; /**< Represents the 32B-aligned address of the current instruction in the DFA Instruction FIFO in main memory. The RDPTR must be seeded by software at boot time, and is then maintained thereafter by DFA HW. During the seed write (by SW), RDPTR[6:5]=0, since DFA instruction chunks must be 128B aligned. During a read (by SW), the 'most recent' contents of the RDPTR register are returned at the time the NCB-INB bus is driven. NOTE: Since DFA HW updates this register, its contents are unpredictable in SW (unless it's guaranteed that no new DoorBell register writes have occurred and the DoorBell register is read as zero). */ uint64_t reserved_0_4 : 5; #else uint64_t reserved_0_4 : 5; uint64_t rdptr : 31; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_dfa_difrdptr_s cn31xx; struct cvmx_dfa_difrdptr_s cn38xx; struct cvmx_dfa_difrdptr_s cn38xxp2; struct cvmx_dfa_difrdptr_s cn58xx; struct cvmx_dfa_difrdptr_s cn58xxp1; } cvmx_dfa_difrdptr_t; /** * cvmx_dfa_eclkcfg * * Specify the RSL base addresses for the block * * DFA_ECLKCFG = DFA eclk-domain Configuration Registers * * Description: */ typedef union { uint64_t u64; struct cvmx_dfa_eclkcfg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_19_63 : 45; uint64_t sbdnum : 3; /**< SBD Debug Entry# For internal use only. (DFA Scoreboard debug) Selects which one of 8 DFA Scoreboard entries is latched into the DFA_SBD_DBG[0-3] registers. */ uint64_t reserved_15_15 : 1; uint64_t sbdlck : 1; /**< DFA Scoreboard LOCK Strobe For internal use only. (DFA Scoreboard debug) When written with a '1', the DFA Scoreboard Debug registers (DFA_SBD_DBG[0-3]) are all locked down. This allows SW to lock down the contents of the entire SBD for a single instant in time. All subsequent reads of the DFA scoreboard registers will return the data from that instant in time. */ uint64_t dcmode : 1; /**< DRF-CRQ/DTE Arbiter Mode DTE-DRF Arbiter (0=FP [LP=CRQ/HP=DTE],1=RR) NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t dtmode : 1; /**< DRF-DTE Arbiter Mode DTE-DRF Arbiter (0=FP [LP=DTE[15],...,HP=DTE[0]],1=RR) NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t pmode : 1; /**< NCB-NRP Arbiter Mode (0=Fixed Priority [LP=WQF,DFF,HP=RGF], 1=RR) NOTE: This should only be written to a different value during power-on SW initialization.
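Bring-up note for this register as a whole: the DFA_FRSTN field below documents the PLL/DLL startup sequence. A hedged sketch of that sequence (assumptions: cvmx_read_csr()/cvmx_write_csr(), cvmx_wait_usec() and the CVMX_DFA_DDR2_PLL/CVMX_DFA_ECLKCFG address macros are SDK helpers, not defined here; the PLL_RATIO value is board-specific):

    cvmx_dfa_ddr2_pll_t pll;
    cvmx_dfa_eclkcfg_t eclk;
    pll.u64 = 0;
    pll.s.pll_ratio = 8;                         /* illustrative board-specific multiplier */
    pll.s.pll_div2 = 0;
    pll.s.pll_bypass = 0;
    cvmx_write_csr(CVMX_DFA_DDR2_PLL, pll.u64);  /* program ratio/div2/bypass first */
    pll.s.pll_init = 1;                          /* 0->1 pulse starts the clock generator */
    cvmx_write_csr(CVMX_DFA_DDR2_PLL, pll.u64);
    cvmx_wait_usec(100);                         /* wait for PLL lock */
    pll.s.qdll_ena = 1;                          /* 0->1 resets/enables the DDR90 quad DLL */
    cvmx_write_csr(CVMX_DFA_DDR2_PLL, pll.u64);
    cvmx_wait_usec(10);                          /* wait for DLL lock */
    eclk.u64 = cvmx_read_csr(CVMX_DFA_ECLKCFG);
    eclk.s.dfa_frstn = 1;                        /* deassert internal frst_n; DFA out of reset */
    cvmx_write_csr(CVMX_DFA_ECLKCFG, eclk.u64);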
*/ uint64_t qmode : 1; /**< NCB-NRQ Arbiter Mode (0=Fixed Priority [LP=IRF,RWF,PRF,HP=GRF], 1=RR) NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t imode : 1; /**< NCB-Inbound Arbiter (0=FP [LP=NRQ,HP=NRP], 1=RR) NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t sarb : 1; /**< DFA Source Arbiter Mode Selects the arbitration mode used to select DFA requests issued from either CP2 or the DTE (NCB-CSR or DFA HW engine). - 0: Fixed Priority [Highest=CP2, Lowest=DTE] - 1: Round-Robin NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t reserved_3_7 : 5; uint64_t dteclkdis : 1; /**< DFA DTE Clock Disable When SET, the DFA clocks for DTE(thread engine) operation are disabled. NOTE: When SET, SW MUST NEVER issue ANY operations to the DFA via the NCB Bus. All DFA Operations must be issued solely through the CP2 interface. */ uint64_t maxbnk : 1; /**< Maximum Banks per-device (used by the address mapper when extracting address bits for the memory bank#). - 0: 4 banks/device - 1: 8 banks/device */ uint64_t dfa_frstn : 1; /**< Hold this 0 until the DFA DDR PLL and DLL lock and then write a 1. A 1 on this register deasserts the internal frst_n. Refer to DFA_DDR2_PLL registers for more startup information. Startup sequence if DFA interface needs to be ON: After valid power up, Write DFA_DDR2_PLL -> PLL_RATIO & PLL_DIV2 & PLL_BYPASS to the appropriate values Wait a few cycles Write a 1 to DFA_DDR2_PLL -> PLL_INIT Wait 100 microseconds Write a 1 to DFA_DDR2_PLL -> QDLL_ENA Wait 10 microseconds Write a 1 to this register DFA_FRSTN to pull DFA out of reset Now the DFA block is ready to be initialized (follow the DDR init sequence). */ #else uint64_t dfa_frstn : 1; uint64_t maxbnk : 1; uint64_t dteclkdis : 1; uint64_t reserved_3_7 : 5; uint64_t sarb : 1; uint64_t imode : 1; uint64_t qmode : 1; uint64_t pmode : 1; uint64_t dtmode : 1; uint64_t dcmode : 1; uint64_t sbdlck : 1; uint64_t reserved_15_15 : 1; uint64_t sbdnum : 3; uint64_t reserved_19_63 : 45; #endif } s; struct cvmx_dfa_eclkcfg_s cn31xx; } cvmx_dfa_eclkcfg_t; /** * cvmx_dfa_err * * DFA_ERR = DFA ERROR Register * * Description: */ typedef union { uint64_t u64; struct cvmx_dfa_err_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_33_63 : 31; uint64_t dblina : 1; /**< Doorbell Overflow Interrupt Enable bit. When set, doorbell overflow conditions are reported. */ uint64_t dblovf : 1; /**< Doorbell Overflow detected - Status bit When set, the 20b accumulated doorbell register had overflowed (SW wrote too many doorbell requests). If the DBLINA had previously been enabled(set), an interrupt will be posted. Software can clear the interrupt by writing a 1 to this register bit. NOTE: Detection of a Doorbell Register overflow is a catastrophic error which may leave the DFA HW in an unrecoverable state. */ uint64_t cp2pina : 1; /**< CP2 LW Mode Parity Error Interrupt Enable bit. When set, all PP-generated LW Mode read transactions which encounter a parity error (across the 36b of data) are reported. */ uint64_t cp2perr : 1; /**< PP-CP2 Parity Error Detected - Status bit When set, a parity error had been detected for a PP-generated LW Mode read transaction. If the CP2PINA had previously been enabled(set), an interrupt will be posted. Software can clear the interrupt by writing a 1 to this register bit.
See also: DFA_MEMFADR CSR which contains more data about the memory address/control to help isolate the failure. */ uint64_t cp2parena : 1; /**< CP2 LW Mode Parity Error Enable When set, all PP-generated LW Mode read transactions which encounter a parity error (across the 36b of data) are reported. NOTE: This signal must only be written to a different value when there are no PP-CP2 transactions (preferably during power-on software initialization). */ uint64_t dtepina : 1; /**< DTE Parity Error Interrupt Enable bit (for 18b SIMPLE mode ONLY). When set, all DTE-generated 18b SIMPLE Mode read transactions which encounter a parity error (across the 17b of data) are reported. */ uint64_t dteperr : 1; /**< DTE Parity Error Detected (for 18b SIMPLE mode ONLY) When set, all DTE-generated 18b SIMPLE Mode read transactions which encounter a parity error (across the 17b of data) are reported. */ uint64_t dteparena : 1; /**< DTE Parity Error Enable (for 18b SIMPLE mode ONLY) When set, all DTE-generated 18b SIMPLE Mode read transactions which encounter a parity error (across the 17b of data) are reported. NOTE: This signal must only be written to a different value when there are no DFA thread engines active (preferably during power-on). */ uint64_t dtesyn : 7; /**< DTE 29b ECC Failing 7bit Syndrome When DTESBE or DTEDBE are set, this field contains the failing 7b ECC syndrome. */ uint64_t dtedbina : 1; /**< DTE 29b Double Bit Error Interrupt Enable bit When set, an interrupt is posted for any DTE-generated 36b SIMPLE Mode read which encounters a double bit error. */ uint64_t dtesbina : 1; /**< DTE 29b Single Bit Error Interrupt Enable bit When set, an interrupt is posted for any DTE-generated 36b SIMPLE Mode read which encounters a single bit error (which is also corrected). */ uint64_t dtedbe : 1; /**< DTE 29b Double Bit Error Detected - Status bit When set, a double bit error had been detected for a DTE-generated 36b SIMPLE Mode read transaction. The DTESYN contains the failing syndrome. If the DTEDBINA had previously been enabled(set), an interrupt will be posted. Software can clear the interrupt by writing a 1 to this register bit. See also: DFA_MEMFADR CSR which contains more data about the memory address/control to help isolate the failure. NOTE: DTE-generated 18b SIMPLE Mode Read transactions do not participate in ECC check/correct. */ uint64_t dtesbe : 1; /**< DTE 29b Single Bit Error Corrected - Status bit When set, a single bit error had been detected and corrected for a DTE-generated 36b SIMPLE Mode read transaction. If the DTEDBE=0, then the DTESYN contains the failing syndrome (used during correction). NOTE: DTE-generated 18b SIMPLE Mode Read transactions do not participate in ECC check/correct. If the DTESBINA had previously been enabled(set), an interrupt will be posted. Software can clear the interrupt by writing a 1 to this register bit. See also: DFA_MEMFADR CSR which contains more data about the memory address/control to help isolate the failure. */ uint64_t dteeccena : 1; /**< DTE 29b ECC Enable (for 36b SIMPLE mode ONLY) When set, 29b ECC is enabled on all DTE-generated 36b SIMPLE Mode read transactions. NOTE: This signal must only be written to a different value when there are no DFA thread engines active (preferably during power-on software initialization). */ uint64_t cp2syn : 8; /**< PP-CP2 QW ECC Failing 8bit Syndrome When CP2SBE or CP2DBE are set, this field contains the failing ECC 8b syndrome. Refer to CP2ECCENA.
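The *ENA fields are plain enables, while the status bits in this register are write-1-to-clear. A minimal handler sketch (assumptions: cvmx_read_csr()/cvmx_write_csr() and a CVMX_DFA_ERR address macro, none of which are defined in this file):

    cvmx_dfa_err_t err;
    err.u64 = cvmx_read_csr(CVMX_DFA_ERR);
    if (err.s.dblovf) {
        /* catastrophic: doorbell overflow may leave the DFA HW unrecoverable */
    }
    /* Writing the read value back writes a 1 to exactly the status bits that
       were found set (clearing them), while rewriting the interrupt/ECC
       enables with their current values. */
    cvmx_write_csr(CVMX_DFA_ERR, err.u64);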
*/ uint64_t cp2dbina : 1; /**< PP-CP2 Double Bit Error Interrupt Enable bit When set, an interrupt is posted for any PP-generated QW Mode read which encounters a double bit error. Refer to CP2DBE. */ uint64_t cp2sbina : 1; /**< PP-CP2 Single Bit Error Interrupt Enable bit When set, an interrupt is posted for any PP-generated QW Mode read which encounters a single bit error (which is also corrected). Refer to CP2SBE. */ uint64_t cp2dbe : 1; /**< PP-CP2 Double Bit Error Detected - Status bit When set, a double bit error had been detected for a PP-generated QW Mode read transaction. The CP2SYN contains the failing syndrome. NOTE: PP-generated LW Mode Read transactions do not participate in ECC check/correct. Refer to CP2ECCENA. If the CP2DBINA had previously been enabled(set), an interrupt will be posted. Software can clear the interrupt by writing a 1 to this register bit. See also: DFA_MEMFADR CSR which contains more data about the memory address/control to help isolate the failure. */ uint64_t cp2sbe : 1; /**< PP-CP2 Single Bit Error Corrected - Status bit When set, a single bit error had been detected and corrected for a PP-generated QW Mode read transaction. If the CP2DBE=0, then the CP2SYN contains the failing syndrome (used during correction). Refer to CP2ECCENA. If the CP2SBINA had previously been enabled(set), an interrupt will be posted. Software can clear the interrupt by writing a 1 to this register bit. See also: DFA_MEMFADR CSR which contains more data about the memory address/control to help isolate the failure. NOTE: PP-generated LW Mode Read transactions do not participate in ECC check/correct. */ uint64_t cp2eccena : 1; /**< PP-CP2 QW ECC Enable (for QW Mode transactions) When set, 8bit QW ECC is enabled on all PP-generated QW Mode read transactions, CP2SBE and CP2DBE may be set, and CP2SYN may be filled. NOTE: This signal must only be written to a different value when there are no PP-CP2 transactions (preferably during power-on software initialization). NOTE: QW refers to a 64-bit LLM Load/Store (initiated by a processor core). LW refers to a 36-bit load/store. */ #else uint64_t cp2eccena : 1; uint64_t cp2sbe : 1; uint64_t cp2dbe : 1; uint64_t cp2sbina : 1; uint64_t cp2dbina : 1; uint64_t cp2syn : 8; uint64_t dteeccena : 1; uint64_t dtesbe : 1; uint64_t dtedbe : 1; uint64_t dtesbina : 1; uint64_t dtedbina : 1; uint64_t dtesyn : 7; uint64_t dteparena : 1; uint64_t dteperr : 1; uint64_t dtepina : 1; uint64_t cp2parena : 1; uint64_t cp2perr : 1; uint64_t cp2pina : 1; uint64_t dblovf : 1; uint64_t dblina : 1; uint64_t reserved_33_63 : 31; #endif } s; struct cvmx_dfa_err_s cn31xx; struct cvmx_dfa_err_s cn38xx; struct cvmx_dfa_err_s cn38xxp2; struct cvmx_dfa_err_s cn58xx; struct cvmx_dfa_err_s cn58xxp1; } cvmx_dfa_err_t; /** * cvmx_dfa_memcfg0 * * DFA_MEMCFG0 = DFA Memory Configuration * * Description: */ typedef union { uint64_t u64; struct cvmx_dfa_memcfg0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t rldqck90_rst : 1; /**< RLDCK90 and RLDQK90 DLL SW Reset When written with a '1' the RLDCK90 and RLDQK90 DLL are in soft-reset. */ uint64_t rldck_rst : 1; /**< RLDCK Zero Delay DLL(Clock Generator) SW Reset When written with a '1' the RLDCK zero delay DLL is in soft-reset.
*/ uint64_t clkdiv : 2; /**< RLDCLK Divisor Select - 0: RLDx_CK_H/L = Core Clock /2 - 1: RESERVED (must not be used) - 2: RLDx_CK_H/L = Core Clock /3 - 3: RLDx_CK_H/L = Core Clock /4 The DFA LLM interface(s) are tied to the core clock frequency through this programmable clock divisor. Examples: Core Clock(MHz) | DFA-LLM Clock(MHz) | CLKDIV -----------------+--------------------+-------- 800 | 400/(800-DDR) | /2 1000 | 333/(666-DDR) | /3 800 | 200/(400-DDR) | /4 NOTE: This value MUST BE programmed BEFORE doing a Hardware init sequence (see: DFA_MEMCFG0[INIT_Px] bits). *** NOTE: O9N PASS1 Addition */ uint64_t lpp_ena : 1; /**< PP Linear Port Addressing Mode Enable When enabled, PP-core LLM accesses to the lower-512MB LLM address space are sent to the single DFA port which is enabled. NOTE: If LPP_ENA=1, only one DFA RLDRAM port may be enabled for RLDRAM accesses (ie: ENA_P0 and ENA_P1 CAN NEVER BOTH be set). PP-core LLM accesses to the upper-512MB LLM address space are sent to the other 'disabled' DFA port. SW RESTRICTION: If LPP_ENA=1, then only one DFA port may be enabled for RLDRAM accesses (ie: ENA_P0 and ENA_P1 CAN NEVER BOTH be set). NOTE: This bit is used to allow PP-Core LLM accesses to a disabled port, such that each port can be sequentially addressed (ie: disable LW address interleaving). Enabling this bit allows BOTH PORTs to be active and sequentially addressable. The single port that is enabled(ENA_Px) will respond to the low-512MB LLM address space, and the other 'disabled' port will respond to the high-512MB LLM address space. Example usage: - DFA RLD0 pins used for TCAM-FPGA(CP2 accesses) - DFA RLD1 pins used for RLDRAM (DTE/CP2 accesses). USAGE NOTE: If LPP_ENA=1 and SW DOES NOT initialize the disabled port (ie: INIT_Px=0->1), then refreshes and the HW init sequence WILL NOT occur for the disabled port. If LPP_ENA=1 and SW does initialize the disabled port (INIT_Px=0->1 with ENA_Px=0), then refreshes and the HW init sequence WILL occur to the disabled port. */ uint64_t bunk_init : 2; /**< Controls the CS_N[1:0] during a) a HW Initialization sequence (triggered by DFA_MEMCFG0[INIT_Px]) or b) during a normal refresh sequence. If the BNK_INIT[x]=1, the corresponding CS_N[x] is driven. NOTE: This is required for DRAM used in a clamshell configuration, since the address lines carry Mode Register write data that is unique per bunk (or clam). In a clamshell configuration, the N3K A[x] pin may be tied into Clam#0's A[x] and also into Clam#1's 'mirrored' address bit A[y] (eg: Clam0 sees A[5] and Clam1 sees A[15]). To support clamshell designs, SW must initiate two separate HW init sequences for the two bunks (or clams). Before each HW init sequence is triggered, SW must preload the DFA_MEMRLD[22:0] with the data that will be driven onto the A[22:0] wires during an MRS mode register write. NOTE: After the final HW initialization sequence has been triggered, SW must wait 64K eclks before writing the BUNK_INIT[1:0] field = 3'b11 (so that CS_N[1:0] is driven during refresh sequences in normal operation). NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t init_p0 : 1; /**< When a '1' is written (and the previous value was '0'), the HW init sequence(s) for Memory Port \#0 is initiated. NOTE: To initialize memory, SW must: 1) Set up the DFA_MEMCFG0[CLKDIV] ratio for intended RLDRAM operation. [legal values 0: DIV2 2: DIV3 3: DIV4] 2) Write a '1' into BOTH the DFA_MEM_CFG0[RLDCK_RST] and DFA_MEM_CFG0[RLDQCK90_RST] field at the SAME TIME.
This step puts all three DLLs in SW reset (RLDCK, RLDCK90, RLDQK90 DLLs). 3) Write a '0' into the DFA_MEM_CFG0[RLDCK_RST] field. This step takes the RLDCK DLL out of soft-reset so that the DLL can generate the RLDx_CK_H/L clock pins. 4) Wait 1ms (for RLDCK DLL to achieve lock) 5) Write a '0' into DFA_MEM_CFG0[RLDQCK90_RST] field. This step takes the RLDCK90 DLL AND RLDQK90 DLL out of soft-reset. 6) Wait 1ms (for RLDCK90/RLDQK90 DLLs to achieve lock) 7) Enable memory port(s): ENA_P0=1/ENA_P1=1 8) Wait 100us (to ensure a stable clock to the RLDRAMs) - as per RLDRAM spec. - - - - - Hardware Initialization Sequence - - - - - 9) Setup the DFA_MEMCFG0[BUNK_INIT] for the bunk(s) intended to be initialized. 10) Write a '1' to the corresponding INIT_Px which will initiate a hardware initialization sequence to that 'specific' port. 11) Wait (DFA_MEMCFG0[CLKDIV] * 32K) eclk cycles. [to ensure the HW init sequence has completed before writing to ANY of the DFA_MEM* registers] - - - - - Hardware Initialization Sequence - - - - - 12) Write the DFA_MEMCFG0[BUNK_INIT]=3 to enable refreshes to BOTH bunks. NOTE: In some cases (where the address wires are routed differently between the front and back 'bunks'), SW will need to use DFA_MEMCFG0[BUNK_INIT] bits to control the Hardware initialization sequence for a 'specific bunk'. In these cases, SW would setup the BUNK_INIT and repeat Steps \#9-11 for each bunk/port. NOTE: This should only be written to a different value during power-on SW initialization. NOTE: DFA Memory Port#0 corresponds to the Octeon RLD0_* pins. */ uint64_t init_p1 : 1; /**< When a '1' is written (and the previous value was '0'), the HW init sequence(s) for Memory Port \#1 is initiated. NOTE: To initialize memory, SW must: 1) Set up the DFA_MEMCFG0[CLKDIV] ratio for intended RLDRAM operation. [legal values 0: DIV2 2: DIV3 3: DIV4] 2) Write a '1' into BOTH the DFA_MEM_CFG0[RLDCK_RST] and DFA_MEM_CFG0[RLDQCK90_RST] field at the SAME TIME. This step puts all three DLLs in SW reset (RLDCK, RLDCK90, RLDQK90 DLLs). 3) Write a '0' into the DFA_MEM_CFG0[RLDCK_RST] field. This step takes the RLDCK DLL out of soft-reset so that the DLL can generate the RLDx_CK_H/L clock pins. 4) Wait 1ms (for RLDCK DLL to achieve lock) 5) Write a '0' into DFA_MEM_CFG0[RLDQCK90_RST] field. This step takes the RLDCK90 DLL AND RLDQK90 DLL out of soft-reset. 6) Wait 1ms (for RLDCK90/RLDQK90 DLLs to achieve lock) 7) Enable memory port(s): ENA_P0=1/ENA_P1=1 8) Wait 100us (to ensure a stable clock to the RLDRAMs) - as per RLDRAM spec. - - - - - Hardware Initialization Sequence - - - - - 9) Setup the DFA_MEMCFG0[BUNK_INIT] for the bunk(s) intended to be initialized. 10) Write a '1' to the corresponding INIT_Px which will initiate a hardware initialization sequence to that 'specific' port. 11) Wait (DFA_MEMCFG0[CLKDIV] * 32K) eclk cycles. [to ensure the HW init sequence has completed before writing to ANY of the DFA_MEM* registers] - - - - - Hardware Initialization Sequence - - - - - 12) Write the DFA_MEMCFG0[BUNK_INIT]=3 to enable refreshes to BOTH bunks. NOTE: In some cases (where the address wires are routed differently between the front and back 'bunks'), SW will need to use DFA_MEMCFG0[BUNK_INIT] bits to control the Hardware initialization sequence for a 'specific bunk'. In these cases, SW would setup the BUNK_INIT and repeat Steps \#9-11 for each bunk/port. NOTE: This should only be written to a different value during power-on SW initialization. NOTE: DFA Memory Port#1 corresponds to the Octeon RLD1_* pins.
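Steps 1-12 above, collected into a hedged SW sketch for Port#0 (assumptions: cvmx_read_csr()/cvmx_write_csr(), cvmx_wait_usec() and the CVMX_DFA_MEMCFG0 address macro are SDK helpers not defined here; the final eclk-cycle waits are left as comments since their length is CLKDIV-dependent):

    cvmx_dfa_memcfg0_t cfg;
    cfg.u64 = cvmx_read_csr(CVMX_DFA_MEMCFG0);
    cfg.s.clkdiv = 0;                           /* step 1: core clock /2 (board-specific) */
    cfg.s.rldck_rst = 1;                        /* step 2: all three DLLs into soft-reset */
    cfg.s.rldqck90_rst = 1;                     /*         (one write = at the SAME TIME) */
    cvmx_write_csr(CVMX_DFA_MEMCFG0, cfg.u64);
    cfg.s.rldck_rst = 0;                        /* step 3: release the RLDCK DLL */
    cvmx_write_csr(CVMX_DFA_MEMCFG0, cfg.u64);
    cvmx_wait_usec(1000);                       /* step 4: 1ms for RLDCK DLL lock */
    cfg.s.rldqck90_rst = 0;                     /* step 5: release RLDCK90/RLDQK90 DLLs */
    cvmx_write_csr(CVMX_DFA_MEMCFG0, cfg.u64);
    cvmx_wait_usec(1000);                       /* step 6: 1ms for DLL lock */
    cfg.s.ena_p0 = 1;                           /* step 7: enable the port */
    cvmx_write_csr(CVMX_DFA_MEMCFG0, cfg.u64);
    cvmx_wait_usec(100);                        /* step 8: stable clock to the RLDRAMs */
    cfg.s.bunk_init = 1;                        /* step 9: bunk(s) to initialize */
    cfg.s.init_p0 = 1;                          /* step 10: trigger the HW init sequence */
    cvmx_write_csr(CVMX_DFA_MEMCFG0, cfg.u64);
    /* step 11: wait (CLKDIV * 32K) eclk cycles; step 12: BUNK_INIT=3 for refreshes */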
*/ uint64_t r2r_pbunk : 1; /**< When enabled, an additional command bubble is inserted if back to back reads are issued to different physical bunks. This is to avoid DQ data bus collisions when references cross between physical bunks. [NOTE: the physical bunk address boundary is determined by the PBUNK bit]. NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t pbunk : 3; /**< Physical Bunk address bit pointer. Specifies which address bit within the Longword Memory address MA[23:0] is used to determine the chip selects. [RLD_CS0_N corresponds to physical bunk \#0, and RLD_CS1_N corresponds to physical bunk \#1]. - 000: CS0_N = MA[19]/CS1_N = !MA[19] - 001: CS0_N = MA[20]/CS1_N = !MA[20] - 010: CS0_N = MA[21]/CS1_N = !MA[21] - 011: CS0_N = MA[22]/CS1_N = !MA[22] - 100: CS0_N = MA[23]/CS1_N = !MA[23] - 101-111: CS0_N = 0 /CS1_N = 1 Example(s): To build out a 128MB DFA memory, 4x 32Mx9 parts could be used to fill out TWO physical bunks (clamshell configuration). Each (of the two) physical bunks contains 2x 32Mx9 = 16Mx36. Each RLDRAM device also contains 8 internal banks, therefore the memory Address is 16M/8banks = 2M addresses/bunk (2^21). In this case, MA[21] would select the physical bunk. NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t blen : 1; /**< Device Burst Length (0=2-burst/1=4-burst) NOTE: RLDRAM-II MUST USE BLEN=0(2-burst) */ uint64_t bprch : 2; /**< Tristate Enable (back porch) (\#dclks) On reads, allows user to control the shape of the tristate disable back porch for the DQ data bus. This parameter is also very dependent on the RW_DLY and WR_DLY parameters and care must be taken when programming these parameters to avoid data bus contention. Valid range [0..2] NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t fprch : 2; /**< Tristate Enable (front porch) (\#dclks) On reads, allows user to control the shape of the tristate disable front porch for the DQ data bus. This parameter is also very dependent on the RW_DLY and WR_DLY parameters and care must be taken when programming these parameters to avoid data bus contention. Valid range [0..2] NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t wr_dly : 4; /**< Write->Read CMD Delay (\#mclks): Determines \#mclk cycles to insert when controller switches from write to read. This allows the programmer to control the data bus contention. For RLDRAM-II(BL2): (TBL=1) WR_DLY = ROUND_UP[((TWL+TBL)*2 - TSKW + FPRCH) / 2] - TRL + 1 NOTE: This should only be written to a different value during power-on SW initialization. NOTE: For aggressive(performance optimal) designs, the WR_DLY 'may' be tuned down(-1) if bus fight on W->R transitions is not pronounced. */ uint64_t rw_dly : 4; /**< Read->Write CMD Delay (\#mclks): Determines \#mclk cycles to insert when controller switches from read to write. This allows the programmer to control the data bus contention. For RLDRAM-II(BL2): (TBL=1) RW_DLY = ROUND_UP[((TRL+TBL)*2 + TSKW + BPRCH+2)/2] - TWL + 1 NOTE: This should only be written to a different value during power-on SW initialization. NOTE: For aggressive(performance optimal) designs, the RW_DLY 'may' be tuned down(-1) if bus fight on R->W transitions is not pronounced.
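The two ROUND_UP formulas above, written out as a hedged C sketch (function names are illustrative; all arguments are in the mclk/dclk units defined above, and ROUND_UP of n/2 is computed as (n+1)/2):

    /* WR_DLY = ROUND_UP[((TWL+TBL)*2 - TSKW + FPRCH) / 2] - TRL + 1 */
    static inline int dfa_wr_dly(int twl, int tbl, int tskw, int fprch, int trl)
    {
        return ((twl + tbl) * 2 - tskw + fprch + 1) / 2 - trl + 1;
    }
    /* RW_DLY = ROUND_UP[((TRL+TBL)*2 + TSKW + BPRCH+2) / 2] - TWL + 1 */
    static inline int dfa_rw_dly(int trl, int tbl, int tskw, int bprch, int twl)
    {
        return ((trl + tbl) * 2 + tskw + bprch + 2 + 1) / 2 - twl + 1;
    }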
*/ uint64_t sil_lat : 2; /**< Silo Latency (\#dclks): On reads, determines how many additional dclks to wait (on top of tRL+1) before pulling data out of the padring silos used for time domain boundary crossing. NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t mtype : 1; /**< FCRAM-II Memory Type *** O9N UNSUPPORTED *** */ uint64_t reserved_2_2 : 1; uint64_t ena_p0 : 1; /**< Enable DFA RLDRAM Port#0 When enabled, this bit lets N3K be the default driver for memory port \#0. NOTE: a customer is at liberty to enable either Port#0 or Port#1 or both. NOTE: Once a port has been disabled, it MUST NEVER be re-enabled. [the only way to enable a port is through a chip reset]. NOTE: DFA Memory Port#0 corresponds to the Octeon RLD0_* pins. */ uint64_t ena_p1 : 1; /**< Enable DFA RLDRAM Port#1 When enabled, this bit lets N3K be the default driver for memory port \#1. NOTE: a customer is at liberty to enable either Port#0 or Port#1 or both. NOTE: Once a port has been disabled, it MUST NEVER be re-enabled. [the only way to enable a port is through a chip reset]. NOTE: DFA Memory Port#1 corresponds to the Octeon RLD1_* pins. */ #else uint64_t ena_p1 : 1; uint64_t ena_p0 : 1; uint64_t reserved_2_2 : 1; uint64_t mtype : 1; uint64_t sil_lat : 2; uint64_t rw_dly : 4; uint64_t wr_dly : 4; uint64_t fprch : 2; uint64_t bprch : 2; uint64_t blen : 1; uint64_t pbunk : 3; uint64_t r2r_pbunk : 1; uint64_t init_p1 : 1; uint64_t init_p0 : 1; uint64_t bunk_init : 2; uint64_t lpp_ena : 1; uint64_t clkdiv : 2; uint64_t rldck_rst : 1; uint64_t rldqck90_rst : 1; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_dfa_memcfg0_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_28_63 : 36; uint64_t lpp_ena : 1; /**< PP Linear Port Addressing Mode Enable When enabled, PP-core LLM accesses to the lower-512MB LLM address space are sent to the single DFA port which is enabled. NOTE: If LPP_ENA=1, only one DFA RLDRAM port may be enabled for RLDRAM accesses (ie: ENA_P0 and ENA_P1 CAN NEVER BOTH be set). PP-core LLM accesses to the upper-512MB LLM address space are sent to the other 'disabled' DFA port. SW RESTRICTION: If LPP_ENA=1, then only one DFA port may be enabled for RLDRAM accesses (ie: ENA_P0 and ENA_P1 CAN NEVER BOTH be set). NOTE: This bit is used to allow PP-Core LLM accesses to a disabled port, such that each port can be sequentially addressed (ie: disable LW address interleaving). Enabling this bit allows BOTH PORTs to be active and sequentially addressable. The single port that is enabled(ENA_Px) will respond to the low-512MB LLM address space, and the other 'disabled' port will respond to the high-512MB LLM address space. Example usage: - DFA RLD0 pins used for TCAM-FPGA(CP2 accesses) - DFA RLD1 pins used for RLDRAM (DTE/CP2 accesses). USAGE NOTE: If LPP_ENA=1 and SW DOES NOT initialize the disabled port (ie: INIT_Px=0->1), then refreshes and the HW init sequence WILL NOT occur for the disabled port. If LPP_ENA=1 and SW does initialize the disabled port (INIT_Px=0->1 with ENA_Px=0), then refreshes and the HW init sequence WILL occur to the disabled port. */ uint64_t bunk_init : 2; /**< Controls the CS_N[1:0] during a) a HW Initialization sequence (triggered by DFA_MEMCFG0[INIT_Px]) or b) during a normal refresh sequence. If the BNK_INIT[x]=1, the corresponding CS_N[x] is driven. NOTE: This is required for DRAM used in a clamshell configuration, since the address lines carry Mode Register write data that is unique per bunk(or clam). 
In a clamshell configuration, The N3K A[x] pin may be tied into Clam#0's A[x] and also into Clam#1's 'mirrored' address bit A[y] (eg: Clam0 sees A[5] and Clam1 sees A[15]). To support clamshell designs, SW must initiate two separate HW init sequences for the two bunks (or clams) . Before each HW init sequence is triggered, SW must preload the DFA_MEMRLD[22:0] with the data that will be driven onto the A[22:0] wires during an MRS mode register write. NOTE: After the final HW initialization sequence has been triggered, SW must wait 64K eclks before writing the BUNK_INIT[1:0] field = 3'b11 (so that CS_N[1:0] is driven during refresh sequences in normal operation. NOTE: This should only be written to a different value during power-on SW initialization. NOTE: For MTYPE=1(FCRAM) Mode, each bunk MUST BE initialized independently. In other words, a HW init must be done for Bunk#0, and then another HW init must be done for Bunk#1 at power-on. */ uint64_t init_p0 : 1; /**< When a '1' is written (and the previous value was '0'), the HW init sequence(s) for Memory Port \#0 is initiated. NOTE: To initialize memory, SW must: 1) Enable memory port(s): a) ENA_P1=1 (single port in pass 1) OR b) ENA_P0=1/ENA_P1=1 (dual ports or single when not pass 1) 2) Wait 100us (to ensure a stable clock to the RLDRAMs) - as per RLDRAM spec. 3) Write a '1' to the corresponding INIT_Px which will initiate a hardware initialization sequence. NOTE: After writing a '1', SW must wait 64K eclk cycles to ensure the HW init sequence has completed before writing to ANY of the DFA_MEM* registers. NOTE: This should only be written to a different value during power-on SW initialization. NOTE: DFA Memory Port#0 corresponds to the Octeon RLD0_* pins. */ uint64_t init_p1 : 1; /**< When a '1' is written (and the previous value was '0'), the HW init sequence(s) for Memory Port \#1 is initiated. NOTE: To initialize memory, SW must: 1) Enable memory port(s): a) ENA_P1=1 (single port in pass 1) OR b) ENA_P0=1/ENA_P1=1 (dual ports or single when not pass 1) 2) Wait 100us (to ensure a stable clock to the RLDRAMs) - as per RLDRAM spec. 3) Write a '1' to the corresponding INIT_Px which will initiate a hardware initialization sequence. NOTE: After writing a '1', SW must wait 64K eclk cycles to ensure the HW init sequence has completed before writing to ANY of the DFA_MEM* registers. NOTE: This should only be written to a different value during power-on SW initialization. NOTE: DFA Memory Port#1 corresponds to the Octeon RLD1_* pins. */ uint64_t r2r_pbunk : 1; /**< When enabled, an additional command bubble is inserted if back to back reads are issued to different physical bunks. This is to avoid DQ data bus collisions when references cross between physical bunks. [NOTE: the physical bunk address boundary is determined by the PBUNK bit]. NOTE: This should only be written to a different value during power-on SW initialization. When MTYPE=1(FCRAM)/BLEN=0(2-burst), R2R_PBUNK SHOULD BE ZERO(for optimal performance). However, if electrically, DQ-sharing becomes a power/heat issue, then R2R_PBUNK should be set (but at a cost to performance (1/2 BW). */ uint64_t pbunk : 3; /**< Physical Bunk address bit pointer. Specifies which address bit within the Longword Memory address MA[23:0] is used to determine the chip selects. [RLD_CS0_N corresponds to physical bunk \#0, and RLD_CS1_N corresponds to physical bunk \#1]. 
- 000: CS0_N = MA[19]/CS1_N = !MA[19] - 001: CS0_N = MA[20]/CS1_N = !MA[20] - 010: CS0_N = MA[21]/CS1_N = !MA[21] - 011: CS0_N = MA[22]/CS1_N = !MA[22] - 100: CS0_N = MA[23]/CS1_N = !MA[23] - 101-111: CS0_N = 0 /CS1_N = 1 Example(s): To build out a 128MB DFA memory, 4x 32Mx9 parts could be used to fill out TWO physical bunks (clamshell configuration). Each (of the two) physical bunks contains 2x 32Mx9 = 16Mx36. Each RLDRAM device also contains 8 internal banks, therefore the memory Address is 16M/8banks = 2M addresses/bunk (2^21). In this case, MA[21] would select the physical bunk. NOTE: This should only be written to a different value during power-on SW initialization. NOTE: When MTYPE=1(FCRAM)/BLEN=0(2-burst), a "Redundant Bunk" scheme is employed to provide the highest overall performance (1 Req/ MCLK cycle). In this mode, it's imperative that SW set the PBUNK field +1 'above' the highest address bit. (such that the PBUNK extracted from the address will always be zero). In this mode, the CS_N[1:0] pins are driven to each redundant bunk based on a TDM scheme: [MCLK-EVEN=Bunk#0/MCLK-ODD=Bunk#1]. */ uint64_t blen : 1; /**< Device Burst Length (0=2-burst/1=4-burst) When BLEN=0(BL2), all QW reads/writes from CP2 are decomposed into 2 separate BL2(LW) requests to the Low-Latency memory. When BLEN=1(BL4), a LW request (from CP2 or NCB) is treated as 1 BL4(QW) request to the low latency memory. NOTE: QW refers to a 64-bit LLM Load/Store (initiated by a processor core). LW refers to a 36-bit load/store. NOTE: This should only be written to a different value during power-on SW initialization before the DFA LLM (low latency memory) is used. NOTE: MTYPE=0(RLDRAM-II) MUST USE BLEN=0(2-burst) NOTE: MTYPE=1(FCRAM)/BLEN=0(BL2) requires a multi-bunk(clam) board design. NOTE: If MTYPE=1(FCRAM)/FCRAM2P=0(II)/BLEN=1(BL4), SW SHOULD use CP2 QW read/write requests (for optimal low-latency bus performance). [LW length read/write requests(in BL4 mode) use 50% of the available bus bandwidth] NOTE: MTYPE=1(FCRAM)/FCRAM2P=0(II)/BLEN=0(BL2) can only be used with FCRAM-II devices which support BL2 mode (see: Toshiba FCRAM-II, where DQ tristate after 2 data transfers). NOTE: MTYPE=1(FCRAM)/FCRAM2P=1(II+) does not support LW write requests (FCRAM-II+ device specification has removed the variable write mask function from the devices). As such, if this mode is used, SW must be careful to issue only PP-CP2 QW write requests. */ uint64_t bprch : 2; /**< Tristate Enable (back porch) (\#dclks) On reads, allows user to control the shape of the tristate disable back porch for the DQ data bus. This parameter is also very dependent on the RW_DLY and WR_DLY parameters and care must be taken when programming these parameters to avoid data bus contention. Valid range [0..2] NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t fprch : 2; /**< Tristate Enable (front porch) (\#dclks) On reads, allows user to control the shape of the tristate disable front porch for the DQ data bus. This parameter is also very dependent on the RW_DLY and WR_DLY parameters and care must be taken when programming these parameters to avoid data bus contention. Valid range [0..2] NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t wr_dly : 4; /**< Write->Read CMD Delay (\#mclks): Determines \#mclk cycles to insert when controller switches from write to read. This allows the programmer to control the data bus contention.
For RLDRAM-II(BL2): (TBL=1) For FCRAM-II (BL4): (TBL=2) For FCRAM-II (BL2 grepl=1x ONLY): (TBL=1) For FCRAM-II (BL2 grepl>=2x): (TBL=3) NOTE: When MTYTPE=1(FCRAM-II) BLEN=0(BL2 Mode), grepl>=2x, writes require redundant bunk writes which require an additional 2 cycles before slotting the next read. WR_DLY = ROUND_UP[((TWL+TBL)*2 - TSKW + FPRCH) / 2] - TRL + 1 NOTE: This should only be written to a different value during power-on SW initialization. NOTE: For aggressive(performance optimal) designs, the WR_DLY 'may' be tuned down(-1) if bus fight on W->R transitions is not pronounced. */ uint64_t rw_dly : 4; /**< Read->Write CMD Delay (\#mclks): Determines \#mclk cycles to insert when controller switches from read to write. This allows programmer to control the data bus contention. For RLDRAM-II/FCRAM-II (BL2): (TBL=1) For FCRAM-II (BL4): (TBL=2) RW_DLY = ROUND_UP[((TRL+TBL)*2 + TSKW + BPRCH+2)/2] - TWL + 1 NOTE: This should only be written to a different value during power-on SW initialization. NOTE: For aggressive(performance optimal) designs, the RW_DLY 'may' be tuned down(-1) if bus fight on R->W transitions is not pronounced. */ uint64_t sil_lat : 2; /**< Silo Latency (\#dclks): On reads, determines how many additional dclks to wait (on top of tRL+1) before pulling data out of the padring silos used for time domain boundary crossing. NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t mtype : 1; /**< Memory Type (0=RLDRAM-II/1=Network DRAM-II/FCRAM) NOTE: N3K-P1 only supports RLDRAM-II NOTE: This should only be written to a different value during power-on SW initialization. NOTE: When MTYPE=1(FCRAM)/BLEN=0(2-burst), only the "unidirectional DS/QS" mode is supported. (see FCRAM data sheet EMRS[A6:A5]=SS(Strobe Select) register definition. [in FCRAM 2-burst mode, we use FCRAM in a clamshell configuration such that clam0 is addressed independently of clam1, and DQ is shared for optimal performance. As such it's imperative that the QS are conditionally received (and are NOT free-running), as the N3K receive data capture silos OR the clam0/1 QS strobes. NOTE: If this bit is SET, the ASX0/1 ASX_RLD_FCRAM_MODE[MODE] bit(s) should also be SET in order for the RLD0/1-PHY(s) to support FCRAM devices. */ uint64_t reserved_2_2 : 1; uint64_t ena_p0 : 1; /**< Enable DFA RLDRAM Port#0 When enabled, this bit lets N3K be the default driver for memory port \#0. NOTE: For N3K-P1, to enable Port#0(2nd port), Port#1 MUST ALSO be enabled. NOTE: For N3K-P2, single port mode, a customer is at liberty to enable either Port#0 or Port#1. NOTE: Once a port has been disabled, it MUST NEVER be re-enabled. [the only way to enable a port is through a chip reset]. NOTE: DFA Memory Port#0 corresponds to the Octeon RLD0_* pins. */ uint64_t ena_p1 : 1; /**< Enable DFA RLDRAM Port#1 When enabled, this bit lets N3K be the default driver for memory port \#1. NOTE: For N3K-P1, If the customer wishes to use a single port, s/he must enable Port#1 (and not Port#0). NOTE: For N3K-P2, single port mode, a customer is at liberty to enable either Port#0 or Port#1. NOTE: Once a port has been disabled, it MUST NEVER be re-enabled. [the only way to enable a port is through a chip reset]. NOTE: DFA Memory Port#1 corresponds to the Octeon RLD1_* pins. 
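The simpler CN38XX sequence above (steps 1-3) as a hedged sketch for pass-1, single-port use (assumptions: the same cvmx_* accessors and CVMX_DFA_MEMCFG0 macro noted earlier; pass 1 requires Port#1):

    cvmx_dfa_memcfg0_t cfg;
    cfg.u64 = cvmx_read_csr(CVMX_DFA_MEMCFG0);
    cfg.cn38xx.ena_p1 = 1;                      /* step 1: pass-1 single port must be Port#1 */
    cvmx_write_csr(CVMX_DFA_MEMCFG0, cfg.u64);
    cvmx_wait_usec(100);                        /* step 2: stable clock to the RLDRAMs */
    cfg.cn38xx.init_p1 = 1;                     /* step 3: trigger the HW init sequence */
    cvmx_write_csr(CVMX_DFA_MEMCFG0, cfg.u64);
    /* then wait 64K eclk cycles before touching any other DFA_MEM* register */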
*/ #else uint64_t ena_p1 : 1; uint64_t ena_p0 : 1; uint64_t reserved_2_2 : 1; uint64_t mtype : 1; uint64_t sil_lat : 2; uint64_t rw_dly : 4; uint64_t wr_dly : 4; uint64_t fprch : 2; uint64_t bprch : 2; uint64_t blen : 1; uint64_t pbunk : 3; uint64_t r2r_pbunk : 1; uint64_t init_p1 : 1; uint64_t init_p0 : 1; uint64_t bunk_init : 2; uint64_t lpp_ena : 1; uint64_t reserved_28_63 : 36; #endif } cn38xx; struct cvmx_dfa_memcfg0_cn38xxp2 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_27_63 : 37; uint64_t bunk_init : 2; /**< Controls the CS_N[1:0] during a) a HW Initialization sequence (triggered by DFA_MEMCFG0[INIT_Px]) or b) during a normal refresh sequence. If the BNK_INIT[x]=1, the corresponding CS_N[x] is driven. NOTE: This is required for DRAM used in a clamshell configuration, since the address lines carry Mode Register write data that is unique per bunk(or clam). In a clamshell configuration, The N3K A[x] pin may be tied into Clam#0's A[x] and also into Clam#1's 'mirrored' address bit A[y] (eg: Clam0 sees A[5] and Clam1 sees A[15]). To support clamshell designs, SW must initiate two separate HW init sequences for the two bunks (or clams) . Before each HW init sequence is triggered, SW must preload the DFA_MEMRLD[22:0] with the data that will be driven onto the A[22:0] wires during an MRS mode register write. NOTE: After the final HW initialization sequence has been triggered, SW must wait 64K eclks before writing the BUNK_INIT[1:0] field = 3'b11 (so that CS_N[1:0] is driven during refresh sequences in normal operation. NOTE: This should only be written to a different value during power-on SW initialization. NOTE: For MTYPE=1(FCRAM) Mode, each bunk MUST BE initialized independently. In other words, a HW init must be done for Bunk#0, and then another HW init must be done for Bunk#1 at power-on. */ uint64_t init_p0 : 1; /**< When a '1' is written (and the previous value was '0'), the HW init sequence(s) for Memory Port \#0 is initiated. NOTE: To initialize memory, SW must: 1) Enable memory port(s): a) ENA_P1=1 (single port in pass 1) OR b) ENA_P0=1/ENA_P1=1 (dual ports or single when not pass 1) 2) Wait 100us (to ensure a stable clock to the RLDRAMs) - as per RLDRAM spec. 3) Write a '1' to the corresponding INIT_Px which will initiate a hardware initialization sequence. NOTE: After writing a '1', SW must wait 64K eclk cycles to ensure the HW init sequence has completed before writing to ANY of the DFA_MEM* registers. NOTE: This should only be written to a different value during power-on SW initialization. NOTE: DFA Memory Port#0 corresponds to the Octeon RLD0_* pins. */ uint64_t init_p1 : 1; /**< When a '1' is written (and the previous value was '0'), the HW init sequence(s) for Memory Port \#1 is initiated. NOTE: To initialize memory, SW must: 1) Enable memory port(s): a) ENA_P1=1 (single port in pass 1) OR b) ENA_P0=1/ENA_P1=1 (dual ports or single when not pass 1) 2) Wait 100us (to ensure a stable clock to the RLDRAMs) - as per RLDRAM spec. 3) Write a '1' to the corresponding INIT_Px which will initiate a hardware initialization sequence. NOTE: After writing a '1', SW must wait 64K eclk cycles to ensure the HW init sequence has completed before writing to ANY of the DFA_MEM* registers. NOTE: This should only be written to a different value during power-on SW initialization. NOTE: DFA Memory Port#1 corresponds to the Octeon RLD1_* pins. */ uint64_t r2r_pbunk : 1; /**< When enabled, an additional command bubble is inserted if back to back reads are issued to different physical bunks. 
This is to avoid DQ data bus collisions when references cross between physical bunks. [NOTE: the physical bunk address boundary is determined by the PBUNK bit]. NOTE: This should only be written to a different value during power-on SW initialization. When MTYPE=1(FCRAM)/BLEN=0(2-burst), R2R_PBUNK SHOULD BE ZERO(for optimal performance). However, if electrically, DQ-sharing becomes a power/heat issue, then R2R_PBUNK should be set (but at a cost to performance (1/2 BW)). */ uint64_t pbunk : 3; /**< Physical Bunk address bit pointer. Specifies which address bit within the Longword Memory address MA[23:0] is used to determine the chip selects. [RLD_CS0_N corresponds to physical bunk \#0, and RLD_CS1_N corresponds to physical bunk \#1]. - 000: CS0_N = MA[19]/CS1_N = !MA[19] - 001: CS0_N = MA[20]/CS1_N = !MA[20] - 010: CS0_N = MA[21]/CS1_N = !MA[21] - 011: CS0_N = MA[22]/CS1_N = !MA[22] - 100: CS0_N = MA[23]/CS1_N = !MA[23] - 101-111: CS0_N = 0 /CS1_N = 1 Example(s): To build out a 128MB DFA memory, 4x 32Mx9 parts could be used to fill out TWO physical bunks (clamshell configuration). Each (of the two) physical bunks contains 2x 32Mx9 = 16Mx36. Each RLDRAM device also contains 8 internal banks, therefore the memory Address is 16M/8banks = 2M addresses/bunk (2^21). In this case, MA[21] would select the physical bunk. NOTE: This should only be written to a different value during power-on SW initialization. NOTE: When MTYPE=1(FCRAM)/BLEN=0(2-burst), a "Redundant Bunk" scheme is employed to provide the highest overall performance (1 Req/ MCLK cycle). In this mode, it's imperative that SW set the PBUNK field +1 'above' the highest address bit. (such that the PBUNK extracted from the address will always be zero). In this mode, the CS_N[1:0] pins are driven to each redundant bunk based on a TDM scheme: [MCLK-EVEN=Bunk#0/MCLK-ODD=Bunk#1]. */ uint64_t blen : 1; /**< Device Burst Length (0=2-burst/1=4-burst) When BLEN=0(BL2), all QW reads/writes from CP2 are decomposed into 2 separate BL2(LW) requests to the Low-Latency memory. When BLEN=1(BL4), a LW request (from CP2 or NCB) is treated as 1 BL4(QW) request to the low latency memory. NOTE: QW refers to a 64-bit LLM Load/Store (initiated by a processor core). LW refers to a 36-bit load/store. NOTE: This should only be written to a different value during power-on SW initialization before the DFA LLM (low latency memory) is used. NOTE: MTYPE=0(RLDRAM-II) MUST USE BLEN=0(2-burst) NOTE: MTYPE=1(FCRAM)/BLEN=0(BL2) requires a multi-bunk(clam) board design. NOTE: If MTYPE=1(FCRAM)/FCRAM2P=0(II)/BLEN=1(BL4), SW SHOULD use CP2 QW read/write requests (for optimal low-latency bus performance). [LW length read/write requests(in BL4 mode) use 50% of the available bus bandwidth] NOTE: MTYPE=1(FCRAM)/FCRAM2P=0(II)/BLEN=0(BL2) can only be used with FCRAM-II devices which support BL2 mode (see: Toshiba FCRAM-II, where DQ tristate after 2 data transfers). NOTE: MTYPE=1(FCRAM)/FCRAM2P=1(II+) does not support LW write requests (FCRAM-II+ device specification has removed the variable write mask function from the devices). As such, if this mode is used, SW must be careful to issue only PP-CP2 QW write requests. */ uint64_t bprch : 2; /**< Tristate Enable (back porch) (\#dclks) On reads, allows user to control the shape of the tristate disable back porch for the DQ data bus. This parameter is also very dependent on the RW_DLY and WR_DLY parameters and care must be taken when programming these parameters to avoid data bus contention.
Valid range [0..2] NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t fprch : 2; /**< Tristate Enable (front porch) (\#dclks) On reads, allows user to control the shape of the tristate disable front porch for the DQ data bus. This parameter is also very dependent on the RW_DLY and WR_DLY parameters and care must be taken when programming these parameters to avoid data bus contention. Valid range [0..2] NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t wr_dly : 4; /**< Write->Read CMD Delay (\#mclks): Determines \#mclk cycles to insert when controller switches from write to read. This allows programmer to control the data bus contention. For RLDRAM-II(BL2): (TBL=1) For FCRAM-II (BL4): (TBL=2) For FCRAM-II (BL2 grepl=1x ONLY): (TBL=1) For FCRAM-II (BL2 grepl>=2x): (TBL=3) NOTE: When MTYTPE=1(FCRAM-II) BLEN=0(BL2 Mode), grepl>=2x, writes require redundant bunk writes which require an additional 2 cycles before slotting the next read. WR_DLY = ROUND_UP[((TWL+TBL)*2 - TSKW + FPRCH) / 2] - TRL + 1 NOTE: This should only be written to a different value during power-on SW initialization. NOTE: For aggressive(performance optimal) designs, the WR_DLY 'may' be tuned down(-1) if bus fight on W->R transitions is not pronounced. */ uint64_t rw_dly : 4; /**< Read->Write CMD Delay (\#mclks): Determines \#mclk cycles to insert when controller switches from read to write. This allows programmer to control the data bus contention. For RLDRAM-II/FCRAM-II (BL2): (TBL=1) For FCRAM-II (BL4): (TBL=2) RW_DLY = ROUND_UP[((TRL+TBL)*2 + TSKW + BPRCH+2)/2] - TWL + 1 NOTE: This should only be written to a different value during power-on SW initialization. NOTE: For aggressive(performance optimal) designs, the RW_DLY 'may' be tuned down(-1) if bus fight on R->W transitions is not pronounced. */ uint64_t sil_lat : 2; /**< Silo Latency (\#dclks): On reads, determines how many additional dclks to wait (on top of tRL+1) before pulling data out of the padring silos used for time domain boundary crossing. NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t mtype : 1; /**< Memory Type (0=RLDRAM-II/1=Network DRAM-II/FCRAM) NOTE: N3K-P1 only supports RLDRAM-II NOTE: This should only be written to a different value during power-on SW initialization. NOTE: When MTYPE=1(FCRAM)/BLEN=0(2-burst), only the "unidirectional DS/QS" mode is supported. (see FCRAM data sheet EMRS[A6:A5]=SS(Strobe Select) register definition. [in FCRAM 2-burst mode, we use FCRAM in a clamshell configuration such that clam0 is addressed independently of clam1, and DQ is shared for optimal performance. As such it's imperative that the QS are conditionally received (and are NOT free-running), as the N3K receive data capture silos OR the clam0/1 QS strobes. NOTE: If this bit is SET, the ASX0/1 ASX_RLD_FCRAM_MODE[MODE] bit(s) should also be SET in order for the RLD0/1-PHY(s) to support FCRAM devices. */ uint64_t reserved_2_2 : 1; uint64_t ena_p0 : 1; /**< Enable DFA RLDRAM Port#0 When enabled, this bit lets N3K be the default driver for memory port \#0. NOTE: For N3K-P1, to enable Port#0(2nd port), Port#1 MUST ALSO be enabled. NOTE: For N3K-P2, single port mode, a customer is at liberty to enable either Port#0 or Port#1. NOTE: Once a port has been disabled, it MUST NEVER be re-enabled. [the only way to enable a port is through a chip reset]. NOTE: DFA Memory Port#0 corresponds to the Octeon RLD0_* pins. 
*/ uint64_t ena_p1 : 1; /**< Enable DFA RLDRAM Port#1 When enabled, this bit lets N3K be the default driver for memory port \#1. NOTE: For N3K-P1, If the customer wishes to use a single port, s/he must enable Port#1 (and not Port#0). NOTE: For N3K-P2, single port mode, a customer is at liberty to enable either Port#0 or Port#1. NOTE: Once a port has been disabled, it MUST NEVER be re-enabled. [the only way to enable a port is through a chip reset]. NOTE: DFA Memory Port#1 corresponds to the Octeon RLD1_* pins. */ #else uint64_t ena_p1 : 1; uint64_t ena_p0 : 1; uint64_t reserved_2_2 : 1; uint64_t mtype : 1; uint64_t sil_lat : 2; uint64_t rw_dly : 4; uint64_t wr_dly : 4; uint64_t fprch : 2; uint64_t bprch : 2; uint64_t blen : 1; uint64_t pbunk : 3; uint64_t r2r_pbunk : 1; uint64_t init_p1 : 1; uint64_t init_p0 : 1; uint64_t bunk_init : 2; uint64_t reserved_27_63 : 37; #endif } cn38xxp2; struct cvmx_dfa_memcfg0_s cn58xx; struct cvmx_dfa_memcfg0_s cn58xxp1; } cvmx_dfa_memcfg0_t; /** * cvmx_dfa_memcfg1 * * DFA_MEMCFG1 = RLDRAM Memory Timing Configuration * * Description: */ typedef union { uint64_t u64; struct cvmx_dfa_memcfg1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_34_63 : 30; uint64_t ref_intlo : 9; /**< Burst Refresh Interval[8:0] (\#dclks) For finer refresh interval granularity control. This field provides an additional level of granularity for the refresh interval. It specifies the additional \#dclks [0...511] to be added to the REF_INT[3:0] field. For RLDRAM-II: For dclk(400MHz=2.5ns): Example: 64K AREF cycles required within tREF=32ms trefint = tREF(ms)/(64K cycles/8banks) = 32ms/8K = 3.9us = 3900ns REF_INT[3:0] = ROUND_DOWN[(trefint/dclk)/512] = ROUND_DOWN[(3900/2.5)/512] = 3 REF_INTLO[8:0] = MOD[(trefint/dclk)/512] = MOD[(3900/2.5)/512] = 24 NOTE: This should only be written to a different value during power-on SW initialization. *** NOTE: PASS2 Addition */ uint64_t aref_ena : 1; /**< Auto Refresh Cycle Enable INTERNAL USE ONLY: NOTE: This mode bit is ONLY intended to be used by low-level power-on initialization routines in the event that the hardware initialization routine does not work. It allows SW to create AREF commands on the RLDRAM bus directly. When this bit is set, ALL RLDRAM writes (issued by a PP through the NCB or CP2) are converted to AREF commands on the RLDRAM bus. The write-address is presented on the A[20:0]/BA[2:0] pins (for which the RLDRAM only interprets BA[2:0]). When this bit is set, only writes are allowed and MUST use grepl=0 (1x). NOTE: This should only be written to a different value during power-on SW initialization. NOTE: MRS_ENA and AREF_ENA are mutually exclusive (SW can set one or the other, but never both!) NOTE: AREF commands generated using this method target the 'addressed' bunk. */ uint64_t mrs_ena : 1; /**< Mode Register Set Cycle Enable INTERNAL USE ONLY: NOTE: This mode bit is ONLY intended to be used by low-level power-on initialization routines in the event that the hardware initialization routine does not work. It allows SW to create MRS commands on the RLDRAM bus directly. When this bit is set, ALL RLDRAM writes (issued by a PP through the NCB or CP2) are converted to MRS commands on the RLDRAM bus. The write-address is presented on the A[20:0]/BA[2:0] pins (for which the RLDRAM only interprets A[17:0]). When this bit is set, only writes are allowed and MUST use grepl=0 (1x). NOTE: This should only be written to a different value during power-on SW initialization. 
NOTE: MRS_ENA and AREF_ENA are mutually exclusive (SW can set one or the other, but never both!) NOTE: MRS commands generated using this method target the 'addressed' bunk. */ uint64_t tmrsc : 3; /**< Mode Register Set Cycle Time (represented in \#mclks) - 000-001: RESERVED - 010: tMRSC = 2 mclks - 011: tMRSC = 3 mclks - ... - 111: tMRSC = 7 mclks NOTE: The device tMRSC parameter is a function of CL (which is not known during HW initialization). It's recommended to load the tMRSC(MAX) value to avoid timing violations. NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t trc : 4; /**< Row Cycle Time (represented in \#mclks) see also: DFA_MEMRLD[RLCFG] field which must correspond with tRL/tWL parameter(s). - 0000-0010: RESERVED - 0011: tRC = 3 mclks - 0100: tRC = 4 mclks - 0101: tRC = 5 mclks - 0110: tRC = 6 mclks - 0111: tRC = 7 mclks - 1000: tRC = 8 mclks - 1001: tRC = 9 mclks - 1010-1111: RESERVED NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t twl : 4; /**< Write Latency (represented in \#mclks) see also: DFA_MEMRLD[RLCFG] field which must correspond with tRL/tWL parameter(s). - 0000-0001: RESERVED - 0010: Write Latency (WL=2.0 mclk) - 0011: Write Latency (WL=3.0 mclks) - 0100: Write Latency (WL=4.0 mclks) - 0101: Write Latency (WL=5.0 mclks) - 0110: Write Latency (WL=6.0 mclks) - 0111: Write Latency (WL=7.0 mclks) - 1000: Write Latency (WL=8.0 mclks) - 1001: Write Latency (WL=9.0 mclks) - 1010: Write Latency (WL=10.0 mclks) - 1011-1111: RESERVED NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t trl : 4; /**< Read Latency (represented in \#mclks) see also: DFA_MEMRLD[RLCFG] field which must correspond with tRL/tWL parameter(s). - 0000-0010: RESERVED - 0011: Read Latency = 3 mclks - 0100: Read Latency = 4 mclks - 0101: Read Latency = 5 mclks - 0110: Read Latency = 6 mclks - 0111: Read Latency = 7 mclks - 1000: Read Latency = 8 mclks - 1001: Read Latency = 9 mclks - 1010: Read Latency = 10 mclks - 1011-1111: RESERVED NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t reserved_6_7 : 2; uint64_t tskw : 2; /**< Board Skew (represented in \#dclks) Represents additional board skew of DQ/DQS. - 00: board-skew = 0 dclk - 01: board-skew = 1 dclk - 10: board-skew = 2 dclk - 11: board-skew = 3 dclk NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t ref_int : 4; /**< Refresh Interval (represented in \#of 512 dclk increments). - 0000: RESERVED - 0001: 1 * 512 = 512 dclks - ... - 1111: 15 * 512 = 7680 dclks NOTE: For finer level of granularity, refer to the REF_INTLO[8:0] field. For RLDRAM-II, each refresh interval will generate a burst of 8 AREF commands, one to each of 8 explicit banks (referenced using the RLD_BA[2:0] pins). Example: For mclk=200MHz/dclk(400MHz=2.5ns): 64K AREF cycles required within tREF=32ms trefint = tREF(ms)/(64K cycles/8banks) = 32ms/8K = 3.9us = 3900ns REF_INT = ROUND_DOWN[(trefint/dclk)/512] = ROUND_DOWN[(3900/2.5)/512] = 3 NOTE: This should only be written to a different value during power-on SW initialization.
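Illustrative computation (editor's sketch of the worked example above; the helper name and picosecond units are assumptions, not from the data sheet):

  // Split the per-bank refresh interval into REF_INT (512-dclk units)
  // and REF_INTLO (remainder dclks), per ROUND_DOWN[]/MOD[] above.
  static inline void dfa_ref_interval(unsigned trefint_ps, unsigned dclk_ps,
                                      unsigned *ref_int, unsigned *ref_intlo)
  {
      unsigned dclks = trefint_ps / dclk_ps;  // e.g. 3900000/2500 = 1560
      *ref_int   = dclks / 512;               // ROUND_DOWN -> 3
      *ref_intlo = dclks % 512;               // MOD -> 24
  }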
*/ #else uint64_t ref_int : 4; uint64_t tskw : 2; uint64_t reserved_6_7 : 2; uint64_t trl : 4; uint64_t twl : 4; uint64_t trc : 4; uint64_t tmrsc : 3; uint64_t mrs_ena : 1; uint64_t aref_ena : 1; uint64_t ref_intlo : 9; uint64_t reserved_34_63 : 30; #endif } s; struct cvmx_dfa_memcfg1_s cn38xx; struct cvmx_dfa_memcfg1_s cn38xxp2; struct cvmx_dfa_memcfg1_s cn58xx; struct cvmx_dfa_memcfg1_s cn58xxp1; } cvmx_dfa_memcfg1_t; /** * cvmx_dfa_memcfg2 * * DFA_MEMCFG2 = DFA Memory Config Register \#2 * *** NOTE: Pass2 Addition * * Description: Additional Memory Configuration CSRs to support FCRAM-II/II+ and Network DRAM-II */ typedef union { uint64_t u64; struct cvmx_dfa_memcfg2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t dteclkdis : 1; /**< DFA DTE Clock Disable When SET, the DFA clocks for DTE(thread engine) operation are disabled. NOTE: When SET, SW MUST NEVER issue ANY operations to the DFA via the NCB Bus. All DFA Operations must be issued solely through the CP2 interface. *** NOTE: PASS2 Addition NOTE: When DTECLKDIS=1, if CP2 Errors are encountered (ie: CP2SBE, CP2DBE, CP2PERR), the DFA_MEMFADR CSR does not reflect the failing address/ctl information. */ uint64_t silrst : 1; /**< LLM-PHY Silo Reset Writing a '1' (when the previous value was a '0') causes the LLM-PHY Silo read/write pointers to be reset. NOTE: SW MUST WAIT 400 dclks after the LAST HW Init sequence was launched (ie: INIT_START 0->1 CSR write), before the SILRST can be triggered (0->1). */ uint64_t trfc : 5; /**< FCRAM-II Refresh Interval *** O9N UNSUPPORTED *** */ uint64_t refshort : 1; /**< FCRAM Short Refresh Mode *** O9N UNSUPPORTED *** */ uint64_t ua_start : 2; /**< FCRAM-II Upper Address Start *** O9N UNSUPPORTED *** */ uint64_t maxbnk : 1; /**< Maximum Banks per-device (used by the address mapper when extracting address bits for the memory bank#). - 0: 4 banks/device - 1: 8 banks/device *** NOTE: PASS2 Addition */ uint64_t fcram2p : 1; /**< FCRAM-II+ Mode Enable *** O9N UNSUPPORTED *** */ #else uint64_t fcram2p : 1; uint64_t maxbnk : 1; uint64_t ua_start : 2; uint64_t refshort : 1; uint64_t trfc : 5; uint64_t silrst : 1; uint64_t dteclkdis : 1; uint64_t reserved_12_63 : 52; #endif } s; struct cvmx_dfa_memcfg2_s cn38xx; struct cvmx_dfa_memcfg2_s cn38xxp2; struct cvmx_dfa_memcfg2_s cn58xx; struct cvmx_dfa_memcfg2_s cn58xxp1; } cvmx_dfa_memcfg2_t; /** * cvmx_dfa_memfadr * * DFA_MEMFADR = RLDRAM Failing Address/Control Register * * Description: DFA Memory Failing Address/Control Error Capture information * This register contains useful information to help in isolating an RLDRAM memory failure. * NOTE: The first detected SEC/DED/PERR failure is captured in DFA_MEMFADR, however, a DED or PERR (which is * more severe) will always overwrite a SEC error. The user can 'infer' the source of the interrupt * via the FSRC field. * NOTE: If DFA_MEMCFG2[DTECLKDIS]=1, the contents of this register are UNDEFINED.
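*
* Illustrative decode (editor's sketch; assumes the cvmx_read_csr() helper and the
* CVMX_DFA_MEMFADR address macro from the executive headers):
*
*   cvmx_dfa_memfadr_t fadr;
*   fadr.u64 = cvmx_read_csr(CVMX_DFA_MEMFADR);
*   // FSRC distinguishes NCB-DTE/NCB-CSR/PP-CP2 fills; see the tables below.
*   printf("DFA fail: fsrc=%u pnum=%u bnum=%u maddr=0x%x\n",
*          (unsigned)fadr.cn38xx.fsrc, (unsigned)fadr.cn38xx.pnum,
*          (unsigned)fadr.cn38xx.bnum, (unsigned)fadr.cn38xx.maddr);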
*/ typedef union { uint64_t u64; struct cvmx_dfa_memfadr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_24_63 : 40; uint64_t maddr : 24; /**< Memory Address */ #else uint64_t maddr : 24; uint64_t reserved_24_63 : 40; #endif } s; struct cvmx_dfa_memfadr_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_40_63 : 24; uint64_t fdst : 9; /**< Fill-Destination FSRC[1:0] | FDST[8:0] -------------+------------------------------------- 0(NCB-DTE) | [fillstart,2'b0,WIDX(1),DMODE(1),DTE(4)] 1(NCB-CSR) | [ncbSRC[8:0]] 3(CP2-PP) | [2'b0,SIZE(1),INDEX(1),PP(4),FID(1)] where: DTE: DFA Thread Engine ID# PP: Packet Processor ID# FID: Fill-ID# (unique per PP) WIDX: 16b SIMPLE Mode (index) DMODE: (0=16b SIMPLE/1=32b SIMPLE) SIZE: (0=LW Mode access/1=QW Mode Access) INDEX: (0=Low LW/1=High LW) NOTE: QW refers to a 56/64-bit LLM Load/Store (initiated by a processor core). LW refers to a 32-bit load/store. */ uint64_t fsrc : 2; /**< Fill-Source (0=NCB-DTE/1=NCB-CSR/2=RESERVED/3=PP-CP2) */ uint64_t pnum : 1; /**< Memory Port NOTE: For O2P, this bit will always return zero. */ uint64_t bnum : 3; /**< Memory Bank When DFA_DDR2_ADDR[RNK_LO]=1, BNUM[2]=RANK[0]. (RANK[1] can be inferred from MADDR[24:0]) */ uint64_t maddr : 25; /**< Memory Address */ #else uint64_t maddr : 25; uint64_t bnum : 3; uint64_t pnum : 1; uint64_t fsrc : 2; uint64_t fdst : 9; uint64_t reserved_40_63 : 24; #endif } cn31xx; struct cvmx_dfa_memfadr_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_39_63 : 25; uint64_t fdst : 9; /**< Fill-Destination FSRC[1:0] | FDST[8:0] -------------+------------------------------------- 0(NCB-DTE) | [fillstart,2'b0,WIDX(1),DMODE(1),DTE(4)] 1(NCB-CSR) | [ncbSRC[8:0]] 3(CP2-PP) | [2'b0,SIZE(1),INDEX(1),PP(4),FID(1)] where: DTE: DFA Thread Engine ID# PP: Packet Processor ID# FID: Fill-ID# (unique per PP) WIDX: 18b SIMPLE Mode (index) DMODE: (0=18b SIMPLE/1=36b SIMPLE) SIZE: (0=LW Mode access/1=QW Mode Access) INDEX: (0=Low LW/1=High LW) NOTE: QW refers to a 64-bit LLM Load/Store (initiated by a processor core). LW refers to a 36-bit load/store. */ uint64_t fsrc : 2; /**< Fill-Source (0=NCB-DTE/1=NCB-CSR/2=RESERVED/3=PP-CP2) */ uint64_t pnum : 1; /**< Memory Port NOTE: the port IDs are reversed PNUM==0 => port#1 PNUM==1 => port#0 */ uint64_t bnum : 3; /**< Memory Bank */ uint64_t maddr : 24; /**< Memory Address */ #else uint64_t maddr : 24; uint64_t bnum : 3; uint64_t pnum : 1; uint64_t fsrc : 2; uint64_t fdst : 9; uint64_t reserved_39_63 : 25; #endif } cn38xx; struct cvmx_dfa_memfadr_cn38xx cn38xxp2; struct cvmx_dfa_memfadr_cn38xx cn58xx; struct cvmx_dfa_memfadr_cn38xx cn58xxp1; } cvmx_dfa_memfadr_t; /** * cvmx_dfa_memfcr * * DFA_MEMFCR = FCRAM MRS Register(s) EMRS2[14:0], EMRS1[14:0], MRS[14:0] * *** O9N UNSUPPORTED *** * * Notes: * For FCRAM-II please consult your device's data sheet for further details: * MRS Definition: * A[13:8]=0 RESERVED * A[7]=0 TEST MODE (N3K requires test mode 0:"disabled") * A[6:4] CAS LATENCY (fully programmable - SW must ensure that the value programmed * into DFA_MEM_CFG0[TRL] corresponds with this value). * A[3]=0 BURST TYPE (N3K requires 0:"Sequential" Burst Type) * A[2:0] BURST LENGTH Burst Length [1:BL2/2:BL4] (N3K only supports BL=2,4) * * In BL2 mode(for highest performance), only 1/2 the physical * memory is unique (ie: each bunk stores the same information). * In BL4 mode(highest capacity), all of the physical memory * is unique (ie: each bunk is uniquely addressable).
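*
* Illustrative MRS composition (editor's sketch; bit positions follow the MRS
* Definition above, and the chosen values are examples only):
*
*   uint16_t mrs = 0;
*   mrs |= 4u << 4;   // A[6:4] CAS latency = 4; must match DFA_MEM_CFG0[TRL]
*   mrs |= 0u << 3;   // A[3]   burst type = 0 ("Sequential", required by N3K)
*   mrs |= 2u;        // A[2:0] burst length = 2 (BL4)
*   // mrs would then be programmed into DFA_MEMFCR[MRS] (see below).
*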
* EMRS Definition: * A[13:12] REFRESH MODE (N3K Supports only 0:"Conventional" and 1:"Short" auto-refresh modes) * * (SW must ensure that the value programmed into DFA_MEMCFG2[REFSHORT] * is also reflected in the Refresh Mode encoding). * A[11:7]=0 RESERVED * A[6:5]=2 STROBE SELECT (N3K supports only 2:"Unidirectional DS/QS" mode - the read capture * silos rely on a conditional QS strobe) * A[4:3] DIC(QS) QS Drive Strength: fully programmable (consult your FCRAM-II data sheet) * [0: Normal Output Drive/1: Strong Output Drive/2: Weak output Drive] * A[2:1] DIC(DQ) DQ Drive Strength: fully programmable (consult your FCRAM-II data sheet) * [0: Normal Output Drive/1: Strong Output Drive/2: Weak output Drive] * A[0] DLL DLL Enable: Programmable [0:DLL Enable/1: DLL Disable] * * EMRS2 Definition: (for FCRAM-II+) * A[13:11]=0 RESERVED * A[10:8] ODTDS On Die Termination (DS+/-) * [0: ODT Disable /1: 15ohm termination /(2-7): RESERVED] * A[7:6]=0 MBW Multi-Bank Write: (N3K requires use of 0:"single bank" mode only) * A[5:3] ODTin On Die Termination (input pin) * [0: ODT Disable /1: 15ohm termination /(2-7): RESERVED] * A[2:0] ODTDQ On Die Termination (DQ) * [0: ODT Disable /1: 15ohm termination /(2-7): RESERVED] */ typedef union { uint64_t u64; struct cvmx_dfa_memfcr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_47_63 : 17; uint64_t emrs2 : 15; /**< Memory Address[14:0] during EMRS2(for FCRAM-II+) *** O9N UNSUPPORTED *** */ uint64_t reserved_31_31 : 1; uint64_t emrs : 15; /**< Memory Address[14:0] during EMRS *** O9N UNSUPPORTED *** A[0]=1: DLL Enabled) */ uint64_t reserved_15_15 : 1; uint64_t mrs : 15; /**< FCRAM Memory Address[14:0] during MRS *** O9N UNSUPPORTED *** A[6:4]=4 CAS LATENCY=4(default) A[3]=0 Burst Type(must be 0:Sequential) A[2:0]=2 Burst Length=4(default) */ #else uint64_t mrs : 15; uint64_t reserved_15_15 : 1; uint64_t emrs : 15; uint64_t reserved_31_31 : 1; uint64_t emrs2 : 15; uint64_t reserved_47_63 : 17; #endif } s; struct cvmx_dfa_memfcr_s cn38xx; struct cvmx_dfa_memfcr_s cn38xxp2; struct cvmx_dfa_memfcr_s cn58xx; struct cvmx_dfa_memfcr_s cn58xxp1; } cvmx_dfa_memfcr_t; /** * cvmx_dfa_memrld * * DFA_MEMRLD = DFA RLDRAM MRS Register Values * * Description: */ typedef union { uint64_t u64; struct cvmx_dfa_memrld_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_23_63 : 41; uint64_t mrsdat : 23; /**< This field represents the data driven onto the A[22:0] address lines during MRS(Mode Register Set) commands (during a HW init sequence). This field corresponds with the Mode Register Bit Map from your RLDRAM-II device specific data sheet. A[17:10]: RESERVED A[9]: ODT (on die termination) A[8]: Impedance Matching A[7]: DLL Reset A[6]: UNUSED A[5]: Address Mux (for N3K: MUST BE ZERO) A[4:3]: Burst Length (for N3K: MUST BE ZERO) A[2:0]: Configuration (see data sheet for specific RLDRAM-II device). - 000-001: CFG=1 [tRC=4/tRL=4/tWL=5] - 010: CFG=2 [tRC=6/tRL=6/tWL=7] - 011: CFG=3 [tRC=8/tRL=8/tWL=9] - 100-111: RESERVED NOTE: For additional density, the RLDRAM-II parts can be 'clamshelled' (ie: two devices mounted on different sides of the PCB board), since the BGA pinout supports 'mirroring'. To support a clamshell design, SW must preload the MRSDAT[22:0] with the proper A[22:0] pin mapping which is dependent on the 'selected' bunk/clam (see also: DFA_MEMCFG0[BUNK_INIT] field). NOTE: Care MUST BE TAKEN NOT to write to this register within 64K eclk cycles of a HW INIT (see: INIT_P0/INIT_P1). 
NOTE: This should only be written to a different value during power-on SW initialization. */ #else uint64_t mrsdat : 23; uint64_t reserved_23_63 : 41; #endif } s; struct cvmx_dfa_memrld_s cn38xx; struct cvmx_dfa_memrld_s cn38xxp2; struct cvmx_dfa_memrld_s cn58xx; struct cvmx_dfa_memrld_s cn58xxp1; } cvmx_dfa_memrld_t; /** * cvmx_dfa_ncbctl * * DFA_NCBCTL = DFA NCB CTL Register * * Description: */ typedef union { uint64_t u64; struct cvmx_dfa_ncbctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_11_63 : 53; uint64_t sbdnum : 5; /**< SBD Debug Entry# For internal use only. (DFA Scoreboard debug) Selects which one of 32 DFA Scoreboard entries is latched into the DFA_SBD_DBG[0-3] registers. */ uint64_t sbdlck : 1; /**< DFA Scoreboard LOCK Strobe For internal use only. (DFA Scoreboard debug) When written with a '1', the DFA Scoreboard Debug registers (DFA_SBD_DBG[0-3]) are all locked down. This allows SW to lock down the contents of the entire SBD for a single instant in time. All subsequent reads of the DFA scoreboard registers will return the data from that instant in time. */ uint64_t dcmode : 1; /**< DRF-CRQ/DTE Arbiter Mode DTE-DRF Arbiter (0=FP [LP=CRQ/HP=DTE],1=RR) NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t dtmode : 1; /**< DRF-DTE Arbiter Mode DTE-DRF Arbiter (0=FP [LP=DTE[15],...,HP=DTE[0]],1=RR) NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t pmode : 1; /**< NCB-NRP Arbiter Mode (0=Fixed Priority [LP=WQF,DFF,HP=RGF]/1=RR) NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t qmode : 1; /**< NCB-NRQ Arbiter Mode (0=Fixed Priority [LP=IRF,RWF,PRF,HP=GRF]/1=RR) NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t imode : 1; /**< NCB-Inbound Arbiter (0=FP [LP=NRQ,HP=NRP], 1=RR) NOTE: This should only be written to a different value during power-on SW initialization. */ #else uint64_t imode : 1; uint64_t qmode : 1; uint64_t pmode : 1; uint64_t dtmode : 1; uint64_t dcmode : 1; uint64_t sbdlck : 1; uint64_t sbdnum : 5; uint64_t reserved_11_63 : 53; #endif } s; struct cvmx_dfa_ncbctl_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_10_63 : 54; uint64_t sbdnum : 4; /**< SBD Debug Entry# For internal use only. (DFA Scoreboard debug) Selects which one of 16 DFA Scoreboard entries is latched into the DFA_SBD_DBG[0-3] registers. */ uint64_t sbdlck : 1; /**< DFA Scoreboard LOCK Strobe For internal use only. (DFA Scoreboard debug) When written with a '1', the DFA Scoreboard Debug registers (DFA_SBD_DBG[0-3]) are all locked down. This allows SW to lock down the contents of the entire SBD for a single instant in time. All subsequent reads of the DFA scoreboard registers will return the data from that instant in time. */ uint64_t dcmode : 1; /**< DRF-CRQ/DTE Arbiter Mode DTE-DRF Arbiter (0=FP [LP=CRQ/HP=DTE],1=RR) NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t dtmode : 1; /**< DRF-DTE Arbiter Mode DTE-DRF Arbiter (0=FP [LP=DTE[15],...,HP=DTE[0]],1=RR) NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t pmode : 1; /**< NCB-NRP Arbiter Mode (0=Fixed Priority [LP=WQF,DFF,HP=RGF]/1=RR) NOTE: This should only be written to a different value during power-on SW initialization.
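Illustrative power-on programming (editor's sketch; assumes the cvmx_read_csr()/cvmx_write_csr() helpers and the CVMX_DFA_NCBCTL address macro from the executive headers):

  cvmx_dfa_ncbctl_t ncbctl;
  ncbctl.u64 = cvmx_read_csr(CVMX_DFA_NCBCTL);
  ncbctl.s.pmode = 1;   // NCB-NRP arbiter: round-robin
  ncbctl.s.qmode = 1;   // NCB-NRQ arbiter: round-robin
  ncbctl.s.imode = 1;   // NCB-inbound arbiter: round-robin
  cvmx_write_csr(CVMX_DFA_NCBCTL, ncbctl.u64);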
*/ uint64_t qmode : 1; /**< NCB-NRQ Arbiter Mode (0=Fixed Priority [LP=IRF,RWF,PRF,HP=GRF]/1=RR) NOTE: This should only be written to a different value during power-on SW initialization. */ uint64_t imode : 1; /**< NCB-Inbound Arbiter (0=FP [LP=NRQ,HP=NRP], 1=RR) NOTE: This should only be written to a different value during power-on SW initialization. */ #else uint64_t imode : 1; uint64_t qmode : 1; uint64_t pmode : 1; uint64_t dtmode : 1; uint64_t dcmode : 1; uint64_t sbdlck : 1; uint64_t sbdnum : 4; uint64_t reserved_10_63 : 54; #endif } cn38xx; struct cvmx_dfa_ncbctl_cn38xx cn38xxp2; struct cvmx_dfa_ncbctl_s cn58xx; struct cvmx_dfa_ncbctl_s cn58xxp1; } cvmx_dfa_ncbctl_t; /** * cvmx_dfa_rodt_comp_ctl * * DFA_RODT_COMP_CTL = DFA RLD Compensation control (For read "on die termination") * */ typedef union { uint64_t u64; struct cvmx_dfa_rodt_comp_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_17_63 : 47; uint64_t enable : 1; /**< Read On Die Termination Enable (0=disable, 1=enable) */ uint64_t reserved_12_15 : 4; uint64_t nctl : 4; /**< Compensation control bits */ uint64_t reserved_5_7 : 3; uint64_t pctl : 5; /**< Compensation control bits */ #else uint64_t pctl : 5; uint64_t reserved_5_7 : 3; uint64_t nctl : 4; uint64_t reserved_12_15 : 4; uint64_t enable : 1; uint64_t reserved_17_63 : 47; #endif } s; struct cvmx_dfa_rodt_comp_ctl_s cn58xx; struct cvmx_dfa_rodt_comp_ctl_s cn58xxp1; } cvmx_dfa_rodt_comp_ctl_t; /** * cvmx_dfa_sbd_dbg0 * * DFA_SBD_DBG0 = DFA Scoreboard Debug \#0 Register * * Description: When the DFA_NCBCTL[SBDLCK] bit is written '1', the contents of this register are locked down. * Otherwise, the contents of this register are the 'active' contents of the DFA Scoreboard at the time of the * CSR read. * VERIFICATION NOTE: Read data is unsafe. X's(undefined data) can propagate (in the behavioral model) * on the reads unless the DTE Engine specified by DFA_NCBCTL[SBDNUM] has previously been assigned an * instruction. */ typedef union { uint64_t u64; struct cvmx_dfa_sbd_dbg0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t sbd0 : 64; /**< DFA ScoreBoard \#0 Data For internal use only!
(DFA Scoreboard Debug) [63:40] rptr[26:3]: Result Base Pointer [39:24] rwcnt[15:0] Cumulative Result Write Counter [23] lastgrdrsp: Last Gather-Rd Response [22] wtgrdrsp: Waiting Gather-Rd Response [21] wtgrdreq: Waiting for Gather-Rd Issue [20] glvld: GLPTR/GLCNT Valid [19] cmpmark: Completion Marked Node Detected [18:17] cmpcode[1:0]: Completion Code [0=PDGONE/1=PERR/2=RFULL/3=TERM] [16] cmpdet: Completion Detected [15] wthdrwrcmtrsp: Waiting for HDR RWrCmtRsp [14] wtlastwrcmtrsp: Waiting for LAST RESULT RWrCmtRsp [13] hdrwrreq: Waiting for HDR RWrReq [12] wtrwrreq: Waiting for RWrReq [11] wtwqwrreq: Waiting for WQWrReq issue [10] lastprdrspeot: Last Packet-Rd Response [9] lastprdrsp: Last Packet-Rd Response [8] wtprdrsp: Waiting for PRdRsp EOT [7] wtprdreq: Waiting for PRdReq Issue [6] lastpdvld: PDPTR/PDLEN Valid [5] pdvld: Packet Data Valid [4] wqvld: WQVLD [3] wqdone: WorkQueue Done condition a) WQWrReq issued(for WQPTR<>0) OR b) HDR RWrCmtRsp completed) [2] rwstf: Resultant write STF/P Mode [1] pdldt: Packet-Data LDT mode [0] gmode: Gather-Mode */ #else uint64_t sbd0 : 64; #endif } s; struct cvmx_dfa_sbd_dbg0_s cn31xx; struct cvmx_dfa_sbd_dbg0_s cn38xx; struct cvmx_dfa_sbd_dbg0_s cn38xxp2; struct cvmx_dfa_sbd_dbg0_s cn58xx; struct cvmx_dfa_sbd_dbg0_s cn58xxp1; } cvmx_dfa_sbd_dbg0_t; /** * cvmx_dfa_sbd_dbg1 * * DFA_SBD_DBG1 = DFA Scoreboard Debug \#1 Register * * Description: When the DFA_NCBCTL[SBDLCK] bit is written '1', the contents of this register are locked down. * Otherwise, the contents of this register are the 'active' contents of the DFA Scoreboard at the time of the * CSR read. * VERIFICATION NOTE: Read data is unsafe. X's(undefined data) can propagate (in the behavioral model) * on the reads unless the DTE Engine specified by DFA_NCBCTL[SBDNUM] has previously been assigned an * instruction. */ typedef union { uint64_t u64; struct cvmx_dfa_sbd_dbg1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t sbd1 : 64; /**< DFA ScoreBoard \#1 Data For internal use only! (DFA Scoreboard Debug) [63:61] wqptr[35:33]: Work Queue Pointer [60:52] rptr[35:27]: Result Base Pointer [51:16] pdptr[35:0]: Packet Data Pointer [15:0] pdcnt[15:0]: Packet Data Counter */ #else uint64_t sbd1 : 64; #endif } s; struct cvmx_dfa_sbd_dbg1_s cn31xx; struct cvmx_dfa_sbd_dbg1_s cn38xx; struct cvmx_dfa_sbd_dbg1_s cn38xxp2; struct cvmx_dfa_sbd_dbg1_s cn58xx; struct cvmx_dfa_sbd_dbg1_s cn58xxp1; } cvmx_dfa_sbd_dbg1_t; /** * cvmx_dfa_sbd_dbg2 * * DFA_SBD_DBG2 = DFA Scoreboard Debug \#2 Register * * Description: When the DFA_NCBCTL[SBDLCK] bit is written '1', the contents of this register are locked down. * Otherwise, the contents of this register are the 'active' contents of the DFA Scoreboard at the time of the * CSR read. * VERIFICATION NOTE: Read data is unsafe. X's(undefined data) can propagate (in the behavioral model) * on the reads unless the DTE Engine specified by DFA_NCBCTL[SBDNUM] has previously been assigned an * instruction. 
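*
* Illustrative dump (editor's sketch; assumes the cvmx_read_csr()/cvmx_write_csr()
* helpers and the CVMX_DFA_NCBCTL/CVMX_DFA_SBD_DBG2 address macros from the
* executive headers):
*
*   cvmx_dfa_ncbctl_t ncbctl;
*   ncbctl.u64 = cvmx_read_csr(CVMX_DFA_NCBCTL);
*   ncbctl.s.sbdnum = 0;   // select scoreboard entry 0
*   ncbctl.s.sbdlck = 1;   // latch all four DFA_SBD_DBG* registers
*   cvmx_write_csr(CVMX_DFA_NCBCTL, ncbctl.u64);
*   uint64_t sbd2 = cvmx_read_csr(CVMX_DFA_SBD_DBG2);  // locked snapshot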
*/ typedef union { uint64_t u64; struct cvmx_dfa_sbd_dbg2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t sbd2 : 64; /**< DFA ScoreBoard \#2 Data [63:49] wqptr[17:3]: Work Queue Pointer [48:16] rwptr[35:3]: Result Write Pointer [15:0] prwcnt[15:0]: Pending Result Write Counter */ #else uint64_t sbd2 : 64; #endif } s; struct cvmx_dfa_sbd_dbg2_s cn31xx; struct cvmx_dfa_sbd_dbg2_s cn38xx; struct cvmx_dfa_sbd_dbg2_s cn38xxp2; struct cvmx_dfa_sbd_dbg2_s cn58xx; struct cvmx_dfa_sbd_dbg2_s cn58xxp1; } cvmx_dfa_sbd_dbg2_t; /** * cvmx_dfa_sbd_dbg3 * * DFA_SBD_DBG3 = DFA Scoreboard Debug \#3 Register * * Description: When the DFA_NCBCTL[SBDLCK] bit is written '1', the contents of this register are locked down. * Otherwise, the contents of this register are the 'active' contents of the DFA Scoreboard at the time of the * CSR read. * VERIFICATION NOTE: Read data is unsafe. X's(undefined data) can propagate (in the behavioral model) * on the reads unless the DTE Engine specified by DFA_NCBCTL[SBDNUM] has previously been assigned an * instruction. */ typedef union { uint64_t u64; struct cvmx_dfa_sbd_dbg3_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t sbd3 : 64; /**< DFA ScoreBoard \#3 Data [63:49] wqptr[32:18]: Work Queue Pointer [48:16] glptr[35:3]: Gather List Pointer [15:0] glcnt[15:0]: Gather List Counter */ #else uint64_t sbd3 : 64; #endif } s; struct cvmx_dfa_sbd_dbg3_s cn31xx; struct cvmx_dfa_sbd_dbg3_s cn38xx; struct cvmx_dfa_sbd_dbg3_s cn38xxp2; struct cvmx_dfa_sbd_dbg3_s cn58xx; struct cvmx_dfa_sbd_dbg3_s cn58xxp1; } cvmx_dfa_sbd_dbg3_t; /** * cvmx_fpa_bist_status * * FPA_BIST_STATUS = BIST Status of FPA Memories * * The result of the BIST run on the FPA memories. */ typedef union { uint64_t u64; struct cvmx_fpa_bist_status_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t frd : 1; /**< fpa_frd memory bist status. */ uint64_t fpf0 : 1; /**< fpa_fpf0 memory bist status. */ uint64_t fpf1 : 1; /**< fpa_fpf1 memory bist status. */ uint64_t ffr : 1; /**< fpa_ffr memory bist status. */ uint64_t fdr : 1; /**< fpa_fdr memory bist status. */ #else uint64_t fdr : 1; uint64_t ffr : 1; uint64_t fpf1 : 1; uint64_t fpf0 : 1; uint64_t frd : 1; uint64_t reserved_5_63 : 59; #endif } s; struct cvmx_fpa_bist_status_s cn30xx; struct cvmx_fpa_bist_status_s cn31xx; struct cvmx_fpa_bist_status_s cn38xx; struct cvmx_fpa_bist_status_s cn38xxp2; struct cvmx_fpa_bist_status_s cn50xx; struct cvmx_fpa_bist_status_s cn52xx; struct cvmx_fpa_bist_status_s cn52xxp1; struct cvmx_fpa_bist_status_s cn56xx; struct cvmx_fpa_bist_status_s cn56xxp1; struct cvmx_fpa_bist_status_s cn58xx; struct cvmx_fpa_bist_status_s cn58xxp1; } cvmx_fpa_bist_status_t; /** * cvmx_fpa_ctl_status * * FPA_CTL_STATUS = FPA's Control/Status Register * * The FPA's control and status register. */ typedef union { uint64_t u64; struct cvmx_fpa_ctl_status_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_18_63 : 46; uint64_t reset : 1; /**< When set causes a reset of the FPA with the exception of the RSL. */ uint64_t use_ldt : 1; /**< When clear '0' the FPA will use LDT to load pointers from the L2C. */ uint64_t use_stt : 1; /**< When clear '0' the FPA will use STT to store pointers to the L2C. */ uint64_t enb : 1; /**< Must be set to 1 AFTER writing all config registers and 10 cycles have passed. If any of the config registers are written after writing this bit the FPA may begin to operate incorrectly. */ uint64_t mem1_err : 7; /**< Causes a flip of the ECC bits [38:32], corresponding to bits [6:0] of this field, for FPF FIFO 1.
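Illustrative bring-up order (editor's sketch; assumes the cvmx_read_csr()/cvmx_write_csr() helpers and the CVMX_FPA_CTL_STATUS address macro from the executive headers):

  // Program every FPA_FPF*_SIZE/FPA_FPF*_MARKS register first; then, per
  // the ENB description above, set ENB as the final configuration step.
  cvmx_fpa_ctl_status_t ctl;
  ctl.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
  ctl.s.enb = 1;
  cvmx_write_csr(CVMX_FPA_CTL_STATUS, ctl.u64);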
*/ uint64_t mem0_err : 7; /**< Causes a flip of the ECC bits [38:32], corresponding to bits [6:0] of this field, for FPF FIFO 0. */ #else uint64_t mem0_err : 7; uint64_t mem1_err : 7; uint64_t enb : 1; uint64_t use_stt : 1; uint64_t use_ldt : 1; uint64_t reset : 1; uint64_t reserved_18_63 : 46; #endif } s; struct cvmx_fpa_ctl_status_s cn30xx; struct cvmx_fpa_ctl_status_s cn31xx; struct cvmx_fpa_ctl_status_s cn38xx; struct cvmx_fpa_ctl_status_s cn38xxp2; struct cvmx_fpa_ctl_status_s cn50xx; struct cvmx_fpa_ctl_status_s cn52xx; struct cvmx_fpa_ctl_status_s cn52xxp1; struct cvmx_fpa_ctl_status_s cn56xx; struct cvmx_fpa_ctl_status_s cn56xxp1; struct cvmx_fpa_ctl_status_s cn58xx; struct cvmx_fpa_ctl_status_s cn58xxp1; } cvmx_fpa_ctl_status_t; /** * cvmx_fpa_fpf#_marks * * FPA_FPF1_MARKS = FPA's Queue 1 Free Page FIFO Read Write Marks * * The high and low watermark register that determines when we write and read free pages from L2C * for Queue 1. The values of FPF_RD and FPF_WR should differ by at least 33. Recommended values * are FPF_RD == (FPA_FPF#_SIZE[FPF_SIZ] * .25) and FPF_WR == (FPA_FPF#_SIZE[FPF_SIZ] * .75) */ typedef union { uint64_t u64; struct cvmx_fpa_fpfx_marks_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_22_63 : 42; uint64_t fpf_wr : 11; /**< When the number of free-page-pointers in a queue exceeds this value the FPA will write 32-page-pointers of that queue to DRAM. The MAX value for this field should be FPA_FPF0_SIZE[FPF_SIZ]-2. */ uint64_t fpf_rd : 11; /**< When the number of free-page-pointers in a queue drops below this value and there are free-page-pointers in DRAM, the FPA will read one page (32 pointers) from DRAM. The maximum value for this field should be FPA_FPF0_SIZE[FPF_SIZ]-34. The minimum value for this field is 16. */ #else uint64_t fpf_rd : 11; uint64_t fpf_wr : 11; uint64_t reserved_22_63 : 42; #endif } s; struct cvmx_fpa_fpfx_marks_s cn38xx; struct cvmx_fpa_fpfx_marks_s cn38xxp2; struct cvmx_fpa_fpfx_marks_s cn56xx; struct cvmx_fpa_fpfx_marks_s cn56xxp1; struct cvmx_fpa_fpfx_marks_s cn58xx; struct cvmx_fpa_fpfx_marks_s cn58xxp1; } cvmx_fpa_fpfx_marks_t; /** * cvmx_fpa_fpf#_size * * FPA_FPFX_SIZE = FPA's Queue 1-7 Free Page FIFO Size * * The number of page pointers that will be kept local to the FPA for this Queue. FPA Queues are * assigned in order from Queue 0 to Queue 7, though only Queue 0 through Queue x can be used. * The sum of the 8 (0-7) FPA_FPF#_SIZE registers must be limited to 2048. */ typedef union { uint64_t u64; struct cvmx_fpa_fpfx_size_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_11_63 : 53; uint64_t fpf_siz : 11; /**< The number of entries assigned in the FPA FIFO (used to hold page-pointers) for this Queue. The value of this register must be divisible by 2, and the FPA will ignore bit [0] of this register. The total of the FPF_SIZ field of the 8 (0-7) FPA_FPF#_SIZE registers must not exceed 2048. After writing this field the FPA will need 10 core clock cycles to be ready for operation. The assignment of locations in the FPA FIFO must start with Queue 0, then 1, 2, etc. The number of usable entries will be FPF_SIZ-2.
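Illustrative sizing (editor's sketch following the FPA_FPF#_MARKS recommendation; cvmx_write_csr() is from the executive headers, while the indexed CVMX_FPA_FPFX_SIZE(q)/CVMX_FPA_FPFX_MARKS(q) macro forms are assumptions):

  unsigned q = 1, siz = 256;                 // queue 1, 256 FIFO entries
  cvmx_fpa_fpfx_size_t size;  size.u64 = 0;
  size.s.fpf_siz = siz;                      // must be even
  cvmx_write_csr(CVMX_FPA_FPFX_SIZE(q), size.u64);
  cvmx_fpa_fpfx_marks_t marks; marks.u64 = 0;
  marks.s.fpf_rd = siz / 4;                  // ~= FPF_SIZ * .25
  marks.s.fpf_wr = (siz * 3) / 4;            // ~= FPF_SIZ * .75
  cvmx_write_csr(CVMX_FPA_FPFX_MARKS(q), marks.u64);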
*/ #else uint64_t fpf_siz : 11; uint64_t reserved_11_63 : 53; #endif } s; struct cvmx_fpa_fpfx_size_s cn38xx; struct cvmx_fpa_fpfx_size_s cn38xxp2; struct cvmx_fpa_fpfx_size_s cn56xx; struct cvmx_fpa_fpfx_size_s cn56xxp1; struct cvmx_fpa_fpfx_size_s cn58xx; struct cvmx_fpa_fpfx_size_s cn58xxp1; } cvmx_fpa_fpfx_size_t; /** * cvmx_fpa_fpf0_marks * * FPA_FPF0_MARKS = FPA's Queue 0 Free Page FIFO Read Write Marks * * The high and low watermark register that determines when we write and read free pages from L2C * for Queue 0. The values of FPF_RD and FPF_WR should differ by at least 33. Recommended values * are FPF_RD == (FPA_FPF#_SIZE[FPF_SIZ] * .25) and FPF_WR == (FPA_FPF#_SIZE[FPF_SIZ] * .75) */ typedef union { uint64_t u64; struct cvmx_fpa_fpf0_marks_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_24_63 : 40; uint64_t fpf_wr : 12; /**< When the number of free-page-pointers in a queue exceeds this value the FPA will write 32-page-pointers of that queue to DRAM. The MAX value for this field should be FPA_FPF0_SIZE[FPF_SIZ]-2. */ uint64_t fpf_rd : 12; /**< When the number of free-page-pointers in a queue drops below this value and there are free-page-pointers in DRAM, the FPA will read one page (32 pointers) from DRAM. The maximum value for this field should be FPA_FPF0_SIZE[FPF_SIZ]-34. The minimum value for this field is 16. */ #else uint64_t fpf_rd : 12; uint64_t fpf_wr : 12; uint64_t reserved_24_63 : 40; #endif } s; struct cvmx_fpa_fpf0_marks_s cn38xx; struct cvmx_fpa_fpf0_marks_s cn38xxp2; struct cvmx_fpa_fpf0_marks_s cn56xx; struct cvmx_fpa_fpf0_marks_s cn56xxp1; struct cvmx_fpa_fpf0_marks_s cn58xx; struct cvmx_fpa_fpf0_marks_s cn58xxp1; } cvmx_fpa_fpf0_marks_t; /** * cvmx_fpa_fpf0_size * * FPA_FPF0_SIZE = FPA's Queue 0 Free Page FIFO Size * * The number of page pointers that will be kept local to the FPA for this Queue. FPA Queues are * assigned in order from Queue 0 to Queue 7, though only Queue 0 through Queue x can be used. * The sum of the 8 (0-7) FPA_FPF#_SIZE registers must be limited to 2048. */ typedef union { uint64_t u64; struct cvmx_fpa_fpf0_size_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t fpf_siz : 12; /**< The number of entries assigned in the FPA FIFO (used to hold page-pointers) for this Queue. The value of this register must be divisible by 2, and the FPA will ignore bit [0] of this register. The total of the FPF_SIZ field of the 8 (0-7) FPA_FPF#_SIZE registers must not exceed 2048. After writing this field the FPA will need 10 core clock cycles to be ready for operation. The assignment of locations in the FPA FIFO must start with Queue 0, then 1, 2, etc. The number of usable entries will be FPF_SIZ-2. */ #else uint64_t fpf_siz : 12; uint64_t reserved_12_63 : 52; #endif } s; struct cvmx_fpa_fpf0_size_s cn38xx; struct cvmx_fpa_fpf0_size_s cn38xxp2; struct cvmx_fpa_fpf0_size_s cn56xx; struct cvmx_fpa_fpf0_size_s cn56xxp1; struct cvmx_fpa_fpf0_size_s cn58xx; struct cvmx_fpa_fpf0_size_s cn58xxp1; } cvmx_fpa_fpf0_size_t; /** * cvmx_fpa_int_enb * * FPA_INT_ENB = FPA's Interrupt Enable * * The FPA's interrupt enable register. */ typedef union { uint64_t u64; struct cvmx_fpa_int_enb_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_28_63 : 36; uint64_t q7_perr : 1; /**< When set (1) and bit 27 of the FPA_INT_SUM register is asserted the FPA will assert an interrupt. */ uint64_t q7_coff : 1; /**< When set (1) and bit 26 of the FPA_INT_SUM register is asserted the FPA will assert an interrupt.
*/ uint64_t q7_und : 1; /**< When set (1) and bit 25 of the FPA_INT_SUM register is asserted the FPA will assert an interrupt. */ uint64_t q6_perr : 1; /**< When set (1) and bit 24 of the FPA_INT_SUM register is asserted the FPA will assert an interrupt. */ uint64_t q6_coff : 1; /**< When set (1) and bit 23 of the FPA_INT_SUM register is asserted the FPA will assert an interrupt. */ uint64_t q6_und : 1; /**< When set (1) and bit 22 of the FPA_INT_SUM register is asserted the FPA will assert an interrupt. */ uint64_t q5_perr : 1; /**< When set (1) and bit 21 of the FPA_INT_SUM register is asserted the FPA will assert an interrupt. */ uint64_t q5_coff : 1; /**< When set (1) and bit 20 of the FPA_INT_SUM register is asserted the FPA will assert an interrupt. */ uint64_t q5_und : 1; /**< When set (1) and bit 19 of the FPA_INT_SUM register is asserted the FPA will assert an interrupt. */ uint64_t q4_perr : 1; /**< When set (1) and bit 18 of the FPA_INT_SUM register is asserted the FPA will assert an interrupt. */ uint64_t q4_coff : 1; /**< When set (1) and bit 17 of the FPA_INT_SUM register is asserted the FPA will assert an interrupt. */ uint64_t q4_und : 1; /**< When set (1) and bit 16 of the FPA_INT_SUM register is asserted the FPA will assert an interrupt. */ uint64_t q3_perr : 1; /**< When set (1) and bit 15 of the FPA_INT_SUM register is asserted the FPA will assert an interrupt. */ uint64_t q3_coff : 1; /**< When set (1) and bit 14 of the FPA_INT_SUM register is asserted the FPA will assert an interrupt. */ uint64_t q3_und : 1; /**< When set (1) and bit 13 of the FPA_INT_SUM register is asserted the FPA will assert an interrupt. */ uint64_t q2_perr : 1; /**< When set (1) and bit 12 of the FPA_INT_SUM register is asserted the FPA will assert an interrupt. */ uint64_t q2_coff : 1; /**< When set (1) and bit 11 of the FPA_INT_SUM register is asserted the FPA will assert an interrupt. */ uint64_t q2_und : 1; /**< When set (1) and bit 10 of the FPA_INT_SUM register is asserted the FPA will assert an interrupt. */ uint64_t q1_perr : 1; /**< When set (1) and bit 9 of the FPA_INT_SUM register is asserted the FPA will assert an interrupt. */ uint64_t q1_coff : 1; /**< When set (1) and bit 8 of the FPA_INT_SUM register is asserted the FPA will assert an interrupt. */ uint64_t q1_und : 1; /**< When set (1) and bit 7 of the FPA_INT_SUM register is asserted the FPA will assert an interrupt. */ uint64_t q0_perr : 1; /**< When set (1) and bit 6 of the FPA_INT_SUM register is asserted the FPA will assert an interrupt. */ uint64_t q0_coff : 1; /**< When set (1) and bit 5 of the FPA_INT_SUM register is asserted the FPA will assert an interrupt. */ uint64_t q0_und : 1; /**< When set (1) and bit 4 of the FPA_INT_SUM register is asserted the FPA will assert an interrupt. */ uint64_t fed1_dbe : 1; /**< When set (1) and bit 3 of the FPA_INT_SUM register is asserted the FPA will assert an interrupt. */ uint64_t fed1_sbe : 1; /**< When set (1) and bit 2 of the FPA_INT_SUM register is asserted the FPA will assert an interrupt. */ uint64_t fed0_dbe : 1; /**< When set (1) and bit 1 of the FPA_INT_SUM register is asserted the FPA will assert an interrupt. */ uint64_t fed0_sbe : 1; /**< When set (1) and bit 0 of the FPA_INT_SUM register is asserted the FPA will assert an interrupt. 
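Illustrative enable (editor's sketch; assumes cvmx_write_csr() and the CVMX_FPA_INT_ENB address macro from the executive headers):

  cvmx_fpa_int_enb_t enb;
  enb.u64 = 0;
  enb.s.fed0_sbe = 1;   // report FPF0 single-bit ECC errors
  enb.s.fed0_dbe = 1;   // report FPF0 double-bit ECC errors
  enb.s.q0_und   = 1;   // report queue 0 page-count underflow
  cvmx_write_csr(CVMX_FPA_INT_ENB, enb.u64);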
*/ #else uint64_t fed0_sbe : 1; uint64_t fed0_dbe : 1; uint64_t fed1_sbe : 1; uint64_t fed1_dbe : 1; uint64_t q0_und : 1; uint64_t q0_coff : 1; uint64_t q0_perr : 1; uint64_t q1_und : 1; uint64_t q1_coff : 1; uint64_t q1_perr : 1; uint64_t q2_und : 1; uint64_t q2_coff : 1; uint64_t q2_perr : 1; uint64_t q3_und : 1; uint64_t q3_coff : 1; uint64_t q3_perr : 1; uint64_t q4_und : 1; uint64_t q4_coff : 1; uint64_t q4_perr : 1; uint64_t q5_und : 1; uint64_t q5_coff : 1; uint64_t q5_perr : 1; uint64_t q6_und : 1; uint64_t q6_coff : 1; uint64_t q6_perr : 1; uint64_t q7_und : 1; uint64_t q7_coff : 1; uint64_t q7_perr : 1; uint64_t reserved_28_63 : 36; #endif } s; struct cvmx_fpa_int_enb_s cn30xx; struct cvmx_fpa_int_enb_s cn31xx; struct cvmx_fpa_int_enb_s cn38xx; struct cvmx_fpa_int_enb_s cn38xxp2; struct cvmx_fpa_int_enb_s cn50xx; struct cvmx_fpa_int_enb_s cn52xx; struct cvmx_fpa_int_enb_s cn52xxp1; struct cvmx_fpa_int_enb_s cn56xx; struct cvmx_fpa_int_enb_s cn56xxp1; struct cvmx_fpa_int_enb_s cn58xx; struct cvmx_fpa_int_enb_s cn58xxp1; } cvmx_fpa_int_enb_t; /** * cvmx_fpa_int_sum * * FPA_INT_SUM = FPA's Interrupt Summary Register * * Contains the different interrupt summary bits of the FPA. */ typedef union { uint64_t u64; struct cvmx_fpa_int_sum_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_28_63 : 36; uint64_t q7_perr : 1; /**< Set when a Queue7 pointer read from the stack in the L2C does not have the FPA ownership bit set. */ uint64_t q7_coff : 1; /**< Set when a Queue7 stack end tag is present and the count available is greater than pointers present in the FPA. */ uint64_t q7_und : 1; /**< Set when a Queue7 page count available goes negative. */ uint64_t q6_perr : 1; /**< Set when a Queue6 pointer read from the stack in the L2C does not have the FPA ownership bit set. */ uint64_t q6_coff : 1; /**< Set when a Queue6 stack end tag is present and the count available is greater than pointers present in the FPA. */ uint64_t q6_und : 1; /**< Set when a Queue6 page count available goes negative. */ uint64_t q5_perr : 1; /**< Set when a Queue5 pointer read from the stack in the L2C does not have the FPA ownership bit set. */ uint64_t q5_coff : 1; /**< Set when a Queue5 stack end tag is present and the count available is greater than pointers present in the FPA. */ uint64_t q5_und : 1; /**< Set when a Queue5 page count available goes negative. */ uint64_t q4_perr : 1; /**< Set when a Queue4 pointer read from the stack in the L2C does not have the FPA ownership bit set. */ uint64_t q4_coff : 1; /**< Set when a Queue4 stack end tag is present and the count available is greater than pointers present in the FPA. */ uint64_t q4_und : 1; /**< Set when a Queue4 page count available goes negative. */ uint64_t q3_perr : 1; /**< Set when a Queue3 pointer read from the stack in the L2C does not have the FPA ownership bit set. */ uint64_t q3_coff : 1; /**< Set when a Queue3 stack end tag is present and the count available is greater than pointers present in the FPA. */ uint64_t q3_und : 1; /**< Set when a Queue3 page count available goes negative. */ uint64_t q2_perr : 1; /**< Set when a Queue2 pointer read from the stack in the L2C does not have the FPA ownership bit set. */ uint64_t q2_coff : 1; /**< Set when a Queue2 stack end tag is present and the count available is greater than pointers present in the FPA. */ uint64_t q2_und : 1; /**< Set when a Queue2 page count available goes negative.
*/ uint64_t q1_perr : 1; /**< Set when a Queue1 pointer read from the stack in the L2C does not have the FPA ownership bit set. */ uint64_t q1_coff : 1; /**< Set when a Queue1 stack end tag is present and the count available is greater than pointers present in the FPA. */ uint64_t q1_und : 1; /**< Set when a Queue1 page count available goes negative. */ uint64_t q0_perr : 1; /**< Set when a Queue0 pointer read from the stack in the L2C does not have the FPA ownership bit set. */ uint64_t q0_coff : 1; /**< Set when a Queue0 stack end tag is present and the count available is greater than pointers present in the FPA. */ uint64_t q0_und : 1; /**< Set when a Queue0 page count available goes negative. */ uint64_t fed1_dbe : 1; /**< Set when a Double Bit Error is detected in FPF1. */ uint64_t fed1_sbe : 1; /**< Set when a Single Bit Error is detected in FPF1. */ uint64_t fed0_dbe : 1; /**< Set when a Double Bit Error is detected in FPF0. */ uint64_t fed0_sbe : 1; /**< Set when a Single Bit Error is detected in FPF0. */ #else uint64_t fed0_sbe : 1; uint64_t fed0_dbe : 1; uint64_t fed1_sbe : 1; uint64_t fed1_dbe : 1; uint64_t q0_und : 1; uint64_t q0_coff : 1; uint64_t q0_perr : 1; uint64_t q1_und : 1; uint64_t q1_coff : 1; uint64_t q1_perr : 1; uint64_t q2_und : 1; uint64_t q2_coff : 1; uint64_t q2_perr : 1; uint64_t q3_und : 1; uint64_t q3_coff : 1; uint64_t q3_perr : 1; uint64_t q4_und : 1; uint64_t q4_coff : 1; uint64_t q4_perr : 1; uint64_t q5_und : 1; uint64_t q5_coff : 1; uint64_t q5_perr : 1; uint64_t q6_und : 1; uint64_t q6_coff : 1; uint64_t q6_perr : 1; uint64_t q7_und : 1; uint64_t q7_coff : 1; uint64_t q7_perr : 1; uint64_t reserved_28_63 : 36; #endif } s; struct cvmx_fpa_int_sum_s cn30xx; struct cvmx_fpa_int_sum_s cn31xx; struct cvmx_fpa_int_sum_s cn38xx; struct cvmx_fpa_int_sum_s cn38xxp2; struct cvmx_fpa_int_sum_s cn50xx; struct cvmx_fpa_int_sum_s cn52xx; struct cvmx_fpa_int_sum_s cn52xxp1; struct cvmx_fpa_int_sum_s cn56xx; struct cvmx_fpa_int_sum_s cn56xxp1; struct cvmx_fpa_int_sum_s cn58xx; struct cvmx_fpa_int_sum_s cn58xxp1; } cvmx_fpa_int_sum_t; /** * cvmx_fpa_que#_available * * FPA_QUEX_PAGES_AVAILABLE = FPA's Queue 0-7 Free Page Available Register * * The number of page pointers that are available in the FPA and local DRAM. */ typedef union { uint64_t u64; struct cvmx_fpa_quex_available_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_29_63 : 35; uint64_t que_siz : 29; /**< The number of free pages available in this Queue. */ #else uint64_t que_siz : 29; uint64_t reserved_29_63 : 35; #endif } s; struct cvmx_fpa_quex_available_s cn30xx; struct cvmx_fpa_quex_available_s cn31xx; struct cvmx_fpa_quex_available_s cn38xx; struct cvmx_fpa_quex_available_s cn38xxp2; struct cvmx_fpa_quex_available_s cn50xx; struct cvmx_fpa_quex_available_s cn52xx; struct cvmx_fpa_quex_available_s cn52xxp1; struct cvmx_fpa_quex_available_s cn56xx; struct cvmx_fpa_quex_available_s cn56xxp1; struct cvmx_fpa_quex_available_s cn58xx; struct cvmx_fpa_quex_available_s cn58xxp1; } cvmx_fpa_quex_available_t; /** * cvmx_fpa_que#_page_index * * FPA_QUE0_PAGE_INDEX = FPA's Queue0 Page Index * * The present page index for queue 0 of the FPA. * This number reflects the number of pages of pointers that have been written to memory * for this queue. */ typedef union { uint64_t u64; struct cvmx_fpa_quex_page_index_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_25_63 : 39; uint64_t pg_num : 25; /**< Page number.
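Illustrative read (editor's sketch; cvmx_read_csr() is from the executive headers, while the indexed CVMX_FPA_QUEX_AVAILABLE(q)/CVMX_FPA_QUEX_PAGE_INDEX(q) macro forms are assumptions):

  cvmx_fpa_quex_available_t avail;
  avail.u64 = cvmx_read_csr(CVMX_FPA_QUEX_AVAILABLE(0));
  cvmx_fpa_quex_page_index_t idx;
  idx.u64 = cvmx_read_csr(CVMX_FPA_QUEX_PAGE_INDEX(0));
  // avail.s.que_siz: free pages available to queue 0 (FPA + DRAM);
  // idx.s.pg_num:    pages of pointers written out to memory so far.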
*/ #else uint64_t pg_num : 25; uint64_t reserved_25_63 : 39; #endif } s; struct cvmx_fpa_quex_page_index_s cn30xx; struct cvmx_fpa_quex_page_index_s cn31xx; struct cvmx_fpa_quex_page_index_s cn38xx; struct cvmx_fpa_quex_page_index_s cn38xxp2; struct cvmx_fpa_quex_page_index_s cn50xx; struct cvmx_fpa_quex_page_index_s cn52xx; struct cvmx_fpa_quex_page_index_s cn52xxp1; struct cvmx_fpa_quex_page_index_s cn56xx; struct cvmx_fpa_quex_page_index_s cn56xxp1; struct cvmx_fpa_quex_page_index_s cn58xx; struct cvmx_fpa_quex_page_index_s cn58xxp1; } cvmx_fpa_quex_page_index_t; /** * cvmx_fpa_que_act * * FPA_QUE_ACT = FPA's Queue# Actual Page Index * * When an INT_SUM[PERR#] occurs this will be latched with the value read from L2C. * This is latched on the first error and will not latch again until all errors are cleared. */ typedef union { uint64_t u64; struct cvmx_fpa_que_act_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_29_63 : 35; uint64_t act_que : 3; /**< FPA-queue-number read from memory. */ uint64_t act_indx : 26; /**< Page number read from memory. */ #else uint64_t act_indx : 26; uint64_t act_que : 3; uint64_t reserved_29_63 : 35; #endif } s; struct cvmx_fpa_que_act_s cn30xx; struct cvmx_fpa_que_act_s cn31xx; struct cvmx_fpa_que_act_s cn38xx; struct cvmx_fpa_que_act_s cn38xxp2; struct cvmx_fpa_que_act_s cn50xx; struct cvmx_fpa_que_act_s cn52xx; struct cvmx_fpa_que_act_s cn52xxp1; struct cvmx_fpa_que_act_s cn56xx; struct cvmx_fpa_que_act_s cn56xxp1; struct cvmx_fpa_que_act_s cn58xx; struct cvmx_fpa_que_act_s cn58xxp1; } cvmx_fpa_que_act_t; /** * cvmx_fpa_que_exp * * FPA_QUE_EXP = FPA's Queue# Expected Page Index * * When an INT_SUM[PERR#] occurs this will be latched with the expected value. * This is latched on the first error and will not latch again until all errors are cleared. */ typedef union { uint64_t u64; struct cvmx_fpa_que_exp_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_29_63 : 35; uint64_t exp_que : 3; /**< Expected fpa-queue-number read from memory. */ uint64_t exp_indx : 26; /**< Expected page number read from memory. */ #else uint64_t exp_indx : 26; uint64_t exp_que : 3; uint64_t reserved_29_63 : 35; #endif } s; struct cvmx_fpa_que_exp_s cn30xx; struct cvmx_fpa_que_exp_s cn31xx; struct cvmx_fpa_que_exp_s cn38xx; struct cvmx_fpa_que_exp_s cn38xxp2; struct cvmx_fpa_que_exp_s cn50xx; struct cvmx_fpa_que_exp_s cn52xx; struct cvmx_fpa_que_exp_s cn52xxp1; struct cvmx_fpa_que_exp_s cn56xx; struct cvmx_fpa_que_exp_s cn56xxp1; struct cvmx_fpa_que_exp_s cn58xx; struct cvmx_fpa_que_exp_s cn58xxp1; } cvmx_fpa_que_exp_t; /** * cvmx_fpa_wart_ctl * * FPA_WART_CTL = FPA's WART Control * * Control and status for the WART block. */ typedef union { uint64_t u64; struct cvmx_fpa_wart_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t ctl : 16; /**< Control information. */ #else uint64_t ctl : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_fpa_wart_ctl_s cn30xx; struct cvmx_fpa_wart_ctl_s cn31xx; struct cvmx_fpa_wart_ctl_s cn38xx; struct cvmx_fpa_wart_ctl_s cn38xxp2; struct cvmx_fpa_wart_ctl_s cn50xx; struct cvmx_fpa_wart_ctl_s cn52xx; struct cvmx_fpa_wart_ctl_s cn52xxp1; struct cvmx_fpa_wart_ctl_s cn56xx; struct cvmx_fpa_wart_ctl_s cn56xxp1; struct cvmx_fpa_wart_ctl_s cn58xx; struct cvmx_fpa_wart_ctl_s cn58xxp1; } cvmx_fpa_wart_ctl_t; /** * cvmx_fpa_wart_status * * FPA_WART_STATUS = FPA's WART Status * * Control and status for the WART block.
*/ typedef union { uint64_t u64; struct cvmx_fpa_wart_status_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t status : 32; /**< Status information. */ #else uint64_t status : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_fpa_wart_status_s cn30xx; struct cvmx_fpa_wart_status_s cn31xx; struct cvmx_fpa_wart_status_s cn38xx; struct cvmx_fpa_wart_status_s cn38xxp2; struct cvmx_fpa_wart_status_s cn50xx; struct cvmx_fpa_wart_status_s cn52xx; struct cvmx_fpa_wart_status_s cn52xxp1; struct cvmx_fpa_wart_status_s cn56xx; struct cvmx_fpa_wart_status_s cn56xxp1; struct cvmx_fpa_wart_status_s cn58xx; struct cvmx_fpa_wart_status_s cn58xxp1; } cvmx_fpa_wart_status_t; /** * cvmx_gmx#_bad_reg * * GMX_BAD_REG = A collection of things that have gone very, very wrong * * * Notes: * In XAUI mode, only the lsb (corresponding to port0) of INB_NXA, LOSTSTAT, and OUT_OVR are used. * */ typedef union { uint64_t u64; struct cvmx_gmxx_bad_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_31_63 : 33; uint64_t inb_nxa : 4; /**< Inbound port > GMX_RX_PRTS */ uint64_t statovr : 1; /**< TX Statistics overflow */ uint64_t loststat : 4; /**< TX Statistics data was over-written (per RGM port) TX Stats are corrupted */ uint64_t reserved_18_21 : 4; uint64_t out_ovr : 16; /**< Outbound data FIFO overflow (per port) */ uint64_t ncb_ovr : 1; /**< Outbound NCB FIFO Overflow */ uint64_t out_col : 1; /**< Outbound collision occurred between PKO and NCB */ #else uint64_t out_col : 1; uint64_t ncb_ovr : 1; uint64_t out_ovr : 16; uint64_t reserved_18_21 : 4; uint64_t loststat : 4; uint64_t statovr : 1; uint64_t inb_nxa : 4; uint64_t reserved_31_63 : 33; #endif } s; struct cvmx_gmxx_bad_reg_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_31_63 : 33; uint64_t inb_nxa : 4; /**< Inbound port > GMX_RX_PRTS */ uint64_t statovr : 1; /**< TX Statistics overflow */ uint64_t reserved_25_25 : 1; uint64_t loststat : 3; /**< TX Statistics data was over-written (per RGM port) TX Stats are corrupted */ uint64_t reserved_5_21 : 17; uint64_t out_ovr : 3; /**< Outbound data FIFO overflow (per port) */ uint64_t reserved_0_1 : 2; #else uint64_t reserved_0_1 : 2; uint64_t out_ovr : 3; uint64_t reserved_5_21 : 17; uint64_t loststat : 3; uint64_t reserved_25_25 : 1; uint64_t statovr : 1; uint64_t inb_nxa : 4; uint64_t reserved_31_63 : 33; #endif } cn30xx; struct cvmx_gmxx_bad_reg_cn30xx cn31xx; struct cvmx_gmxx_bad_reg_s cn38xx; struct cvmx_gmxx_bad_reg_s cn38xxp2; struct cvmx_gmxx_bad_reg_cn30xx cn50xx; struct cvmx_gmxx_bad_reg_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_31_63 : 33; uint64_t inb_nxa : 4; /**< Inbound port > GMX_RX_PRTS */ uint64_t statovr : 1; /**< TX Statistics overflow The common FIFO to SGMII and XAUI had an overflow TX Stats are corrupted */ uint64_t loststat : 4; /**< TX Statistics data was over-written In SGMII, one bit per port In XAUI, only port0 is used TX Stats are corrupted */ uint64_t reserved_6_21 : 16; uint64_t out_ovr : 4; /**< Outbound data FIFO overflow (per port) */ uint64_t reserved_0_1 : 2; #else uint64_t reserved_0_1 : 2; uint64_t out_ovr : 4; uint64_t reserved_6_21 : 16; uint64_t loststat : 4; uint64_t statovr : 1; uint64_t inb_nxa : 4; uint64_t reserved_31_63 : 33; #endif } cn52xx; struct cvmx_gmxx_bad_reg_cn52xx cn52xxp1; struct cvmx_gmxx_bad_reg_cn52xx cn56xx; struct cvmx_gmxx_bad_reg_cn52xx cn56xxp1; struct cvmx_gmxx_bad_reg_s cn58xx; struct cvmx_gmxx_bad_reg_s cn58xxp1; } cvmx_gmxx_bad_reg_t; /** * cvmx_gmx#_bist * * GMX_BIST = GMX BIST
Results * */ typedef union { uint64_t u64; struct cvmx_gmxx_bist_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_17_63 : 47; uint64_t status : 17; /**< BIST Results. HW sets a bit in BIST for memory that fails - 0: gmx#.inb.fif_bnk0 - 1: gmx#.inb.fif_bnk1 - 2: gmx#.inb.fif_bnk2 - 3: gmx#.inb.fif_bnk3 - 4: gmx#.outb.fif.fif_bnk0 - 5: gmx#.outb.fif.fif_bnk1 - 6: gmx#.outb.fif.fif_bnk2 - 7: gmx#.outb.fif.fif_bnk3 - 8: gmx#.csr.gmi0.srf8x64m1_bist - 9: gmx#.csr.gmi1.srf8x64m1_bist - 10: gmx#.csr.gmi2.srf8x64m1_bist - 11: gmx#.csr.gmi3.srf8x64m1_bist - 12: gmx#.csr.drf20x80m1_bist - 13: gmx#.outb.stat.drf16x27m1_bist - 14: gmx#.outb.stat.drf40x64m1_bist - 15: gmx#.outb.ncb.drf16x76m1_bist - 16: gmx#.outb.fif.srf32x16m2_bist */ #else uint64_t status : 17; uint64_t reserved_17_63 : 47; #endif } s; struct cvmx_gmxx_bist_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_10_63 : 54; uint64_t status : 10; /**< BIST Results. HW sets a bit in BIST for memory that fails - 0: gmx#.inb.dpr512x78m4_bist - 1: gmx#.outb.fif.dpr512x71m4_bist - 2: gmx#.csr.gmi0.srf8x64m1_bist - 3: gmx#.csr.gmi1.srf8x64m1_bist - 4: gmx#.csr.gmi2.srf8x64m1_bist - 5: 0 - 6: gmx#.csr.drf20x80m1_bist - 7: gmx#.outb.stat.drf16x27m1_bist - 8: gmx#.outb.stat.drf40x64m1_bist - 9: 0 */ #else uint64_t status : 10; uint64_t reserved_10_63 : 54; #endif } cn30xx; struct cvmx_gmxx_bist_cn30xx cn31xx; struct cvmx_gmxx_bist_cn30xx cn38xx; struct cvmx_gmxx_bist_cn30xx cn38xxp2; struct cvmx_gmxx_bist_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t status : 12; /**< BIST Results. HW sets a bit in BIST for memory that fails */ #else uint64_t status : 12; uint64_t reserved_12_63 : 52; #endif } cn50xx; struct cvmx_gmxx_bist_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t status : 16; /**< BIST Results. HW sets a bit in BIST for memory that fails - 0: gmx#.inb.fif_bnk0 - 1: gmx#.inb.fif_bnk1 - 2: gmx#.inb.fif_bnk2 - 3: gmx#.inb.fif_bnk3 - 4: gmx#.outb.fif.fif_bnk0 - 5: gmx#.outb.fif.fif_bnk1 - 6: gmx#.outb.fif.fif_bnk2 - 7: gmx#.outb.fif.fif_bnk3 - 8: gmx#.csr.gmi0.srf8x64m1_bist - 9: gmx#.csr.gmi1.srf8x64m1_bist - 10: gmx#.csr.gmi2.srf8x64m1_bist - 11: gmx#.csr.gmi3.srf8x64m1_bist - 12: gmx#.csr.drf20x80m1_bist - 13: gmx#.outb.stat.drf16x27m1_bist - 14: gmx#.outb.stat.drf40x64m1_bist - 15: xgmii.tx.drf16x38m1_async_bist */ #else uint64_t status : 16; uint64_t reserved_16_63 : 48; #endif } cn52xx; struct cvmx_gmxx_bist_cn52xx cn52xxp1; struct cvmx_gmxx_bist_cn52xx cn56xx; struct cvmx_gmxx_bist_cn52xx cn56xxp1; struct cvmx_gmxx_bist_s cn58xx; struct cvmx_gmxx_bist_s cn58xxp1; } cvmx_gmxx_bist_t; /** * cvmx_gmx#_clk_en * * DO NOT DOCUMENT THIS REGISTER - IT IS NOT OFFICIAL * */ typedef union { uint64_t u64; struct cvmx_gmxx_clk_en_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t clk_en : 1; /**< Force the clock enables on */ #else uint64_t clk_en : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_gmxx_clk_en_s cn52xx; struct cvmx_gmxx_clk_en_s cn52xxp1; struct cvmx_gmxx_clk_en_s cn56xx; struct cvmx_gmxx_clk_en_s cn56xxp1; } cvmx_gmxx_clk_en_t; /** * cvmx_gmx#_hg2_control * * Notes: * The HiGig2 TX and RX enables would normally both be set together for HiGig2 messaging. However, * setting just the TX or RX bit will result in only the HG2 message transmit or the receive * capability. * The PHYS_EN and LOGL_EN bits, when 1, allow link pause or back pressure to PKO as per received * HiGig2 message.
When 0, link pause and back pressure to PKO in response to received messages * are disabled. * * GMX*_TX_XAUI_CTL[HG_EN] must be set to one (to enable HiGig) whenever either HG2TX_EN or HG2RX_EN * are set. * * GMX*_RX0_UDD_SKP[LEN] must be set to 16 (to select HiGig2) whenever either HG2TX_EN or HG2RX_EN * are set. * * GMX*_TX_OVR_BP[EN<0>] must be set to one and GMX*_TX_OVR_BP[BP<0>] must be cleared to zero * (to forcibly disable HW-automatic 802.3 pause packet generation) with the HiGig2 Protocol when * GMX*_HG2_CONTROL[HG2TX_EN]=0. (The HiGig2 protocol is indicated by GMX*_TX_XAUI_CTL[HG_EN]=1 * and GMX*_RX0_UDD_SKP[LEN]=16.) The HW can only auto-generate backpressure via HiGig2 messages * (optionally, when HG2TX_EN=1) with the HiGig2 protocol. */ typedef union { uint64_t u64; struct cvmx_gmxx_hg2_control_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_19_63 : 45; uint64_t hg2tx_en : 1; /**< Enable Transmission of HG2 phys and logl messages When set, also disables HW auto-generated (802.3 and CBFC) pause frames. (OCTEON cannot generate proper 802.3 or CBFC pause frames in HiGig2 mode.) */ uint64_t hg2rx_en : 1; /**< Enable extraction and processing of HG2 message packet from RX flow. Physical and logical pause info is used to pause the physical link and back-pressure PKO. HG2RX_EN must be set when HiGig2 messages are present in the receive stream. */ uint64_t phys_en : 1; /**< 1 bit physical link pause enable for received HiGig2 physical pause message */ uint64_t logl_en : 16; /**< 16 bit XOFF enables for received HiGig2 messages or CBFC packets */ #else uint64_t logl_en : 16; uint64_t phys_en : 1; uint64_t hg2rx_en : 1; uint64_t hg2tx_en : 1; uint64_t reserved_19_63 : 45; #endif } s; struct cvmx_gmxx_hg2_control_s cn52xx; struct cvmx_gmxx_hg2_control_s cn52xxp1; struct cvmx_gmxx_hg2_control_s cn56xx; } cvmx_gmxx_hg2_control_t; /** * cvmx_gmx#_inf_mode * * GMX_INF_MODE = Interface Mode * */ typedef union { uint64_t u64; struct cvmx_gmxx_inf_mode_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_10_63 : 54; uint64_t speed : 2; /**< Interface Speed - 0: 1.250GHz - 1: 2.500GHz - 2: 3.125GHz - 3: 3.750GHz */ uint64_t reserved_6_7 : 2; uint64_t mode : 2; /**< Interface Electrical Operating Mode - 0: Disabled (PCIe) - 1: XAUI (IEEE 802.3-2005) - 2: SGMII (v1.8) - 3: PICMG3.1 */ uint64_t reserved_3_3 : 1; uint64_t p0mii : 1; /**< Port 0 Interface Mode - 0: Port 0 is RGMII - 1: Port 0 is MII */ uint64_t en : 1; /**< Interface Enable */ uint64_t type : 1; /**< Interface Mode - 0: RGMII Mode - 1: Spi4 Mode */ #else uint64_t type : 1; uint64_t en : 1; uint64_t p0mii : 1; uint64_t reserved_3_3 : 1; uint64_t mode : 2; uint64_t reserved_6_7 : 2; uint64_t speed : 2; uint64_t reserved_10_63 : 54; #endif } s; struct cvmx_gmxx_inf_mode_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_3_63 : 61; uint64_t p0mii : 1; /**< Port 0 Interface Mode - 0: Port 0 is RGMII - 1: Port 0 is MII */ uint64_t en : 1; /**< Interface Enable Must be set to enable the packet interface.
Should be enabled before any other requests to GMX including enabling port back pressure with IPD_CTL_STATUS[PBP_EN] */ uint64_t type : 1; /**< Port 1/2 Interface Mode - 0: Ports 1 and 2 are RGMII - 1: Port 1 is GMII/MII, Port 2 is unused. GMII/MII is selected by GMX_PRT1_CFG[SPEED] */ #else uint64_t type : 1; uint64_t en : 1; uint64_t p0mii : 1; uint64_t reserved_3_63 : 61; #endif } cn30xx; struct cvmx_gmxx_inf_mode_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t en : 1; /**< Interface Enable Must be set to enable the packet interface. Should be enabled before any other requests to GMX including enabling port back pressure with IPD_CTL_STATUS[PBP_EN] */ uint64_t type : 1; /**< Interface Mode - 0: All three ports are RGMII ports - 1: prt0 is RGMII, prt1 is GMII, and prt2 is unused */ #else uint64_t type : 1; uint64_t en : 1; uint64_t reserved_2_63 : 62; #endif } cn31xx; struct cvmx_gmxx_inf_mode_cn31xx cn38xx; struct cvmx_gmxx_inf_mode_cn31xx cn38xxp2; struct cvmx_gmxx_inf_mode_cn30xx cn50xx; struct cvmx_gmxx_inf_mode_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_10_63 : 54; uint64_t speed : 2; /**< Interface Speed - 0: 1.250GHz - 1: 2.500GHz - 2: 3.125GHz - 3: 3.750GHz */ uint64_t reserved_6_7 : 2; uint64_t mode : 2; /**< Interface Electrical Operating Mode - 0: Disabled (PCIe) - 1: XAUI (IEEE 802.3-2005) - 2: SGMII (v1.8) - 3: PICMG3.1 */ uint64_t reserved_2_3 : 2; uint64_t en : 1; /**< Interface Enable Must be set to enable the packet interface. Should be enabled before any other requests to GMX including enabling port back pressure with IPD_CTL_STATUS[PBP_EN] */ uint64_t type : 1; /**< Interface Protocol Type - 0: SGMII/1000Base-X - 1: XAUI */ #else uint64_t type : 1; uint64_t en : 1; uint64_t reserved_2_3 : 2; uint64_t mode : 2; uint64_t reserved_6_7 : 2; uint64_t speed : 2; uint64_t reserved_10_63 : 54; #endif } cn52xx; struct cvmx_gmxx_inf_mode_cn52xx cn52xxp1; struct cvmx_gmxx_inf_mode_cn52xx cn56xx; struct cvmx_gmxx_inf_mode_cn52xx cn56xxp1; struct cvmx_gmxx_inf_mode_cn31xx cn58xx; struct cvmx_gmxx_inf_mode_cn31xx cn58xxp1; } cvmx_gmxx_inf_mode_t; /** * cvmx_gmx#_nxa_adr * * GMX_NXA_ADR = NXA Port Address * */ typedef union { uint64_t u64; struct cvmx_gmxx_nxa_adr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_6_63 : 58; uint64_t prt : 6; /**< Logged address for NXA exceptions The logged address will be from the first exception that caused the problem. NCB has higher priority than PKO and will win. */ #else uint64_t prt : 6; uint64_t reserved_6_63 : 58; #endif } s; struct cvmx_gmxx_nxa_adr_s cn30xx; struct cvmx_gmxx_nxa_adr_s cn31xx; struct cvmx_gmxx_nxa_adr_s cn38xx; struct cvmx_gmxx_nxa_adr_s cn38xxp2; struct cvmx_gmxx_nxa_adr_s cn50xx; struct cvmx_gmxx_nxa_adr_s cn52xx; struct cvmx_gmxx_nxa_adr_s cn52xxp1; struct cvmx_gmxx_nxa_adr_s cn56xx; struct cvmx_gmxx_nxa_adr_s cn56xxp1; struct cvmx_gmxx_nxa_adr_s cn58xx; struct cvmx_gmxx_nxa_adr_s cn58xxp1; } cvmx_gmxx_nxa_adr_t; /** * cvmx_gmx#_prt#_cbfc_ctl * * ** HG2 message CSRs end * * * Notes: * XOFF for a specific port is XOFF = (PHYS_EN & PHYS_BP) | (LOGL_EN & LOGL_BP) * */ typedef union { uint64_t u64; struct cvmx_gmxx_prtx_cbfc_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t phys_en : 16; /**< Determines which ports will have physical backpressure pause packets. The value placed in the Class Enable Vector field of the CBFC pause packet will be PHYS_EN | LOGL_EN */ uint64_t logl_en : 16; /**< Determines which ports will have logical backpressure pause packets.
The value pplaced in the Class Enable Vector field of the CBFC pause packet will be PHYS_EN | LOGL_EN */ uint64_t phys_bp : 16; /**< When RX_EN is set and the HW is backpressuring any ports (from either CBFC pause packets or the GMX_TX_OVR_BP[TX_PRT_BP] register) and all ports indiciated by PHYS_BP are backpressured, simulate physical backpressure by defering all packets on the transmitter. */ uint64_t reserved_4_15 : 12; uint64_t bck_en : 1; /**< Forward CBFC Pause information to BP block */ uint64_t drp_en : 1; /**< Drop Control CBFC Pause Frames */ uint64_t tx_en : 1; /**< When set, allow for CBFC Pause Packets Must be clear in HiGig2 mode i.e. when GMX_TX_XAUI_CTL[HG_EN]=1 and GMX_RX_UDD_SKP[SKIP]=16. */ uint64_t rx_en : 1; /**< When set, allow for CBFC Pause Packets Must be clear in HiGig2 mode i.e. when GMX_TX_XAUI_CTL[HG_EN]=1 and GMX_RX_UDD_SKP[SKIP]=16. */ #else uint64_t rx_en : 1; uint64_t tx_en : 1; uint64_t drp_en : 1; uint64_t bck_en : 1; uint64_t reserved_4_15 : 12; uint64_t phys_bp : 16; uint64_t logl_en : 16; uint64_t phys_en : 16; #endif } s; struct cvmx_gmxx_prtx_cbfc_ctl_s cn52xx; struct cvmx_gmxx_prtx_cbfc_ctl_s cn56xx; } cvmx_gmxx_prtx_cbfc_ctl_t; /** * cvmx_gmx#_prt#_cfg * * GMX_PRT_CFG = Port description * */ typedef union { uint64_t u64; struct cvmx_gmxx_prtx_cfg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_14_63 : 50; uint64_t tx_idle : 1; /**< TX Machine is idle */ uint64_t rx_idle : 1; /**< RX Machine is idle */ uint64_t reserved_9_11 : 3; uint64_t speed_msb : 1; /**< Link Speed MSB [SPEED_MSB:SPEED] 10 = 10Mbs operation 00 = 100Mbs operation 01 = 1000Mbs operation 11 = Reserved (SGMII/1000Base-X only) */ uint64_t reserved_4_7 : 4; uint64_t slottime : 1; /**< Slot Time for Half-Duplex operation 0 = 512 bitimes (10/100Mbs operation) 1 = 4096 bitimes (1000Mbs operation) */ uint64_t duplex : 1; /**< Duplex 0 = Half Duplex (collisions/extentions/bursts) 1 = Full Duplex */ uint64_t speed : 1; /**< Link Speed 0 = 10/100Mbs operation (GMX_TX_CLK[CLK_CNT] > 1) 1 = 1000Mbs operation */ uint64_t en : 1; /**< Link Enable When EN is clear, packets will not be received or transmitted (including PAUSE and JAM packets). If EN is cleared while a packet is currently being received or transmitted, the packet will be allowed to complete before the bus is idled. On the RX side, subsequent packets in a burst will be ignored. */ #else uint64_t en : 1; uint64_t speed : 1; uint64_t duplex : 1; uint64_t slottime : 1; uint64_t reserved_4_7 : 4; uint64_t speed_msb : 1; uint64_t reserved_9_11 : 3; uint64_t rx_idle : 1; uint64_t tx_idle : 1; uint64_t reserved_14_63 : 50; #endif } s; struct cvmx_gmxx_prtx_cfg_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t slottime : 1; /**< Slot Time for Half-Duplex operation 0 = 512 bitimes (10/100Mbs operation) 1 = 4096 bitimes (1000Mbs operation) */ uint64_t duplex : 1; /**< Duplex 0 = Half Duplex (collisions/extentions/bursts) 1 = Full Duplex */ uint64_t speed : 1; /**< Link Speed 0 = 10/100Mbs operation (in RGMII mode, GMX_TX_CLK[CLK_CNT] > 1) (in MII mode, GMX_TX_CLK[CLK_CNT] == 1) 1 = 1000Mbs operation */ uint64_t en : 1; /**< Link Enable When EN is clear, packets will not be received or transmitted (including PAUSE and JAM packets). If EN is cleared while a packet is currently being received or transmitted, the packet will be allowed to complete before the bus is idled. On the RX side, subsequent packets in a burst will be ignored. 
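For illustration only (not part of the generated register description), a minimal sketch of disabling a link and waiting for the datapath to drain, assuming the cvmx_read_csr()/cvmx_write_csr() helpers and the CVMX_GMXX_PRTX_CFG() address macro from the companion CSR headers; the RX_IDLE/TX_IDLE polls apply only to chip variants whose GMX_PRT_CFG carries those bits (the generic 's' layout):
@verbatim
cvmx_gmxx_prtx_cfg_t cfg;
cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(port, interface));
cfg.s.en = 0;                 // any in-flight packet completes before the bus idles
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(port, interface), cfg.u64);
do {
    cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(port, interface));
} while (!cfg.s.rx_idle || !cfg.s.tx_idle);
@endverbatim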
*/ #else uint64_t en : 1; uint64_t speed : 1; uint64_t duplex : 1; uint64_t slottime : 1; uint64_t reserved_4_63 : 60; #endif } cn30xx; struct cvmx_gmxx_prtx_cfg_cn30xx cn31xx; struct cvmx_gmxx_prtx_cfg_cn30xx cn38xx; struct cvmx_gmxx_prtx_cfg_cn30xx cn38xxp2; struct cvmx_gmxx_prtx_cfg_cn30xx cn50xx; struct cvmx_gmxx_prtx_cfg_s cn52xx; struct cvmx_gmxx_prtx_cfg_s cn52xxp1; struct cvmx_gmxx_prtx_cfg_s cn56xx; struct cvmx_gmxx_prtx_cfg_s cn56xxp1; struct cvmx_gmxx_prtx_cfg_cn30xx cn58xx; struct cvmx_gmxx_prtx_cfg_cn30xx cn58xxp1; } cvmx_gmxx_prtx_cfg_t; /** * cvmx_gmx#_rx#_adr_cam0 * * GMX_RX_ADR_CAM = Address Filtering Control * */ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_adr_cam0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t adr : 64; /**< The DMAC address to match on Each entry contributes 8 bits to one of 8 matchers Write transactions to GMX_RX_ADR_CAM will not change the CSR when GMX_PRT_CFG[EN] is enabled The CAM matches against unicast or multicast DMAC addresses. */ #else uint64_t adr : 64; #endif } s; struct cvmx_gmxx_rxx_adr_cam0_s cn30xx; struct cvmx_gmxx_rxx_adr_cam0_s cn31xx; struct cvmx_gmxx_rxx_adr_cam0_s cn38xx; struct cvmx_gmxx_rxx_adr_cam0_s cn38xxp2; struct cvmx_gmxx_rxx_adr_cam0_s cn50xx; struct cvmx_gmxx_rxx_adr_cam0_s cn52xx; struct cvmx_gmxx_rxx_adr_cam0_s cn52xxp1; struct cvmx_gmxx_rxx_adr_cam0_s cn56xx; struct cvmx_gmxx_rxx_adr_cam0_s cn56xxp1; struct cvmx_gmxx_rxx_adr_cam0_s cn58xx; struct cvmx_gmxx_rxx_adr_cam0_s cn58xxp1; } cvmx_gmxx_rxx_adr_cam0_t; /** * cvmx_gmx#_rx#_adr_cam1 * * GMX_RX_ADR_CAM = Address Filtering Control * */ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_adr_cam1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t adr : 64; /**< The DMAC address to match on Each entry contributes 8 bits to one of 8 matchers Write transactions to GMX_RX_ADR_CAM will not change the CSR when GMX_PRT_CFG[EN] is enabled The CAM matches against unicast or multicast DMAC addresses. */ #else uint64_t adr : 64; #endif } s; struct cvmx_gmxx_rxx_adr_cam1_s cn30xx; struct cvmx_gmxx_rxx_adr_cam1_s cn31xx; struct cvmx_gmxx_rxx_adr_cam1_s cn38xx; struct cvmx_gmxx_rxx_adr_cam1_s cn38xxp2; struct cvmx_gmxx_rxx_adr_cam1_s cn50xx; struct cvmx_gmxx_rxx_adr_cam1_s cn52xx; struct cvmx_gmxx_rxx_adr_cam1_s cn52xxp1; struct cvmx_gmxx_rxx_adr_cam1_s cn56xx; struct cvmx_gmxx_rxx_adr_cam1_s cn56xxp1; struct cvmx_gmxx_rxx_adr_cam1_s cn58xx; struct cvmx_gmxx_rxx_adr_cam1_s cn58xxp1; } cvmx_gmxx_rxx_adr_cam1_t; /** * cvmx_gmx#_rx#_adr_cam2 * * GMX_RX_ADR_CAM = Address Filtering Control * */ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_adr_cam2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t adr : 64; /**< The DMAC address to match on Each entry contributes 8 bits to one of 8 matchers Write transactions to GMX_RX_ADR_CAM will not change the CSR when GMX_PRT_CFG[EN] is enabled The CAM matches against unicast or multicast DMAC addresses. */ #else uint64_t adr : 64; #endif } s; struct cvmx_gmxx_rxx_adr_cam2_s cn30xx; struct cvmx_gmxx_rxx_adr_cam2_s cn31xx; struct cvmx_gmxx_rxx_adr_cam2_s cn38xx; struct cvmx_gmxx_rxx_adr_cam2_s cn38xxp2; struct cvmx_gmxx_rxx_adr_cam2_s cn50xx; struct cvmx_gmxx_rxx_adr_cam2_s cn52xx; struct cvmx_gmxx_rxx_adr_cam2_s cn52xxp1; struct cvmx_gmxx_rxx_adr_cam2_s cn56xx; struct cvmx_gmxx_rxx_adr_cam2_s cn56xxp1; struct cvmx_gmxx_rxx_adr_cam2_s cn58xx; struct cvmx_gmxx_rxx_adr_cam2_s cn58xxp1; } cvmx_gmxx_rxx_adr_cam2_t; /** * cvmx_gmx#_rx#_adr_cam3 * * GMX_RX_ADR_CAM = Address Filtering Control * */ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_adr_cam3_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t adr : 64; /**< The DMAC address to match on Each entry contributes 8 bits to one of 8 matchers Write transactions to GMX_RX_ADR_CAM will not change the CSR when GMX_PRT_CFG[EN] is enabled The CAM matches against unicast or multicast DMAC addresses. */ #else uint64_t adr : 64; #endif } s; struct cvmx_gmxx_rxx_adr_cam3_s cn30xx; struct cvmx_gmxx_rxx_adr_cam3_s cn31xx; struct cvmx_gmxx_rxx_adr_cam3_s cn38xx; struct cvmx_gmxx_rxx_adr_cam3_s cn38xxp2; struct cvmx_gmxx_rxx_adr_cam3_s cn50xx; struct cvmx_gmxx_rxx_adr_cam3_s cn52xx; struct cvmx_gmxx_rxx_adr_cam3_s cn52xxp1; struct cvmx_gmxx_rxx_adr_cam3_s cn56xx; struct cvmx_gmxx_rxx_adr_cam3_s cn56xxp1; struct cvmx_gmxx_rxx_adr_cam3_s cn58xx; struct cvmx_gmxx_rxx_adr_cam3_s cn58xxp1; } cvmx_gmxx_rxx_adr_cam3_t; /** * cvmx_gmx#_rx#_adr_cam4 * * GMX_RX_ADR_CAM = Address Filtering Control * */ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_adr_cam4_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t adr : 64; /**< The DMAC address to match on Each entry contributes 8 bits to one of 8 matchers Write transactions to GMX_RX_ADR_CAM will not change the CSR when GMX_PRT_CFG[EN] is enabled The CAM matches against unicast or multicast DMAC addresses. */ #else uint64_t adr : 64; #endif } s; struct cvmx_gmxx_rxx_adr_cam4_s cn30xx; struct cvmx_gmxx_rxx_adr_cam4_s cn31xx; struct cvmx_gmxx_rxx_adr_cam4_s cn38xx; struct cvmx_gmxx_rxx_adr_cam4_s cn38xxp2; struct cvmx_gmxx_rxx_adr_cam4_s cn50xx; struct cvmx_gmxx_rxx_adr_cam4_s cn52xx; struct cvmx_gmxx_rxx_adr_cam4_s cn52xxp1; struct cvmx_gmxx_rxx_adr_cam4_s cn56xx; struct cvmx_gmxx_rxx_adr_cam4_s cn56xxp1; struct cvmx_gmxx_rxx_adr_cam4_s cn58xx; struct cvmx_gmxx_rxx_adr_cam4_s cn58xxp1; } cvmx_gmxx_rxx_adr_cam4_t; /** * cvmx_gmx#_rx#_adr_cam5 * * GMX_RX_ADR_CAM = Address Filtering Control * */ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_adr_cam5_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t adr : 64; /**< The DMAC address to match on Each entry contributes 8 bits to one of 8 matchers Write transactions to GMX_RX_ADR_CAM will not change the CSR when GMX_PRT_CFG[EN] is enabled The CAM matches against unicast or multicast DMAC addresses.
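For illustration only (not part of the generated description), a hedged sketch of loading one DMAC into CAM entry 'idx' (0-7). Per the unswizzle in the GMX_RX_ADR_CTL algorithm below, CAM register j supplies byte j of the 48-bit address, and byte lane 'idx' of each register belongs to matcher 'idx'. Assumes the cvmx_read_csr()/cvmx_write_csr() helpers and the CVMX_GMXX_RXX_ADR_CAM0..5() address macros from the companion headers, with mac[0] as the first octet on the wire; GMX_PRT_CFG[EN] must be clear or the CAM writes are ignored:
@verbatim
uint64_t csr[6] = {
    CVMX_GMXX_RXX_ADR_CAM0(index, interface), CVMX_GMXX_RXX_ADR_CAM1(index, interface),
    CVMX_GMXX_RXX_ADR_CAM2(index, interface), CVMX_GMXX_RXX_ADR_CAM3(index, interface),
    CVMX_GMXX_RXX_ADR_CAM4(index, interface), CVMX_GMXX_RXX_ADR_CAM5(index, interface)
};
int j;
for (j = 0; j < 6; j++) {
    uint64_t v = cvmx_read_csr(csr[j]);
    v &= ~(0xffull << (8 * idx));              // clear this entry's byte lane
    v |= (uint64_t)mac[5 - j] << (8 * idx);    // install byte j of the DMAC
    cvmx_write_csr(csr[j], v);
}
// then set bit 'idx' of GMX_RX_ADR_CAM_EN[EN] and choose accept/reject
// behavior with GMX_RX_ADR_CTL[CAM_MODE]
@endverbatim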
*/ #else uint64_t adr : 64; #endif } s; struct cvmx_gmxx_rxx_adr_cam5_s cn30xx; struct cvmx_gmxx_rxx_adr_cam5_s cn31xx; struct cvmx_gmxx_rxx_adr_cam5_s cn38xx; struct cvmx_gmxx_rxx_adr_cam5_s cn38xxp2; struct cvmx_gmxx_rxx_adr_cam5_s cn50xx; struct cvmx_gmxx_rxx_adr_cam5_s cn52xx; struct cvmx_gmxx_rxx_adr_cam5_s cn52xxp1; struct cvmx_gmxx_rxx_adr_cam5_s cn56xx; struct cvmx_gmxx_rxx_adr_cam5_s cn56xxp1; struct cvmx_gmxx_rxx_adr_cam5_s cn58xx; struct cvmx_gmxx_rxx_adr_cam5_s cn58xxp1; } cvmx_gmxx_rxx_adr_cam5_t; /** * cvmx_gmx#_rx#_adr_cam_en * * GMX_RX_ADR_CAM_EN = Address Filtering Control Enable * */ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_adr_cam_en_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t en : 8; /**< CAM Entry Enables */ #else uint64_t en : 8; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_gmxx_rxx_adr_cam_en_s cn30xx; struct cvmx_gmxx_rxx_adr_cam_en_s cn31xx; struct cvmx_gmxx_rxx_adr_cam_en_s cn38xx; struct cvmx_gmxx_rxx_adr_cam_en_s cn38xxp2; struct cvmx_gmxx_rxx_adr_cam_en_s cn50xx; struct cvmx_gmxx_rxx_adr_cam_en_s cn52xx; struct cvmx_gmxx_rxx_adr_cam_en_s cn52xxp1; struct cvmx_gmxx_rxx_adr_cam_en_s cn56xx; struct cvmx_gmxx_rxx_adr_cam_en_s cn56xxp1; struct cvmx_gmxx_rxx_adr_cam_en_s cn58xx; struct cvmx_gmxx_rxx_adr_cam_en_s cn58xxp1; } cvmx_gmxx_rxx_adr_cam_en_t; /** * cvmx_gmx#_rx#_adr_ctl * * GMX_RX_ADR_CTL = Address Filtering Control * * * Notes: * * ALGORITHM * Here is some pseudo code that represents the address filter behavior. * * @verbatim * bool dmac_addr_filter(uint8 prt, uint48 dmac) [ * ASSERT(prt >= 0 && prt <= 3); * if (is_bcst(dmac)) // broadcast accept * return (GMX_RX[prt]_ADR_CTL[BCST] ? ACCEPT : REJECT); * if (is_mcst(dmac) & GMX_RX[prt]_ADR_CTL[MCST] == 1) // multicast reject * return REJECT; * if (is_mcst(dmac) & GMX_RX[prt]_ADR_CTL[MCST] == 2) // multicast accept * return ACCEPT; * * cam_hit = 0; * * for (i=0; i<8; i++) [ * if (GMX_RX[prt]_ADR_CAM_EN[EN<i>] == 0) * continue; * uint48 unswizzled_mac_adr = 0x0; * for (j=5; j>=0; j--) [ * unswizzled_mac_adr = (unswizzled_mac_adr << 8) | GMX_RX[prt]_ADR_CAM[j][ADR<i>]; * ] * if (unswizzled_mac_adr == dmac) [ * cam_hit = 1; * break; * ] * ] * * if (cam_hit) * return (GMX_RX[prt]_ADR_CTL[CAM_MODE] ? ACCEPT : REJECT); * else * return (GMX_RX[prt]_ADR_CTL[CAM_MODE] ?
REJECT : ACCEPT); * ] * @endverbatim */ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_adr_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t cam_mode : 1; /**< Allow or deny DMAC address filter 0 = reject the packet on DMAC address match 1 = accept the packet on DMAC address match */ uint64_t mcst : 2; /**< Multicast Mode 0 = Use the Address Filter CAM 1 = Force reject all multicast packets 2 = Force accept all multicast packets 3 = Reserved */ uint64_t bcst : 1; /**< Accept All Broadcast Packets */ #else uint64_t bcst : 1; uint64_t mcst : 2; uint64_t cam_mode : 1; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_gmxx_rxx_adr_ctl_s cn30xx; struct cvmx_gmxx_rxx_adr_ctl_s cn31xx; struct cvmx_gmxx_rxx_adr_ctl_s cn38xx; struct cvmx_gmxx_rxx_adr_ctl_s cn38xxp2; struct cvmx_gmxx_rxx_adr_ctl_s cn50xx; struct cvmx_gmxx_rxx_adr_ctl_s cn52xx; struct cvmx_gmxx_rxx_adr_ctl_s cn52xxp1; struct cvmx_gmxx_rxx_adr_ctl_s cn56xx; struct cvmx_gmxx_rxx_adr_ctl_s cn56xxp1; struct cvmx_gmxx_rxx_adr_ctl_s cn58xx; struct cvmx_gmxx_rxx_adr_ctl_s cn58xxp1; } cvmx_gmxx_rxx_adr_ctl_t; /** * cvmx_gmx#_rx#_decision * * GMX_RX_DECISION = The byte count to decide when to accept or filter a packet * * * Notes: * As each byte in a packet is received by GMX, the L2 byte count is compared * against the GMX_RX_DECISION[CNT]. The L2 byte count is the number of bytes * from the beginning of the L2 header (DMAC). In normal operation, the L2 * header begins after the PREAMBLE+SFD (GMX_RX_FRM_CTL[PRE_CHK]=1) and any * optional UDD skip data (GMX_RX_UDD_SKP[LEN]). * * When GMX_RX_FRM_CTL[PRE_CHK] is clear, PREAMBLE+SFD are prepended to the * packet and would require UDD skip length to account for them. * *
 *                    L2 Size
 * Port Mode      <GMX_RX_DECISION bytes (default=24)    >=GMX_RX_DECISION bytes (default=24)
 *
 * Full Duplex    accept packet                          apply filters
 *                no filtering is applied                accept packet based on DMAC and PAUSE packet filters
 *
 * Half Duplex    drop packet                            apply filters
 *                packet is unconditionally dropped      accept packet based on DMAC
 *
 * where l2_size = MAX(0, total_packet_size - GMX_RX_UDD_SKP[LEN] - ((GMX_RX_FRM_CTL[PRE_CHK]==1)*8)) */ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_decision_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t cnt : 5; /**< The byte count to decide when to accept or filter a packet. */ #else uint64_t cnt : 5; uint64_t reserved_5_63 : 59; #endif } s; struct cvmx_gmxx_rxx_decision_s cn30xx; struct cvmx_gmxx_rxx_decision_s cn31xx; struct cvmx_gmxx_rxx_decision_s cn38xx; struct cvmx_gmxx_rxx_decision_s cn38xxp2; struct cvmx_gmxx_rxx_decision_s cn50xx; struct cvmx_gmxx_rxx_decision_s cn52xx; struct cvmx_gmxx_rxx_decision_s cn52xxp1; struct cvmx_gmxx_rxx_decision_s cn56xx; struct cvmx_gmxx_rxx_decision_s cn56xxp1; struct cvmx_gmxx_rxx_decision_s cn58xx; struct cvmx_gmxx_rxx_decision_s cn58xxp1; } cvmx_gmxx_rxx_decision_t; /** * cvmx_gmx#_rx#_frm_chk * * GMX_RX_FRM_CHK = Which frame errors will set the ERR bit of the frame * * * Notes: * If GMX_RX_UDD_SKP[LEN] != 0, then LENERR will be forced to zero in HW. * * In XAUI mode prt0 is used for checking.
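 *
 * For illustration only (not part of the generated description), a sketch
 * of enabling a typical set of checks, assuming the cvmx_write_csr()
 * helper and the CVMX_GMXX_RXX_FRM_CHK() address macro from the companion
 * headers; field availability differs per chip, see the structs below.
 *
 * @verbatim
 * cvmx_gmxx_rxx_frm_chk_t chk;
 * chk.u64 = 0;
 * chk.s.fcserr = 1;  // flag FCS/CRC failures
 * chk.s.jabber = 1;  // flag frames longer than GMX_RX_JABBER[CNT]
 * chk.s.maxerr = 1;  // flag frames longer than GMX_RX_FRM_MAX[LEN]
 * chk.s.minerr = 1;  // flag frames shorter than GMX_RX_FRM_MIN[LEN]
 * cvmx_write_csr(CVMX_GMXX_RXX_FRM_CHK(index, interface), chk.u64);
 * @endverbatim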
*/ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_frm_chk_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_10_63 : 54; uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */ uint64_t skperr : 1; /**< Skipper error */ uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */ uint64_t lenerr : 1; /**< Frame was received with length error */ uint64_t alnerr : 1; /**< Frame was received with an alignment error */ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ uint64_t jabber : 1; /**< Frame was received with length > sys_length */ uint64_t maxerr : 1; /**< Frame was received with length > max_length */ uint64_t carext : 1; /**< RGMII carrier extend error */ uint64_t minerr : 1; /**< Frame was received with length < min_length */ #else uint64_t minerr : 1; uint64_t carext : 1; uint64_t maxerr : 1; uint64_t jabber : 1; uint64_t fcserr : 1; uint64_t alnerr : 1; uint64_t lenerr : 1; uint64_t rcverr : 1; uint64_t skperr : 1; uint64_t niberr : 1; uint64_t reserved_10_63 : 54; #endif } s; struct cvmx_gmxx_rxx_frm_chk_s cn30xx; struct cvmx_gmxx_rxx_frm_chk_s cn31xx; struct cvmx_gmxx_rxx_frm_chk_s cn38xx; struct cvmx_gmxx_rxx_frm_chk_s cn38xxp2; struct cvmx_gmxx_rxx_frm_chk_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_10_63 : 54; uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */ uint64_t skperr : 1; /**< Skipper error */ uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */ uint64_t reserved_6_6 : 1; uint64_t alnerr : 1; /**< Frame was received with an alignment error */ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ uint64_t jabber : 1; /**< Frame was received with length > sys_length */ uint64_t reserved_2_2 : 1; uint64_t carext : 1; /**< RGMII carrier extend error */ uint64_t reserved_0_0 : 1; #else uint64_t reserved_0_0 : 1; uint64_t carext : 1; uint64_t reserved_2_2 : 1; uint64_t jabber : 1; uint64_t fcserr : 1; uint64_t alnerr : 1; uint64_t reserved_6_6 : 1; uint64_t rcverr : 1; uint64_t skperr : 1; uint64_t niberr : 1; uint64_t reserved_10_63 : 54; #endif } cn50xx; struct cvmx_gmxx_rxx_frm_chk_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_9_63 : 55; uint64_t skperr : 1; /**< Skipper error */ uint64_t rcverr : 1; /**< Frame was received with Data reception error */ uint64_t reserved_5_6 : 2; uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ uint64_t jabber : 1; /**< Frame was received with length > sys_length */ uint64_t reserved_2_2 : 1; uint64_t carext : 1; /**< Carrier extend error (SGMII/1000Base-X only) */ uint64_t reserved_0_0 : 1; #else uint64_t reserved_0_0 : 1; uint64_t carext : 1; uint64_t reserved_2_2 : 1; uint64_t jabber : 1; uint64_t fcserr : 1; uint64_t reserved_5_6 : 2; uint64_t rcverr : 1; uint64_t skperr : 1; uint64_t reserved_9_63 : 55; #endif } cn52xx; struct cvmx_gmxx_rxx_frm_chk_cn52xx cn52xxp1; struct cvmx_gmxx_rxx_frm_chk_cn52xx cn56xx; struct cvmx_gmxx_rxx_frm_chk_cn52xx cn56xxp1; struct cvmx_gmxx_rxx_frm_chk_s cn58xx; struct cvmx_gmxx_rxx_frm_chk_s cn58xxp1; } cvmx_gmxx_rxx_frm_chk_t; /** * cvmx_gmx#_rx#_frm_ctl * * GMX_RX_FRM_CTL = Frame Control * * * Notes: * * PRE_CHK * When set, the RX state expects a typical frame consisting of * INTER_FRAME=>PREAMBLE(x7)=>SFD(x1)=>DAT. The state machine watches for * this exact sequence in order to recognize a valid frame and push frame * data into the Octane. There must be exactly 7 PREAMBLE cycles followed by * the single SFD cycle for the frame to be accepted.
* * When a problem does occur within the PREAMBLE sequence, the frame is * marked as bad and not sent into the core. The GMX_RX_INT_REG[PCTERR] * interrupt is also raised. * * * PRE_STRP * When PRE_CHK is set (indicating that the PREAMBLE will be sent), PRE_STRP * determines if the PREAMBLE+SFD bytes are thrown away or sent to the Octane * core as part of the packet. * * In either mode, the PREAMBLE+SFD bytes are not counted toward the packet * size when checking against the MIN and MAX bounds. Furthermore, the bytes * are skipped when locating the start of the L2 header for DMAC and Control * frame recognition. * * * CTL_BCK/CTL_DRP * These bits control how the HW handles incoming PAUSE packets. Here are * the most common modes of operation: * CTL_BCK=1,CTL_DRP=1 - HW does it all * CTL_BCK=0,CTL_DRP=0 - SW sees all pause frames * CTL_BCK=0,CTL_DRP=1 - all pause frames are completely ignored * * These control bits should be set to CTL_BCK=0,CTL_DRP=0 in half-duplex mode. * Since PAUSE packets only apply to full-duplex operation, any PAUSE packet * would constitute an exception which should be handled by the processing * cores. PAUSE packets should not be forwarded. */ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_frm_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_11_63 : 53; uint64_t null_dis : 1; /**< When set, do not modify the MOD bits on NULL ticks due to PARTIAL packets In spi4 mode, all ports use prt0 for checking. */ uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns the SFD byte regardless of the number of previous PREAMBLE nibbles. In this mode, PREAMBLE can be consumed by the HW so when PRE_ALIGN is set, PRE_FREE and PRE_STRP must be set for correct operation. PRE_CHK must be set to enable this and all PREAMBLE features. */ uint64_t pad_len : 1; /**< When set, disables the length check for non-min sized pkts with padding in the client data (PASS3 Only) */ uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts (PASS2 only) */ uint64_t pre_free : 1; /**< When set, PREAMBLE checking is less strict. 0 - 254 cycles of PREAMBLE followed by SFD */ uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */ uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assigned Multicast address */ uint64_t ctl_bck : 1; /**< Forward pause information to TX block */ uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */ uint64_t pre_strp : 1; /**< Strip off the preamble (when present) 0=PREAMBLE+SFD is sent to core as part of frame 1=PREAMBLE+SFD is dropped */ uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD to begin every frame. GMX checks that the PREAMBLE is sent correctly */ #else uint64_t pre_chk : 1; uint64_t pre_strp : 1; uint64_t ctl_drp : 1; uint64_t ctl_bck : 1; uint64_t ctl_mcst : 1; uint64_t ctl_smac : 1; uint64_t pre_free : 1; uint64_t vlan_len : 1; uint64_t pad_len : 1; uint64_t pre_align : 1; uint64_t null_dis : 1; uint64_t reserved_11_63 : 53; #endif } s; struct cvmx_gmxx_rxx_frm_ctl_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_9_63 : 55; uint64_t pad_len : 1; /**< When set, disables the length check for non-min sized pkts with padding in the client data */ uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */ uint64_t pre_free : 1; /**< Allows for less strict PREAMBLE checking.
0-7 cycles of PREAMBLE followed by SFD (pass 1.0) 0-254 cycles of PREAMBLE followed by SFD (else) */ uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */ uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assigned Multicast address */ uint64_t ctl_bck : 1; /**< Forward pause information to TX block */ uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */ uint64_t pre_strp : 1; /**< Strip off the preamble (when present) 0=PREAMBLE+SFD is sent to core as part of frame 1=PREAMBLE+SFD is dropped */ uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD to begin every frame. GMX checks that the PREAMBLE is sent correctly */ #else uint64_t pre_chk : 1; uint64_t pre_strp : 1; uint64_t ctl_drp : 1; uint64_t ctl_bck : 1; uint64_t ctl_mcst : 1; uint64_t ctl_smac : 1; uint64_t pre_free : 1; uint64_t vlan_len : 1; uint64_t pad_len : 1; uint64_t reserved_9_63 : 55; #endif } cn30xx; struct cvmx_gmxx_rxx_frm_ctl_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */ uint64_t pre_free : 1; /**< Allows for less strict PREAMBLE checking. 0 - 7 cycles of PREAMBLE followed by SFD (pass1.0) 0 - 254 cycles of PREAMBLE followed by SFD (else) */ uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */ uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assigned Multicast address */ uint64_t ctl_bck : 1; /**< Forward pause information to TX block */ uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */ uint64_t pre_strp : 1; /**< Strip off the preamble (when present) 0=PREAMBLE+SFD is sent to core as part of frame 1=PREAMBLE+SFD is dropped */ uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD to begin every frame. GMX checks that the PREAMBLE is sent correctly */ #else uint64_t pre_chk : 1; uint64_t pre_strp : 1; uint64_t ctl_drp : 1; uint64_t ctl_bck : 1; uint64_t ctl_mcst : 1; uint64_t ctl_smac : 1; uint64_t pre_free : 1; uint64_t vlan_len : 1; uint64_t reserved_8_63 : 56; #endif } cn31xx; struct cvmx_gmxx_rxx_frm_ctl_cn30xx cn38xx; struct cvmx_gmxx_rxx_frm_ctl_cn31xx cn38xxp2; struct cvmx_gmxx_rxx_frm_ctl_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_11_63 : 53; uint64_t null_dis : 1; /**< When set, do not modify the MOD bits on NULL ticks due to PARTIAL packets */ uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns the SFD byte regardless of the number of previous PREAMBLE nibbles. In this mode, PREAMBLE can be consumed by the HW so when PRE_ALIGN is set, PRE_FREE and PRE_STRP must be set for correct operation. PRE_CHK must be set to enable this and all PREAMBLE features. */ uint64_t reserved_7_8 : 2; uint64_t pre_free : 1; /**< Allows for less strict PREAMBLE checking. 0-254 cycles of PREAMBLE followed by SFD */ uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */ uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assigned Multicast address */ uint64_t ctl_bck : 1; /**< Forward pause information to TX block */ uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */ uint64_t pre_strp : 1; /**< Strip off the preamble (when present) 0=PREAMBLE+SFD is sent to core as part of frame 1=PREAMBLE+SFD is dropped */ uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD to begin every frame. GMX checks that the PREAMBLE is sent correctly */ #else uint64_t pre_chk : 1; uint64_t pre_strp : 1; uint64_t ctl_drp : 1; uint64_t ctl_bck : 1; uint64_t ctl_mcst : 1; uint64_t ctl_smac : 1; uint64_t pre_free : 1; uint64_t reserved_7_8 : 2; uint64_t pre_align : 1; uint64_t null_dis : 1; uint64_t reserved_11_63 : 53; #endif } cn50xx; struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn52xx; struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn52xxp1; struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn56xx; struct cvmx_gmxx_rxx_frm_ctl_cn56xxp1 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_10_63 : 54; uint64_t pre_align : 1; /**< When set, PREAMBLE parser aligns the SFD byte regardless of the number of previous PREAMBLE nibbles. In this mode, PRE_STRP should be set to account for the variable nature of the PREAMBLE. PRE_CHK must be set to enable this and all PREAMBLE features. (SGMII at 10/100Mbs only) */ uint64_t reserved_7_8 : 2; uint64_t pre_free : 1; /**< When set, PREAMBLE checking is less strict. 0 - 254 cycles of PREAMBLE followed by SFD PRE_CHK must be set to enable this and all PREAMBLE features. (SGMII/1000Base-X only) */ uint64_t ctl_smac : 1; /**< Control Pause Frames can match station SMAC */ uint64_t ctl_mcst : 1; /**< Control Pause Frames can match globally assigned Multicast address */ uint64_t ctl_bck : 1; /**< Forward pause information to TX block */ uint64_t ctl_drp : 1; /**< Drop Control Pause Frames */ uint64_t pre_strp : 1; /**< Strip off the preamble (when present) 0=PREAMBLE+SFD is sent to core as part of frame 1=PREAMBLE+SFD is dropped PRE_CHK must be set to enable this and all PREAMBLE features. */ uint64_t pre_chk : 1; /**< This port is configured to send PREAMBLE+SFD to begin every frame. GMX checks that the PREAMBLE is sent correctly. When GMX_TX_XAUI_CTL[HG_EN] is set, PRE_CHK must be zero. */ #else uint64_t pre_chk : 1; uint64_t pre_strp : 1; uint64_t ctl_drp : 1; uint64_t ctl_bck : 1; uint64_t ctl_mcst : 1; uint64_t ctl_smac : 1; uint64_t pre_free : 1; uint64_t reserved_7_8 : 2; uint64_t pre_align : 1; uint64_t reserved_10_63 : 54; #endif } cn56xxp1; struct cvmx_gmxx_rxx_frm_ctl_s cn58xx; struct cvmx_gmxx_rxx_frm_ctl_cn30xx cn58xxp1; } cvmx_gmxx_rxx_frm_ctl_t; /** * cvmx_gmx#_rx#_frm_max * * GMX_RX_FRM_MAX = Frame Max length * * * Notes: * In spi4 mode, all spi4 ports use prt0 for checking. * * When changing the LEN field, be sure that LEN does not exceed * GMX_RX_JABBER[CNT]. Failure to meet this constraint will cause packets that * are within the maximum length parameter to be rejected because they exceed * the GMX_RX_JABBER[CNT] limit. */ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_frm_max_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t len : 16; /**< Byte count for Max-sized frame check Failing packets set the MAXERR interrupt and are optionally sent with opcode==MAXERR LEN <= GMX_RX_JABBER[CNT] */ #else uint64_t len : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_gmxx_rxx_frm_max_s cn30xx; struct cvmx_gmxx_rxx_frm_max_s cn31xx; struct cvmx_gmxx_rxx_frm_max_s cn38xx; struct cvmx_gmxx_rxx_frm_max_s cn38xxp2; struct cvmx_gmxx_rxx_frm_max_s cn58xx; struct cvmx_gmxx_rxx_frm_max_s cn58xxp1; } cvmx_gmxx_rxx_frm_max_t; /** * cvmx_gmx#_rx#_frm_min * * GMX_RX_FRM_MIN = Frame Min length * * * Notes: * In spi4 mode, all spi4 ports use prt0 for checking.
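 *
 * For illustration only (not from the generated description), a sketch of
 * programming the min/max/jabber bounds together so the constraint from the
 * GMX_RX_FRM_MAX notes above (LEN <= GMX_RX_JABBER[CNT]) holds; assumes the
 * cvmx_write_csr() helper and the CVMX_GMXX_RXX_FRM_MIN/FRM_MAX/JABBER()
 * address macros, with 'max_frame' a hypothetical bound chosen by SW.
 *
 * @verbatim
 * uint64_t max_frame = 1548;
 * cvmx_write_csr(CVMX_GMXX_RXX_FRM_MIN(index, interface), 64);
 * cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface), max_frame);
 * // GMX_RX_JABBER[CNT] must be 8-byte aligned and >= FRM_MAX[LEN]
 * cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
 *                (max_frame + 7) & ~7ull);
 * @endverbatim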
* */ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_frm_min_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t len : 16; /**< Byte count for Min-sized frame check Failing packets set the MINERR interrupt and are optionally sent with opcode==MINERR */ #else uint64_t len : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_gmxx_rxx_frm_min_s cn30xx; struct cvmx_gmxx_rxx_frm_min_s cn31xx; struct cvmx_gmxx_rxx_frm_min_s cn38xx; struct cvmx_gmxx_rxx_frm_min_s cn38xxp2; struct cvmx_gmxx_rxx_frm_min_s cn58xx; struct cvmx_gmxx_rxx_frm_min_s cn58xxp1; } cvmx_gmxx_rxx_frm_min_t; /** * cvmx_gmx#_rx#_ifg * * GMX_RX_IFG = RX Min IFG * */ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_ifg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t ifg : 4; /**< Min IFG between packets used to determine IFGERR 1000Mbs, IFG==0.096us or 12 clks 100Mbs, IFG==0.96us or 24 clks 10Mbs, IFG==9.6us or 24 clks In order to simplify the programming model, IFG is doubled internally when GMX_PRT_CFG[SPEED]==0. */ #else uint64_t ifg : 4; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_gmxx_rxx_ifg_s cn30xx; struct cvmx_gmxx_rxx_ifg_s cn31xx; struct cvmx_gmxx_rxx_ifg_s cn38xx; struct cvmx_gmxx_rxx_ifg_s cn38xxp2; struct cvmx_gmxx_rxx_ifg_s cn50xx; struct cvmx_gmxx_rxx_ifg_s cn52xx; struct cvmx_gmxx_rxx_ifg_s cn52xxp1; struct cvmx_gmxx_rxx_ifg_s cn56xx; struct cvmx_gmxx_rxx_ifg_s cn56xxp1; struct cvmx_gmxx_rxx_ifg_s cn58xx; struct cvmx_gmxx_rxx_ifg_s cn58xxp1; } cvmx_gmxx_rxx_ifg_t; /** * cvmx_gmx#_rx#_int_en * * GMX_RX_INT_EN = Interrupt Enable * * * Notes: * In XAUI mode prt0 is used for checking. * */ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_int_en_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_29_63 : 35; uint64_t hg2cc : 1; /**< HiGig2 CRC8 or Control char error interrupt enable */ uint64_t hg2fld : 1; /**< HiGig2 Bad field error interrupt enable */ uint64_t undat : 1; /**< Unexpected Data (XAUI Mode only) */ uint64_t uneop : 1; /**< Unexpected EOP (XAUI Mode only) */ uint64_t unsop : 1; /**< Unexpected SOP (XAUI Mode only) */ uint64_t bad_term : 1; /**< Frame is terminated by control character other than /T/. The error propagation control character /E/ will be included as part of the frame and does not cause a frame termination. 
(XAUI Mode only) */ uint64_t bad_seq : 1; /**< Reserved Sequence Detected (XAUI Mode only) */ uint64_t rem_fault : 1; /**< Remote Fault Sequence Detected (XAUI Mode only) */ uint64_t loc_fault : 1; /**< Local Fault Sequence Detected (XAUI Mode only) */ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */ uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex */ uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed */ uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus */ uint64_t ifgerr : 1; /**< Interframe Gap Violation */ uint64_t coldet : 1; /**< Collision Detection */ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */ uint64_t rsverr : 1; /**< RGMII reserved opcodes */ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow */ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */ uint64_t skperr : 1; /**< Skipper error */ uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */ uint64_t lenerr : 1; /**< Frame was received with length error */ uint64_t alnerr : 1; /**< Frame was received with an alignment error */ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ uint64_t jabber : 1; /**< Frame was received with length > sys_length */ uint64_t maxerr : 1; /**< Frame was received with length > max_length */ uint64_t carext : 1; /**< RGMII carrier extend error */ uint64_t minerr : 1; /**< Frame was received with length < min_length */ #else uint64_t minerr : 1; uint64_t carext : 1; uint64_t maxerr : 1; uint64_t jabber : 1; uint64_t fcserr : 1; uint64_t alnerr : 1; uint64_t lenerr : 1; uint64_t rcverr : 1; uint64_t skperr : 1; uint64_t niberr : 1; uint64_t ovrerr : 1; uint64_t pcterr : 1; uint64_t rsverr : 1; uint64_t falerr : 1; uint64_t coldet : 1; uint64_t ifgerr : 1; uint64_t phy_link : 1; uint64_t phy_spd : 1; uint64_t phy_dupx : 1; uint64_t pause_drp : 1; uint64_t loc_fault : 1; uint64_t rem_fault : 1; uint64_t bad_seq : 1; uint64_t bad_term : 1; uint64_t unsop : 1; uint64_t uneop : 1; uint64_t undat : 1; uint64_t hg2fld : 1; uint64_t hg2cc : 1; uint64_t reserved_29_63 : 35; #endif } s; struct cvmx_gmxx_rxx_int_en_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_19_63 : 45; uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex */ uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed */ uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus */ uint64_t ifgerr : 1; /**< Interframe Gap Violation */ uint64_t coldet : 1; /**< Collision Detection */ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */ uint64_t rsverr : 1; /**< RGMII reserved opcodes */ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow */ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */ uint64_t skperr : 1; /**< Skipper error */ uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */ uint64_t lenerr : 1; /**< Frame was received with length error */ uint64_t alnerr : 1; /**< Frame was received with an alignment error */ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ uint64_t jabber : 1; /**< Frame was received with length > sys_length */ uint64_t maxerr : 1; /**< Frame was received with length > max_length */ uint64_t carext : 1; /**< RGMII carrier extend error */ uint64_t minerr : 1; /**< Frame was received with length <
min_length */ #else uint64_t minerr : 1; uint64_t carext : 1; uint64_t maxerr : 1; uint64_t jabber : 1; uint64_t fcserr : 1; uint64_t alnerr : 1; uint64_t lenerr : 1; uint64_t rcverr : 1; uint64_t skperr : 1; uint64_t niberr : 1; uint64_t ovrerr : 1; uint64_t pcterr : 1; uint64_t rsverr : 1; uint64_t falerr : 1; uint64_t coldet : 1; uint64_t ifgerr : 1; uint64_t phy_link : 1; uint64_t phy_spd : 1; uint64_t phy_dupx : 1; uint64_t reserved_19_63 : 45; #endif } cn30xx; struct cvmx_gmxx_rxx_int_en_cn30xx cn31xx; struct cvmx_gmxx_rxx_int_en_cn30xx cn38xx; struct cvmx_gmxx_rxx_int_en_cn30xx cn38xxp2; struct cvmx_gmxx_rxx_int_en_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */ uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex */ uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed */ uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus */ uint64_t ifgerr : 1; /**< Interframe Gap Violation */ uint64_t coldet : 1; /**< Collision Detection */ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */ uint64_t rsverr : 1; /**< RGMII reserved opcodes */ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow */ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */ uint64_t skperr : 1; /**< Skipper error */ uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */ uint64_t reserved_6_6 : 1; uint64_t alnerr : 1; /**< Frame was received with an alignment error */ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ uint64_t jabber : 1; /**< Frame was received with length > sys_length */ uint64_t reserved_2_2 : 1; uint64_t carext : 1; /**< RGMII carrier extend error */ uint64_t reserved_0_0 : 1; #else uint64_t reserved_0_0 : 1; uint64_t carext : 1; uint64_t reserved_2_2 : 1; uint64_t jabber : 1; uint64_t fcserr : 1; uint64_t alnerr : 1; uint64_t reserved_6_6 : 1; uint64_t rcverr : 1; uint64_t skperr : 1; uint64_t niberr : 1; uint64_t ovrerr : 1; uint64_t pcterr : 1; uint64_t rsverr : 1; uint64_t falerr : 1; uint64_t coldet : 1; uint64_t ifgerr : 1; uint64_t phy_link : 1; uint64_t phy_spd : 1; uint64_t phy_dupx : 1; uint64_t pause_drp : 1; uint64_t reserved_20_63 : 44; #endif } cn50xx; struct cvmx_gmxx_rxx_int_en_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_29_63 : 35; uint64_t hg2cc : 1; /**< HiGig2 CRC8 or Control char error interrupt enable */ uint64_t hg2fld : 1; /**< HiGig2 Bad field error interrupt enable */ uint64_t undat : 1; /**< Unexpected Data (XAUI Mode only) */ uint64_t uneop : 1; /**< Unexpected EOP (XAUI Mode only) */ uint64_t unsop : 1; /**< Unexpected SOP (XAUI Mode only) */ uint64_t bad_term : 1; /**< Frame is terminated by control character other than /T/. The error propagation control character /E/ will be included as part of the frame and does not cause a frame termination.
(XAUI Mode only) */ uint64_t bad_seq : 1; /**< Reserved Sequence Detected (XAUI Mode only) */ uint64_t rem_fault : 1; /**< Remote Fault Sequence Detected (XAUI Mode only) */ uint64_t loc_fault : 1; /**< Local Fault Sequence Detected (XAUI Mode only) */ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */ uint64_t reserved_16_18 : 3; uint64_t ifgerr : 1; /**< Interframe Gap Violation (SGMII/1000Base-X only) */ uint64_t coldet : 1; /**< Collision Detection (SGMII/1000Base-X half-duplex only) */ uint64_t falerr : 1; /**< False carrier error or extend error after slottime (SGMII/1000Base-X only) */ uint64_t rsverr : 1; /**< Reserved opcodes */ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow (SGMII/1000Base-X only) */ uint64_t reserved_9_9 : 1; uint64_t skperr : 1; /**< Skipper error */ uint64_t rcverr : 1; /**< Frame was received with Data reception error */ uint64_t reserved_5_6 : 2; uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ uint64_t jabber : 1; /**< Frame was received with length > sys_length */ uint64_t reserved_2_2 : 1; uint64_t carext : 1; /**< Carrier extend error (SGMII/1000Base-X only) */ uint64_t reserved_0_0 : 1; #else uint64_t reserved_0_0 : 1; uint64_t carext : 1; uint64_t reserved_2_2 : 1; uint64_t jabber : 1; uint64_t fcserr : 1; uint64_t reserved_5_6 : 2; uint64_t rcverr : 1; uint64_t skperr : 1; uint64_t reserved_9_9 : 1; uint64_t ovrerr : 1; uint64_t pcterr : 1; uint64_t rsverr : 1; uint64_t falerr : 1; uint64_t coldet : 1; uint64_t ifgerr : 1; uint64_t reserved_16_18 : 3; uint64_t pause_drp : 1; uint64_t loc_fault : 1; uint64_t rem_fault : 1; uint64_t bad_seq : 1; uint64_t bad_term : 1; uint64_t unsop : 1; uint64_t uneop : 1; uint64_t undat : 1; uint64_t hg2fld : 1; uint64_t hg2cc : 1; uint64_t reserved_29_63 : 35; #endif } cn52xx; struct cvmx_gmxx_rxx_int_en_cn52xx cn52xxp1; struct cvmx_gmxx_rxx_int_en_cn52xx cn56xx; struct cvmx_gmxx_rxx_int_en_cn56xxp1 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_27_63 : 37; uint64_t undat : 1; /**< Unexpected Data (XAUI Mode only) */ uint64_t uneop : 1; /**< Unexpected EOP (XAUI Mode only) */ uint64_t unsop : 1; /**< Unexpected SOP (XAUI Mode only) */ uint64_t bad_term : 1; /**< Frame is terminated by control character other than /T/. The error propagation control character /E/ will be included as part of the frame and does not cause a frame termination. (XAUI Mode only) */ uint64_t bad_seq : 1; /**< Reserved Sequence Detected (XAUI Mode only) */ uint64_t rem_fault : 1; /**< Remote Fault Sequence Detected (XAUI Mode only) */ uint64_t loc_fault : 1; /**< Local Fault Sequence Detected (XAUI Mode only) */ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */ uint64_t reserved_16_18 : 3; uint64_t ifgerr : 1; /**< Interframe Gap Violation (SGMII/1000Base-X only) */ uint64_t coldet : 1; /**< Collision Detection (SGMII/1000Base-X half-duplex only) */ uint64_t falerr : 1; /**< False carrier error or extend error after slottime (SGMII/1000Base-X only) */ uint64_t rsverr : 1; /**< Reserved opcodes */ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow (SGMII/1000Base-X only) */ uint64_t reserved_9_9 : 1; uint64_t skperr : 1; /**< Skipper error */ uint64_t rcverr : 1; /**< Frame was received with Data reception error */ uint64_t reserved_5_6 : 2; uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ uint64_t jabber : 1; /**< Frame was received with length > sys_length */ uint64_t reserved_2_2 : 1; uint64_t carext : 1; /**< Carrier extend error (SGMII/1000Base-X only) */ uint64_t reserved_0_0 : 1; #else uint64_t reserved_0_0 : 1; uint64_t carext : 1; uint64_t reserved_2_2 : 1; uint64_t jabber : 1; uint64_t fcserr : 1; uint64_t reserved_5_6 : 2; uint64_t rcverr : 1; uint64_t skperr : 1; uint64_t reserved_9_9 : 1; uint64_t ovrerr : 1; uint64_t pcterr : 1; uint64_t rsverr : 1; uint64_t falerr : 1; uint64_t coldet : 1; uint64_t ifgerr : 1; uint64_t reserved_16_18 : 3; uint64_t pause_drp : 1; uint64_t loc_fault : 1; uint64_t rem_fault : 1; uint64_t bad_seq : 1; uint64_t bad_term : 1; uint64_t unsop : 1; uint64_t uneop : 1; uint64_t undat : 1; uint64_t reserved_27_63 : 37; #endif } cn56xxp1; struct cvmx_gmxx_rxx_int_en_cn58xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */ uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex */ uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed */ uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus */ uint64_t ifgerr : 1; /**< Interframe Gap Violation */ uint64_t coldet : 1; /**< Collision Detection */ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */ uint64_t rsverr : 1; /**< RGMII reserved opcodes */ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow */ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */ uint64_t skperr : 1; /**< Skipper error */ uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */ uint64_t lenerr : 1; /**< Frame was received with length error */ uint64_t alnerr : 1; /**< Frame was received with an alignment error */ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ uint64_t jabber : 1; /**< Frame was received with length > sys_length */ uint64_t maxerr : 1; /**< Frame was received with length > max_length */ uint64_t carext : 1; /**< RGMII carrier extend error */ uint64_t minerr : 1; /**< Frame was received with length < min_length */ #else uint64_t minerr : 1; uint64_t carext : 1; uint64_t maxerr : 1; uint64_t jabber : 1; uint64_t fcserr : 1; uint64_t alnerr : 1; uint64_t lenerr : 1; uint64_t rcverr : 1; uint64_t skperr : 1; uint64_t niberr : 1; uint64_t ovrerr : 1; uint64_t pcterr : 1; uint64_t rsverr : 1; uint64_t
falerr : 1; uint64_t coldet : 1; uint64_t ifgerr : 1; uint64_t phy_link : 1; uint64_t phy_spd : 1; uint64_t phy_dupx : 1; uint64_t pause_drp : 1; uint64_t reserved_20_63 : 44; #endif } cn58xx; struct cvmx_gmxx_rxx_int_en_cn58xx cn58xxp1; } cvmx_gmxx_rxx_int_en_t; /** * cvmx_gmx#_rx#_int_reg * * GMX_RX_INT_REG = Interrupt Register * * * Notes: * (1) exceptions will only be raised to the control processor if the * corresponding bit in the GMX_RX_INT_EN register is set. * * (2) exception conditions 10:0 can also set the rcv/opcode in the received * packet's workQ entry. The GMX_RX_FRM_CHK register provides a bit mask * for configuring which conditions set the error. * * (3) in half duplex operation, the expectation is that collisions will appear * as either MINERR or CAREXT errors. * * (4) JABBER - An RX Jabber error indicates that a packet was received which * is longer than the maximum allowed packet as defined by the * system. GMX will truncate the packet at the JABBER count. * Failure to do so could lead to system instability. * * (5) NIBERR - This error is illegal at 1000Mbs speeds * (GMX_RX_PRT_CFG[SPEED]==0) and will never assert. * * (6) MAXERR - for untagged frames, the total frame DA+SA+TL+DATA+PAD+FCS > * GMX_RX_FRM_MAX. For tagged frames, DA+SA+VLAN+TL+DATA+PAD+FCS * > GMX_RX_FRM_MAX + 4*VLAN_VAL + 4*VLAN_STACKED. * * (7) MINERR - total frame DA+SA+TL+DATA+PAD+FCS < GMX_RX_FRM_MIN. * * (8) ALNERR - Indicates that the packet received was not an integer number of * bytes. If FCS checking is enabled, ALNERR will only assert if * the FCS is bad. If FCS checking is disabled, ALNERR will * assert in all non-integer frame cases. * * (9) Collisions - Collisions can only occur in half-duplex mode. A collision * is assumed by the receiver when the slottime * (GMX_PRT_CFG[SLOTTIME]) is not satisfied. In 10/100 mode, * this will result in a frame < SLOTTIME. In 1000 mode, it * could result either in frame < SLOTTIME or a carrier extend * error with the SLOTTIME. These conditions are visible by... * * . transfer ended before slottime - COLDET * . carrier extend error - CAREXT * * (A) LENERR - Length errors occur when the received packet does not match the * length field. LENERR is only checked for packets between 64 * and 1500 bytes. For untagged frames, the length must match * exactly. For tagged frames the length or length+4 must match. * * (B) PCTERR - checks that the frame transitions from PREAMBLE=>SFD=>DATA. * Does not check the number of PREAMBLE cycles. * * (C) OVRERR - Not to be included in the HRM * * OVRERR is an architectural assertion check internal to GMX to * make sure no assumption was violated. In a correctly operating * system, this interrupt can never fire. * * GMX has an internal arbiter which selects which of 4 ports to * buffer in the main RX FIFO. If we normally buffer 8 bytes, * then each port will typically push a tick every 8 cycles - if * the packet interface is going as fast as possible. If there * are four ports, they push every two cycles. So that's the * assumption: that the inbound module will always be able to * consume the tick before another is produced. If that doesn't * happen - that's when OVRERR will assert. * * (D) In XAUI mode prt0 is used for interrupt logging.
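 *
 * For illustration only (not part of the generated description), the shape
 * of a minimal service routine, assuming the cvmx_read_csr()/cvmx_write_csr()
 * helpers, the CVMX_GMXX_RXX_INT_REG/INT_EN() address macros, and the usual
 * OCTEON write-one-to-clear convention for interrupt bits.
 *
 * @verbatim
 * cvmx_gmxx_rxx_int_reg_t isr;
 * isr.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index, interface));
 * isr.u64 &= cvmx_read_csr(CVMX_GMXX_RXX_INT_EN(index, interface));
 * if (isr.s.ovrerr) {
 *     // architectural assertion check - per note (C) this should never fire
 * }
 * cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, interface), isr.u64); // ack
 * @endverbatim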
*/ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_int_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_29_63 : 35; uint64_t hg2cc : 1; /**< HiGig2 received message CRC or Control char error Set when either CRC8 error detected or when a Control Character is found in the message bytes after the K.SOM NOTE: HG2CC has higher priority than HG2FLD i.e. a HiGig2 message that results in HG2CC getting set, will never set HG2FLD. */ uint64_t hg2fld : 1; /**< HiGig2 received message field error, as below 1) MSG_TYPE field not 6'b00_0000 i.e. it is not a FLOW CONTROL message, which is the only defined type for HiGig2 2) FWD_TYPE field not 2'b00 i.e. Link Level msg which is the only defined type for HiGig2 3) FC_OBJECT field is neither 4'b0000 for Physical Link nor 4'b0010 for Logical Link. Those are the only two defined types in HiGig2 */ uint64_t undat : 1; /**< Unexpected Data (XAUI Mode only) */ uint64_t uneop : 1; /**< Unexpected EOP (XAUI Mode only) */ uint64_t unsop : 1; /**< Unexpected SOP (XAUI Mode only) */ uint64_t bad_term : 1; /**< Frame is terminated by control character other than /T/. The error propagation control character /E/ will be included as part of the frame and does not cause a frame termination. (XAUI Mode only) */ uint64_t bad_seq : 1; /**< Reserved Sequence Detected (XAUI Mode only) */ uint64_t rem_fault : 1; /**< Remote Fault Sequence Detected (XAUI Mode only) */ uint64_t loc_fault : 1; /**< Local Fault Sequence Detected (XAUI Mode only) */ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */ uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex */ uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed */ uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus */ uint64_t ifgerr : 1; /**< Interframe Gap Violation Does not necessarily indicate a failure */ uint64_t coldet : 1; /**< Collision Detection */ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */ uint64_t rsverr : 1; /**< RGMII reserved opcodes */ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow This interrupt should never assert */ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */ uint64_t skperr : 1; /**< Skipper error */ uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */ uint64_t lenerr : 1; /**< Frame was received with length error */ uint64_t alnerr : 1; /**< Frame was received with an alignment error */ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ uint64_t jabber : 1; /**< Frame was received with length > sys_length */ uint64_t maxerr : 1; /**< Frame was received with length > max_length */ uint64_t carext : 1; /**< RGMII carrier extend error */ uint64_t minerr : 1; /**< Frame was received with length < min_length */ #else uint64_t minerr : 1; uint64_t carext : 1; uint64_t maxerr : 1; uint64_t jabber : 1; uint64_t fcserr : 1; uint64_t alnerr : 1; uint64_t lenerr : 1; uint64_t rcverr : 1; uint64_t skperr : 1; uint64_t niberr : 1; uint64_t ovrerr : 1; uint64_t pcterr : 1; uint64_t rsverr : 1; uint64_t falerr : 1; uint64_t coldet : 1; uint64_t ifgerr : 1; uint64_t phy_link : 1; uint64_t phy_spd : 1; uint64_t phy_dupx : 1; uint64_t pause_drp : 1; uint64_t loc_fault : 1; uint64_t rem_fault : 1; uint64_t bad_seq : 1; uint64_t bad_term : 1; uint64_t unsop : 1; uint64_t uneop : 1; uint64_t undat : 1; uint64_t hg2fld : 1; uint64_t hg2cc : 1; uint64_t reserved_29_63 : 35; #endif } s; struct
cvmx_gmxx_rxx_int_reg_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_19_63 : 45; uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex */ uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed */ uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus */ uint64_t ifgerr : 1; /**< Interframe Gap Violation Does not necessarily indicate a failure */ uint64_t coldet : 1; /**< Collision Detection */ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */ uint64_t rsverr : 1; /**< RGMII reserved opcodes */ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow This interrupt should never assert */ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */ uint64_t skperr : 1; /**< Skipper error */ uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */ uint64_t lenerr : 1; /**< Frame was received with length error */ uint64_t alnerr : 1; /**< Frame was received with an alignment error */ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ uint64_t jabber : 1; /**< Frame was received with length > sys_length */ uint64_t maxerr : 1; /**< Frame was received with length > max_length */ uint64_t carext : 1; /**< RGMII carrier extend error */ uint64_t minerr : 1; /**< Frame was received with length < min_length */ #else uint64_t minerr : 1; uint64_t carext : 1; uint64_t maxerr : 1; uint64_t jabber : 1; uint64_t fcserr : 1; uint64_t alnerr : 1; uint64_t lenerr : 1; uint64_t rcverr : 1; uint64_t skperr : 1; uint64_t niberr : 1; uint64_t ovrerr : 1; uint64_t pcterr : 1; uint64_t rsverr : 1; uint64_t falerr : 1; uint64_t coldet : 1; uint64_t ifgerr : 1; uint64_t phy_link : 1; uint64_t phy_spd : 1; uint64_t phy_dupx : 1; uint64_t reserved_19_63 : 45; #endif } cn30xx; struct cvmx_gmxx_rxx_int_reg_cn30xx cn31xx; struct cvmx_gmxx_rxx_int_reg_cn30xx cn38xx; struct cvmx_gmxx_rxx_int_reg_cn30xx cn38xxp2; struct cvmx_gmxx_rxx_int_reg_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */ uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex */ uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed */ uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus */ uint64_t ifgerr : 1; /**< Interframe Gap Violation Does not necessarily indicate a failure */ uint64_t coldet : 1; /**< Collision Detection */ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */ uint64_t rsverr : 1; /**< RGMII reserved opcodes */ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow This interrupt should never assert */ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */ uint64_t skperr : 1; /**< Skipper error */ uint64_t rcverr : 1; /**< Frame was received with RGMII Data reception error */ uint64_t reserved_6_6 : 1; uint64_t alnerr : 1; /**< Frame was received with an alignment error */ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ uint64_t jabber : 1; /**< Frame was received with length > sys_length */ uint64_t reserved_2_2 : 1; uint64_t carext : 1; /**< RGMII carrier extend error */ uint64_t reserved_0_0 : 1; #else uint64_t reserved_0_0 : 1; uint64_t carext : 1; uint64_t reserved_2_2 : 1; uint64_t jabber : 1; uint64_t fcserr : 1; uint64_t alnerr : 1; uint64_t reserved_6_6 : 1; uint64_t rcverr : 1; uint64_t skperr : 1; uint64_t niberr : 1; uint64_t ovrerr : 1; uint64_t pcterr : 1; uint64_t rsverr : 1; uint64_t falerr : 1; uint64_t coldet : 1; uint64_t ifgerr : 1; uint64_t phy_link : 1; uint64_t phy_spd : 1; uint64_t phy_dupx : 1; uint64_t pause_drp : 1; uint64_t reserved_20_63 : 44; #endif } cn50xx; struct cvmx_gmxx_rxx_int_reg_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_29_63 : 35; uint64_t hg2cc : 1; /**< HiGig2 received message CRC or Control char error Set when either CRC8 error detected or when a Control Character is found in the message bytes after the K.SOM NOTE: HG2CC has higher priority than HG2FLD i.e. a HiGig2 message that results in HG2CC getting set, will never set HG2FLD. */ uint64_t hg2fld : 1; /**< HiGig2 received message field error, as below 1) MSG_TYPE field not 6'b00_0000 i.e. it is not a FLOW CONTROL message, which is the only defined type for HiGig2 2) FWD_TYPE field not 2'b00 i.e. Link Level msg which is the only defined type for HiGig2 3) FC_OBJECT field is neither 4'b0000 for Physical Link nor 4'b0010 for Logical Link. Those are the only two defined types in HiGig2 */ uint64_t undat : 1; /**< Unexpected Data (XAUI Mode only) */ uint64_t uneop : 1; /**< Unexpected EOP (XAUI Mode only) */ uint64_t unsop : 1; /**< Unexpected SOP (XAUI Mode only) */ uint64_t bad_term : 1; /**< Frame is terminated by control character other than /T/. The error propagation control character /E/ will be included as part of the frame and does not cause a frame termination. (XAUI Mode only) */ uint64_t bad_seq : 1; /**< Reserved Sequence Detected (XAUI Mode only) */ uint64_t rem_fault : 1; /**< Remote Fault Sequence Detected (XAUI Mode only) */ uint64_t loc_fault : 1; /**< Local Fault Sequence Detected (XAUI Mode only) */ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */ uint64_t reserved_16_18 : 3; uint64_t ifgerr : 1; /**< Interframe Gap Violation Does not necessarily indicate a failure (SGMII/1000Base-X only) */ uint64_t coldet : 1; /**< Collision Detection (SGMII/1000Base-X half-duplex only) */ uint64_t falerr : 1; /**< False carrier error or extend error after slottime (SGMII/1000Base-X only) */ uint64_t rsverr : 1; /**< Reserved opcodes */ uint64_t pcterr : 1; /**< Bad Preamble / Protocol In XAUI mode, the column of data that was bad will be logged in GMX_RX_XAUI_BAD_COL */ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow This interrupt should never assert (SGMII/1000Base-X only) */ uint64_t reserved_9_9 : 1; uint64_t skperr : 1; /**< Skipper error */ uint64_t rcverr : 1; /**< Frame was received with Data reception error */ uint64_t reserved_5_6 : 2; uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ uint64_t jabber : 1; /**< Frame was received with length > sys_length */ uint64_t reserved_2_2 : 1; uint64_t carext : 1; /**< Carrier extend error (SGMII/1000Base-X only) */ uint64_t reserved_0_0 : 1; #else uint64_t reserved_0_0 : 1; uint64_t carext : 1; uint64_t reserved_2_2 : 1; uint64_t jabber : 1; uint64_t fcserr : 1; uint64_t reserved_5_6 : 2; uint64_t rcverr : 1; uint64_t skperr : 1; uint64_t reserved_9_9 : 1; uint64_t ovrerr : 1; uint64_t pcterr : 1; uint64_t rsverr : 1; uint64_t falerr : 1; uint64_t coldet : 1; uint64_t ifgerr : 1; uint64_t reserved_16_18 : 3; uint64_t pause_drp : 1; uint64_t loc_fault : 1; uint64_t rem_fault : 1; uint64_t bad_seq : 1; uint64_t bad_term : 1; uint64_t unsop : 1; uint64_t uneop : 1; uint64_t undat : 1; uint64_t hg2fld : 1; uint64_t hg2cc : 1; uint64_t reserved_29_63 : 35; #endif } cn52xx;
struct cvmx_gmxx_rxx_int_reg_cn52xx cn52xxp1; struct cvmx_gmxx_rxx_int_reg_cn52xx cn56xx; struct cvmx_gmxx_rxx_int_reg_cn56xxp1 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_27_63 : 37; uint64_t undat : 1; /**< Unexpected Data (XAUI Mode only) */ uint64_t uneop : 1; /**< Unexpected EOP (XAUI Mode only) */ uint64_t unsop : 1; /**< Unexpected SOP (XAUI Mode only) */ uint64_t bad_term : 1; /**< Frame is terminated by control character other than /T/. The error propagation control character /E/ will be included as part of the frame and does not cause a frame termination. (XAUI Mode only) */ uint64_t bad_seq : 1; /**< Reserved Sequence Detected (XAUI Mode only) */ uint64_t rem_fault : 1; /**< Remote Fault Sequence Detected (XAUI Mode only) */ uint64_t loc_fault : 1; /**< Local Fault Sequence Detected (XAUI Mode only) */ uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */ uint64_t reserved_16_18 : 3; uint64_t ifgerr : 1; /**< Interframe Gap Violation Does not necessarily indicate a failure (SGMII/1000Base-X only) */ uint64_t coldet : 1; /**< Collision Detection (SGMII/1000Base-X half-duplex only) */ uint64_t falerr : 1; /**< False carrier error or extend error after slottime (SGMII/1000Base-X only) */ uint64_t rsverr : 1; /**< Reserved opcodes */ uint64_t pcterr : 1; /**< Bad Preamble / Protocol In XAUI mode, the column of data that was bad will be logged in GMX_RX_XAUI_BAD_COL */ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow This interrupt should never assert (SGMII/1000Base-X only) */ uint64_t reserved_9_9 : 1; uint64_t skperr : 1; /**< Skipper error */ uint64_t rcverr : 1; /**< Frame was received with Data reception error */ uint64_t reserved_5_6 : 2; uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ uint64_t jabber : 1; /**< Frame was received with length > sys_length */ uint64_t reserved_2_2 : 1; uint64_t carext : 1; /**< Carrier extend error (SGMII/1000Base-X only) */ uint64_t reserved_0_0 : 1; #else uint64_t reserved_0_0 : 1; uint64_t carext : 1; uint64_t reserved_2_2 : 1; uint64_t jabber : 1; uint64_t fcserr : 1; uint64_t reserved_5_6 : 2; uint64_t rcverr : 1; uint64_t skperr : 1; uint64_t reserved_9_9 : 1; uint64_t ovrerr : 1; uint64_t pcterr : 1; uint64_t rsverr : 1; uint64_t falerr : 1; uint64_t coldet : 1; uint64_t ifgerr : 1; uint64_t reserved_16_18 : 3; uint64_t pause_drp : 1; uint64_t loc_fault : 1; uint64_t rem_fault : 1; uint64_t bad_seq : 1; uint64_t bad_term : 1; uint64_t unsop : 1; uint64_t uneop : 1; uint64_t undat : 1; uint64_t reserved_27_63 : 37; #endif } cn56xxp1; struct cvmx_gmxx_rxx_int_reg_cn58xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t pause_drp : 1; /**< Pause packet was dropped due to full GMX RX FIFO */ uint64_t phy_dupx : 1; /**< Change in the RGMII inbound LinkDuplex */ uint64_t phy_spd : 1; /**< Change in the RGMII inbound LinkSpeed */ uint64_t phy_link : 1; /**< Change in the RGMII inbound LinkStatus */ uint64_t ifgerr : 1; /**< Interframe Gap Violation Does not necessarily indicate a failure */ uint64_t coldet : 1; /**< Collision Detection */ uint64_t falerr : 1; /**< False carrier error or extend error after slottime */ uint64_t rsverr : 1; /**< RGMII reserved opcodes */ uint64_t pcterr : 1; /**< Bad Preamble / Protocol */ uint64_t ovrerr : 1; /**< Internal Data Aggregation Overflow This interrupt should never assert */ uint64_t niberr : 1; /**< Nibble error (hi_nibble != lo_nibble) */ uint64_t skperr : 1; /**< Skipper error */ uint64_t rcverr : 1; /**< Frame
was received with RGMII Data reception error */ uint64_t lenerr : 1; /**< Frame was received with length error */ uint64_t alnerr : 1; /**< Frame was received with an alignment error */ uint64_t fcserr : 1; /**< Frame was received with FCS/CRC error */ uint64_t jabber : 1; /**< Frame was received with length > sys_length */ uint64_t maxerr : 1; /**< Frame was received with length > max_length */ uint64_t carext : 1; /**< RGMII carrier extend error */ uint64_t minerr : 1; /**< Frame was received with length < min_length */ #else uint64_t minerr : 1; uint64_t carext : 1; uint64_t maxerr : 1; uint64_t jabber : 1; uint64_t fcserr : 1; uint64_t alnerr : 1; uint64_t lenerr : 1; uint64_t rcverr : 1; uint64_t skperr : 1; uint64_t niberr : 1; uint64_t ovrerr : 1; uint64_t pcterr : 1; uint64_t rsverr : 1; uint64_t falerr : 1; uint64_t coldet : 1; uint64_t ifgerr : 1; uint64_t phy_link : 1; uint64_t phy_spd : 1; uint64_t phy_dupx : 1; uint64_t pause_drp : 1; uint64_t reserved_20_63 : 44; #endif } cn58xx; struct cvmx_gmxx_rxx_int_reg_cn58xx cn58xxp1; } cvmx_gmxx_rxx_int_reg_t; /** * cvmx_gmx#_rx#_jabber * * GMX_RX_JABBER = The max size packet after which GMX will truncate * * * Notes: * CNT must be 8-byte aligned such that CNT[2:0] == 0 * * The packet that will be sent to the packet input logic will have an * additional 8 bytes if GMX_RX_FRM_CTL[PRE_CHK] is set and * GMX_RX_FRM_CTL[PRE_STRP] is clear. The max packet that will be sent is * defined as... * * max_sized_packet = GMX_RX_JABBER[CNT]+((GMX_RX_FRM_CTL[PRE_CHK] & !GMX_RX_FRM_CTL[PRE_STRP])*8) * * In XAUI mode prt0 is used for checking. */ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_jabber_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t cnt : 16; /**< Byte count for jabber check Failing packets set the JABBER interrupt and are optionally sent with opcode==JABBER GMX will truncate the packet to CNT bytes CNT >= GMX_RX_FRM_MAX[LEN] */ #else uint64_t cnt : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_gmxx_rxx_jabber_s cn30xx; struct cvmx_gmxx_rxx_jabber_s cn31xx; struct cvmx_gmxx_rxx_jabber_s cn38xx; struct cvmx_gmxx_rxx_jabber_s cn38xxp2; struct cvmx_gmxx_rxx_jabber_s cn50xx; struct cvmx_gmxx_rxx_jabber_s cn52xx; struct cvmx_gmxx_rxx_jabber_s cn52xxp1; struct cvmx_gmxx_rxx_jabber_s cn56xx; struct cvmx_gmxx_rxx_jabber_s cn56xxp1; struct cvmx_gmxx_rxx_jabber_s cn58xx; struct cvmx_gmxx_rxx_jabber_s cn58xxp1; } cvmx_gmxx_rxx_jabber_t; /** * cvmx_gmx#_rx#_pause_drop_time * * GMX_RX_PAUSE_DROP_TIME = The TIME field in a PAUSE Packet which was dropped due to GMX RX FIFO full condition * */ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_pause_drop_time_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t status : 16; /**< Time extracted from the dropped PAUSE packet */ #else uint64_t status : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_gmxx_rxx_pause_drop_time_s cn50xx; struct cvmx_gmxx_rxx_pause_drop_time_s cn52xx; struct cvmx_gmxx_rxx_pause_drop_time_s cn52xxp1; struct cvmx_gmxx_rxx_pause_drop_time_s cn56xx; struct cvmx_gmxx_rxx_pause_drop_time_s cn56xxp1; struct cvmx_gmxx_rxx_pause_drop_time_s cn58xx; struct cvmx_gmxx_rxx_pause_drop_time_s cn58xxp1; } cvmx_gmxx_rxx_pause_drop_time_t; /** * cvmx_gmx#_rx#_rx_inbnd * * GMX_RX_INBND = RGMII InBand Link Status * * * Notes: * These fields are only valid if the attached PHY is operating in RGMII mode * and supports the optional in-band status (see section 3.4.1 of the RGMII * specification, version 1.3 for more
information). */ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_rx_inbnd_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t duplex : 1; /**< RGMII Inbound LinkDuplex 0=half-duplex 1=full-duplex */ uint64_t speed : 2; /**< RGMII Inbound LinkSpeed 00=2.5MHz 01=25MHz 10=125MHz 11=Reserved */ uint64_t status : 1; /**< RGMII Inbound LinkStatus 0=down 1=up */ #else uint64_t status : 1; uint64_t speed : 2; uint64_t duplex : 1; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_gmxx_rxx_rx_inbnd_s cn30xx; struct cvmx_gmxx_rxx_rx_inbnd_s cn31xx; struct cvmx_gmxx_rxx_rx_inbnd_s cn38xx; struct cvmx_gmxx_rxx_rx_inbnd_s cn38xxp2; struct cvmx_gmxx_rxx_rx_inbnd_s cn50xx; struct cvmx_gmxx_rxx_rx_inbnd_s cn58xx; struct cvmx_gmxx_rxx_rx_inbnd_s cn58xxp1; } cvmx_gmxx_rxx_rx_inbnd_t; /** * cvmx_gmx#_rx#_stats_ctl * * GMX_RX_STATS_CTL = RX Stats Control register * */ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_stats_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t rd_clr : 1; /**< RX Stats registers will clear on reads */ #else uint64_t rd_clr : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_gmxx_rxx_stats_ctl_s cn30xx; struct cvmx_gmxx_rxx_stats_ctl_s cn31xx; struct cvmx_gmxx_rxx_stats_ctl_s cn38xx; struct cvmx_gmxx_rxx_stats_ctl_s cn38xxp2; struct cvmx_gmxx_rxx_stats_ctl_s cn50xx; struct cvmx_gmxx_rxx_stats_ctl_s cn52xx; struct cvmx_gmxx_rxx_stats_ctl_s cn52xxp1; struct cvmx_gmxx_rxx_stats_ctl_s cn56xx; struct cvmx_gmxx_rxx_stats_ctl_s cn56xxp1; struct cvmx_gmxx_rxx_stats_ctl_s cn58xx; struct cvmx_gmxx_rxx_stats_ctl_s cn58xxp1; } cvmx_gmxx_rxx_stats_ctl_t; /** * cvmx_gmx#_rx#_stats_octs * * Notes: * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set * - Counters will wrap */ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_stats_octs_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_48_63 : 16; uint64_t cnt : 48; /**< Octet count of received good packets */ #else uint64_t cnt : 48; uint64_t reserved_48_63 : 16; #endif } s; struct cvmx_gmxx_rxx_stats_octs_s cn30xx; struct cvmx_gmxx_rxx_stats_octs_s cn31xx; struct cvmx_gmxx_rxx_stats_octs_s cn38xx; struct cvmx_gmxx_rxx_stats_octs_s cn38xxp2; struct cvmx_gmxx_rxx_stats_octs_s cn50xx; struct cvmx_gmxx_rxx_stats_octs_s cn52xx; struct cvmx_gmxx_rxx_stats_octs_s cn52xxp1; struct cvmx_gmxx_rxx_stats_octs_s cn56xx; struct cvmx_gmxx_rxx_stats_octs_s cn56xxp1; struct cvmx_gmxx_rxx_stats_octs_s cn58xx; struct cvmx_gmxx_rxx_stats_octs_s cn58xxp1; } cvmx_gmxx_rxx_stats_octs_t; /** * cvmx_gmx#_rx#_stats_octs_ctl * * Notes: * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set * - Counters will wrap */ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_stats_octs_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_48_63 : 16; uint64_t cnt : 48; /**< Octet count of received pause packets */ #else uint64_t cnt : 48; uint64_t reserved_48_63 : 16; #endif } s; struct cvmx_gmxx_rxx_stats_octs_ctl_s cn30xx; struct cvmx_gmxx_rxx_stats_octs_ctl_s cn31xx; struct cvmx_gmxx_rxx_stats_octs_ctl_s cn38xx; struct cvmx_gmxx_rxx_stats_octs_ctl_s cn38xxp2; struct cvmx_gmxx_rxx_stats_octs_ctl_s cn50xx; struct cvmx_gmxx_rxx_stats_octs_ctl_s cn52xx; struct cvmx_gmxx_rxx_stats_octs_ctl_s cn52xxp1; struct cvmx_gmxx_rxx_stats_octs_ctl_s cn56xx; struct cvmx_gmxx_rxx_stats_octs_ctl_s cn56xxp1; struct cvmx_gmxx_rxx_stats_octs_ctl_s cn58xx; struct cvmx_gmxx_rxx_stats_octs_ctl_s cn58xxp1; } cvmx_gmxx_rxx_stats_octs_ctl_t; /** * 
cvmx_gmx#_rx#_stats_octs_dmac * * Notes: * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set * - Counters will wrap */ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_stats_octs_dmac_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_48_63 : 16; uint64_t cnt : 48; /**< Octet count of filtered dmac packets */ #else uint64_t cnt : 48; uint64_t reserved_48_63 : 16; #endif } s; struct cvmx_gmxx_rxx_stats_octs_dmac_s cn30xx; struct cvmx_gmxx_rxx_stats_octs_dmac_s cn31xx; struct cvmx_gmxx_rxx_stats_octs_dmac_s cn38xx; struct cvmx_gmxx_rxx_stats_octs_dmac_s cn38xxp2; struct cvmx_gmxx_rxx_stats_octs_dmac_s cn50xx; struct cvmx_gmxx_rxx_stats_octs_dmac_s cn52xx; struct cvmx_gmxx_rxx_stats_octs_dmac_s cn52xxp1; struct cvmx_gmxx_rxx_stats_octs_dmac_s cn56xx; struct cvmx_gmxx_rxx_stats_octs_dmac_s cn56xxp1; struct cvmx_gmxx_rxx_stats_octs_dmac_s cn58xx; struct cvmx_gmxx_rxx_stats_octs_dmac_s cn58xxp1; } cvmx_gmxx_rxx_stats_octs_dmac_t; /** * cvmx_gmx#_rx#_stats_octs_drp * * Notes: * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set * - Counters will wrap */ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_stats_octs_drp_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_48_63 : 16; uint64_t cnt : 48; /**< Octet count of dropped packets */ #else uint64_t cnt : 48; uint64_t reserved_48_63 : 16; #endif } s; struct cvmx_gmxx_rxx_stats_octs_drp_s cn30xx; struct cvmx_gmxx_rxx_stats_octs_drp_s cn31xx; struct cvmx_gmxx_rxx_stats_octs_drp_s cn38xx; struct cvmx_gmxx_rxx_stats_octs_drp_s cn38xxp2; struct cvmx_gmxx_rxx_stats_octs_drp_s cn50xx; struct cvmx_gmxx_rxx_stats_octs_drp_s cn52xx; struct cvmx_gmxx_rxx_stats_octs_drp_s cn52xxp1; struct cvmx_gmxx_rxx_stats_octs_drp_s cn56xx; struct cvmx_gmxx_rxx_stats_octs_drp_s cn56xxp1; struct cvmx_gmxx_rxx_stats_octs_drp_s cn58xx; struct cvmx_gmxx_rxx_stats_octs_drp_s cn58xxp1; } cvmx_gmxx_rxx_stats_octs_drp_t; /** * cvmx_gmx#_rx#_stats_pkts * * GMX_RX_STATS_PKTS * * Count of good received packets - packets that are not recognized as PAUSE * packets, dropped due to the DMAC filter, dropped due to FIFO full status, or * have any other OPCODE (FCS, Length, etc). * * Notes: * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set * - Counters will wrap */ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_stats_pkts_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t cnt : 32; /**< Count of received good packets */ #else uint64_t cnt : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_gmxx_rxx_stats_pkts_s cn30xx; struct cvmx_gmxx_rxx_stats_pkts_s cn31xx; struct cvmx_gmxx_rxx_stats_pkts_s cn38xx; struct cvmx_gmxx_rxx_stats_pkts_s cn38xxp2; struct cvmx_gmxx_rxx_stats_pkts_s cn50xx; struct cvmx_gmxx_rxx_stats_pkts_s cn52xx; struct cvmx_gmxx_rxx_stats_pkts_s cn52xxp1; struct cvmx_gmxx_rxx_stats_pkts_s cn56xx; struct cvmx_gmxx_rxx_stats_pkts_s cn56xxp1; struct cvmx_gmxx_rxx_stats_pkts_s cn58xx; struct cvmx_gmxx_rxx_stats_pkts_s cn58xxp1; } cvmx_gmxx_rxx_stats_pkts_t; /** * cvmx_gmx#_rx#_stats_pkts_bad * * GMX_RX_STATS_PKTS_BAD * * Count of all packets received with some error that were not dropped * either due to the dmac filter or lack of room in the receive FIFO.
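 *
 * A hedged read sketch (assuming the CVMX_GMXX_RXX_STATS_CTL and
 * CVMX_GMXX_RXX_STATS_PKTS_BAD address macros from the companion address
 * header and the cvmx_read_csr()/cvmx_write_csr() accessors):
 *
 *   cvmx_gmxx_rxx_stats_ctl_t ctl;
 *   ctl.u64 = 0;
 *   ctl.s.rd_clr = 1;    // make subsequent stats reads clear-on-read
 *   cvmx_write_csr(CVMX_GMXX_RXX_STATS_CTL(port, interface), ctl.u64);
 *   cvmx_gmxx_rxx_stats_pkts_bad_t bad;
 *   bad.u64 = cvmx_read_csr(CVMX_GMXX_RXX_STATS_PKTS_BAD(port, interface));
 *   // bad.s.cnt now holds the count accumulated since the previous read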
* * Notes: * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set * - Counters will wrap */ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_stats_pkts_bad_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t cnt : 32; /**< Count of bad packets */ #else uint64_t cnt : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_gmxx_rxx_stats_pkts_bad_s cn30xx; struct cvmx_gmxx_rxx_stats_pkts_bad_s cn31xx; struct cvmx_gmxx_rxx_stats_pkts_bad_s cn38xx; struct cvmx_gmxx_rxx_stats_pkts_bad_s cn38xxp2; struct cvmx_gmxx_rxx_stats_pkts_bad_s cn50xx; struct cvmx_gmxx_rxx_stats_pkts_bad_s cn52xx; struct cvmx_gmxx_rxx_stats_pkts_bad_s cn52xxp1; struct cvmx_gmxx_rxx_stats_pkts_bad_s cn56xx; struct cvmx_gmxx_rxx_stats_pkts_bad_s cn56xxp1; struct cvmx_gmxx_rxx_stats_pkts_bad_s cn58xx; struct cvmx_gmxx_rxx_stats_pkts_bad_s cn58xxp1; } cvmx_gmxx_rxx_stats_pkts_bad_t; /** * cvmx_gmx#_rx#_stats_pkts_ctl * * GMX_RX_STATS_PKTS_CTL * * Count of all packets received that were recognized as Flow Control or * PAUSE packets. PAUSE packets with any kind of error are counted in * GMX_RX_STATS_PKTS_BAD. Pause packets can be optionally dropped or * forwarded based on the GMX_RX_FRM_CTL[CTL_DRP] bit. This count * increments regardless of whether the packet is dropped. Pause packets * will never be counted in GMX_RX_STATS_PKTS. Packets dropped due to the dmac * filter will be counted in GMX_RX_STATS_PKTS_DMAC and not here. * * Notes: * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set * - Counters will wrap */ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_stats_pkts_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t cnt : 32; /**< Count of received pause packets */ #else uint64_t cnt : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn30xx; struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn31xx; struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn38xx; struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn38xxp2; struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn50xx; struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn52xx; struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn52xxp1; struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn56xx; struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn56xxp1; struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn58xx; struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn58xxp1; } cvmx_gmxx_rxx_stats_pkts_ctl_t; /** * cvmx_gmx#_rx#_stats_pkts_dmac * * GMX_RX_STATS_PKTS_DMAC * * Count of all packets received that were dropped by the dmac filter. * Packets that match the DMAC will be dropped and counted here regardless * of whether they were bad packets. These packets will never be counted in * GMX_RX_STATS_PKTS. * * Some packets that were not able to satisfy the DECISION_CNT may not * actually be dropped by Octeon, but they will be counted here as if they * were dropped.
* * Notes: * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set * - Counters will wrap */ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_stats_pkts_dmac_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t cnt : 32; /**< Count of filtered dmac packets */ #else uint64_t cnt : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn30xx; struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn31xx; struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn38xx; struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn38xxp2; struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn50xx; struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn52xx; struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn52xxp1; struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn56xx; struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn56xxp1; struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn58xx; struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn58xxp1; } cvmx_gmxx_rxx_stats_pkts_dmac_t; /** * cvmx_gmx#_rx#_stats_pkts_drp * * GMX_RX_STATS_PKTS_DRP * * Count of all packets received that were dropped due to a full receive * FIFO. This counts good and bad packets received - all packets dropped by * the FIFO. It does not count packets dropped by the dmac or pause packet * filters. * * Notes: * - Cleared either by a write (of any value) or a read when GMX_RX_STATS_CTL[RD_CLR] is set * - Counters will wrap */ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_stats_pkts_drp_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t cnt : 32; /**< Count of dropped packets */ #else uint64_t cnt : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_gmxx_rxx_stats_pkts_drp_s cn30xx; struct cvmx_gmxx_rxx_stats_pkts_drp_s cn31xx; struct cvmx_gmxx_rxx_stats_pkts_drp_s cn38xx; struct cvmx_gmxx_rxx_stats_pkts_drp_s cn38xxp2; struct cvmx_gmxx_rxx_stats_pkts_drp_s cn50xx; struct cvmx_gmxx_rxx_stats_pkts_drp_s cn52xx; struct cvmx_gmxx_rxx_stats_pkts_drp_s cn52xxp1; struct cvmx_gmxx_rxx_stats_pkts_drp_s cn56xx; struct cvmx_gmxx_rxx_stats_pkts_drp_s cn56xxp1; struct cvmx_gmxx_rxx_stats_pkts_drp_s cn58xx; struct cvmx_gmxx_rxx_stats_pkts_drp_s cn58xxp1; } cvmx_gmxx_rxx_stats_pkts_drp_t; /** * cvmx_gmx#_rx#_udd_skp * * GMX_RX_UDD_SKP = Amount of User-defined data before the start of the L2 data * * * Notes: * (1) The skip bytes are part of the packet and will be sent down the NCB * packet interface and will be handled by PKI. * * (2) The system can determine if the UDD bytes are included in the FCS check * by using the FCSSEL field - if the FCS check is enabled. * * (3) Assume that the preamble/sfd is always at the start of the frame - even * before UDD bytes. In most cases, there will be no preamble in these * cases since it will be a packet interface in direct communication to * another packet interface (MAC to MAC) without a PHY involved. * * (4) We can still do address filtering and control packet filtering if the * user desires. * * (5) UDD_SKP must be 0 in half-duplex operation unless * GMX_RX_FRM_CTL[PRE_CHK] is clear. If GMX_RX_FRM_CTL[PRE_CHK] is clear, * then UDD_SKP will normally be 8. * * (6) In all cases, the UDD bytes will be sent down the packet interface as * part of the packet. The UDD bytes are never stripped from the actual * packet.
* * (7) If LEN != 0, then GMX_RX_FRM_CHK[LENERR] will be disabled and GMX_RX_INT_REG[LENERR] will be zero */ typedef union { uint64_t u64; struct cvmx_gmxx_rxx_udd_skp_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_9_63 : 55; uint64_t fcssel : 1; /**< Include the skip bytes in the FCS calculation 0 = all skip bytes are included in FCS 1 = the skip bytes are not included in FCS */ uint64_t reserved_7_7 : 1; uint64_t len : 7; /**< Amount of User-defined data before the start of the L2 data. Zero means L2 comes first. Max value is 64. */ #else uint64_t len : 7; uint64_t reserved_7_7 : 1; uint64_t fcssel : 1; uint64_t reserved_9_63 : 55; #endif } s; struct cvmx_gmxx_rxx_udd_skp_s cn30xx; struct cvmx_gmxx_rxx_udd_skp_s cn31xx; struct cvmx_gmxx_rxx_udd_skp_s cn38xx; struct cvmx_gmxx_rxx_udd_skp_s cn38xxp2; struct cvmx_gmxx_rxx_udd_skp_s cn50xx; struct cvmx_gmxx_rxx_udd_skp_s cn52xx; struct cvmx_gmxx_rxx_udd_skp_s cn52xxp1; struct cvmx_gmxx_rxx_udd_skp_s cn56xx; struct cvmx_gmxx_rxx_udd_skp_s cn56xxp1; struct cvmx_gmxx_rxx_udd_skp_s cn58xx; struct cvmx_gmxx_rxx_udd_skp_s cn58xxp1; } cvmx_gmxx_rxx_udd_skp_t; /** * cvmx_gmx#_rx_bp_drop# * * GMX_RX_BP_DROP = FIFO mark for packet drop * * * Notes: * The actual watermark is dynamic with respect to the GMX_RX_PRTS * register. The GMX_RX_PRTS controls the depth of the port's * FIFO so as ports are added or removed, the drop point may change. * * In XAUI mode prt0 is used for checking. */ typedef union { uint64_t u64; struct cvmx_gmxx_rx_bp_dropx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_6_63 : 58; uint64_t mark : 6; /**< Number of 8B ticks to reserve in the RX FIFO. When the FIFO exceeds this count, packets will be dropped and not buffered. MARK should typically be programmed to ports+1. Failure to program correctly can lead to system instability. Reset value for RGMII mode = 2 Reset value for Spi4 mode = 17 */ #else uint64_t mark : 6; uint64_t reserved_6_63 : 58; #endif } s; struct cvmx_gmxx_rx_bp_dropx_s cn30xx; struct cvmx_gmxx_rx_bp_dropx_s cn31xx; struct cvmx_gmxx_rx_bp_dropx_s cn38xx; struct cvmx_gmxx_rx_bp_dropx_s cn38xxp2; struct cvmx_gmxx_rx_bp_dropx_s cn50xx; struct cvmx_gmxx_rx_bp_dropx_s cn52xx; struct cvmx_gmxx_rx_bp_dropx_s cn52xxp1; struct cvmx_gmxx_rx_bp_dropx_s cn56xx; struct cvmx_gmxx_rx_bp_dropx_s cn56xxp1; struct cvmx_gmxx_rx_bp_dropx_s cn58xx; struct cvmx_gmxx_rx_bp_dropx_s cn58xxp1; } cvmx_gmxx_rx_bp_dropx_t; /** * cvmx_gmx#_rx_bp_off# * * GMX_RX_BP_OFF = Lowater mark to deassert backpressure * * * Notes: * In XAUI mode, prt0 is used for checking. * */ typedef union { uint64_t u64; struct cvmx_gmxx_rx_bp_offx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_6_63 : 58; uint64_t mark : 6; /**< Water mark (8B ticks) to deassert backpressure */ #else uint64_t mark : 6; uint64_t reserved_6_63 : 58; #endif } s; struct cvmx_gmxx_rx_bp_offx_s cn30xx; struct cvmx_gmxx_rx_bp_offx_s cn31xx; struct cvmx_gmxx_rx_bp_offx_s cn38xx; struct cvmx_gmxx_rx_bp_offx_s cn38xxp2; struct cvmx_gmxx_rx_bp_offx_s cn50xx; struct cvmx_gmxx_rx_bp_offx_s cn52xx; struct cvmx_gmxx_rx_bp_offx_s cn52xxp1; struct cvmx_gmxx_rx_bp_offx_s cn56xx; struct cvmx_gmxx_rx_bp_offx_s cn56xxp1; struct cvmx_gmxx_rx_bp_offx_s cn58xx; struct cvmx_gmxx_rx_bp_offx_s cn58xxp1; } cvmx_gmxx_rx_bp_offx_t; /** * cvmx_gmx#_rx_bp_on# * * GMX_RX_BP_ON = Hiwater mark for port/interface backpressure * * * Notes: * In XAUI mode, prt0 is used for checking.
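 *
 * A configuration sketch with illustrative values (assuming the
 * CVMX_GMXX_RX_BP_ONX address macro from the companion address header and
 * the cvmx_write_csr() accessor); per the field descriptions, the marks
 * must satisfy BP_OFF <= BP_ON < (FIFO_SIZE - BP_DROP):
 *
 *   cvmx_gmxx_rx_bp_onx_t on;
 *   on.u64 = 0;
 *   on.s.mark = 0x40;    // assert backpressure at 64 8B ticks (512 bytes)
 *   cvmx_write_csr(CVMX_GMXX_RX_BP_ONX(port, interface), on.u64);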
* */ typedef union { uint64_t u64; struct cvmx_gmxx_rx_bp_onx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_9_63 : 55; uint64_t mark : 9; /**< Hiwater mark (8B ticks) for backpressure. In RGMII mode, the backpressure is given per port. In Spi4 mode, the backpressure is for the entire interface. GMX_RX_BP_ON must satisfy BP_OFF <= BP_ON < (FIFO_SIZE - BP_DROP) The reset value is half the FIFO. Reset value RGMII mode = 0x40 (512bytes) Reset value Spi4 mode = 0x100 (2048bytes) A value of zero will immediately assert back pressure. */ #else uint64_t mark : 9; uint64_t reserved_9_63 : 55; #endif } s; struct cvmx_gmxx_rx_bp_onx_s cn30xx; struct cvmx_gmxx_rx_bp_onx_s cn31xx; struct cvmx_gmxx_rx_bp_onx_s cn38xx; struct cvmx_gmxx_rx_bp_onx_s cn38xxp2; struct cvmx_gmxx_rx_bp_onx_s cn50xx; struct cvmx_gmxx_rx_bp_onx_s cn52xx; struct cvmx_gmxx_rx_bp_onx_s cn52xxp1; struct cvmx_gmxx_rx_bp_onx_s cn56xx; struct cvmx_gmxx_rx_bp_onx_s cn56xxp1; struct cvmx_gmxx_rx_bp_onx_s cn58xx; struct cvmx_gmxx_rx_bp_onx_s cn58xxp1; } cvmx_gmxx_rx_bp_onx_t; /** * cvmx_gmx#_rx_hg2_status * * ** HG2 message CSRs * */ typedef union { uint64_t u64; struct cvmx_gmxx_rx_hg2_status_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_48_63 : 16; uint64_t phtim2go : 16; /**< Physical time to go for removal of physical link pause. Initial value from received HiGig2 msg pkt Non-zero only when physical back pressure active */ uint64_t xof : 16; /**< 16 bit xof back pressure vector from HiGig2 msg pkt or from CBFC packets. Non-zero only when logical back pressure is active All bits will be 0 when LGTIM2GO=0 */ uint64_t lgtim2go : 16; /**< Logical packet flow back pressure time remaining Initial value set from xof time field of HiGig2 message packet received or a function of the enabled and current timers for CBFC packets. Non-zero only when logical back pressure is active */ #else uint64_t lgtim2go : 16; uint64_t xof : 16; uint64_t phtim2go : 16; uint64_t reserved_48_63 : 16; #endif } s; struct cvmx_gmxx_rx_hg2_status_s cn52xx; struct cvmx_gmxx_rx_hg2_status_s cn52xxp1; struct cvmx_gmxx_rx_hg2_status_s cn56xx; } cvmx_gmxx_rx_hg2_status_t; /** * cvmx_gmx#_rx_pass_en * * GMX_RX_PASS_EN = Packet pass through mode enable * * When both Octane ports are running in Spi4 mode, packets can be directly * passed from one SPX interface to the other without being processed by the * core or PP's. The register has one bit for each port to enable the pass * through feature. * * Notes: * (1) Can only be used in dual Spi4 configs * * (2) The mapped pass through output port cannot be the destination port for * any Octane core traffic. 
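 *
 * An enable sketch under those restrictions (assuming the
 * CVMX_GMXX_RX_PASS_EN and CVMX_GMXX_RX_PASS_MAPX address macros and the
 * cvmx_write_csr() accessor; port numbers are illustrative):
 *
 *   cvmx_gmxx_rx_pass_mapx_t map;
 *   map.u64 = 0;
 *   map.s.dprt = 3;      // pass traffic arriving on port 0 out of port 3
 *   cvmx_write_csr(CVMX_GMXX_RX_PASS_MAPX(0, interface), map.u64);
 *   cvmx_gmxx_rx_pass_en_t en;
 *   en.u64 = 0;
 *   en.s.en = 1 << 0;    // enable pass-through on port 0 only
 *   cvmx_write_csr(CVMX_GMXX_RX_PASS_EN(interface), en.u64);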
*/ typedef union { uint64_t u64; struct cvmx_gmxx_rx_pass_en_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t en : 16; /**< Which ports to configure in pass through mode */ #else uint64_t en : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_gmxx_rx_pass_en_s cn38xx; struct cvmx_gmxx_rx_pass_en_s cn38xxp2; struct cvmx_gmxx_rx_pass_en_s cn58xx; struct cvmx_gmxx_rx_pass_en_s cn58xxp1; } cvmx_gmxx_rx_pass_en_t; /** * cvmx_gmx#_rx_pass_map# * * GMX_RX_PASS_MAP = Packet pass through port map * */ typedef union { uint64_t u64; struct cvmx_gmxx_rx_pass_mapx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t dprt : 4; /**< Destination port to map Spi pass through traffic */ #else uint64_t dprt : 4; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_gmxx_rx_pass_mapx_s cn38xx; struct cvmx_gmxx_rx_pass_mapx_s cn38xxp2; struct cvmx_gmxx_rx_pass_mapx_s cn58xx; struct cvmx_gmxx_rx_pass_mapx_s cn58xxp1; } cvmx_gmxx_rx_pass_mapx_t; /** * cvmx_gmx#_rx_prt_info * * GMX_RX_PRT_INFO = Report the RX status for port * * * Notes: * In XAUI mode, only the lsb (corresponding to port0) of DROP and COMMIT are used. * */ typedef union { uint64_t u64; struct cvmx_gmxx_rx_prt_info_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t drop : 16; /**< Per port indication that data was dropped (PASS3 only) */ uint64_t commit : 16; /**< Per port indication that SOP was accepted (PASS3 only) */ #else uint64_t commit : 16; uint64_t drop : 16; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_gmxx_rx_prt_info_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_19_63 : 45; uint64_t drop : 3; /**< Per port indication that data was dropped */ uint64_t reserved_3_15 : 13; uint64_t commit : 3; /**< Per port indication that SOP was accepted */ #else uint64_t commit : 3; uint64_t reserved_3_15 : 13; uint64_t drop : 3; uint64_t reserved_19_63 : 45; #endif } cn30xx; struct cvmx_gmxx_rx_prt_info_cn30xx cn31xx; struct cvmx_gmxx_rx_prt_info_s cn38xx; struct cvmx_gmxx_rx_prt_info_cn30xx cn50xx; struct cvmx_gmxx_rx_prt_info_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t drop : 4; /**< Per port indication that data was dropped */ uint64_t reserved_4_15 : 12; uint64_t commit : 4; /**< Per port indication that SOP was accepted */ #else uint64_t commit : 4; uint64_t reserved_4_15 : 12; uint64_t drop : 4; uint64_t reserved_20_63 : 44; #endif } cn52xx; struct cvmx_gmxx_rx_prt_info_cn52xx cn52xxp1; struct cvmx_gmxx_rx_prt_info_cn52xx cn56xx; struct cvmx_gmxx_rx_prt_info_cn52xx cn56xxp1; struct cvmx_gmxx_rx_prt_info_s cn58xx; struct cvmx_gmxx_rx_prt_info_s cn58xxp1; } cvmx_gmxx_rx_prt_info_t; /** * cvmx_gmx#_rx_prts * * GMX_RX_PRTS = Number of FIFOs to carve the RX buffer into * * * Notes: * GMX_RX_PRTS is unused in XAUI mode since the RX buffer is always unified. * */ typedef union { uint64_t u64; struct cvmx_gmxx_rx_prts_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_3_63 : 61; uint64_t prts : 3; /**< In RGMII mode, the RX buffer can be carved into several logical buffers depending on the number of implemented ports.
0 or 1 port = 512ticks / 4096bytes 2 ports = 256ticks / 2048bytes 3 or 4 ports = 128ticks / 1024bytes */ #else uint64_t prts : 3; uint64_t reserved_3_63 : 61; #endif } s; struct cvmx_gmxx_rx_prts_s cn30xx; struct cvmx_gmxx_rx_prts_s cn31xx; struct cvmx_gmxx_rx_prts_s cn38xx; struct cvmx_gmxx_rx_prts_s cn38xxp2; struct cvmx_gmxx_rx_prts_s cn50xx; struct cvmx_gmxx_rx_prts_s cn52xx; struct cvmx_gmxx_rx_prts_s cn52xxp1; struct cvmx_gmxx_rx_prts_s cn56xx; struct cvmx_gmxx_rx_prts_s cn56xxp1; struct cvmx_gmxx_rx_prts_s cn58xx; struct cvmx_gmxx_rx_prts_s cn58xxp1; } cvmx_gmxx_rx_prts_t; /** * cvmx_gmx#_rx_tx_status * * GMX_RX_TX_STATUS = GMX RX/TX Status * */ typedef union { uint64_t u64; struct cvmx_gmxx_rx_tx_status_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_7_63 : 57; uint64_t tx : 3; /**< Transmit data since last read */ uint64_t reserved_3_3 : 1; uint64_t rx : 3; /**< Receive data since last read */ #else uint64_t rx : 3; uint64_t reserved_3_3 : 1; uint64_t tx : 3; uint64_t reserved_7_63 : 57; #endif } s; struct cvmx_gmxx_rx_tx_status_s cn30xx; struct cvmx_gmxx_rx_tx_status_s cn31xx; struct cvmx_gmxx_rx_tx_status_s cn50xx; } cvmx_gmxx_rx_tx_status_t; /** * cvmx_gmx#_rx_xaui_bad_col */ typedef union { uint64_t u64; struct cvmx_gmxx_rx_xaui_bad_col_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_40_63 : 24; uint64_t val : 1; /**< Set when GMX_RX_INT_REG[PCTERR] is set. (XAUI mode only) */ uint64_t state : 3; /**< When GMX_RX_INT_REG[PCTERR] is set, STATE will contain the receive state at the time of the error. (XAUI mode only) */ uint64_t lane_rxc : 4; /**< When GMX_RX_INT_REG[PCTERR] is set, LANE_RXC will contain the XAUI column at the time of the error. (XAUI mode only) */ uint64_t lane_rxd : 32; /**< When GMX_RX_INT_REG[PCTERR] is set, LANE_RXD will contain the XAUI column at the time of the error.
(XAUI mode only) */ #else uint64_t lane_rxd : 32; uint64_t lane_rxc : 4; uint64_t state : 3; uint64_t val : 1; uint64_t reserved_40_63 : 24; #endif } s; struct cvmx_gmxx_rx_xaui_bad_col_s cn52xx; struct cvmx_gmxx_rx_xaui_bad_col_s cn52xxp1; struct cvmx_gmxx_rx_xaui_bad_col_s cn56xx; struct cvmx_gmxx_rx_xaui_bad_col_s cn56xxp1; } cvmx_gmxx_rx_xaui_bad_col_t; /** * cvmx_gmx#_rx_xaui_ctl */ typedef union { uint64_t u64; struct cvmx_gmxx_rx_xaui_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t status : 2; /**< Link Status 0=Link OK 1=Local Fault 2=Remote Fault 3=Reserved (XAUI mode only) */ #else uint64_t status : 2; uint64_t reserved_2_63 : 62; #endif } s; struct cvmx_gmxx_rx_xaui_ctl_s cn52xx; struct cvmx_gmxx_rx_xaui_ctl_s cn52xxp1; struct cvmx_gmxx_rx_xaui_ctl_s cn56xx; struct cvmx_gmxx_rx_xaui_ctl_s cn56xxp1; } cvmx_gmxx_rx_xaui_ctl_t; /** * cvmx_gmx#_smac# * * GMX_SMAC = Packet SMAC * */ typedef union { uint64_t u64; struct cvmx_gmxx_smacx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_48_63 : 16; uint64_t smac : 48; /**< The SMAC field is used for generating and accepting Control Pause packets */ #else uint64_t smac : 48; uint64_t reserved_48_63 : 16; #endif } s; struct cvmx_gmxx_smacx_s cn30xx; struct cvmx_gmxx_smacx_s cn31xx; struct cvmx_gmxx_smacx_s cn38xx; struct cvmx_gmxx_smacx_s cn38xxp2; struct cvmx_gmxx_smacx_s cn50xx; struct cvmx_gmxx_smacx_s cn52xx; struct cvmx_gmxx_smacx_s cn52xxp1; struct cvmx_gmxx_smacx_s cn56xx; struct cvmx_gmxx_smacx_s cn56xxp1; struct cvmx_gmxx_smacx_s cn58xx; struct cvmx_gmxx_smacx_s cn58xxp1; } cvmx_gmxx_smacx_t; /** * cvmx_gmx#_stat_bp * * GMX_STAT_BP = Number of cycles that the TX/Stats block has held up operation * */ typedef union { uint64_t u64; struct cvmx_gmxx_stat_bp_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_17_63 : 47; uint64_t bp : 1; /**< Current BP state */ uint64_t cnt : 16; /**< Number of cycles that BP has been asserted Saturating counter */ #else uint64_t cnt : 16; uint64_t bp : 1; uint64_t reserved_17_63 : 47; #endif } s; struct cvmx_gmxx_stat_bp_s cn30xx; struct cvmx_gmxx_stat_bp_s cn31xx; struct cvmx_gmxx_stat_bp_s cn38xx; struct cvmx_gmxx_stat_bp_s cn38xxp2; struct cvmx_gmxx_stat_bp_s cn50xx; struct cvmx_gmxx_stat_bp_s cn52xx; struct cvmx_gmxx_stat_bp_s cn52xxp1; struct cvmx_gmxx_stat_bp_s cn56xx; struct cvmx_gmxx_stat_bp_s cn56xxp1; struct cvmx_gmxx_stat_bp_s cn58xx; struct cvmx_gmxx_stat_bp_s cn58xxp1; } cvmx_gmxx_stat_bp_t; /** * cvmx_gmx#_tx#_append * * GMX_TX_APPEND = Packet TX Append Control * */ typedef union { uint64_t u64; struct cvmx_gmxx_txx_append_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t force_fcs : 1; /**< Append the Ethernet FCS on each pause packet When FCS is clear This implies that FCS==0 and PAD==0 (PASS2 only) */ uint64_t fcs : 1; /**< Append the Ethernet FCS on each packet */ uint64_t pad : 1; /**< Append PAD bytes such that min sized */ uint64_t preamble : 1; /**< Prepend the Ethernet preamble on each transfer */ #else uint64_t preamble : 1; uint64_t pad : 1; uint64_t fcs : 1; uint64_t force_fcs : 1; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_gmxx_txx_append_s cn30xx; struct cvmx_gmxx_txx_append_s cn31xx; struct cvmx_gmxx_txx_append_s cn38xx; struct cvmx_gmxx_txx_append_s cn38xxp2; struct cvmx_gmxx_txx_append_s cn50xx; struct cvmx_gmxx_txx_append_s cn52xx; struct cvmx_gmxx_txx_append_s cn52xxp1; struct cvmx_gmxx_txx_append_s cn56xx; struct cvmx_gmxx_txx_append_s cn56xxp1; struct cvmx_gmxx_txx_append_s cn58xx; struct
cvmx_gmxx_txx_append_s cn58xxp1; } cvmx_gmxx_txx_append_t; /** * cvmx_gmx#_tx#_burst * * GMX_TX_BURST = Packet TX Burst Counter * */ typedef union { uint64_t u64; struct cvmx_gmxx_txx_burst_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t burst : 16; /**< Burst (refer to 802.3 to set correctly) 10/100Mbs: 0x0 1000Mbs: 0x2000 */ #else uint64_t burst : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_gmxx_txx_burst_s cn30xx; struct cvmx_gmxx_txx_burst_s cn31xx; struct cvmx_gmxx_txx_burst_s cn38xx; struct cvmx_gmxx_txx_burst_s cn38xxp2; struct cvmx_gmxx_txx_burst_s cn50xx; struct cvmx_gmxx_txx_burst_s cn52xx; struct cvmx_gmxx_txx_burst_s cn52xxp1; struct cvmx_gmxx_txx_burst_s cn56xx; struct cvmx_gmxx_txx_burst_s cn56xxp1; struct cvmx_gmxx_txx_burst_s cn58xx; struct cvmx_gmxx_txx_burst_s cn58xxp1; } cvmx_gmxx_txx_burst_t; /** * cvmx_gmx#_tx#_cbfc_xoff */ typedef union { uint64_t u64; struct cvmx_gmxx_txx_cbfc_xoff_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t xoff : 16; /**< Which ports to backpressure Do not write in HiGig2 mode i.e. when GMX_TX_XAUI_CTL[HG_EN]=1 and GMX_RX_UDD_SKP[LEN]=16. */ #else uint64_t xoff : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_gmxx_txx_cbfc_xoff_s cn52xx; struct cvmx_gmxx_txx_cbfc_xoff_s cn56xx; } cvmx_gmxx_txx_cbfc_xoff_t; /** * cvmx_gmx#_tx#_cbfc_xon */ typedef union { uint64_t u64; struct cvmx_gmxx_txx_cbfc_xon_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t xon : 16; /**< Which ports to stop backpressure Do not write in HiGig2 mode i.e. when GMX_TX_XAUI_CTL[HG_EN]=1 and GMX_RX_UDD_SKP[LEN]=16. */ #else uint64_t xon : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_gmxx_txx_cbfc_xon_s cn52xx; struct cvmx_gmxx_txx_cbfc_xon_s cn56xx; } cvmx_gmxx_txx_cbfc_xon_t; /** * cvmx_gmx#_tx#_clk * * Per Port * * * GMX_TX_CLK = RGMII TX Clock Generation Register * * Notes: * Programming Restrictions: * (1) In RGMII mode, if GMX_PRT_CFG[SPEED]==0, then CLK_CNT must be > 1. * (2) In MII mode, CLK_CNT == 1 * (3) In RGMII or GMII mode, if CLK_CNT==0, Octeon will not generate a tx clock. * * RGMII Example: * Given a 125MHz PLL reference clock...
* CLK_CNT == 1 ==> 125.0MHz TXC clock period (8ns* 1) * CLK_CNT == 5 ==> 25.0MHz TXC clock period (8ns* 5) * CLK_CNT == 50 ==> 2.5MHz TXC clock period (8ns*50) */ typedef union { uint64_t u64; struct cvmx_gmxx_txx_clk_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_6_63 : 58; uint64_t clk_cnt : 6; /**< Controls the RGMII TXC frequency When PLL is used, TXC(phase) = spi4_tx_pll_ref_clk(period)/2*CLK_CNT When PLL bypass is used, TXC(phase) = spi4_tx_pll_ref_clk(period)*2*CLK_CNT NOTE: CLK_CNT==0 will not generate any clock if CLK_CNT > 1 if GMX_PRT_CFG[SPEED]==0 */ #else uint64_t clk_cnt : 6; uint64_t reserved_6_63 : 58; #endif } s; struct cvmx_gmxx_txx_clk_s cn30xx; struct cvmx_gmxx_txx_clk_s cn31xx; struct cvmx_gmxx_txx_clk_s cn38xx; struct cvmx_gmxx_txx_clk_s cn38xxp2; struct cvmx_gmxx_txx_clk_s cn50xx; struct cvmx_gmxx_txx_clk_s cn58xx; struct cvmx_gmxx_txx_clk_s cn58xxp1; } cvmx_gmxx_txx_clk_t; /** * cvmx_gmx#_tx#_ctl * * GMX_TX_CTL = TX Control register * */ typedef union { uint64_t u64; struct cvmx_gmxx_txx_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t xsdef_en : 1; /**< Enables the excessive deferral check for stats and interrupts (PASS2 only) */ uint64_t xscol_en : 1; /**< Enables the excessive collision check for stats and interrupts (PASS2 only) */ #else uint64_t xscol_en : 1; uint64_t xsdef_en : 1; uint64_t reserved_2_63 : 62; #endif } s; struct cvmx_gmxx_txx_ctl_s cn30xx; struct cvmx_gmxx_txx_ctl_s cn31xx; struct cvmx_gmxx_txx_ctl_s cn38xx; struct cvmx_gmxx_txx_ctl_s cn38xxp2; struct cvmx_gmxx_txx_ctl_s cn50xx; struct cvmx_gmxx_txx_ctl_s cn52xx; struct cvmx_gmxx_txx_ctl_s cn52xxp1; struct cvmx_gmxx_txx_ctl_s cn56xx; struct cvmx_gmxx_txx_ctl_s cn56xxp1; struct cvmx_gmxx_txx_ctl_s cn58xx; struct cvmx_gmxx_txx_ctl_s cn58xxp1; } cvmx_gmxx_txx_ctl_t; /** * cvmx_gmx#_tx#_min_pkt * * GMX_TX_MIN_PKT = Packet TX Min Size Packet (PAD up to min size) * */ typedef union { uint64_t u64; struct cvmx_gmxx_txx_min_pkt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t min_size : 8; /**< Min frame in bytes before the FCS is applied Padding is only appended when GMX_TX_APPEND[PAD] for the corresponding RGMII port is set. */ #else uint64_t min_size : 8; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_gmxx_txx_min_pkt_s cn30xx; struct cvmx_gmxx_txx_min_pkt_s cn31xx; struct cvmx_gmxx_txx_min_pkt_s cn38xx; struct cvmx_gmxx_txx_min_pkt_s cn38xxp2; struct cvmx_gmxx_txx_min_pkt_s cn50xx; struct cvmx_gmxx_txx_min_pkt_s cn52xx; struct cvmx_gmxx_txx_min_pkt_s cn52xxp1; struct cvmx_gmxx_txx_min_pkt_s cn56xx; struct cvmx_gmxx_txx_min_pkt_s cn56xxp1; struct cvmx_gmxx_txx_min_pkt_s cn58xx; struct cvmx_gmxx_txx_min_pkt_s cn58xxp1; } cvmx_gmxx_txx_min_pkt_t; /** * cvmx_gmx#_tx#_pause_pkt_interval * * GMX_TX_PAUSE_PKT_INTERVAL = Packet TX Pause Packet transmission interval - how often PAUSE packets will be sent * * * Notes: * Choosing proper values of GMX_TX_PAUSE_PKT_TIME[TIME] and * GMX_TX_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to the system * designer. It is suggested that TIME be much greater than INTERVAL and * GMX_TX_PAUSE_ZERO[SEND] be set. This allows a periodic refresh of the PAUSE * count and then when the backpressure condition is lifted, a PAUSE packet * with TIME==0 will be sent indicating that Octane is ready for additional * data. * * If the system chooses to not set GMX_TX_PAUSE_ZERO[SEND], then it is * suggested that TIME and INTERVAL are programmed such that they satisfy the * following rule...
* * INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size) * * where largest_pkt_size is the largest packet that the system can send * (normally 1518B), IFG is the interframe gap and pause_pkt_size is the size * of the PAUSE packet (normally 64B). */ typedef union { uint64_t u64; struct cvmx_gmxx_txx_pause_pkt_interval_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t interval : 16; /**< Arbitrate for a pause packet every (INTERVAL*512) bit-times. Normally, 0 < INTERVAL < GMX_TX_PAUSE_PKT_TIME INTERVAL=0, will only send a single PAUSE packet for each backpressure event */ #else uint64_t interval : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_gmxx_txx_pause_pkt_interval_s cn30xx; struct cvmx_gmxx_txx_pause_pkt_interval_s cn31xx; struct cvmx_gmxx_txx_pause_pkt_interval_s cn38xx; struct cvmx_gmxx_txx_pause_pkt_interval_s cn38xxp2; struct cvmx_gmxx_txx_pause_pkt_interval_s cn50xx; struct cvmx_gmxx_txx_pause_pkt_interval_s cn52xx; struct cvmx_gmxx_txx_pause_pkt_interval_s cn52xxp1; struct cvmx_gmxx_txx_pause_pkt_interval_s cn56xx; struct cvmx_gmxx_txx_pause_pkt_interval_s cn56xxp1; struct cvmx_gmxx_txx_pause_pkt_interval_s cn58xx; struct cvmx_gmxx_txx_pause_pkt_interval_s cn58xxp1; } cvmx_gmxx_txx_pause_pkt_interval_t; /** * cvmx_gmx#_tx#_pause_pkt_time * * GMX_TX_PAUSE_PKT_TIME = Packet TX Pause Packet pause_time field * * * Notes: * Choosing proper values of GMX_TX_PAUSE_PKT_TIME[TIME] and * GMX_TX_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to the system * designer. It is suggested that TIME be much greater than INTERVAL and * GMX_TX_PAUSE_ZERO[SEND] be set. This allows a periodic refresh of the PAUSE * count and then when the backpressure condition is lifted, a PAUSE packet * with TIME==0 will be sent indicating that Octane is ready for additional * data. * * If the system chooses to not set GMX_TX_PAUSE_ZERO[SEND], then it is * suggested that TIME and INTERVAL are programmed such that they satisfy the * following rule... * * INTERVAL <= TIME - (largest_pkt_size + IFG + pause_pkt_size) * * where largest_pkt_size is the largest packet that the system can send * (normally 1518B), IFG is the interframe gap and pause_pkt_size is the size * of the PAUSE packet (normally 64B).
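 *
 * A worked instance of that rule (editorial, using the sizes above
 * converted to 512 bit-time pause quanta): 1518B + 12B IFG + 64B pause
 * packet = 1594B = 12752 bit-times, which rounds up to 25 quanta. With
 * TIME = 0x100 (256 quanta), any INTERVAL <= 256 - 25 = 231 satisfies
 * the rule.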
*/ typedef union { uint64_t u64; struct cvmx_gmxx_txx_pause_pkt_time_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t time : 16; /**< The pause_time field placed in outbound pause pkts pause_time is in 512 bit-times Normally, TIME > GMX_TX_PAUSE_PKT_INTERVAL */ #else uint64_t time : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_gmxx_txx_pause_pkt_time_s cn30xx; struct cvmx_gmxx_txx_pause_pkt_time_s cn31xx; struct cvmx_gmxx_txx_pause_pkt_time_s cn38xx; struct cvmx_gmxx_txx_pause_pkt_time_s cn38xxp2; struct cvmx_gmxx_txx_pause_pkt_time_s cn50xx; struct cvmx_gmxx_txx_pause_pkt_time_s cn52xx; struct cvmx_gmxx_txx_pause_pkt_time_s cn52xxp1; struct cvmx_gmxx_txx_pause_pkt_time_s cn56xx; struct cvmx_gmxx_txx_pause_pkt_time_s cn56xxp1; struct cvmx_gmxx_txx_pause_pkt_time_s cn58xx; struct cvmx_gmxx_txx_pause_pkt_time_s cn58xxp1; } cvmx_gmxx_txx_pause_pkt_time_t; /** * cvmx_gmx#_tx#_pause_togo * * GMX_TX_PAUSE_TOGO = Packet TX Amount of time remaining to backpressure * */ typedef union { uint64_t u64; struct cvmx_gmxx_txx_pause_togo_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t msg_time : 16; /**< Amount of time remaining to backpressure From the higig2 physical message pause timer (only valid on port0) */ uint64_t time : 16; /**< Amount of time remaining to backpressure */ #else uint64_t time : 16; uint64_t msg_time : 16; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_gmxx_txx_pause_togo_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t time : 16; /**< Amount of time remaining to backpressure */ #else uint64_t time : 16; uint64_t reserved_16_63 : 48; #endif } cn30xx; struct cvmx_gmxx_txx_pause_togo_cn30xx cn31xx; struct cvmx_gmxx_txx_pause_togo_cn30xx cn38xx; struct cvmx_gmxx_txx_pause_togo_cn30xx cn38xxp2; struct cvmx_gmxx_txx_pause_togo_cn30xx cn50xx; struct cvmx_gmxx_txx_pause_togo_s cn52xx; struct cvmx_gmxx_txx_pause_togo_s cn52xxp1; struct cvmx_gmxx_txx_pause_togo_s cn56xx; struct cvmx_gmxx_txx_pause_togo_cn30xx cn56xxp1; struct cvmx_gmxx_txx_pause_togo_cn30xx cn58xx; struct cvmx_gmxx_txx_pause_togo_cn30xx cn58xxp1; } cvmx_gmxx_txx_pause_togo_t; /** * cvmx_gmx#_tx#_pause_zero * * GMX_TX_PAUSE_ZERO = Packet TX Amount of time remaining to backpressure * */ typedef union { uint64_t u64; struct cvmx_gmxx_txx_pause_zero_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t send : 1; /**< When the backpressure condition clears, send a PAUSE packet with pause_time of zero to enable the channel */ #else uint64_t send : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_gmxx_txx_pause_zero_s cn30xx; struct cvmx_gmxx_txx_pause_zero_s cn31xx; struct cvmx_gmxx_txx_pause_zero_s cn38xx; struct cvmx_gmxx_txx_pause_zero_s cn38xxp2; struct cvmx_gmxx_txx_pause_zero_s cn50xx; struct cvmx_gmxx_txx_pause_zero_s cn52xx; struct cvmx_gmxx_txx_pause_zero_s cn52xxp1; struct cvmx_gmxx_txx_pause_zero_s cn56xx; struct cvmx_gmxx_txx_pause_zero_s cn56xxp1; struct cvmx_gmxx_txx_pause_zero_s cn58xx; struct cvmx_gmxx_txx_pause_zero_s cn58xxp1; } cvmx_gmxx_txx_pause_zero_t; /** * cvmx_gmx#_tx#_sgmii_ctl */ typedef union { uint64_t u64; struct cvmx_gmxx_txx_sgmii_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t align : 1; /**< Align the transmission to even cycles 0 = Data can be sent on any cycle Possible for the TX PCS machine to drop first byte of preamble 1 = Data will only be sent on even cycles There will be no loss of data (SGMII/1000Base-X only) */ #else uint64_t
align : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_gmxx_txx_sgmii_ctl_s cn52xx; struct cvmx_gmxx_txx_sgmii_ctl_s cn52xxp1; struct cvmx_gmxx_txx_sgmii_ctl_s cn56xx; struct cvmx_gmxx_txx_sgmii_ctl_s cn56xxp1; } cvmx_gmxx_txx_sgmii_ctl_t; /** * cvmx_gmx#_tx#_slot * * GMX_TX_SLOT = Packet TX Slottime Counter * */ typedef union { uint64_t u64; struct cvmx_gmxx_txx_slot_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_10_63 : 54; uint64_t slot : 10; /**< Slottime (refer to 802.3 to set correctly) 10/100Mbs: 0x40 1000Mbs: 0x200 */ #else uint64_t slot : 10; uint64_t reserved_10_63 : 54; #endif } s; struct cvmx_gmxx_txx_slot_s cn30xx; struct cvmx_gmxx_txx_slot_s cn31xx; struct cvmx_gmxx_txx_slot_s cn38xx; struct cvmx_gmxx_txx_slot_s cn38xxp2; struct cvmx_gmxx_txx_slot_s cn50xx; struct cvmx_gmxx_txx_slot_s cn52xx; struct cvmx_gmxx_txx_slot_s cn52xxp1; struct cvmx_gmxx_txx_slot_s cn56xx; struct cvmx_gmxx_txx_slot_s cn56xxp1; struct cvmx_gmxx_txx_slot_s cn58xx; struct cvmx_gmxx_txx_slot_s cn58xxp1; } cvmx_gmxx_txx_slot_t; /** * cvmx_gmx#_tx#_soft_pause * * GMX_TX_SOFT_PAUSE = Packet TX Software Pause * */ typedef union { uint64_t u64; struct cvmx_gmxx_txx_soft_pause_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t time : 16; /**< Back off the TX bus for (TIME*512) bit-times */ #else uint64_t time : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_gmxx_txx_soft_pause_s cn30xx; struct cvmx_gmxx_txx_soft_pause_s cn31xx; struct cvmx_gmxx_txx_soft_pause_s cn38xx; struct cvmx_gmxx_txx_soft_pause_s cn38xxp2; struct cvmx_gmxx_txx_soft_pause_s cn50xx; struct cvmx_gmxx_txx_soft_pause_s cn52xx; struct cvmx_gmxx_txx_soft_pause_s cn52xxp1; struct cvmx_gmxx_txx_soft_pause_s cn56xx; struct cvmx_gmxx_txx_soft_pause_s cn56xxp1; struct cvmx_gmxx_txx_soft_pause_s cn58xx; struct cvmx_gmxx_txx_soft_pause_s cn58xxp1; } cvmx_gmxx_txx_soft_pause_t; /** * cvmx_gmx#_tx#_stat0 * * GMX_TX_STAT0 = GMX_TX_STATS_XSDEF / GMX_TX_STATS_XSCOL * * * Notes: * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set * - Counters will wrap */ typedef union { uint64_t u64; struct cvmx_gmxx_txx_stat0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t xsdef : 32; /**< Number of packets dropped (never successfully sent) due to excessive deferral */ uint64_t xscol : 32; /**< Number of packets dropped (never successfully sent) due to excessive collision. Defined by GMX_TX_COL_ATTEMPT[LIMIT]. */ #else uint64_t xscol : 32; uint64_t xsdef : 32; #endif } s; struct cvmx_gmxx_txx_stat0_s cn30xx; struct cvmx_gmxx_txx_stat0_s cn31xx; struct cvmx_gmxx_txx_stat0_s cn38xx; struct cvmx_gmxx_txx_stat0_s cn38xxp2; struct cvmx_gmxx_txx_stat0_s cn50xx; struct cvmx_gmxx_txx_stat0_s cn52xx; struct cvmx_gmxx_txx_stat0_s cn52xxp1; struct cvmx_gmxx_txx_stat0_s cn56xx; struct cvmx_gmxx_txx_stat0_s cn56xxp1; struct cvmx_gmxx_txx_stat0_s cn58xx; struct cvmx_gmxx_txx_stat0_s cn58xxp1; } cvmx_gmxx_txx_stat0_t; /** * cvmx_gmx#_tx#_stat1 * * GMX_TX_STAT1 = GMX_TX_STATS_SCOL / GMX_TX_STATS_MCOL * * * Notes: * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set * - Counters will wrap */ typedef union { uint64_t u64; struct cvmx_gmxx_txx_stat1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t scol : 32; /**< Number of packets sent with a single collision */ uint64_t mcol : 32; /**< Number of packets sent with multiple collisions but < GMX_TX_COL_ATTEMPT[LIMIT].
*/ #else uint64_t mcol : 32; uint64_t scol : 32; #endif } s; struct cvmx_gmxx_txx_stat1_s cn30xx; struct cvmx_gmxx_txx_stat1_s cn31xx; struct cvmx_gmxx_txx_stat1_s cn38xx; struct cvmx_gmxx_txx_stat1_s cn38xxp2; struct cvmx_gmxx_txx_stat1_s cn50xx; struct cvmx_gmxx_txx_stat1_s cn52xx; struct cvmx_gmxx_txx_stat1_s cn52xxp1; struct cvmx_gmxx_txx_stat1_s cn56xx; struct cvmx_gmxx_txx_stat1_s cn56xxp1; struct cvmx_gmxx_txx_stat1_s cn58xx; struct cvmx_gmxx_txx_stat1_s cn58xxp1; } cvmx_gmxx_txx_stat1_t; /** * cvmx_gmx#_tx#_stat2 * * GMX_TX_STAT2 = GMX_TX_STATS_OCTS * * * Notes: * - Octet counts are the sum of all data transmitted on the wire including * packet data, pad bytes, fcs bytes, pause bytes, and jam bytes. The octet * counts do not include PREAMBLE byte or EXTEND cycles. * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set * - Counters will wrap */ typedef union { uint64_t u64; struct cvmx_gmxx_txx_stat2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_48_63 : 16; uint64_t octs : 48; /**< Number of total octets sent on the interface. Does not count octets from frames that were truncated due to collisions in halfdup mode. */ #else uint64_t octs : 48; uint64_t reserved_48_63 : 16; #endif } s; struct cvmx_gmxx_txx_stat2_s cn30xx; struct cvmx_gmxx_txx_stat2_s cn31xx; struct cvmx_gmxx_txx_stat2_s cn38xx; struct cvmx_gmxx_txx_stat2_s cn38xxp2; struct cvmx_gmxx_txx_stat2_s cn50xx; struct cvmx_gmxx_txx_stat2_s cn52xx; struct cvmx_gmxx_txx_stat2_s cn52xxp1; struct cvmx_gmxx_txx_stat2_s cn56xx; struct cvmx_gmxx_txx_stat2_s cn56xxp1; struct cvmx_gmxx_txx_stat2_s cn58xx; struct cvmx_gmxx_txx_stat2_s cn58xxp1; } cvmx_gmxx_txx_stat2_t; /** * cvmx_gmx#_tx#_stat3 * * GMX_TX_STAT3 = GMX_TX_STATS_PKTS * * * Notes: * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set * - Counters will wrap */ typedef union { uint64_t u64; struct cvmx_gmxx_txx_stat3_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t pkts : 32; /**< Number of total frames sent on the interface. Does not count frames that were truncated due to collisions in halfdup mode. */ #else uint64_t pkts : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_gmxx_txx_stat3_s cn30xx; struct cvmx_gmxx_txx_stat3_s cn31xx; struct cvmx_gmxx_txx_stat3_s cn38xx; struct cvmx_gmxx_txx_stat3_s cn38xxp2; struct cvmx_gmxx_txx_stat3_s cn50xx; struct cvmx_gmxx_txx_stat3_s cn52xx; struct cvmx_gmxx_txx_stat3_s cn52xxp1; struct cvmx_gmxx_txx_stat3_s cn56xx; struct cvmx_gmxx_txx_stat3_s cn56xxp1; struct cvmx_gmxx_txx_stat3_s cn58xx; struct cvmx_gmxx_txx_stat3_s cn58xxp1; } cvmx_gmxx_txx_stat3_t; /** * cvmx_gmx#_tx#_stat4 * * GMX_TX_STAT4 = GMX_TX_STATS_HIST1 (64) / GMX_TX_STATS_HIST0 (<64) * * * Notes: * - Packet length is the sum of all data transmitted on the wire for the given * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam * bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles. * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set * - Counters will wrap */ typedef union { uint64_t u64; struct cvmx_gmxx_txx_stat4_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t hist1 : 32; /**< Number of packets sent with an octet count of 64. */ uint64_t hist0 : 32; /**< Number of packets sent with an octet count of < 64.
*/ #else uint64_t hist0 : 32; uint64_t hist1 : 32; #endif } s; struct cvmx_gmxx_txx_stat4_s cn30xx; struct cvmx_gmxx_txx_stat4_s cn31xx; struct cvmx_gmxx_txx_stat4_s cn38xx; struct cvmx_gmxx_txx_stat4_s cn38xxp2; struct cvmx_gmxx_txx_stat4_s cn50xx; struct cvmx_gmxx_txx_stat4_s cn52xx; struct cvmx_gmxx_txx_stat4_s cn52xxp1; struct cvmx_gmxx_txx_stat4_s cn56xx; struct cvmx_gmxx_txx_stat4_s cn56xxp1; struct cvmx_gmxx_txx_stat4_s cn58xx; struct cvmx_gmxx_txx_stat4_s cn58xxp1; } cvmx_gmxx_txx_stat4_t; /** * cvmx_gmx#_tx#_stat5 * * GMX_TX_STAT5 = GMX_TX_STATS_HIST3 (128- 255) / GMX_TX_STATS_HIST2 (65- 127) * * * Notes: * - Packet length is the sum of all data transmitted on the wire for the given * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam * bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles. * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set * - Counters will wrap */ typedef union { uint64_t u64; struct cvmx_gmxx_txx_stat5_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t hist3 : 32; /**< Number of packets sent with an octet count of 128 - 255. */ uint64_t hist2 : 32; /**< Number of packets sent with an octet count of 65 - 127. */ #else uint64_t hist2 : 32; uint64_t hist3 : 32; #endif } s; struct cvmx_gmxx_txx_stat5_s cn30xx; struct cvmx_gmxx_txx_stat5_s cn31xx; struct cvmx_gmxx_txx_stat5_s cn38xx; struct cvmx_gmxx_txx_stat5_s cn38xxp2; struct cvmx_gmxx_txx_stat5_s cn50xx; struct cvmx_gmxx_txx_stat5_s cn52xx; struct cvmx_gmxx_txx_stat5_s cn52xxp1; struct cvmx_gmxx_txx_stat5_s cn56xx; struct cvmx_gmxx_txx_stat5_s cn56xxp1; struct cvmx_gmxx_txx_stat5_s cn58xx; struct cvmx_gmxx_txx_stat5_s cn58xxp1; } cvmx_gmxx_txx_stat5_t; /** * cvmx_gmx#_tx#_stat6 * * GMX_TX_STAT6 = GMX_TX_STATS_HIST5 (512-1023) / GMX_TX_STATS_HIST4 (256-511) * * * Notes: * - Packet length is the sum of all data transmitted on the wire for the given * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam * bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles. * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set * - Counters will wrap */ typedef union { uint64_t u64; struct cvmx_gmxx_txx_stat6_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t hist5 : 32; /**< Number of packets sent with an octet count of 512 - 1023. */ uint64_t hist4 : 32; /**< Number of packets sent with an octet count of 256 - 511. */ #else uint64_t hist4 : 32; uint64_t hist5 : 32; #endif } s; struct cvmx_gmxx_txx_stat6_s cn30xx; struct cvmx_gmxx_txx_stat6_s cn31xx; struct cvmx_gmxx_txx_stat6_s cn38xx; struct cvmx_gmxx_txx_stat6_s cn38xxp2; struct cvmx_gmxx_txx_stat6_s cn50xx; struct cvmx_gmxx_txx_stat6_s cn52xx; struct cvmx_gmxx_txx_stat6_s cn52xxp1; struct cvmx_gmxx_txx_stat6_s cn56xx; struct cvmx_gmxx_txx_stat6_s cn56xxp1; struct cvmx_gmxx_txx_stat6_s cn58xx; struct cvmx_gmxx_txx_stat6_s cn58xxp1; } cvmx_gmxx_txx_stat6_t; /** * cvmx_gmx#_tx#_stat7 * * GMX_TX_STAT7 = GMX_TX_STATS_HIST7 (1024-1518) / GMX_TX_STATS_HIST6 (>1518) * * * Notes: * - Packet length is the sum of all data transmitted on the wire for the given * packet including packet data, pad bytes, fcs bytes, pause bytes, and jam * bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
* - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set * - Counters will wrap */ typedef union { uint64_t u64; struct cvmx_gmxx_txx_stat7_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t hist7 : 32; /**< Number of packets sent with an octet count of > 1518. */ uint64_t hist6 : 32; /**< Number of packets sent with an octet count of 1024 - 1518. */ #else uint64_t hist6 : 32; uint64_t hist7 : 32; #endif } s; struct cvmx_gmxx_txx_stat7_s cn30xx; struct cvmx_gmxx_txx_stat7_s cn31xx; struct cvmx_gmxx_txx_stat7_s cn38xx; struct cvmx_gmxx_txx_stat7_s cn38xxp2; struct cvmx_gmxx_txx_stat7_s cn50xx; struct cvmx_gmxx_txx_stat7_s cn52xx; struct cvmx_gmxx_txx_stat7_s cn52xxp1; struct cvmx_gmxx_txx_stat7_s cn56xx; struct cvmx_gmxx_txx_stat7_s cn56xxp1; struct cvmx_gmxx_txx_stat7_s cn58xx; struct cvmx_gmxx_txx_stat7_s cn58xxp1; } cvmx_gmxx_txx_stat7_t; /** * cvmx_gmx#_tx#_stat8 * * GMX_TX_STAT8 = GMX_TX_STATS_MCST / GMX_TX_STATS_BCST * * * Notes: * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set * - Counters will wrap * - Note, GMX determines if the packet is MCST or BCST from the DMAC of the * packet. GMX assumes that the DMAC lies in the first 6 bytes of the packet * as per the 802.3 frame definition. If the system requires additional data * before the L2 header, then the MCST and BCST counters may not reflect * reality and should be ignored by software. */ typedef union { uint64_t u64; struct cvmx_gmxx_txx_stat8_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t mcst : 32; /**< Number of packets sent to multicast DMAC. Does not include BCST packets. */ uint64_t bcst : 32; /**< Number of packets sent to broadcast DMAC. Does not include MCST packets. */ #else uint64_t bcst : 32; uint64_t mcst : 32; #endif } s; struct cvmx_gmxx_txx_stat8_s cn30xx; struct cvmx_gmxx_txx_stat8_s cn31xx; struct cvmx_gmxx_txx_stat8_s cn38xx; struct cvmx_gmxx_txx_stat8_s cn38xxp2; struct cvmx_gmxx_txx_stat8_s cn50xx; struct cvmx_gmxx_txx_stat8_s cn52xx; struct cvmx_gmxx_txx_stat8_s cn52xxp1; struct cvmx_gmxx_txx_stat8_s cn56xx; struct cvmx_gmxx_txx_stat8_s cn56xxp1; struct cvmx_gmxx_txx_stat8_s cn58xx; struct cvmx_gmxx_txx_stat8_s cn58xxp1; } cvmx_gmxx_txx_stat8_t; /** * cvmx_gmx#_tx#_stat9 * * GMX_TX_STAT9 = GMX_TX_STATS_UNDFLW / GMX_TX_STATS_CTL * * * Notes: * - Cleared either by a write (of any value) or a read when GMX_TX_STATS_CTL[RD_CLR] is set * - Counters will wrap */ typedef union { uint64_t u64; struct cvmx_gmxx_txx_stat9_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t undflw : 32; /**< Number of underflow packets */ uint64_t ctl : 32; /**< Number of Control packets (PAUSE flow control) generated by GMX. It does not include control packets forwarded or generated by the PP's. 
*/ #else uint64_t ctl : 32; uint64_t undflw : 32; #endif } s; struct cvmx_gmxx_txx_stat9_s cn30xx; struct cvmx_gmxx_txx_stat9_s cn31xx; struct cvmx_gmxx_txx_stat9_s cn38xx; struct cvmx_gmxx_txx_stat9_s cn38xxp2; struct cvmx_gmxx_txx_stat9_s cn50xx; struct cvmx_gmxx_txx_stat9_s cn52xx; struct cvmx_gmxx_txx_stat9_s cn52xxp1; struct cvmx_gmxx_txx_stat9_s cn56xx; struct cvmx_gmxx_txx_stat9_s cn56xxp1; struct cvmx_gmxx_txx_stat9_s cn58xx; struct cvmx_gmxx_txx_stat9_s cn58xxp1; } cvmx_gmxx_txx_stat9_t; /** * cvmx_gmx#_tx#_stats_ctl * * GMX_TX_STATS_CTL = TX Stats Control register * */ typedef union { uint64_t u64; struct cvmx_gmxx_txx_stats_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t rd_clr : 1; /**< Stats registers will clear on reads */ #else uint64_t rd_clr : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_gmxx_txx_stats_ctl_s cn30xx; struct cvmx_gmxx_txx_stats_ctl_s cn31xx; struct cvmx_gmxx_txx_stats_ctl_s cn38xx; struct cvmx_gmxx_txx_stats_ctl_s cn38xxp2; struct cvmx_gmxx_txx_stats_ctl_s cn50xx; struct cvmx_gmxx_txx_stats_ctl_s cn52xx; struct cvmx_gmxx_txx_stats_ctl_s cn52xxp1; struct cvmx_gmxx_txx_stats_ctl_s cn56xx; struct cvmx_gmxx_txx_stats_ctl_s cn56xxp1; struct cvmx_gmxx_txx_stats_ctl_s cn58xx; struct cvmx_gmxx_txx_stats_ctl_s cn58xxp1; } cvmx_gmxx_txx_stats_ctl_t; /** * cvmx_gmx#_tx#_thresh * * Per Port * * * GMX_TX_THRESH = Packet TX Threshold * * Notes: * In XAUI mode, prt0 is used for checking. Since XAUI mode uses a single TX FIFO and is higher data rate, recommended value is 0x80. * */ typedef union { uint64_t u64; struct cvmx_gmxx_txx_thresh_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_9_63 : 55; uint64_t cnt : 9; /**< Number of 16B ticks to accumulate in the TX FIFO before sending on the RGMII interface This register should be large enough to prevent underflow on the RGMII interface and must never be set to zero. This register cannot exceed the TX FIFO depth which is... GMX_TX_PRTS==0,1: CNT MAX = 0x100 GMX_TX_PRTS==2 : CNT MAX = 0x080 GMX_TX_PRTS==3,4: CNT MAX = 0x040 */ #else uint64_t cnt : 9; uint64_t reserved_9_63 : 55; #endif } s; struct cvmx_gmxx_txx_thresh_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_7_63 : 57; uint64_t cnt : 7; /**< Number of 16B ticks to accumulate in the TX FIFO before sending on the RGMII interface This register should be large enough to prevent underflow on the RGMII interface and must never be set below 4. This register cannot exceed the TX FIFO depth which is 64 16B entries. */ #else uint64_t cnt : 7; uint64_t reserved_7_63 : 57; #endif } cn30xx; struct cvmx_gmxx_txx_thresh_cn30xx cn31xx; struct cvmx_gmxx_txx_thresh_s cn38xx; struct cvmx_gmxx_txx_thresh_s cn38xxp2; struct cvmx_gmxx_txx_thresh_cn30xx cn50xx; struct cvmx_gmxx_txx_thresh_s cn52xx; struct cvmx_gmxx_txx_thresh_s cn52xxp1; struct cvmx_gmxx_txx_thresh_s cn56xx; struct cvmx_gmxx_txx_thresh_s cn56xxp1; struct cvmx_gmxx_txx_thresh_s cn58xx; struct cvmx_gmxx_txx_thresh_s cn58xxp1; } cvmx_gmxx_txx_thresh_t; /** * cvmx_gmx#_tx_bp * * GMX_TX_BP = Packet Interface TX BackPressure Register * * * Notes: * In XAUI mode, only the lsb (corresponding to port0) of BP is used.
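*
* A minimal read sketch, assuming the SDK's cvmx_read_csr() helper and a
* CVMX_GMXX_TX_BP(block) address macro from cvmx-csr-addresses.h:
*
*   cvmx_gmxx_tx_bp_t bp;
*   bp.u64 = cvmx_read_csr(CVMX_GMXX_TX_BP(interface));
*   if (bp.s.bp & (1ull << port))
*       ; /* this port is currently being back pressured */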
* */ typedef union { uint64_t u64; struct cvmx_gmxx_tx_bp_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t bp : 4; /**< Per port BackPressure status 0=Port is available 1=Port should be back pressured */ #else uint64_t bp : 4; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_gmxx_tx_bp_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_3_63 : 61; uint64_t bp : 3; /**< Per port BackPressure status 0=Port is available 1=Port should be back pressured */ #else uint64_t bp : 3; uint64_t reserved_3_63 : 61; #endif } cn30xx; struct cvmx_gmxx_tx_bp_cn30xx cn31xx; struct cvmx_gmxx_tx_bp_s cn38xx; struct cvmx_gmxx_tx_bp_s cn38xxp2; struct cvmx_gmxx_tx_bp_cn30xx cn50xx; struct cvmx_gmxx_tx_bp_s cn52xx; struct cvmx_gmxx_tx_bp_s cn52xxp1; struct cvmx_gmxx_tx_bp_s cn56xx; struct cvmx_gmxx_tx_bp_s cn56xxp1; struct cvmx_gmxx_tx_bp_s cn58xx; struct cvmx_gmxx_tx_bp_s cn58xxp1; } cvmx_gmxx_tx_bp_t; /** * cvmx_gmx#_tx_clk_msk# * * GMX_TX_CLK_MSK = GMX Clock Select * */ typedef union { uint64_t u64; struct cvmx_gmxx_tx_clk_mskx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t msk : 1; /**< Write this bit to a 1 when switching clks */ #else uint64_t msk : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_gmxx_tx_clk_mskx_s cn30xx; struct cvmx_gmxx_tx_clk_mskx_s cn50xx; } cvmx_gmxx_tx_clk_mskx_t; /** * cvmx_gmx#_tx_col_attempt * * GMX_TX_COL_ATTEMPT = Packet TX collision attempts before dropping frame * */ typedef union { uint64_t u64; struct cvmx_gmxx_tx_col_attempt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t limit : 5; /**< Collision Attempts */ #else uint64_t limit : 5; uint64_t reserved_5_63 : 59; #endif } s; struct cvmx_gmxx_tx_col_attempt_s cn30xx; struct cvmx_gmxx_tx_col_attempt_s cn31xx; struct cvmx_gmxx_tx_col_attempt_s cn38xx; struct cvmx_gmxx_tx_col_attempt_s cn38xxp2; struct cvmx_gmxx_tx_col_attempt_s cn50xx; struct cvmx_gmxx_tx_col_attempt_s cn52xx; struct cvmx_gmxx_tx_col_attempt_s cn52xxp1; struct cvmx_gmxx_tx_col_attempt_s cn56xx; struct cvmx_gmxx_tx_col_attempt_s cn56xxp1; struct cvmx_gmxx_tx_col_attempt_s cn58xx; struct cvmx_gmxx_tx_col_attempt_s cn58xxp1; } cvmx_gmxx_tx_col_attempt_t; /** * cvmx_gmx#_tx_corrupt * * GMX_TX_CORRUPT = TX - Corrupt TX packets with the ERR bit set * * * Notes: * Packets sent from PKO with the ERR wire asserted will be corrupted by * the transmitter if CORRUPT[prt] is set (XAUI uses prt==0). * * Corruption means that GMX will send a bad FCS value. If GMX_TX_APPEND[FCS] * is clear then no FCS is sent and the GMX cannot corrupt it. The corrupt FCS * value is 0xeeeeeeee for SGMII/1000Base-X and 4 bytes of the error * propagation code in XAUI mode. 
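*
* An illustrative sketch only, assuming cvmx_read_csr()/cvmx_write_csr()
* and a CVMX_GMXX_TX_CORRUPT(block) address macro - enabling FCS
* corruption of errored packets on one port might look like:
*
*   cvmx_gmxx_tx_corrupt_t corrupt;
*   corrupt.u64 = cvmx_read_csr(CVMX_GMXX_TX_CORRUPT(interface));
*   corrupt.s.corrupt |= 1 << port; /* corrupt the FCS of packets with ERR */
*   cvmx_write_csr(CVMX_GMXX_TX_CORRUPT(interface), corrupt.u64);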
*/ typedef union { uint64_t u64; struct cvmx_gmxx_tx_corrupt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t corrupt : 4; /**< Per port error propagation 0=Never corrupt packets 1=Corrupt packets with ERR */ #else uint64_t corrupt : 4; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_gmxx_tx_corrupt_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_3_63 : 61; uint64_t corrupt : 3; /**< Per port error propagation 0=Never corrupt packets 1=Corrupt packets with ERR */ #else uint64_t corrupt : 3; uint64_t reserved_3_63 : 61; #endif } cn30xx; struct cvmx_gmxx_tx_corrupt_cn30xx cn31xx; struct cvmx_gmxx_tx_corrupt_s cn38xx; struct cvmx_gmxx_tx_corrupt_s cn38xxp2; struct cvmx_gmxx_tx_corrupt_cn30xx cn50xx; struct cvmx_gmxx_tx_corrupt_s cn52xx; struct cvmx_gmxx_tx_corrupt_s cn52xxp1; struct cvmx_gmxx_tx_corrupt_s cn56xx; struct cvmx_gmxx_tx_corrupt_s cn56xxp1; struct cvmx_gmxx_tx_corrupt_s cn58xx; struct cvmx_gmxx_tx_corrupt_s cn58xxp1; } cvmx_gmxx_tx_corrupt_t; /** * cvmx_gmx#_tx_hg2_reg1 * * Notes: * The TX_XOF[15:0] field in GMX(0)_TX_HG2_REG1 and the TX_XON[15:0] field in * GMX(0)_TX_HG2_REG2 register map to the same 16 physical flops. When written with address of * GMX(0)_TX_HG2_REG1, it will exhibit write 1 to set behavior and when written with address of * GMX(0)_TX_HG2_REG2, it will exhibit write 1 to clear behavior. * For reads, either address will return the GMX(0)_TX_HG2_REG1 values. */ typedef union { uint64_t u64; struct cvmx_gmxx_tx_hg2_reg1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t tx_xof : 16; /**< TX HiGig2 message for logical link pause when any bit value changes Only write in HiGig2 mode i.e. when GMX_TX_XAUI_CTL[HG_EN]=1 and GMX_RX_UDD_SKP[SKIP]=16. */ #else uint64_t tx_xof : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_gmxx_tx_hg2_reg1_s cn52xx; struct cvmx_gmxx_tx_hg2_reg1_s cn52xxp1; struct cvmx_gmxx_tx_hg2_reg1_s cn56xx; } cvmx_gmxx_tx_hg2_reg1_t; /** * cvmx_gmx#_tx_hg2_reg2 * * Notes: * The TX_XOF[15:0] field in GMX(0)_TX_HG2_REG1 and the TX_XON[15:0] field in * GMX(0)_TX_HG2_REG2 register map to the same 16 physical flops. When written with address of * GMX(0)_TX_HG2_REG1, it will exhibit write 1 to set behavior and when written with address of * GMX(0)_TX_HG2_REG2, it will exhibit write 1 to clear behavior. * For reads, either address will return the GMX(0)_TX_HG2_REG1 values. */ typedef union { uint64_t u64; struct cvmx_gmxx_tx_hg2_reg2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t tx_xon : 16; /**< TX HiGig2 message for logical link pause when any bit value changes Only write in HiGig2 mode i.e. when GMX_TX_XAUI_CTL[HG_EN]=1 and GMX_RX_UDD_SKP[SKIP]=16. */ #else uint64_t tx_xon : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_gmxx_tx_hg2_reg2_s cn52xx; struct cvmx_gmxx_tx_hg2_reg2_s cn52xxp1; struct cvmx_gmxx_tx_hg2_reg2_s cn56xx; } cvmx_gmxx_tx_hg2_reg2_t; /** * cvmx_gmx#_tx_ifg * * GMX_TX_IFG = Packet TX Interframe Gap * * * Notes: * * Programming IFG1 and IFG2. * * For 10/100/1000Mbs half-duplex systems that require IEEE 802.3 * compatibility, IFG1 must be in the range of 1-8, IFG2 must be in the range * of 4-12, and the IFG1+IFG2 sum must be 12. * * For 10/100/1000Mbs full-duplex systems that require IEEE 802.3 * compatibility, IFG1 must be in the range of 1-11, IFG2 must be in the range * of 1-11, and the IFG1+IFG2 sum must be 12. * * For XAUI/10Gbs systems that require IEEE 802.3 compatibility, the * IFG1+IFG2 sum must be 12.
IFG1[1:0] and IFG2[1:0] must be zero. * * For all other systems, IFG1 and IFG2 can be any value in the range of * 1-15, allowing for a total possible IFG sum of 2-30. */ typedef union { uint64_t u64; struct cvmx_gmxx_tx_ifg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t ifg2 : 4; /**< 1/3 of the interframe gap timing (in IFG2*8 bits) If CRS is detected during IFG2, then the interFrameSpacing timer is not reset and a frame is transmitted once the timer expires. */ uint64_t ifg1 : 4; /**< 2/3 of the interframe gap timing (in IFG1*8 bits) If CRS is detected during IFG1, then the interFrameSpacing timer is reset and a frame is not transmitted. */ #else uint64_t ifg1 : 4; uint64_t ifg2 : 4; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_gmxx_tx_ifg_s cn30xx; struct cvmx_gmxx_tx_ifg_s cn31xx; struct cvmx_gmxx_tx_ifg_s cn38xx; struct cvmx_gmxx_tx_ifg_s cn38xxp2; struct cvmx_gmxx_tx_ifg_s cn50xx; struct cvmx_gmxx_tx_ifg_s cn52xx; struct cvmx_gmxx_tx_ifg_s cn52xxp1; struct cvmx_gmxx_tx_ifg_s cn56xx; struct cvmx_gmxx_tx_ifg_s cn56xxp1; struct cvmx_gmxx_tx_ifg_s cn58xx; struct cvmx_gmxx_tx_ifg_s cn58xxp1; } cvmx_gmxx_tx_ifg_t; /** * cvmx_gmx#_tx_int_en * * GMX_TX_INT_EN = Interrupt Enable * * * Notes: * In XAUI mode, only the lsb (corresponding to port0) of UNDFLW is used. * */ typedef union { uint64_t u64; struct cvmx_gmxx_tx_int_en_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t late_col : 4; /**< TX Late Collision (PASS3 only) */ uint64_t xsdef : 4; /**< TX Excessive deferral (RGMII/halfdup mode only) (PASS2 only) */ uint64_t xscol : 4; /**< TX Excessive collisions (RGMII/halfdup mode only) (PASS2 only) */ uint64_t reserved_6_7 : 2; uint64_t undflw : 4; /**< TX Underflow (RGMII mode only) */ uint64_t ncb_nxa : 1; /**< Port address out-of-range from NCB Interface */ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */ #else uint64_t pko_nxa : 1; uint64_t ncb_nxa : 1; uint64_t undflw : 4; uint64_t reserved_6_7 : 2; uint64_t xscol : 4; uint64_t xsdef : 4; uint64_t late_col : 4; uint64_t reserved_20_63 : 44; #endif } s; struct cvmx_gmxx_tx_int_en_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_19_63 : 45; uint64_t late_col : 3; /**< TX Late Collision */ uint64_t reserved_15_15 : 1; uint64_t xsdef : 3; /**< TX Excessive deferral (RGMII/halfdup mode only) */ uint64_t reserved_11_11 : 1; uint64_t xscol : 3; /**< TX Excessive collisions (RGMII/halfdup mode only) */ uint64_t reserved_5_7 : 3; uint64_t undflw : 3; /**< TX Underflow (RGMII mode only) */ uint64_t reserved_1_1 : 1; uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */ #else uint64_t pko_nxa : 1; uint64_t reserved_1_1 : 1; uint64_t undflw : 3; uint64_t reserved_5_7 : 3; uint64_t xscol : 3; uint64_t reserved_11_11 : 1; uint64_t xsdef : 3; uint64_t reserved_15_15 : 1; uint64_t late_col : 3; uint64_t reserved_19_63 : 45; #endif } cn30xx; struct cvmx_gmxx_tx_int_en_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_15_63 : 49; uint64_t xsdef : 3; /**< TX Excessive deferral (RGMII/halfdup mode only) */ uint64_t reserved_11_11 : 1; uint64_t xscol : 3; /**< TX Excessive collisions (RGMII/halfdup mode only) */ uint64_t reserved_5_7 : 3; uint64_t undflw : 3; /**< TX Underflow (RGMII mode only) */ uint64_t reserved_1_1 : 1; uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */ #else uint64_t pko_nxa : 1; uint64_t reserved_1_1 : 1; uint64_t undflw : 3; uint64_t reserved_5_7 : 3; uint64_t xscol : 3; uint64_t
reserved_11_11 : 1; uint64_t xsdef : 3; uint64_t reserved_15_63 : 49; #endif } cn31xx; struct cvmx_gmxx_tx_int_en_s cn38xx; struct cvmx_gmxx_tx_int_en_cn38xxp2 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t xsdef : 4; /**< TX Excessive deferral (RGMII/halfdup mode only) (PASS2 only) */ uint64_t xscol : 4; /**< TX Excessive collisions (RGMII/halfdup mode only) (PASS2 only) */ uint64_t reserved_6_7 : 2; uint64_t undflw : 4; /**< TX Underflow (RGMII mode only) */ uint64_t ncb_nxa : 1; /**< Port address out-of-range from NCB Interface */ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */ #else uint64_t pko_nxa : 1; uint64_t ncb_nxa : 1; uint64_t undflw : 4; uint64_t reserved_6_7 : 2; uint64_t xscol : 4; uint64_t xsdef : 4; uint64_t reserved_16_63 : 48; #endif } cn38xxp2; struct cvmx_gmxx_tx_int_en_cn30xx cn50xx; struct cvmx_gmxx_tx_int_en_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t late_col : 4; /**< TX Late Collision (SGMII/1000Base-X half-duplex only) */ uint64_t xsdef : 4; /**< TX Excessive deferral (SGMII/1000Base-X half-duplex only) */ uint64_t xscol : 4; /**< TX Excessive collisions (SGMII/1000Base-X half-duplex only) */ uint64_t reserved_6_7 : 2; uint64_t undflw : 4; /**< TX Underflow */ uint64_t reserved_1_1 : 1; uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */ #else uint64_t pko_nxa : 1; uint64_t reserved_1_1 : 1; uint64_t undflw : 4; uint64_t reserved_6_7 : 2; uint64_t xscol : 4; uint64_t xsdef : 4; uint64_t late_col : 4; uint64_t reserved_20_63 : 44; #endif } cn52xx; struct cvmx_gmxx_tx_int_en_cn52xx cn52xxp1; struct cvmx_gmxx_tx_int_en_cn52xx cn56xx; struct cvmx_gmxx_tx_int_en_cn52xx cn56xxp1; struct cvmx_gmxx_tx_int_en_s cn58xx; struct cvmx_gmxx_tx_int_en_s cn58xxp1; } cvmx_gmxx_tx_int_en_t; /** * cvmx_gmx#_tx_int_reg * * GMX_TX_INT_REG = Interrupt Register * * * Notes: * In XAUI mode, only the lsb (corresponding to port0) of UNDFLW is used. 
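*
* A hedged service-loop sketch, assuming cvmx_read_csr()/cvmx_write_csr(),
* a CVMX_GMXX_TX_INT_REG(block) address macro, and the usual
* write-one-to-clear behavior of Octeon interrupt summary registers:
*
*   cvmx_gmxx_tx_int_reg_t isr;
*   isr.u64 = cvmx_read_csr(CVMX_GMXX_TX_INT_REG(interface));
*   if (isr.s.undflw)
*       ; /* handle TX underflow on the flagged port(s) */
*   cvmx_write_csr(CVMX_GMXX_TX_INT_REG(interface), isr.u64); /* ack */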
* */ typedef union { uint64_t u64; struct cvmx_gmxx_tx_int_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t late_col : 4; /**< TX Late Collision (PASS3 only) */ uint64_t xsdef : 4; /**< TX Excessive deferral (RGMII/halfdup mode only) (PASS2 only) */ uint64_t xscol : 4; /**< TX Excessive collisions (RGMII/halfdup mode only) (PASS2 only) */ uint64_t reserved_6_7 : 2; uint64_t undflw : 4; /**< TX Underflow (RGMII mode only) */ uint64_t ncb_nxa : 1; /**< Port address out-of-range from NCB Interface */ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */ #else uint64_t pko_nxa : 1; uint64_t ncb_nxa : 1; uint64_t undflw : 4; uint64_t reserved_6_7 : 2; uint64_t xscol : 4; uint64_t xsdef : 4; uint64_t late_col : 4; uint64_t reserved_20_63 : 44; #endif } s; struct cvmx_gmxx_tx_int_reg_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_19_63 : 45; uint64_t late_col : 3; /**< TX Late Collision */ uint64_t reserved_15_15 : 1; uint64_t xsdef : 3; /**< TX Excessive deferral (RGMII/halfdup mode only) */ uint64_t reserved_11_11 : 1; uint64_t xscol : 3; /**< TX Excessive collisions (RGMII/halfdup mode only) */ uint64_t reserved_5_7 : 3; uint64_t undflw : 3; /**< TX Underflow (RGMII mode only) */ uint64_t reserved_1_1 : 1; uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */ #else uint64_t pko_nxa : 1; uint64_t reserved_1_1 : 1; uint64_t undflw : 3; uint64_t reserved_5_7 : 3; uint64_t xscol : 3; uint64_t reserved_11_11 : 1; uint64_t xsdef : 3; uint64_t reserved_15_15 : 1; uint64_t late_col : 3; uint64_t reserved_19_63 : 45; #endif } cn30xx; struct cvmx_gmxx_tx_int_reg_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_15_63 : 49; uint64_t xsdef : 3; /**< TX Excessive deferral (RGMII/halfdup mode only) */ uint64_t reserved_11_11 : 1; uint64_t xscol : 3; /**< TX Excessive collisions (RGMII/halfdup mode only) */ uint64_t reserved_5_7 : 3; uint64_t undflw : 3; /**< TX Underflow (RGMII mode only) */ uint64_t reserved_1_1 : 1; uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */ #else uint64_t pko_nxa : 1; uint64_t reserved_1_1 : 1; uint64_t undflw : 3; uint64_t reserved_5_7 : 3; uint64_t xscol : 3; uint64_t reserved_11_11 : 1; uint64_t xsdef : 3; uint64_t reserved_15_63 : 49; #endif } cn31xx; struct cvmx_gmxx_tx_int_reg_s cn38xx; struct cvmx_gmxx_tx_int_reg_cn38xxp2 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t xsdef : 4; /**< TX Excessive deferral (RGMII/halfdup mode only) (PASS2 only) */ uint64_t xscol : 4; /**< TX Excessive collisions (RGMII/halfdup mode only) (PASS2 only) */ uint64_t reserved_6_7 : 2; uint64_t undflw : 4; /**< TX Underflow (RGMII mode only) */ uint64_t ncb_nxa : 1; /**< Port address out-of-range from NCB Interface */ uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */ #else uint64_t pko_nxa : 1; uint64_t ncb_nxa : 1; uint64_t undflw : 4; uint64_t reserved_6_7 : 2; uint64_t xscol : 4; uint64_t xsdef : 4; uint64_t reserved_16_63 : 48; #endif } cn38xxp2; struct cvmx_gmxx_tx_int_reg_cn30xx cn50xx; struct cvmx_gmxx_tx_int_reg_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t late_col : 4; /**< TX Late Collision (SGMII/1000Base-X half-duplex only) */ uint64_t xsdef : 4; /**< TX Excessive deferral (SGMII/1000Base-X half-duplex only) */ uint64_t xscol : 4; /**< TX Excessive collisions (SGMII/1000Base-X half-duplex only) */ uint64_t reserved_6_7 : 2; uint64_t undflw : 4; /**< TX Underflow */ uint64_t 
reserved_1_1 : 1; uint64_t pko_nxa : 1; /**< Port address out-of-range from PKO Interface */ #else uint64_t pko_nxa : 1; uint64_t reserved_1_1 : 1; uint64_t undflw : 4; uint64_t reserved_6_7 : 2; uint64_t xscol : 4; uint64_t xsdef : 4; uint64_t late_col : 4; uint64_t reserved_20_63 : 44; #endif } cn52xx; struct cvmx_gmxx_tx_int_reg_cn52xx cn52xxp1; struct cvmx_gmxx_tx_int_reg_cn52xx cn56xx; struct cvmx_gmxx_tx_int_reg_cn52xx cn56xxp1; struct cvmx_gmxx_tx_int_reg_s cn58xx; struct cvmx_gmxx_tx_int_reg_s cn58xxp1; } cvmx_gmxx_tx_int_reg_t; /** * cvmx_gmx#_tx_jam * * GMX_TX_JAM = Packet TX Jam Pattern * */ typedef union { uint64_t u64; struct cvmx_gmxx_tx_jam_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t jam : 8; /**< Jam pattern */ #else uint64_t jam : 8; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_gmxx_tx_jam_s cn30xx; struct cvmx_gmxx_tx_jam_s cn31xx; struct cvmx_gmxx_tx_jam_s cn38xx; struct cvmx_gmxx_tx_jam_s cn38xxp2; struct cvmx_gmxx_tx_jam_s cn50xx; struct cvmx_gmxx_tx_jam_s cn52xx; struct cvmx_gmxx_tx_jam_s cn52xxp1; struct cvmx_gmxx_tx_jam_s cn56xx; struct cvmx_gmxx_tx_jam_s cn56xxp1; struct cvmx_gmxx_tx_jam_s cn58xx; struct cvmx_gmxx_tx_jam_s cn58xxp1; } cvmx_gmxx_tx_jam_t; /** * cvmx_gmx#_tx_lfsr * * GMX_TX_LFSR = LFSR used to implement truncated binary exponential backoff * */ typedef union { uint64_t u64; struct cvmx_gmxx_tx_lfsr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t lfsr : 16; /**< The current state of the LFSR used to feed random numbers to compute truncated binary exponential backoff. */ #else uint64_t lfsr : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_gmxx_tx_lfsr_s cn30xx; struct cvmx_gmxx_tx_lfsr_s cn31xx; struct cvmx_gmxx_tx_lfsr_s cn38xx; struct cvmx_gmxx_tx_lfsr_s cn38xxp2; struct cvmx_gmxx_tx_lfsr_s cn50xx; struct cvmx_gmxx_tx_lfsr_s cn52xx; struct cvmx_gmxx_tx_lfsr_s cn52xxp1; struct cvmx_gmxx_tx_lfsr_s cn56xx; struct cvmx_gmxx_tx_lfsr_s cn56xxp1; struct cvmx_gmxx_tx_lfsr_s cn58xx; struct cvmx_gmxx_tx_lfsr_s cn58xxp1; } cvmx_gmxx_tx_lfsr_t; /** * cvmx_gmx#_tx_ovr_bp * * GMX_TX_OVR_BP = Packet Interface TX Override BackPressure * * * Notes: * In XAUI mode, only the lsb (corresponding to port0) of EN, BP, and IGN_FULL are used. * * GMX*_TX_OVR_BP[EN<0>] must be set to one and GMX*_TX_OVR_BP[BP<0>] must be cleared to zero * (to forcibly disable HW-automatic 802.3 pause packet generation) with the HiGig2 Protocol * when GMX*_HG2_CONTROL[HG2TX_EN]=0. (The HiGig2 protocol is indicated by * GMX*_TX_XAUI_CTL[HG_EN]=1 and GMX*_RX0_UDD_SKP[LEN]=16.) HW can only auto-generate backpressure * through HiGig2 messages (optionally, when GMX*_HG2_CONTROL[HG2TX_EN]=1) with the HiGig2 * protocol. 
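*
* A sketch of the HiGig2 requirement above, assuming cvmx_read_csr()/
* cvmx_write_csr() and a CVMX_GMXX_TX_OVR_BP(block) address macro:
*
*   cvmx_gmxx_tx_ovr_bp_t ovr;
*   ovr.u64 = cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
*   ovr.s.en |= 1;  /* EN<0> = 1: take over port 0 back pressure */
*   ovr.s.bp &= ~1; /* BP<0> = 0: never assert it automatically */
*   cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), ovr.u64);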
*/ typedef union { uint64_t u64; struct cvmx_gmxx_tx_ovr_bp_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_48_63 : 16; uint64_t tx_prt_bp : 16; /**< Per port BP sent to PKO 0=Port is available 1=Port should be back pressured */ uint64_t reserved_12_31 : 20; uint64_t en : 4; /**< Per port Enable back pressure override */ uint64_t bp : 4; /**< Per port BackPressure status to use 0=Port is available 1=Port should be back pressured */ uint64_t ign_full : 4; /**< Ignore the RX FIFO full when computing BP */ #else uint64_t ign_full : 4; uint64_t bp : 4; uint64_t en : 4; uint64_t reserved_12_31 : 20; uint64_t tx_prt_bp : 16; uint64_t reserved_48_63 : 16; #endif } s; struct cvmx_gmxx_tx_ovr_bp_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_11_63 : 53; uint64_t en : 3; /**< Per port Enable back pressure override */ uint64_t reserved_7_7 : 1; uint64_t bp : 3; /**< Per port BackPressure status to use 0=Port is available 1=Port should be back pressured */ uint64_t reserved_3_3 : 1; uint64_t ign_full : 3; /**< Ignore the RX FIFO full when computing BP */ #else uint64_t ign_full : 3; uint64_t reserved_3_3 : 1; uint64_t bp : 3; uint64_t reserved_7_7 : 1; uint64_t en : 3; uint64_t reserved_11_63 : 53; #endif } cn30xx; struct cvmx_gmxx_tx_ovr_bp_cn30xx cn31xx; struct cvmx_gmxx_tx_ovr_bp_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t en : 4; /**< Per port Enable back pressure override */ uint64_t bp : 4; /**< Per port BackPressure status to use 0=Port is available 1=Port should be back pressured */ uint64_t ign_full : 4; /**< Ignore the RX FIFO full when computing BP */ #else uint64_t ign_full : 4; uint64_t bp : 4; uint64_t en : 4; uint64_t reserved_12_63 : 52; #endif } cn38xx; struct cvmx_gmxx_tx_ovr_bp_cn38xx cn38xxp2; struct cvmx_gmxx_tx_ovr_bp_cn30xx cn50xx; struct cvmx_gmxx_tx_ovr_bp_s cn52xx; struct cvmx_gmxx_tx_ovr_bp_s cn52xxp1; struct cvmx_gmxx_tx_ovr_bp_s cn56xx; struct cvmx_gmxx_tx_ovr_bp_s cn56xxp1; struct cvmx_gmxx_tx_ovr_bp_cn38xx cn58xx; struct cvmx_gmxx_tx_ovr_bp_cn38xx cn58xxp1; } cvmx_gmxx_tx_ovr_bp_t; /** * cvmx_gmx#_tx_pause_pkt_dmac * * GMX_TX_PAUSE_PKT_DMAC = Packet TX Pause Packet DMAC field * */ typedef union { uint64_t u64; struct cvmx_gmxx_tx_pause_pkt_dmac_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_48_63 : 16; uint64_t dmac : 48; /**< The DMAC field placed in outbound pause pkts */ #else uint64_t dmac : 48; uint64_t reserved_48_63 : 16; #endif } s; struct cvmx_gmxx_tx_pause_pkt_dmac_s cn30xx; struct cvmx_gmxx_tx_pause_pkt_dmac_s cn31xx; struct cvmx_gmxx_tx_pause_pkt_dmac_s cn38xx; struct cvmx_gmxx_tx_pause_pkt_dmac_s cn38xxp2; struct cvmx_gmxx_tx_pause_pkt_dmac_s cn50xx; struct cvmx_gmxx_tx_pause_pkt_dmac_s cn52xx; struct cvmx_gmxx_tx_pause_pkt_dmac_s cn52xxp1; struct cvmx_gmxx_tx_pause_pkt_dmac_s cn56xx; struct cvmx_gmxx_tx_pause_pkt_dmac_s cn56xxp1; struct cvmx_gmxx_tx_pause_pkt_dmac_s cn58xx; struct cvmx_gmxx_tx_pause_pkt_dmac_s cn58xxp1; } cvmx_gmxx_tx_pause_pkt_dmac_t; /** * cvmx_gmx#_tx_pause_pkt_type * * GMX_TX_PAUSE_PKT_TYPE = Packet Interface TX Pause Packet TYPE field * */ typedef union { uint64_t u64; struct cvmx_gmxx_tx_pause_pkt_type_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t type : 16; /**< The TYPE field placed in outbound pause pkts */ #else uint64_t type : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_gmxx_tx_pause_pkt_type_s cn30xx; struct cvmx_gmxx_tx_pause_pkt_type_s cn31xx; struct cvmx_gmxx_tx_pause_pkt_type_s cn38xx; struct cvmx_gmxx_tx_pause_pkt_type_s
cn38xxp2; struct cvmx_gmxx_tx_pause_pkt_type_s cn50xx; struct cvmx_gmxx_tx_pause_pkt_type_s cn52xx; struct cvmx_gmxx_tx_pause_pkt_type_s cn52xxp1; struct cvmx_gmxx_tx_pause_pkt_type_s cn56xx; struct cvmx_gmxx_tx_pause_pkt_type_s cn56xxp1; struct cvmx_gmxx_tx_pause_pkt_type_s cn58xx; struct cvmx_gmxx_tx_pause_pkt_type_s cn58xxp1; } cvmx_gmxx_tx_pause_pkt_type_t; /** * cvmx_gmx#_tx_prts * * Common * * * GMX_TX_PRTS = TX Ports * * Notes: * * The value programmed for PRTS is the number of the highest architected * port number on the interface, plus 1. For example, if port 2 is the * highest architected port, then the programmed value should be 3 since * there are 3 ports in the system - 0, 1, and 2. */ typedef union { uint64_t u64; struct cvmx_gmxx_tx_prts_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t prts : 5; /**< Number of ports allowed on the interface */ #else uint64_t prts : 5; uint64_t reserved_5_63 : 59; #endif } s; struct cvmx_gmxx_tx_prts_s cn30xx; struct cvmx_gmxx_tx_prts_s cn31xx; struct cvmx_gmxx_tx_prts_s cn38xx; struct cvmx_gmxx_tx_prts_s cn38xxp2; struct cvmx_gmxx_tx_prts_s cn50xx; struct cvmx_gmxx_tx_prts_s cn52xx; struct cvmx_gmxx_tx_prts_s cn52xxp1; struct cvmx_gmxx_tx_prts_s cn56xx; struct cvmx_gmxx_tx_prts_s cn56xxp1; struct cvmx_gmxx_tx_prts_s cn58xx; struct cvmx_gmxx_tx_prts_s cn58xxp1; } cvmx_gmxx_tx_prts_t; /** * cvmx_gmx#_tx_spi_ctl * * GMX_TX_SPI_CTL = Spi4 TX Modes * */ typedef union { uint64_t u64; struct cvmx_gmxx_tx_spi_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t tpa_clr : 1; /**< TPA Clear Mode Clear credit counter when satisfied status */ uint64_t cont_pkt : 1; /**< Contiguous Packet Mode Finish one packet before switching to another Cannot be set in Spi4 pass-through mode */ #else uint64_t cont_pkt : 1; uint64_t tpa_clr : 1; uint64_t reserved_2_63 : 62; #endif } s; struct cvmx_gmxx_tx_spi_ctl_s cn38xx; struct cvmx_gmxx_tx_spi_ctl_s cn38xxp2; struct cvmx_gmxx_tx_spi_ctl_s cn58xx; struct cvmx_gmxx_tx_spi_ctl_s cn58xxp1; } cvmx_gmxx_tx_spi_ctl_t; /** * cvmx_gmx#_tx_spi_drain * * GMX_TX_SPI_DRAIN = Drain out Spi TX FIFO * */ typedef union { uint64_t u64; struct cvmx_gmxx_tx_spi_drain_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t drain : 16; /**< Per port drain control 0=Normal operation 1=GMX TX will be popped, but no valid data will be sent to SPX. Credits are correctly returned to PKO. STX_IGN_CAL should be set to ignore TPA and not stall due to back-pressure. (PASS3 only) */ #else uint64_t drain : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_gmxx_tx_spi_drain_s cn38xx; struct cvmx_gmxx_tx_spi_drain_s cn58xx; struct cvmx_gmxx_tx_spi_drain_s cn58xxp1; } cvmx_gmxx_tx_spi_drain_t; /** * cvmx_gmx#_tx_spi_max * * GMX_TX_SPI_MAX = RGMII TX Spi4 MAX * */ typedef union { uint64_t u64; struct cvmx_gmxx_tx_spi_max_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_23_63 : 41; uint64_t slice : 7; /**< Number of 16B blocks to transmit in a burst before switching to the next port. SLICE does not always limit the burst length transmitted by OCTEON. Depending on the traffic pattern and GMX_TX_SPI_ROUND programming, the next port could be the same as the current port. In this case, OCTEON may merge multiple sub-SLICE bursts into one contiguous burst that is longer than SLICE (as long as the burst does not cross a packet boundary). SLICE must be programmed to be >= GMX_TX_SPI_THRESH[THRESH] If SLICE==0, then the transmitter will tend to send the complete packet.
The port will only switch if credits are exhausted or PKO cannot keep up. (90nm ONLY) */ uint64_t max2 : 8; /**< MAX2 (per Spi4.2 spec) */ uint64_t max1 : 8; /**< MAX1 (per Spi4.2 spec) MAX1 >= GMX_TX_SPI_THRESH[THRESH] */ #else uint64_t max1 : 8; uint64_t max2 : 8; uint64_t slice : 7; uint64_t reserved_23_63 : 41; #endif } s; struct cvmx_gmxx_tx_spi_max_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t max2 : 8; /**< MAX2 (per Spi4.2 spec) */ uint64_t max1 : 8; /**< MAX1 (per Spi4.2 spec) MAX1 >= GMX_TX_SPI_THRESH[THRESH] */ #else uint64_t max1 : 8; uint64_t max2 : 8; uint64_t reserved_16_63 : 48; #endif } cn38xx; struct cvmx_gmxx_tx_spi_max_cn38xx cn38xxp2; struct cvmx_gmxx_tx_spi_max_s cn58xx; struct cvmx_gmxx_tx_spi_max_s cn58xxp1; } cvmx_gmxx_tx_spi_max_t; /** * cvmx_gmx#_tx_spi_round# * * GMX_TX_SPI_ROUND = Controls SPI4 TX Arbitration * */ typedef union { uint64_t u64; struct cvmx_gmxx_tx_spi_roundx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t round : 16; /**< Which Spi ports participate in each arbitration round. Each bit corresponds to a spi port - 0: this port will arb in this round - 1: this port will not arb in this round (90nm ONLY) */ #else uint64_t round : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_gmxx_tx_spi_roundx_s cn58xx; struct cvmx_gmxx_tx_spi_roundx_s cn58xxp1; } cvmx_gmxx_tx_spi_roundx_t; /** * cvmx_gmx#_tx_spi_thresh * * GMX_TX_SPI_THRESH = RGMII TX Spi4 Transmit Threshold * * * Notes: * Note: zero will map to 0x20 * * This will normally create Spi4 traffic bursts at least THRESH in length. * If dclk > eclk, then this rule may not always hold and Octeon may split * transfers into smaller bursts - some of which could be as short as 16B. * Octeon will never violate the Spi4.2 spec and send a non-EOP burst that is * not a multiple of 16B. */ typedef union { uint64_t u64; struct cvmx_gmxx_tx_spi_thresh_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_6_63 : 58; uint64_t thresh : 6; /**< Transmit threshold in 16B blocks - cannot be zero THRESH <= TX_FIFO size (in non-passthrough mode) THRESH <= TX_FIFO size-2 (in passthrough mode) THRESH <= GMX_TX_SPI_MAX[MAX1] THRESH <= GMX_TX_SPI_MAX[MAX2], if not then it is possible for Octeon to send a Spi4 data burst of MAX2 <= burst <= THRESH 16B ticks GMX_TX_SPI_MAX[SLICE] must be programmed to be >= THRESH */ #else uint64_t thresh : 6; uint64_t reserved_6_63 : 58; #endif } s; struct cvmx_gmxx_tx_spi_thresh_s cn38xx; struct cvmx_gmxx_tx_spi_thresh_s cn38xxp2; struct cvmx_gmxx_tx_spi_thresh_s cn58xx; struct cvmx_gmxx_tx_spi_thresh_s cn58xxp1; } cvmx_gmxx_tx_spi_thresh_t; /** * cvmx_gmx#_tx_xaui_ctl */ typedef union { uint64_t u64; struct cvmx_gmxx_tx_xaui_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_11_63 : 53; uint64_t hg_pause_hgi : 2; /**< HGI Field for HW generated HiGig pause packets (XAUI mode only) */ uint64_t hg_en : 1; /**< Enable HiGig Mode When HG_EN is set, the following must be set: GMX_RX_FRM_CTL[PRE_CHK] == 0 GMX_RX_UDD_SKP[FCSSEL] == 0 GMX_RX_UDD_SKP[SKIP] == 12 or 16 GMX_TX_APPEND[PREAMBLE] == 0 (depending on the HiGig header size) (XAUI mode only) */ uint64_t reserved_7_7 : 1; uint64_t ls_byp : 1; /**< Bypass the link status as determined by the XGMII receiver and set the link status of the transmitter to LS. (XAUI mode only) */ uint64_t ls : 2; /**< Link Status 0 = Link Ok Link runs normally. RS passes MAC data to PCS 1 = Local Fault RS layer sends continuous remote fault sequences.
2 = Remote Fault RS layer sends continuous idles sequences (XAUI mode only) */ uint64_t reserved_2_3 : 2; uint64_t uni_en : 1; /**< Enable Unidirectional Mode (IEEE Clause 66) (XAUI mode only) */ uint64_t dic_en : 1; /**< Enable the deficit idle counter for IFG averaging (XAUI mode only) */ #else uint64_t dic_en : 1; uint64_t uni_en : 1; uint64_t reserved_2_3 : 2; uint64_t ls : 2; uint64_t ls_byp : 1; uint64_t reserved_7_7 : 1; uint64_t hg_en : 1; uint64_t hg_pause_hgi : 2; uint64_t reserved_11_63 : 53; #endif } s; struct cvmx_gmxx_tx_xaui_ctl_s cn52xx; struct cvmx_gmxx_tx_xaui_ctl_s cn52xxp1; struct cvmx_gmxx_tx_xaui_ctl_s cn56xx; struct cvmx_gmxx_tx_xaui_ctl_s cn56xxp1; } cvmx_gmxx_tx_xaui_ctl_t; /** * cvmx_gmx#_xaui_ext_loopback */ typedef union { uint64_t u64; struct cvmx_gmxx_xaui_ext_loopback_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t en : 1; /**< Loopback enable Puts the packet interface in external loopback mode on the XAUI bus in which the RX lines are reflected on the TX lines. (XAUI mode only) */ uint64_t thresh : 4; /**< Threshhold on the TX FIFO SW must only write the typical value. Any other value will cause loopback mode not to function correctly. (XAUI mode only) */ #else uint64_t thresh : 4; uint64_t en : 1; uint64_t reserved_5_63 : 59; #endif } s; struct cvmx_gmxx_xaui_ext_loopback_s cn52xx; struct cvmx_gmxx_xaui_ext_loopback_s cn52xxp1; struct cvmx_gmxx_xaui_ext_loopback_s cn56xx; struct cvmx_gmxx_xaui_ext_loopback_s cn56xxp1; } cvmx_gmxx_xaui_ext_loopback_t; /** * cvmx_gpio_bit_cfg# */ typedef union { uint64_t u64; struct cvmx_gpio_bit_cfgx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_15_63 : 49; uint64_t clk_gen : 1; /**< When TX_OE is set, GPIO pin becomes a clock */ uint64_t clk_sel : 2; /**< Selects which of the 4 GPIO clock generators */ uint64_t fil_sel : 4; /**< Global counter bit-select (controls sample rate) */ uint64_t fil_cnt : 4; /**< Number of consecutive samples to change state */ uint64_t int_type : 1; /**< Type of interrupt 0 = level (default) 1 = rising edge */ uint64_t int_en : 1; /**< Bit mask to indicate which bits to raise interrupt */ uint64_t rx_xor : 1; /**< Invert the GPIO pin */ uint64_t tx_oe : 1; /**< Drive the GPIO pin as an output pin */ #else uint64_t tx_oe : 1; uint64_t rx_xor : 1; uint64_t int_en : 1; uint64_t int_type : 1; uint64_t fil_cnt : 4; uint64_t fil_sel : 4; uint64_t clk_sel : 2; uint64_t clk_gen : 1; uint64_t reserved_15_63 : 49; #endif } s; struct cvmx_gpio_bit_cfgx_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t fil_sel : 4; /**< Global counter bit-select (controls sample rate) */ uint64_t fil_cnt : 4; /**< Number of consecutive samples to change state */ uint64_t int_type : 1; /**< Type of interrupt 0 = level (default) 1 = rising edge */ uint64_t int_en : 1; /**< Bit mask to indicate which bits to raise interrupt */ uint64_t rx_xor : 1; /**< Invert the GPIO pin */ uint64_t tx_oe : 1; /**< Drive the GPIO pin as an output pin */ #else uint64_t tx_oe : 1; uint64_t rx_xor : 1; uint64_t int_en : 1; uint64_t int_type : 1; uint64_t fil_cnt : 4; uint64_t fil_sel : 4; uint64_t reserved_12_63 : 52; #endif } cn30xx; struct cvmx_gpio_bit_cfgx_cn30xx cn31xx; struct cvmx_gpio_bit_cfgx_cn30xx cn38xx; struct cvmx_gpio_bit_cfgx_cn30xx cn38xxp2; struct cvmx_gpio_bit_cfgx_cn30xx cn50xx; struct cvmx_gpio_bit_cfgx_s cn52xx; struct cvmx_gpio_bit_cfgx_s cn52xxp1; struct cvmx_gpio_bit_cfgx_s cn56xx; struct cvmx_gpio_bit_cfgx_s cn56xxp1; struct cvmx_gpio_bit_cfgx_cn30xx 
cn58xx; struct cvmx_gpio_bit_cfgx_cn30xx cn58xxp1; } cvmx_gpio_bit_cfgx_t; /** * cvmx_gpio_boot_ena */ typedef union { uint64_t u64; struct cvmx_gpio_boot_ena_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t boot_ena : 4; /**< Drive boot bus chip enables [7:4] on gpio [11:8] */ uint64_t reserved_0_7 : 8; #else uint64_t reserved_0_7 : 8; uint64_t boot_ena : 4; uint64_t reserved_12_63 : 52; #endif } s; struct cvmx_gpio_boot_ena_s cn30xx; struct cvmx_gpio_boot_ena_s cn31xx; struct cvmx_gpio_boot_ena_s cn50xx; } cvmx_gpio_boot_ena_t; /** * cvmx_gpio_clk_gen# */ typedef union { uint64_t u64; struct cvmx_gpio_clk_genx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t n : 32; /**< Determines the frequency of the GPIO clk generator NOTE: Fgpio_clk = Feclk * N / 2^32 N = (Fgpio_clk / Feclk) * 2^32 NOTE: writing N == 0 stops the clock generator N should be <= 2^31-1. */ #else uint64_t n : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_gpio_clk_genx_s cn52xx; struct cvmx_gpio_clk_genx_s cn52xxp1; struct cvmx_gpio_clk_genx_s cn56xx; struct cvmx_gpio_clk_genx_s cn56xxp1; } cvmx_gpio_clk_genx_t; /** * cvmx_gpio_dbg_ena */ typedef union { uint64_t u64; struct cvmx_gpio_dbg_ena_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_21_63 : 43; uint64_t dbg_ena : 21; /**< Enable the debug port to be driven on the gpio */ #else uint64_t dbg_ena : 21; uint64_t reserved_21_63 : 43; #endif } s; struct cvmx_gpio_dbg_ena_s cn30xx; struct cvmx_gpio_dbg_ena_s cn31xx; struct cvmx_gpio_dbg_ena_s cn50xx; } cvmx_gpio_dbg_ena_t; /** * cvmx_gpio_int_clr */ typedef union { uint64_t u64; struct cvmx_gpio_int_clr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t type : 16; /**< Clear the interrupt rising edge detector */ #else uint64_t type : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_gpio_int_clr_s cn30xx; struct cvmx_gpio_int_clr_s cn31xx; struct cvmx_gpio_int_clr_s cn38xx; struct cvmx_gpio_int_clr_s cn38xxp2; struct cvmx_gpio_int_clr_s cn50xx; struct cvmx_gpio_int_clr_s cn52xx; struct cvmx_gpio_int_clr_s cn52xxp1; struct cvmx_gpio_int_clr_s cn56xx; struct cvmx_gpio_int_clr_s cn56xxp1; struct cvmx_gpio_int_clr_s cn58xx; struct cvmx_gpio_int_clr_s cn58xxp1; } cvmx_gpio_int_clr_t; /** * cvmx_gpio_rx_dat */ typedef union { uint64_t u64; struct cvmx_gpio_rx_dat_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_24_63 : 40; uint64_t dat : 24; /**< GPIO Read Data */ #else uint64_t dat : 24; uint64_t reserved_24_63 : 40; #endif } s; struct cvmx_gpio_rx_dat_s cn30xx; struct cvmx_gpio_rx_dat_s cn31xx; struct cvmx_gpio_rx_dat_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t dat : 16; /**< GPIO Read Data */ #else uint64_t dat : 16; uint64_t reserved_16_63 : 48; #endif } cn38xx; struct cvmx_gpio_rx_dat_cn38xx cn38xxp2; struct cvmx_gpio_rx_dat_s cn50xx; struct cvmx_gpio_rx_dat_cn38xx cn52xx; struct cvmx_gpio_rx_dat_cn38xx cn52xxp1; struct cvmx_gpio_rx_dat_cn38xx cn56xx; struct cvmx_gpio_rx_dat_cn38xx cn56xxp1; struct cvmx_gpio_rx_dat_cn38xx cn58xx; struct cvmx_gpio_rx_dat_cn38xx cn58xxp1; } cvmx_gpio_rx_dat_t; /** * cvmx_gpio_tx_clr */ typedef union { uint64_t u64; struct cvmx_gpio_tx_clr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_24_63 : 40; uint64_t clr : 24; /**< Bit mask to indicate which bits to drive to '0'. 
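*
* GPIO_TX_SET and GPIO_TX_CLR form a set/clear pair, so individual pins
* can be driven without a read-modify-write. A sketch, assuming
* cvmx_write_csr() and the CVMX_GPIO_TX_SET/CVMX_GPIO_TX_CLR address
* macros:
*
*   cvmx_write_csr(CVMX_GPIO_TX_SET, 1ull << pin); /* drive pin to '1' */
*   cvmx_write_csr(CVMX_GPIO_TX_CLR, 1ull << pin); /* drive pin to '0' */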
*/ #else uint64_t clr : 24; uint64_t reserved_24_63 : 40; #endif } s; struct cvmx_gpio_tx_clr_s cn30xx; struct cvmx_gpio_tx_clr_s cn31xx; struct cvmx_gpio_tx_clr_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t clr : 16; /**< Bit mask to indicate which bits to drive to '0'. */ #else uint64_t clr : 16; uint64_t reserved_16_63 : 48; #endif } cn38xx; struct cvmx_gpio_tx_clr_cn38xx cn38xxp2; struct cvmx_gpio_tx_clr_s cn50xx; struct cvmx_gpio_tx_clr_cn38xx cn52xx; struct cvmx_gpio_tx_clr_cn38xx cn52xxp1; struct cvmx_gpio_tx_clr_cn38xx cn56xx; struct cvmx_gpio_tx_clr_cn38xx cn56xxp1; struct cvmx_gpio_tx_clr_cn38xx cn58xx; struct cvmx_gpio_tx_clr_cn38xx cn58xxp1; } cvmx_gpio_tx_clr_t; /** * cvmx_gpio_tx_set */ typedef union { uint64_t u64; struct cvmx_gpio_tx_set_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_24_63 : 40; uint64_t set : 24; /**< Bit mask to indicate which bits to drive to '1'. */ #else uint64_t set : 24; uint64_t reserved_24_63 : 40; #endif } s; struct cvmx_gpio_tx_set_s cn30xx; struct cvmx_gpio_tx_set_s cn31xx; struct cvmx_gpio_tx_set_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t set : 16; /**< Bit mask to indicate which bits to drive to '1'. */ #else uint64_t set : 16; uint64_t reserved_16_63 : 48; #endif } cn38xx; struct cvmx_gpio_tx_set_cn38xx cn38xxp2; struct cvmx_gpio_tx_set_s cn50xx; struct cvmx_gpio_tx_set_cn38xx cn52xx; struct cvmx_gpio_tx_set_cn38xx cn52xxp1; struct cvmx_gpio_tx_set_cn38xx cn56xx; struct cvmx_gpio_tx_set_cn38xx cn56xxp1; struct cvmx_gpio_tx_set_cn38xx cn58xx; struct cvmx_gpio_tx_set_cn38xx cn58xxp1; } cvmx_gpio_tx_set_t; /** * cvmx_gpio_xbit_cfg# */ typedef union { uint64_t u64; struct cvmx_gpio_xbit_cfgx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t fil_sel : 4; /**< Global counter bit-select (controls sample rate) */ uint64_t fil_cnt : 4; /**< Number of consecutive samples to change state */ uint64_t reserved_2_3 : 2; uint64_t rx_xor : 1; /**< Invert the GPIO pin */ uint64_t tx_oe : 1; /**< Drive the GPIO pin as an output pin */ #else uint64_t tx_oe : 1; uint64_t rx_xor : 1; uint64_t reserved_2_3 : 2; uint64_t fil_cnt : 4; uint64_t fil_sel : 4; uint64_t reserved_12_63 : 52; #endif } s; struct cvmx_gpio_xbit_cfgx_s cn30xx; struct cvmx_gpio_xbit_cfgx_s cn31xx; struct cvmx_gpio_xbit_cfgx_s cn50xx; } cvmx_gpio_xbit_cfgx_t; /** * cvmx_iob_bist_status * * IOB_BIST_STATUS = BIST Status of IOB Memories * * The result of the BIST run on the IOB memories. 
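*
* By analogy with the GMX BIST register above, a set bit is assumed to
* mark a memory that failed BIST. A minimal check, assuming
* cvmx_read_csr() and a CVMX_IOB_BIST_STATUS address macro:
*
*   cvmx_iob_bist_status_t bist;
*   bist.u64 = cvmx_read_csr(CVMX_IOB_BIST_STATUS);
*   if (bist.u64 != 0)
*       ; /* at least one IOB memory failed BIST */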
*/ typedef union { uint64_t u64; struct cvmx_iob_bist_status_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_18_63 : 46; uint64_t icnrcb : 1; /**< icnr_cb_reg_fifo_bist_status */ uint64_t icr0 : 1; /**< icr_bist_req_fifo0_status */ uint64_t icr1 : 1; /**< icr_bist_req_fifo1_status */ uint64_t icnr1 : 1; /**< icnr_reg_mem1_bist_status */ uint64_t icnr0 : 1; /**< icnr_reg_mem0_bist_status */ uint64_t ibdr0 : 1; /**< ibdr_bist_req_fifo0_status */ uint64_t ibdr1 : 1; /**< ibdr_bist_req_fifo1_status */ uint64_t ibr0 : 1; /**< ibr_bist_rsp_fifo0_status */ uint64_t ibr1 : 1; /**< ibr_bist_rsp_fifo1_status */ uint64_t icnrt : 1; /**< icnr_tag_cb_reg_fifo_bist_status */ uint64_t ibrq0 : 1; /**< ibrq_bist_req_fifo0_status */ uint64_t ibrq1 : 1; /**< ibrq_bist_req_fifo1_status */ uint64_t icrn0 : 1; /**< icr_ncb_bist_mem0_status */ uint64_t icrn1 : 1; /**< icr_ncb_bist_mem1_status */ uint64_t icrp0 : 1; /**< icr_pko_bist_mem0_status */ uint64_t icrp1 : 1; /**< icr_pko_bist_mem1_status */ uint64_t ibd : 1; /**< ibd_bist_mem0_status */ uint64_t icd : 1; /**< icd_ncb_fifo_bist_status */ #else uint64_t icd : 1; uint64_t ibd : 1; uint64_t icrp1 : 1; uint64_t icrp0 : 1; uint64_t icrn1 : 1; uint64_t icrn0 : 1; uint64_t ibrq1 : 1; uint64_t ibrq0 : 1; uint64_t icnrt : 1; uint64_t ibr1 : 1; uint64_t ibr0 : 1; uint64_t ibdr1 : 1; uint64_t ibdr0 : 1; uint64_t icnr0 : 1; uint64_t icnr1 : 1; uint64_t icr1 : 1; uint64_t icr0 : 1; uint64_t icnrcb : 1; uint64_t reserved_18_63 : 46; #endif } s; struct cvmx_iob_bist_status_s cn30xx; struct cvmx_iob_bist_status_s cn31xx; struct cvmx_iob_bist_status_s cn38xx; struct cvmx_iob_bist_status_s cn38xxp2; struct cvmx_iob_bist_status_s cn50xx; struct cvmx_iob_bist_status_s cn52xx; struct cvmx_iob_bist_status_s cn52xxp1; struct cvmx_iob_bist_status_s cn56xx; struct cvmx_iob_bist_status_s cn56xxp1; struct cvmx_iob_bist_status_s cn58xx; struct cvmx_iob_bist_status_s cn58xxp1; } cvmx_iob_bist_status_t; /** * cvmx_iob_ctl_status * * IOB Control Status = IOB Control and Status Register * * Provides control for IOB functions. */ typedef union { uint64_t u64; struct cvmx_iob_ctl_status_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_6_63 : 58; uint64_t rr_mode : 1; /**< When set to '1' will enable Round-Robin mode of next transaction that could arbitrate for the XMB. */ uint64_t outb_mat : 1; /**< Was a match on the outbound bus to the inb pattern matchers. PASS2 FIELD. */ uint64_t inb_mat : 1; /**< Was a match on the inbound bus to the inb pattern matchers. PASS2 FIELD. */ uint64_t pko_enb : 1; /**< Toggles the endian style of the FAU for the PKO. '0' is for big-endian and '1' is for little-endian. */ uint64_t dwb_enb : 1; /**< Enables the DWB function of the IOB. */ uint64_t fau_end : 1; /**< Toggles the endian style of the FAU. '0' is for big-endian and '1' is for little-endian. */ #else uint64_t fau_end : 1; uint64_t dwb_enb : 1; uint64_t pko_enb : 1; uint64_t inb_mat : 1; uint64_t outb_mat : 1; uint64_t rr_mode : 1; uint64_t reserved_6_63 : 58; #endif } s; struct cvmx_iob_ctl_status_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t outb_mat : 1; /**< Was a match on the outbound bus to the inb pattern matchers. */ uint64_t inb_mat : 1; /**< Was a match on the inbound bus to the inb pattern matchers. */ uint64_t pko_enb : 1; /**< Toggles the endian style of the FAU for the PKO. '0' is for big-endian and '1' is for little-endian. */ uint64_t dwb_enb : 1; /**< Enables the DWB function of the IOB. 
*/ uint64_t fau_end : 1; /**< Toggles the endian style of the FAU. '0' is for big-endian and '1' is for little-endian. */ #else uint64_t fau_end : 1; uint64_t dwb_enb : 1; uint64_t pko_enb : 1; uint64_t inb_mat : 1; uint64_t outb_mat : 1; uint64_t reserved_5_63 : 59; #endif } cn30xx; struct cvmx_iob_ctl_status_cn30xx cn31xx; struct cvmx_iob_ctl_status_cn30xx cn38xx; struct cvmx_iob_ctl_status_cn30xx cn38xxp2; struct cvmx_iob_ctl_status_cn30xx cn50xx; struct cvmx_iob_ctl_status_s cn52xx; struct cvmx_iob_ctl_status_cn30xx cn52xxp1; struct cvmx_iob_ctl_status_cn30xx cn56xx; struct cvmx_iob_ctl_status_cn30xx cn56xxp1; struct cvmx_iob_ctl_status_cn30xx cn58xx; struct cvmx_iob_ctl_status_cn30xx cn58xxp1; } cvmx_iob_ctl_status_t; /** * cvmx_iob_dwb_pri_cnt * * DWB To CMB Priority Counter = Don't Write Back to CMB Priority Counter Enable and Timer Value * * Enables and supplies the timeout count for raising the priority of Don't Write Back request to the L2C. */ typedef union { uint64_t u64; struct cvmx_iob_dwb_pri_cnt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t cnt_enb : 1; /**< Enables the raising of CMB access priority when CNT_VAL is reached. */ uint64_t cnt_val : 15; /**< Number of core clocks to wait before raising the priority for access to CMB. */ #else uint64_t cnt_val : 15; uint64_t cnt_enb : 1; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_iob_dwb_pri_cnt_s cn38xx; struct cvmx_iob_dwb_pri_cnt_s cn38xxp2; struct cvmx_iob_dwb_pri_cnt_s cn52xx; struct cvmx_iob_dwb_pri_cnt_s cn52xxp1; struct cvmx_iob_dwb_pri_cnt_s cn56xx; struct cvmx_iob_dwb_pri_cnt_s cn56xxp1; struct cvmx_iob_dwb_pri_cnt_s cn58xx; struct cvmx_iob_dwb_pri_cnt_s cn58xxp1; } cvmx_iob_dwb_pri_cnt_t; /** * cvmx_iob_fau_timeout * * FAU Timeout = Fetch and Add Unit Tag-Switch Timeout * * How many clock ticks the FAU unit will wait for a tag-switch before timing out for Queue 0. */ typedef union { uint64_t u64; struct cvmx_iob_fau_timeout_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_13_63 : 51; uint64_t tout_enb : 1; /**< The enable for the FAU timeout feature. '1' will enable the timeout, '0' will disable. */ uint64_t tout_val : 12; /**< When a tag request arrives from the PP a timer is started associated with that PP. The timer which increments every 256 eclks is compared to TOUT_VAL. When the two are equal the IOB will flag the tag request to complete as a time-out tag operation. The 256 count timer used to increment the PP associated timer is always running so the first increment of the PP associated timer may occur anywhere within the first 256 eclks. Note that '0' is an illegal value. */ #else uint64_t tout_val : 12; uint64_t tout_enb : 1; uint64_t reserved_13_63 : 51; #endif } s; struct cvmx_iob_fau_timeout_s cn30xx; struct cvmx_iob_fau_timeout_s cn31xx; struct cvmx_iob_fau_timeout_s cn38xx; struct cvmx_iob_fau_timeout_s cn38xxp2; struct cvmx_iob_fau_timeout_s cn50xx; struct cvmx_iob_fau_timeout_s cn52xx; struct cvmx_iob_fau_timeout_s cn52xxp1; struct cvmx_iob_fau_timeout_s cn56xx; struct cvmx_iob_fau_timeout_s cn56xxp1; struct cvmx_iob_fau_timeout_s cn58xx; struct cvmx_iob_fau_timeout_s cn58xxp1; } cvmx_iob_fau_timeout_t; /** * cvmx_iob_i2c_pri_cnt * * IPD To CMB Store Priority Counter = IPD to CMB Store Priority Counter Enable and Timer Value * * Enables and supplies the timeout count for raising the priority of IPD Store access to the CMB.
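*
* A configuration sketch, assuming cvmx_write_csr() and a
* CVMX_IOB_I2C_PRI_CNT address macro; the count value is illustrative:
*
*   cvmx_iob_i2c_pri_cnt_t pri;
*   pri.u64 = 0;
*   pri.s.cnt_val = 0x100; /* core clocks to wait before the priority raise */
*   pri.s.cnt_enb = 1;     /* enable the timeout mechanism */
*   cvmx_write_csr(CVMX_IOB_I2C_PRI_CNT, pri.u64);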
*/ typedef union { uint64_t u64; struct cvmx_iob_i2c_pri_cnt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t cnt_enb : 1; /**< Enables the raising of CMB access priority when CNT_VAL is reached. */ uint64_t cnt_val : 15; /**< Number of core clocks to wait before raising the priority for access to CMB. */ #else uint64_t cnt_val : 15; uint64_t cnt_enb : 1; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_iob_i2c_pri_cnt_s cn38xx; struct cvmx_iob_i2c_pri_cnt_s cn38xxp2; struct cvmx_iob_i2c_pri_cnt_s cn52xx; struct cvmx_iob_i2c_pri_cnt_s cn52xxp1; struct cvmx_iob_i2c_pri_cnt_s cn56xx; struct cvmx_iob_i2c_pri_cnt_s cn56xxp1; struct cvmx_iob_i2c_pri_cnt_s cn58xx; struct cvmx_iob_i2c_pri_cnt_s cn58xxp1; } cvmx_iob_i2c_pri_cnt_t; /** * cvmx_iob_inb_control_match * * IOB_INB_CONTROL_MATCH = IOB Inbound Control Match * * Match pattern for the inbound control to set the INB_MATCH_BIT. PASS-2 Register */ typedef union { uint64_t u64; struct cvmx_iob_inb_control_match_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_29_63 : 35; uint64_t mask : 8; /**< Pattern to match on the inbound NCB. */ uint64_t opc : 4; /**< Pattern to match on the inbound NCB. */ uint64_t dst : 9; /**< Pattern to match on the inbound NCB. */ uint64_t src : 8; /**< Pattern to match on the inbound NCB. */ #else uint64_t src : 8; uint64_t dst : 9; uint64_t opc : 4; uint64_t mask : 8; uint64_t reserved_29_63 : 35; #endif } s; struct cvmx_iob_inb_control_match_s cn30xx; struct cvmx_iob_inb_control_match_s cn31xx; struct cvmx_iob_inb_control_match_s cn38xx; struct cvmx_iob_inb_control_match_s cn38xxp2; struct cvmx_iob_inb_control_match_s cn50xx; struct cvmx_iob_inb_control_match_s cn52xx; struct cvmx_iob_inb_control_match_s cn52xxp1; struct cvmx_iob_inb_control_match_s cn56xx; struct cvmx_iob_inb_control_match_s cn56xxp1; struct cvmx_iob_inb_control_match_s cn58xx; struct cvmx_iob_inb_control_match_s cn58xxp1; } cvmx_iob_inb_control_match_t; /** * cvmx_iob_inb_control_match_enb * * IOB_INB_CONTROL_MATCH_ENB = IOB Inbound Control Match Enable * * Enables the match of the corresponding bit in the IOB_INB_CONTROL_MATCH reister. PASS-2 Register */ typedef union { uint64_t u64; struct cvmx_iob_inb_control_match_enb_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_29_63 : 35; uint64_t mask : 8; /**< Pattern to match on the inbound NCB. */ uint64_t opc : 4; /**< Pattern to match on the inbound NCB. */ uint64_t dst : 9; /**< Pattern to match on the inbound NCB. */ uint64_t src : 8; /**< Pattern to match on the inbound NCB. */ #else uint64_t src : 8; uint64_t dst : 9; uint64_t opc : 4; uint64_t mask : 8; uint64_t reserved_29_63 : 35; #endif } s; struct cvmx_iob_inb_control_match_enb_s cn30xx; struct cvmx_iob_inb_control_match_enb_s cn31xx; struct cvmx_iob_inb_control_match_enb_s cn38xx; struct cvmx_iob_inb_control_match_enb_s cn38xxp2; struct cvmx_iob_inb_control_match_enb_s cn50xx; struct cvmx_iob_inb_control_match_enb_s cn52xx; struct cvmx_iob_inb_control_match_enb_s cn52xxp1; struct cvmx_iob_inb_control_match_enb_s cn56xx; struct cvmx_iob_inb_control_match_enb_s cn56xxp1; struct cvmx_iob_inb_control_match_enb_s cn58xx; struct cvmx_iob_inb_control_match_enb_s cn58xxp1; } cvmx_iob_inb_control_match_enb_t; /** * cvmx_iob_inb_data_match * * IOB_INB_DATA_MATCH = IOB Inbound Data Match * * Match pattern for the inbound data to set the INB_MATCH_BIT. 
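*
* The MATCH and MATCH_ENB registers act as a pattern/mask pair. A hedged
* sketch, assuming cvmx_write_csr() and the CVMX_IOB_INB_DATA_MATCH and
* CVMX_IOB_INB_DATA_MATCH_ENB address macros (pattern and care_mask are
* caller-supplied uint64_t values):
*
*   cvmx_write_csr(CVMX_IOB_INB_DATA_MATCH, pattern);
*   cvmx_write_csr(CVMX_IOB_INB_DATA_MATCH_ENB, care_mask);
*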
PASS-2 Register */ typedef union { uint64_t u64; struct cvmx_iob_inb_data_match_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t data : 64; /**< Pattern to match on the inbound NCB. */ #else uint64_t data : 64; #endif } s; struct cvmx_iob_inb_data_match_s cn30xx; struct cvmx_iob_inb_data_match_s cn31xx; struct cvmx_iob_inb_data_match_s cn38xx; struct cvmx_iob_inb_data_match_s cn38xxp2; struct cvmx_iob_inb_data_match_s cn50xx; struct cvmx_iob_inb_data_match_s cn52xx; struct cvmx_iob_inb_data_match_s cn52xxp1; struct cvmx_iob_inb_data_match_s cn56xx; struct cvmx_iob_inb_data_match_s cn56xxp1; struct cvmx_iob_inb_data_match_s cn58xx; struct cvmx_iob_inb_data_match_s cn58xxp1; } cvmx_iob_inb_data_match_t; /** * cvmx_iob_inb_data_match_enb * * IOB_INB_DATA_MATCH_ENB = IOB Inbound Data Match Enable * * Enables the match of the corresponding bit in the IOB_INB_DATA_MATCH reister. PASS-2 Register */ typedef union { uint64_t u64; struct cvmx_iob_inb_data_match_enb_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t data : 64; /**< Bit to enable match of. */ #else uint64_t data : 64; #endif } s; struct cvmx_iob_inb_data_match_enb_s cn30xx; struct cvmx_iob_inb_data_match_enb_s cn31xx; struct cvmx_iob_inb_data_match_enb_s cn38xx; struct cvmx_iob_inb_data_match_enb_s cn38xxp2; struct cvmx_iob_inb_data_match_enb_s cn50xx; struct cvmx_iob_inb_data_match_enb_s cn52xx; struct cvmx_iob_inb_data_match_enb_s cn52xxp1; struct cvmx_iob_inb_data_match_enb_s cn56xx; struct cvmx_iob_inb_data_match_enb_s cn56xxp1; struct cvmx_iob_inb_data_match_enb_s cn58xx; struct cvmx_iob_inb_data_match_enb_s cn58xxp1; } cvmx_iob_inb_data_match_enb_t; /** * cvmx_iob_int_enb * * IOB_INT_ENB = IOB's Interrupt Enable * * The IOB's interrupt enable register. This is a PASS-2 register. */ typedef union { uint64_t u64; struct cvmx_iob_int_enb_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_6_63 : 58; uint64_t p_dat : 1; /**< When set (1) and bit 5 of the IOB_INT_SUM register is asserted the IOB will assert an interrupt. */ uint64_t np_dat : 1; /**< When set (1) and bit 4 of the IOB_INT_SUM register is asserted the IOB will assert an interrupt. */ uint64_t p_eop : 1; /**< When set (1) and bit 3 of the IOB_INT_SUM register is asserted the IOB will assert an interrupt. */ uint64_t p_sop : 1; /**< When set (1) and bit 2 of the IOB_INT_SUM register is asserted the IOB will assert an interrupt. */ uint64_t np_eop : 1; /**< When set (1) and bit 1 of the IOB_INT_SUM register is asserted the IOB will assert an interrupt. */ uint64_t np_sop : 1; /**< When set (1) and bit 0 of the IOB_INT_SUM register is asserted the IOB will assert an interrupt. */ #else uint64_t np_sop : 1; uint64_t np_eop : 1; uint64_t p_sop : 1; uint64_t p_eop : 1; uint64_t np_dat : 1; uint64_t p_dat : 1; uint64_t reserved_6_63 : 58; #endif } s; struct cvmx_iob_int_enb_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t p_eop : 1; /**< When set (1) and bit 3 of the IOB_INT_SUM register is asserted the IOB will assert an interrupt. */ uint64_t p_sop : 1; /**< When set (1) and bit 2 of the IOB_INT_SUM register is asserted the IOB will assert an interrupt. */ uint64_t np_eop : 1; /**< When set (1) and bit 1 of the IOB_INT_SUM register is asserted the IOB will assert an interrupt. */ uint64_t np_sop : 1; /**< When set (1) and bit 0 of the IOB_INT_SUM register is asserted the IOB will assert an interrupt. 
*/ #else uint64_t np_sop : 1; uint64_t np_eop : 1; uint64_t p_sop : 1; uint64_t p_eop : 1; uint64_t reserved_4_63 : 60; #endif } cn30xx; struct cvmx_iob_int_enb_cn30xx cn31xx; struct cvmx_iob_int_enb_cn30xx cn38xx; struct cvmx_iob_int_enb_cn30xx cn38xxp2; struct cvmx_iob_int_enb_s cn50xx; struct cvmx_iob_int_enb_s cn52xx; struct cvmx_iob_int_enb_s cn52xxp1; struct cvmx_iob_int_enb_s cn56xx; struct cvmx_iob_int_enb_s cn56xxp1; struct cvmx_iob_int_enb_s cn58xx; struct cvmx_iob_int_enb_s cn58xxp1; } cvmx_iob_int_enb_t; /** * cvmx_iob_int_sum * * IOB_INT_SUM = IOB's Interrupt Summary Register * * Contains the different interrupt summary bits of the IOB. This is a PASS-2 register. */ typedef union { uint64_t u64; struct cvmx_iob_int_sum_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_6_63 : 58; uint64_t p_dat : 1; /**< Set when data arrives before a SOP for the same port for a passthrough packet. The first detected error associated with bits [5:0] of this register will only be set here. A new bit can be set when the previous reported bit is cleared. */ uint64_t np_dat : 1; /**< Set when data arrives before a SOP for the same port for a non-passthrough packet. The first detected error associated with bits [5:0] of this register will only be set here. A new bit can be set when the previous reported bit is cleared. */ uint64_t p_eop : 1; /**< Set when an EOP is followed by an EOP for the same port for a passthrough packet. The first detected error associated with bits [5:0] of this register will only be set here. A new bit can be set when the previous reported bit is cleared. */ uint64_t p_sop : 1; /**< Set when a SOP is followed by an SOP for the same port for a passthrough packet. The first detected error associated with bits [5:0] of this register will only be set here. A new bit can be set when the previous reported bit is cleared. */ uint64_t np_eop : 1; /**< Set when an EOP is followed by an EOP for the same port for a non-passthrough packet. The first detected error associated with bits [5:0] of this register will only be set here. A new bit can be set when the previous reported bit is cleared. */ uint64_t np_sop : 1; /**< Set when a SOP is followed by an SOP for the same port for a non-passthrough packet. The first detected error associated with bits [5:0] of this register will only be set here. A new bit can be set when the previous reported bit is cleared. */ #else uint64_t np_sop : 1; uint64_t np_eop : 1; uint64_t p_sop : 1; uint64_t p_eop : 1; uint64_t np_dat : 1; uint64_t p_dat : 1; uint64_t reserved_6_63 : 58; #endif } s; struct cvmx_iob_int_sum_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t p_eop : 1; /**< Set when an EOP is followed by an EOP for the same port for a passthrough packet. The first detected error associated with bits [3:0] of this register will only be set here. A new bit can be set when the previous reported bit is cleared. */ uint64_t p_sop : 1; /**< Set when a SOP is followed by an SOP for the same port for a passthrough packet. The first detected error associated with bits [3:0] of this register will only be set here. A new bit can be set when the previous reported bit is cleared. */ uint64_t np_eop : 1; /**< Set when an EOP is followed by an EOP for the same port for a non-passthrough packet. The first detected error associated with bits [3:0] of this register will only be set here. A new bit can be set when the previous reported bit is cleared.
*/ uint64_t np_sop : 1; /**< Set when an SOP is followed by an SOP for the same port for a non-passthrough packet. The first detected error associated with bits [3:0] of this register will only be set here. A new bit can be set when the previously reported bit is cleared. */ #else uint64_t np_sop : 1; uint64_t np_eop : 1; uint64_t p_sop : 1; uint64_t p_eop : 1; uint64_t reserved_4_63 : 60; #endif } cn30xx; struct cvmx_iob_int_sum_cn30xx cn31xx; struct cvmx_iob_int_sum_cn30xx cn38xx; struct cvmx_iob_int_sum_cn30xx cn38xxp2; struct cvmx_iob_int_sum_s cn50xx; struct cvmx_iob_int_sum_s cn52xx; struct cvmx_iob_int_sum_s cn52xxp1; struct cvmx_iob_int_sum_s cn56xx; struct cvmx_iob_int_sum_s cn56xxp1; struct cvmx_iob_int_sum_s cn58xx; struct cvmx_iob_int_sum_s cn58xxp1; } cvmx_iob_int_sum_t; /** * cvmx_iob_n2c_l2c_pri_cnt * * NCB To CMB L2C Priority Counter = NCB to CMB L2C Priority Counter Enable and Timer Value * * Enables and supplies the timeout count for raising the priority of NCB Store/Load accesses to the CMB. */ typedef union { uint64_t u64; struct cvmx_iob_n2c_l2c_pri_cnt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t cnt_enb : 1; /**< Enables the raising of CMB access priority when CNT_VAL is reached. */ uint64_t cnt_val : 15; /**< Number of core clocks to wait before raising the priority for access to CMB. */ #else uint64_t cnt_val : 15; uint64_t cnt_enb : 1; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_iob_n2c_l2c_pri_cnt_s cn38xx; struct cvmx_iob_n2c_l2c_pri_cnt_s cn38xxp2; struct cvmx_iob_n2c_l2c_pri_cnt_s cn52xx; struct cvmx_iob_n2c_l2c_pri_cnt_s cn52xxp1; struct cvmx_iob_n2c_l2c_pri_cnt_s cn56xx; struct cvmx_iob_n2c_l2c_pri_cnt_s cn56xxp1; struct cvmx_iob_n2c_l2c_pri_cnt_s cn58xx; struct cvmx_iob_n2c_l2c_pri_cnt_s cn58xxp1; } cvmx_iob_n2c_l2c_pri_cnt_t; /** * cvmx_iob_n2c_rsp_pri_cnt * * NCB To CMB Response Priority Counter = NCB to CMB Response Priority Counter Enable and Timer Value * * Enables and supplies the timeout count for raising the priority of NCB Response accesses to the CMB. */ typedef union { uint64_t u64; struct cvmx_iob_n2c_rsp_pri_cnt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t cnt_enb : 1; /**< Enables the raising of CMB access priority when CNT_VAL is reached. */ uint64_t cnt_val : 15; /**< Number of core clocks to wait before raising the priority for access to CMB. */ #else uint64_t cnt_val : 15; uint64_t cnt_enb : 1; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_iob_n2c_rsp_pri_cnt_s cn38xx; struct cvmx_iob_n2c_rsp_pri_cnt_s cn38xxp2; struct cvmx_iob_n2c_rsp_pri_cnt_s cn52xx; struct cvmx_iob_n2c_rsp_pri_cnt_s cn52xxp1; struct cvmx_iob_n2c_rsp_pri_cnt_s cn56xx; struct cvmx_iob_n2c_rsp_pri_cnt_s cn56xxp1; struct cvmx_iob_n2c_rsp_pri_cnt_s cn58xx; struct cvmx_iob_n2c_rsp_pri_cnt_s cn58xxp1; } cvmx_iob_n2c_rsp_pri_cnt_t; /** * cvmx_iob_outb_com_pri_cnt * * Commit To NCB Priority Counter = Commit to NCB Priority Counter Enable and Timer Value * * Enables and supplies the timeout count for raising the priority of Commit requests to the Outbound NCB. */ typedef union { uint64_t u64; struct cvmx_iob_outb_com_pri_cnt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t cnt_enb : 1; /**< Enables the raising of NCB access priority when CNT_VAL is reached. */ uint64_t cnt_val : 15; /**< Number of core clocks to wait before raising the priority for access to NCB.
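(Illustrative sketch, not part of the generated definitions: programming this counter so waiting commit requests are promoted after roughly 1000 core clocks. The cvmx_write_csr() accessor and the CVMX_IOB_OUTB_COM_PRI_CNT address macro are assumed to come from the companion cvmx headers.)

    cvmx_iob_outb_com_pri_cnt_t pri;
    pri.u64 = 0;
    pri.s.cnt_val = 1000;  // core clocks to wait before the priority raise
    pri.s.cnt_enb = 1;     // enable the timeout mechanism
    cvmx_write_csr(CVMX_IOB_OUTB_COM_PRI_CNT, pri.u64);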
*/ #else uint64_t cnt_val : 15; uint64_t cnt_enb : 1; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_iob_outb_com_pri_cnt_s cn38xx; struct cvmx_iob_outb_com_pri_cnt_s cn38xxp2; struct cvmx_iob_outb_com_pri_cnt_s cn52xx; struct cvmx_iob_outb_com_pri_cnt_s cn52xxp1; struct cvmx_iob_outb_com_pri_cnt_s cn56xx; struct cvmx_iob_outb_com_pri_cnt_s cn56xxp1; struct cvmx_iob_outb_com_pri_cnt_s cn58xx; struct cvmx_iob_outb_com_pri_cnt_s cn58xxp1; } cvmx_iob_outb_com_pri_cnt_t; /** * cvmx_iob_outb_control_match * * IOB_OUTB_CONTROL_MATCH = IOB Outbound Control Match * * Match pattern for the outbound control to set the OUTB_MATCH_BIT. PASS-2 Register */ typedef union { uint64_t u64; struct cvmx_iob_outb_control_match_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_26_63 : 38; uint64_t mask : 8; /**< Pattern to match on the outbound NCB. */ uint64_t eot : 1; /**< Pattern to match on the outbound NCB. */ uint64_t dst : 8; /**< Pattern to match on the outbound NCB. */ uint64_t src : 9; /**< Pattern to match on the outbound NCB. */ #else uint64_t src : 9; uint64_t dst : 8; uint64_t eot : 1; uint64_t mask : 8; uint64_t reserved_26_63 : 38; #endif } s; struct cvmx_iob_outb_control_match_s cn30xx; struct cvmx_iob_outb_control_match_s cn31xx; struct cvmx_iob_outb_control_match_s cn38xx; struct cvmx_iob_outb_control_match_s cn38xxp2; struct cvmx_iob_outb_control_match_s cn50xx; struct cvmx_iob_outb_control_match_s cn52xx; struct cvmx_iob_outb_control_match_s cn52xxp1; struct cvmx_iob_outb_control_match_s cn56xx; struct cvmx_iob_outb_control_match_s cn56xxp1; struct cvmx_iob_outb_control_match_s cn58xx; struct cvmx_iob_outb_control_match_s cn58xxp1; } cvmx_iob_outb_control_match_t; /** * cvmx_iob_outb_control_match_enb * * IOB_OUTB_CONTROL_MATCH_ENB = IOB Outbound Control Match Enable * * Enables the match of the corresponding bit in the IOB_OUTB_CONTROL_MATCH register. PASS-2 Register */ typedef union { uint64_t u64; struct cvmx_iob_outb_control_match_enb_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_26_63 : 38; uint64_t mask : 8; /**< Enables match of the corresponding MASK bits. */ uint64_t eot : 1; /**< Enables match of the EOT bit. */ uint64_t dst : 8; /**< Enables match of the corresponding DST bits. */ uint64_t src : 9; /**< Enables match of the corresponding SRC bits. */ #else uint64_t src : 9; uint64_t dst : 8; uint64_t eot : 1; uint64_t mask : 8; uint64_t reserved_26_63 : 38; #endif } s; struct cvmx_iob_outb_control_match_enb_s cn30xx; struct cvmx_iob_outb_control_match_enb_s cn31xx; struct cvmx_iob_outb_control_match_enb_s cn38xx; struct cvmx_iob_outb_control_match_enb_s cn38xxp2; struct cvmx_iob_outb_control_match_enb_s cn50xx; struct cvmx_iob_outb_control_match_enb_s cn52xx; struct cvmx_iob_outb_control_match_enb_s cn52xxp1; struct cvmx_iob_outb_control_match_enb_s cn56xx; struct cvmx_iob_outb_control_match_enb_s cn56xxp1; struct cvmx_iob_outb_control_match_enb_s cn58xx; struct cvmx_iob_outb_control_match_enb_s cn58xxp1; } cvmx_iob_outb_control_match_enb_t; /** * cvmx_iob_outb_data_match * * IOB_OUTB_DATA_MATCH = IOB Outbound Data Match * * Match pattern for the outbound data to set the OUTB_MATCH_BIT. PASS-2 Register */ typedef union { uint64_t u64; struct cvmx_iob_outb_data_match_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t data : 64; /**< Pattern to match on the outbound NCB.
*/ #else uint64_t data : 64; #endif } s; struct cvmx_iob_outb_data_match_s cn30xx; struct cvmx_iob_outb_data_match_s cn31xx; struct cvmx_iob_outb_data_match_s cn38xx; struct cvmx_iob_outb_data_match_s cn38xxp2; struct cvmx_iob_outb_data_match_s cn50xx; struct cvmx_iob_outb_data_match_s cn52xx; struct cvmx_iob_outb_data_match_s cn52xxp1; struct cvmx_iob_outb_data_match_s cn56xx; struct cvmx_iob_outb_data_match_s cn56xxp1; struct cvmx_iob_outb_data_match_s cn58xx; struct cvmx_iob_outb_data_match_s cn58xxp1; } cvmx_iob_outb_data_match_t; /** * cvmx_iob_outb_data_match_enb * * IOB_OUTB_DATA_MATCH_ENB = IOB Outbound Data Match Enable * * Enables the match of the corresponding bit in the IOB_OUTB_DATA_MATCH register. PASS-2 Register */ typedef union { uint64_t u64; struct cvmx_iob_outb_data_match_enb_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t data : 64; /**< Per-bit match enable. */ #else uint64_t data : 64; #endif } s; struct cvmx_iob_outb_data_match_enb_s cn30xx; struct cvmx_iob_outb_data_match_enb_s cn31xx; struct cvmx_iob_outb_data_match_enb_s cn38xx; struct cvmx_iob_outb_data_match_enb_s cn38xxp2; struct cvmx_iob_outb_data_match_enb_s cn50xx; struct cvmx_iob_outb_data_match_enb_s cn52xx; struct cvmx_iob_outb_data_match_enb_s cn52xxp1; struct cvmx_iob_outb_data_match_enb_s cn56xx; struct cvmx_iob_outb_data_match_enb_s cn56xxp1; struct cvmx_iob_outb_data_match_enb_s cn58xx; struct cvmx_iob_outb_data_match_enb_s cn58xxp1; } cvmx_iob_outb_data_match_enb_t; /** * cvmx_iob_outb_fpa_pri_cnt * * FPA To NCB Priority Counter = FPA Returns to NCB Priority Counter Enable and Timer Value * * Enables and supplies the timeout count for raising the priority of FPA Return Page requests to the Outbound NCB. */ typedef union { uint64_t u64; struct cvmx_iob_outb_fpa_pri_cnt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t cnt_enb : 1; /**< Enables the raising of NCB access priority when CNT_VAL is reached. */ uint64_t cnt_val : 15; /**< Number of core clocks to wait before raising the priority for access to NCB. */ #else uint64_t cnt_val : 15; uint64_t cnt_enb : 1; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_iob_outb_fpa_pri_cnt_s cn38xx; struct cvmx_iob_outb_fpa_pri_cnt_s cn38xxp2; struct cvmx_iob_outb_fpa_pri_cnt_s cn52xx; struct cvmx_iob_outb_fpa_pri_cnt_s cn52xxp1; struct cvmx_iob_outb_fpa_pri_cnt_s cn56xx; struct cvmx_iob_outb_fpa_pri_cnt_s cn56xxp1; struct cvmx_iob_outb_fpa_pri_cnt_s cn58xx; struct cvmx_iob_outb_fpa_pri_cnt_s cn58xxp1; } cvmx_iob_outb_fpa_pri_cnt_t; /** * cvmx_iob_outb_req_pri_cnt * * Request To NCB Priority Counter = Request to NCB Priority Counter Enable and Timer Value * * Enables and supplies the timeout count for raising the priority of Request transfers to the Outbound NCB. */ typedef union { uint64_t u64; struct cvmx_iob_outb_req_pri_cnt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t cnt_enb : 1; /**< Enables the raising of NCB access priority when CNT_VAL is reached. */ uint64_t cnt_val : 15; /**< Number of core clocks to wait before raising the priority for access to NCB.
*/ #else uint64_t cnt_val : 15; uint64_t cnt_enb : 1; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_iob_outb_req_pri_cnt_s cn38xx; struct cvmx_iob_outb_req_pri_cnt_s cn38xxp2; struct cvmx_iob_outb_req_pri_cnt_s cn52xx; struct cvmx_iob_outb_req_pri_cnt_s cn52xxp1; struct cvmx_iob_outb_req_pri_cnt_s cn56xx; struct cvmx_iob_outb_req_pri_cnt_s cn56xxp1; struct cvmx_iob_outb_req_pri_cnt_s cn58xx; struct cvmx_iob_outb_req_pri_cnt_s cn58xxp1; } cvmx_iob_outb_req_pri_cnt_t; /** * cvmx_iob_p2c_req_pri_cnt * * PKO To CMB Response Priority Counter = PKO to CMB Response Priority Counter Enable and Timer Value * * Enables and supplies the timeout count for raising the priority of PKO Load accesses to the CMB. */ typedef union { uint64_t u64; struct cvmx_iob_p2c_req_pri_cnt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t cnt_enb : 1; /**< Enables the raising of CMB access priority when CNT_VAL is reached. */ uint64_t cnt_val : 15; /**< Number of core clocks to wait before raising the priority for access to CMB. */ #else uint64_t cnt_val : 15; uint64_t cnt_enb : 1; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_iob_p2c_req_pri_cnt_s cn38xx; struct cvmx_iob_p2c_req_pri_cnt_s cn38xxp2; struct cvmx_iob_p2c_req_pri_cnt_s cn52xx; struct cvmx_iob_p2c_req_pri_cnt_s cn52xxp1; struct cvmx_iob_p2c_req_pri_cnt_s cn56xx; struct cvmx_iob_p2c_req_pri_cnt_s cn56xxp1; struct cvmx_iob_p2c_req_pri_cnt_s cn58xx; struct cvmx_iob_p2c_req_pri_cnt_s cn58xxp1; } cvmx_iob_p2c_req_pri_cnt_t; /** * cvmx_iob_pkt_err * * IOB_PKT_ERR = IOB Packet Error Register * * Provides status about the failing packet receive error. This is a PASS-2 register. */ typedef union { uint64_t u64; struct cvmx_iob_pkt_err_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_6_63 : 58; uint64_t port : 6; /**< When an IOB_INT_SUM[3:0] bit is set, this field latches the failing port associated with the IOB_INT_SUM[3:0] bit set. */ #else uint64_t port : 6; uint64_t reserved_6_63 : 58; #endif } s; struct cvmx_iob_pkt_err_s cn30xx; struct cvmx_iob_pkt_err_s cn31xx; struct cvmx_iob_pkt_err_s cn38xx; struct cvmx_iob_pkt_err_s cn38xxp2; struct cvmx_iob_pkt_err_s cn50xx; struct cvmx_iob_pkt_err_s cn52xx; struct cvmx_iob_pkt_err_s cn52xxp1; struct cvmx_iob_pkt_err_s cn56xx; struct cvmx_iob_pkt_err_s cn56xxp1; struct cvmx_iob_pkt_err_s cn58xx; struct cvmx_iob_pkt_err_s cn58xxp1; } cvmx_iob_pkt_err_t; /** * cvmx_iob_to_cmb_credits * * IOB_TO_CMB_CREDITS = IOB To CMB Credits * * Controls the number of reads and writes that may be outstanding to the L2C (via the CMB). */ typedef union { uint64_t u64; struct cvmx_iob_to_cmb_credits_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_9_63 : 55; uint64_t pko_rd : 3; /**< Number of PKO reads that can be out to L2C where 0 == 8-credits. */ uint64_t ncb_rd : 3; /**< Number of NCB reads that can be out to L2C where 0 == 8-credits. */ uint64_t ncb_wr : 3; /**< Number of NCB/PKI writes that can be out to L2C where 0 == 8-credits. */ #else uint64_t ncb_wr : 3; uint64_t ncb_rd : 3; uint64_t pko_rd : 3; uint64_t reserved_9_63 : 55; #endif } s; struct cvmx_iob_to_cmb_credits_s cn52xx; } cvmx_iob_to_cmb_credits_t; /** * cvmx_ipd_1st_mbuff_skip * * IPD_1ST_MBUFF_SKIP = IPD First MBUFF Word Skip Size * * The number of words that the IPD will skip when writing the first MBUFF.
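As a worked example of the SKIP_SZ bound documented below: with IPD_PACKET_MBUFF_SIZE[MB_SIZE] = 256 the skip may range from 0 to 32, since min(32, 256 - 18) = 32, while with MB_SIZE = 40 it is capped at 22. A minimal configuration sketch, not part of the generated definitions (the cvmx_write_csr() accessor and the CVMX_IPD_1ST_MBUFF_SKIP address macro are assumed to come from the companion cvmx headers):

    cvmx_ipd_1st_mbuff_skip_t skip;
    skip.u64 = 0;
    skip.s.skip_sz = 4;  // reserve 4 x 8 bytes ahead of the packet data
    cvmx_write_csr(CVMX_IPD_1ST_MBUFF_SKIP, skip.u64);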
*/ typedef union { uint64_t u64; struct cvmx_ipd_1st_mbuff_skip_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_6_63 : 58; uint64_t skip_sz : 6; /**< The number of 8-byte words from the top of the 1st MBUFF at which the IPD will store the next-pointer. Legal values are 0 to 32, where the MAX value is also limited to: IPD_PACKET_MBUFF_SIZE[MB_SIZE] - 18. */ #else uint64_t skip_sz : 6; uint64_t reserved_6_63 : 58; #endif } s; struct cvmx_ipd_1st_mbuff_skip_s cn30xx; struct cvmx_ipd_1st_mbuff_skip_s cn31xx; struct cvmx_ipd_1st_mbuff_skip_s cn38xx; struct cvmx_ipd_1st_mbuff_skip_s cn38xxp2; struct cvmx_ipd_1st_mbuff_skip_s cn50xx; struct cvmx_ipd_1st_mbuff_skip_s cn52xx; struct cvmx_ipd_1st_mbuff_skip_s cn52xxp1; struct cvmx_ipd_1st_mbuff_skip_s cn56xx; struct cvmx_ipd_1st_mbuff_skip_s cn56xxp1; struct cvmx_ipd_1st_mbuff_skip_s cn58xx; struct cvmx_ipd_1st_mbuff_skip_s cn58xxp1; } cvmx_ipd_1st_mbuff_skip_t; /** * cvmx_ipd_1st_next_ptr_back * * IPD_1st_NEXT_PTR_BACK = IPD First Next Pointer Back Values * * Contains the Back Field for use in creating the Next Pointer Header for the First MBUF */ typedef union { uint64_t u64; struct cvmx_ipd_1st_next_ptr_back_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t back : 4; /**< Used to find head of buffer from the nxt-hdr-ptr. */ #else uint64_t back : 4; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_ipd_1st_next_ptr_back_s cn30xx; struct cvmx_ipd_1st_next_ptr_back_s cn31xx; struct cvmx_ipd_1st_next_ptr_back_s cn38xx; struct cvmx_ipd_1st_next_ptr_back_s cn38xxp2; struct cvmx_ipd_1st_next_ptr_back_s cn50xx; struct cvmx_ipd_1st_next_ptr_back_s cn52xx; struct cvmx_ipd_1st_next_ptr_back_s cn52xxp1; struct cvmx_ipd_1st_next_ptr_back_s cn56xx; struct cvmx_ipd_1st_next_ptr_back_s cn56xxp1; struct cvmx_ipd_1st_next_ptr_back_s cn58xx; struct cvmx_ipd_1st_next_ptr_back_s cn58xxp1; } cvmx_ipd_1st_next_ptr_back_t; /** * cvmx_ipd_2nd_next_ptr_back * * IPD_2nd_NEXT_PTR_BACK = IPD Second Next Pointer Back Value * * Contains the Back Field for use in creating the Next Pointer Header for the Second MBUF */ typedef union { uint64_t u64; struct cvmx_ipd_2nd_next_ptr_back_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t back : 4; /**< Used to find head of buffer from the nxt-hdr-ptr. */ #else uint64_t back : 4; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_ipd_2nd_next_ptr_back_s cn30xx; struct cvmx_ipd_2nd_next_ptr_back_s cn31xx; struct cvmx_ipd_2nd_next_ptr_back_s cn38xx; struct cvmx_ipd_2nd_next_ptr_back_s cn38xxp2; struct cvmx_ipd_2nd_next_ptr_back_s cn50xx; struct cvmx_ipd_2nd_next_ptr_back_s cn52xx; struct cvmx_ipd_2nd_next_ptr_back_s cn52xxp1; struct cvmx_ipd_2nd_next_ptr_back_s cn56xx; struct cvmx_ipd_2nd_next_ptr_back_s cn56xxp1; struct cvmx_ipd_2nd_next_ptr_back_s cn58xx; struct cvmx_ipd_2nd_next_ptr_back_s cn58xxp1; } cvmx_ipd_2nd_next_ptr_back_t; /** * cvmx_ipd_bist_status * * IPD_BIST_STATUS = IPD BIST STATUS * * BIST Status for IPD's Memories. */ typedef union { uint64_t u64; struct cvmx_ipd_bist_status_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_18_63 : 46; uint64_t csr_mem : 1; /**< CSR Register Memory Bist Status. */ uint64_t csr_ncmd : 1; /**< CSR NCB Commands Memory Bist Status. */ uint64_t pwq_wqed : 1; /**< PWQ PIP WQE DONE Memory Bist Status. */ uint64_t pwq_wp1 : 1; /**< PWQ WQE PAGE1 PTR Memory Bist Status. */ uint64_t pwq_pow : 1; /**< PWQ POW MEM Memory Bist Status. */ uint64_t ipq_pbe1 : 1; /**< IPQ PBE1 Memory Bist Status.
*/ uint64_t ipq_pbe0 : 1; /**< IPQ PBE0 Memory Bist Status. */ uint64_t pbm3 : 1; /**< PBM3 Memory Bist Status. */ uint64_t pbm2 : 1; /**< PBM2 Memory Bist Status. */ uint64_t pbm1 : 1; /**< PBM1 Memory Bist Status. */ uint64_t pbm0 : 1; /**< PBM0 Memory Bist Status. */ uint64_t pbm_word : 1; /**< PBM_WORD Memory Bist Status. */ uint64_t pwq1 : 1; /**< PWQ1 Memory Bist Status. */ uint64_t pwq0 : 1; /**< PWQ0 Memory Bist Status. */ uint64_t prc_off : 1; /**< PRC_OFF Memory Bist Status. */ uint64_t ipd_old : 1; /**< IPD_OLD Memory Bist Status. */ uint64_t ipd_new : 1; /**< IPD_NEW Memory Bist Status. */ uint64_t pwp : 1; /**< PWP Memory Bist Status. */ #else uint64_t pwp : 1; uint64_t ipd_new : 1; uint64_t ipd_old : 1; uint64_t prc_off : 1; uint64_t pwq0 : 1; uint64_t pwq1 : 1; uint64_t pbm_word : 1; uint64_t pbm0 : 1; uint64_t pbm1 : 1; uint64_t pbm2 : 1; uint64_t pbm3 : 1; uint64_t ipq_pbe0 : 1; uint64_t ipq_pbe1 : 1; uint64_t pwq_pow : 1; uint64_t pwq_wp1 : 1; uint64_t pwq_wqed : 1; uint64_t csr_ncmd : 1; uint64_t csr_mem : 1; uint64_t reserved_18_63 : 46; #endif } s; struct cvmx_ipd_bist_status_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t pwq_wqed : 1; /**< PWQ PIP WQE DONE Memory Bist Status. */ uint64_t pwq_wp1 : 1; /**< PWQ WQE PAGE1 PTR Memory Bist Status. */ uint64_t pwq_pow : 1; /**< PWQ POW MEM Memory Bist Status. */ uint64_t ipq_pbe1 : 1; /**< IPQ PBE1 Memory Bist Status. */ uint64_t ipq_pbe0 : 1; /**< IPQ PBE0 Memory Bist Status. */ uint64_t pbm3 : 1; /**< PBM3 Memory Bist Status. */ uint64_t pbm2 : 1; /**< PBM2 Memory Bist Status. */ uint64_t pbm1 : 1; /**< PBM1 Memory Bist Status. */ uint64_t pbm0 : 1; /**< PBM0 Memory Bist Status. */ uint64_t pbm_word : 1; /**< PBM_WORD Memory Bist Status. */ uint64_t pwq1 : 1; /**< PWQ1 Memory Bist Status. */ uint64_t pwq0 : 1; /**< PWQ0 Memory Bist Status. */ uint64_t prc_off : 1; /**< PRC_OFF Memory Bist Status. */ uint64_t ipd_old : 1; /**< IPD_OLD Memory Bist Status. */ uint64_t ipd_new : 1; /**< IPD_NEW Memory Bist Status. */ uint64_t pwp : 1; /**< PWP Memory Bist Status. */ #else uint64_t pwp : 1; uint64_t ipd_new : 1; uint64_t ipd_old : 1; uint64_t prc_off : 1; uint64_t pwq0 : 1; uint64_t pwq1 : 1; uint64_t pbm_word : 1; uint64_t pbm0 : 1; uint64_t pbm1 : 1; uint64_t pbm2 : 1; uint64_t pbm3 : 1; uint64_t ipq_pbe0 : 1; uint64_t ipq_pbe1 : 1; uint64_t pwq_pow : 1; uint64_t pwq_wp1 : 1; uint64_t pwq_wqed : 1; uint64_t reserved_16_63 : 48; #endif } cn30xx; struct cvmx_ipd_bist_status_cn30xx cn31xx; struct cvmx_ipd_bist_status_cn30xx cn38xx; struct cvmx_ipd_bist_status_cn30xx cn38xxp2; struct cvmx_ipd_bist_status_cn30xx cn50xx; struct cvmx_ipd_bist_status_s cn52xx; struct cvmx_ipd_bist_status_s cn52xxp1; struct cvmx_ipd_bist_status_s cn56xx; struct cvmx_ipd_bist_status_s cn56xxp1; struct cvmx_ipd_bist_status_cn30xx cn58xx; struct cvmx_ipd_bist_status_cn30xx cn58xxp1; } cvmx_ipd_bist_status_t; /** * cvmx_ipd_bp_prt_red_end * * IPD_BP_PRT_RED_END = IPD Backpressure Port RED Enable * * When IPD applies backpressure to a PORT and the corresponding bit in this register is set, * the RED Unit will drop packets for that port. */ typedef union { uint64_t u64; struct cvmx_ipd_bp_prt_red_end_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_40_63 : 24; uint64_t prt_enb : 40; /**< The port corresponding to the bit position in this field will allow RED to drop packets when port-level backpressure is applied to the port.
Port-level backpressure applied for this RED dropping does not take into consideration the value of IPD_PORTX_BP_PAGE_CNT[BP_ENB]. */ #else uint64_t prt_enb : 40; uint64_t reserved_40_63 : 24; #endif } s; struct cvmx_ipd_bp_prt_red_end_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t prt_enb : 36; /**< The port corresponding to the bit position in this field will allow RED to drop packets when port-level backpressure is applied to the port. Port-level backpressure applied for this RED dropping does not take into consideration the value of IPD_PORTX_BP_PAGE_CNT[BP_ENB]. */ #else uint64_t prt_enb : 36; uint64_t reserved_36_63 : 28; #endif } cn30xx; struct cvmx_ipd_bp_prt_red_end_cn30xx cn31xx; struct cvmx_ipd_bp_prt_red_end_cn30xx cn38xx; struct cvmx_ipd_bp_prt_red_end_cn30xx cn38xxp2; struct cvmx_ipd_bp_prt_red_end_cn30xx cn50xx; struct cvmx_ipd_bp_prt_red_end_s cn52xx; struct cvmx_ipd_bp_prt_red_end_s cn52xxp1; struct cvmx_ipd_bp_prt_red_end_s cn56xx; struct cvmx_ipd_bp_prt_red_end_s cn56xxp1; struct cvmx_ipd_bp_prt_red_end_cn30xx cn58xx; struct cvmx_ipd_bp_prt_red_end_cn30xx cn58xxp1; } cvmx_ipd_bp_prt_red_end_t; /** * cvmx_ipd_clk_count * * IPD_CLK_COUNT = IPD Clock Count * * Counts the number of core clock periods since the de-assertion of reset. */ typedef union { uint64_t u64; struct cvmx_ipd_clk_count_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t clk_cnt : 64; /**< This counter will be zeroed when reset is applied and will increment every rising edge of the core-clock. PASS2 FIELD. */ #else uint64_t clk_cnt : 64; #endif } s; struct cvmx_ipd_clk_count_s cn30xx; struct cvmx_ipd_clk_count_s cn31xx; struct cvmx_ipd_clk_count_s cn38xx; struct cvmx_ipd_clk_count_s cn38xxp2; struct cvmx_ipd_clk_count_s cn50xx; struct cvmx_ipd_clk_count_s cn52xx; struct cvmx_ipd_clk_count_s cn52xxp1; struct cvmx_ipd_clk_count_s cn56xx; struct cvmx_ipd_clk_count_s cn56xxp1; struct cvmx_ipd_clk_count_s cn58xx; struct cvmx_ipd_clk_count_s cn58xxp1; } cvmx_ipd_clk_count_t; /** * cvmx_ipd_ctl_status * * IPD_CTL_STATUS = IPD's Control Status Register * * The IPD's main control and status bits. */ typedef union { uint64_t u64; struct cvmx_ipd_ctl_status_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_15_63 : 49; uint64_t no_wptr : 1; /**< When set '1' the WQE pointers will not be used and the WQE will be located at the front of the packet. */ uint64_t pq_apkt : 1; /**< When set IPD_PORT_QOS_X_CNT WILL be incremented by one for every work queue entry that is sent to POW. */ uint64_t pq_nabuf : 1; /**< When set IPD_PORT_QOS_X_CNT WILL NOT be incremented when IPD allocates a buffer for a packet. */ uint64_t ipd_full : 1; /**< When clear '0' the IPD acts normally. When set '1' the IPD drives the IPD_BUFF_FULL line to the IOB-arbiter, telling it to not give grants to NCB devices sending packet data. */ uint64_t pkt_off : 1; /**< When clear '0' the IPD works normally, buffering the received packet data. When set '1' the IPD will not buffer the received packet data. */ uint64_t len_m8 : 1; /**< Setting of this bit will subtract 8 from the data-length field in the header written to the POW and the top of a MBUFF. OCTEON PASS2 generates a length that includes the length of the data + 8 for the header-field. By setting this bit the 8 for the instr-field will not be included in the length field of the header. NOTE: IPD is compliant with the spec when this field is '1'. */ uint64_t reset : 1; /**< When set '1' causes a reset of the IPD, except RSL.
*/ uint64_t addpkt : 1; /**< When IPD_CTL_STATUS[ADDPKT] is set, IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL] WILL be incremented by one for every work queue entry that is sent to POW. PASS-2 Field. */ uint64_t naddbuf : 1; /**< When IPD_CTL_STATUS[NADDBUF] is set, IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL] WILL NOT be incremented when IPD allocates a buffer for a packet on the port. PASS-2 Field. */ uint64_t pkt_lend : 1; /**< Changes PKT to little endian writes to L2C */ uint64_t wqe_lend : 1; /**< Changes WQE to little endian writes to L2C */ uint64_t pbp_en : 1; /**< Port back pressure enable. When set '1' enables the sending of port level backpressure to the Octane input-ports. Once enabled the sending of port-level-backpressure cannot be disabled by changing the value of this bit. */ cvmx_ipd_mode_t opc_mode : 2; /**< 0 ==> All packet data (and next buffer pointers) is written through to memory. 1 ==> All packet data (and next buffer pointers) is written into the cache. 2 ==> The first aligned cache block holding the packet data (and initial next buffer pointer) is written to the L2 cache, all remaining cache blocks are not written to the L2 cache. 3 ==> The first two aligned cache blocks holding the packet data (and initial next buffer pointer) are written to the L2 cache, all remaining cache blocks are not written to the L2 cache. */ uint64_t ipd_en : 1; /**< When set '1' enables the operation of the IPD. When clear '0', the IPD will appear to the IOB-arbiter to be applying backpressure; this causes the IOB-Arbiter to not send grants to NCB devices requesting to send packet data to the IPD. */ #else uint64_t ipd_en : 1; cvmx_ipd_mode_t opc_mode : 2; uint64_t pbp_en : 1; uint64_t wqe_lend : 1; uint64_t pkt_lend : 1; uint64_t naddbuf : 1; uint64_t addpkt : 1; uint64_t reset : 1; uint64_t len_m8 : 1; uint64_t pkt_off : 1; uint64_t ipd_full : 1; uint64_t pq_nabuf : 1; uint64_t pq_apkt : 1; uint64_t no_wptr : 1; uint64_t reserved_15_63 : 49; #endif } s; struct cvmx_ipd_ctl_status_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_10_63 : 54; uint64_t len_m8 : 1; /**< Setting of this bit will subtract 8 from the data-length field in the header written to the POW and the top of a MBUFF. OCTEON generates a length that includes the length of the data + 8 for the header-field. By setting this bit the 8 for the instr-field will not be included in the length field of the header. NOTE: IPD is compliant with the spec when this field is '1'. */ uint64_t reset : 1; /**< When set '1' causes a reset of the IPD, except RSL. */ uint64_t addpkt : 1; /**< When IPD_CTL_STATUS[ADDPKT] is set, IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL] WILL be incremented by one for every work queue entry that is sent to POW. */ uint64_t naddbuf : 1; /**< When IPD_CTL_STATUS[NADDBUF] is set, IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL] WILL NOT be incremented when IPD allocates a buffer for a packet on the port. */ uint64_t pkt_lend : 1; /**< Changes PKT to little endian writes to L2C */ uint64_t wqe_lend : 1; /**< Changes WQE to little endian writes to L2C */ uint64_t pbp_en : 1; /**< Port back pressure enable. When set '1' enables the sending of port level backpressure to the Octane input-ports. Once enabled the sending of port-level-backpressure cannot be disabled by changing the value of this bit. GMXX_INF_MODE[EN] must be set to '1' for each packet interface which requires port back pressure prior to setting PBP_EN to '1'.
*/ cvmx_ipd_mode_t opc_mode : 2; /**< 0 ==> All packet data (and next buffer pointers) is written through to memory. 1 ==> All packet data (and next buffer pointers) is written into the cache. 2 ==> The first aligned cache block holding the packet data (and initial next buffer pointer) is written to the L2 cache, all remaining cache blocks are not written to the L2 cache. 3 ==> The first two aligned cache blocks holding the packet data (and initial next buffer pointer) are written to the L2 cache, all remaining cache blocks are not written to the L2 cache. */ uint64_t ipd_en : 1; /**< When set '1' enables the operation of the IPD. */ #else uint64_t ipd_en : 1; cvmx_ipd_mode_t opc_mode : 2; uint64_t pbp_en : 1; uint64_t wqe_lend : 1; uint64_t pkt_lend : 1; uint64_t naddbuf : 1; uint64_t addpkt : 1; uint64_t reset : 1; uint64_t len_m8 : 1; uint64_t reserved_10_63 : 54; #endif } cn30xx; struct cvmx_ipd_ctl_status_cn30xx cn31xx; struct cvmx_ipd_ctl_status_cn30xx cn38xx; struct cvmx_ipd_ctl_status_cn38xxp2 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_9_63 : 55; uint64_t reset : 1; /**< When set '1' causes a reset of the IPD, except RSL. */ uint64_t addpkt : 1; /**< When IPD_CTL_STATUS[ADDPKT] is set, IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL] WILL be incremented by one for every work queue entry that is sent to POW. PASS-2 Field. */ uint64_t naddbuf : 1; /**< When IPD_CTL_STATUS[NADDBUF] is set, IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL] WILL NOT be incremented when IPD allocates a buffer for a packet on the port. PASS-2 Field. */ uint64_t pkt_lend : 1; /**< Changes PKT to little endian writes to L2C */ uint64_t wqe_lend : 1; /**< Changes WQE to little endian writes to L2C */ uint64_t pbp_en : 1; /**< Port back pressure enable. When set '1' enables the sending of port level backpressure to the Octane input-ports. Once enabled the sending of port-level-backpressure cannot be disabled by changing the value of this bit. */ cvmx_ipd_mode_t opc_mode : 2; /**< 0 ==> All packet data (and next buffer pointers) is written through to memory. 1 ==> All packet data (and next buffer pointers) is written into the cache. 2 ==> The first aligned cache block holding the packet data (and initial next buffer pointer) is written to the L2 cache, all remaining cache blocks are not written to the L2 cache. 3 ==> The first two aligned cache blocks holding the packet data (and initial next buffer pointer) are written to the L2 cache, all remaining cache blocks are not written to the L2 cache. */ uint64_t ipd_en : 1; /**< When set '1' enables the operation of the IPD. */ #else uint64_t ipd_en : 1; cvmx_ipd_mode_t opc_mode : 2; uint64_t pbp_en : 1; uint64_t wqe_lend : 1; uint64_t pkt_lend : 1; uint64_t naddbuf : 1; uint64_t addpkt : 1; uint64_t reset : 1; uint64_t reserved_9_63 : 55; #endif } cn38xxp2; struct cvmx_ipd_ctl_status_s cn50xx; struct cvmx_ipd_ctl_status_s cn52xx; struct cvmx_ipd_ctl_status_s cn52xxp1; struct cvmx_ipd_ctl_status_s cn56xx; struct cvmx_ipd_ctl_status_s cn56xxp1; struct cvmx_ipd_ctl_status_cn58xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t ipd_full : 1; /**< When clear '0' the IPD acts normally. When set '1' the IPD drives the IPD_BUFF_FULL line to the IOB-arbiter, telling it to not give grants to NCB devices sending packet data. */ uint64_t pkt_off : 1; /**< When clear '0' the IPD works normally, buffering the received packet data. When set '1' the IPD will not buffer the received packet data.
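(Illustrative bring-up sketch, not part of the generated definitions: a typical final step of IPD initialization using the fields defined above, writing all packet data through to memory. The cvmx_read_csr()/cvmx_write_csr() accessors and the CVMX_IPD_CTL_STATUS address macro are assumed to come from the companion cvmx headers.)

    cvmx_ipd_ctl_status_t ctl;
    ctl.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
    ctl.s.opc_mode = 0;  // write packet data through to memory
    ctl.s.pbp_en = 1;    // allow port-level backpressure
    ctl.s.ipd_en = 1;    // finally, enable the IPD
    cvmx_write_csr(CVMX_IPD_CTL_STATUS, ctl.u64);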
*/ uint64_t len_m8 : 1; /**< Setting of this bit will subtract 8 from the data-length field in the header written to the POW and the top of a MBUFF. OCTEON PASS2 generates a length that includes the length of the data + 8 for the header-field. By setting this bit the 8 for the instr-field will not be included in the length field of the header. NOTE: IPD is compliant with the spec when this field is '1'. */ uint64_t reset : 1; /**< When set '1' causes a reset of the IPD, except RSL. */ uint64_t addpkt : 1; /**< When IPD_CTL_STATUS[ADDPKT] is set, IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL] WILL be incremented by one for every work queue entry that is sent to POW. PASS-2 Field. */ uint64_t naddbuf : 1; /**< When IPD_CTL_STATUS[NADDBUF] is set, IPD_PORT_BP_COUNTERS_PAIR(port)[CNT_VAL] WILL NOT be incremented when IPD allocates a buffer for a packet on the port. PASS-2 Field. */ uint64_t pkt_lend : 1; /**< Changes PKT to little endian writes to L2C */ uint64_t wqe_lend : 1; /**< Changes WQE to little endian writes to L2C */ uint64_t pbp_en : 1; /**< Port back pressure enable. When set '1' enables the sending of port level backpressure to the Octane input-ports. Once enabled the sending of port-level-backpressure cannot be disabled by changing the value of this bit. */ cvmx_ipd_mode_t opc_mode : 2; /**< 0 ==> All packet data (and next buffer pointers) is written through to memory. 1 ==> All packet data (and next buffer pointers) is written into the cache. 2 ==> The first aligned cache block holding the packet data (and initial next buffer pointer) is written to the L2 cache, all remaining cache blocks are not written to the L2 cache. 3 ==> The first two aligned cache blocks holding the packet data (and initial next buffer pointer) are written to the L2 cache, all remaining cache blocks are not written to the L2 cache. */ uint64_t ipd_en : 1; /**< When set '1' enables the operation of the IPD. When clear '0', the IPD will appear to the IOB-arbiter to be applying backpressure; this causes the IOB-Arbiter to not send grants to NCB devices requesting to send packet data to the IPD. */ #else uint64_t ipd_en : 1; cvmx_ipd_mode_t opc_mode : 2; uint64_t pbp_en : 1; uint64_t wqe_lend : 1; uint64_t pkt_lend : 1; uint64_t naddbuf : 1; uint64_t addpkt : 1; uint64_t reset : 1; uint64_t len_m8 : 1; uint64_t pkt_off : 1; uint64_t ipd_full : 1; uint64_t reserved_12_63 : 52; #endif } cn58xx; struct cvmx_ipd_ctl_status_cn58xx cn58xxp1; } cvmx_ipd_ctl_status_t; /** * cvmx_ipd_int_enb * * IPD_INTERRUPT_ENB = IPD Interrupt Enable Register * * Used to enable the various interrupting conditions of IPD */ typedef union { uint64_t u64; struct cvmx_ipd_int_enb_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t pq_sub : 1; /**< Allows an interrupt to be sent when the corresponding bit in the IPD_INT_SUM is set. */ uint64_t pq_add : 1; /**< Allows an interrupt to be sent when the corresponding bit in the IPD_INT_SUM is set. */ uint64_t bc_ovr : 1; /**< Allows an interrupt to be sent when the corresponding bit in the IPD_INT_SUM is set. This is a PASS-3 Field. */ uint64_t d_coll : 1; /**< Allows an interrupt to be sent when the corresponding bit in the IPD_INT_SUM is set. This is a PASS-3 Field. */ uint64_t c_coll : 1; /**< Allows an interrupt to be sent when the corresponding bit in the IPD_INT_SUM is set. This is a PASS-3 Field. */ uint64_t cc_ovr : 1; /**< Allows an interrupt to be sent when the corresponding bit in the IPD_INT_SUM is set. This is a PASS-3 Field.
*/ uint64_t dc_ovr : 1; /**< Allows an interrupt to be sent when the corresponding bit in the IPD_INT_SUM is set. This is a PASS-3 Field. */ uint64_t bp_sub : 1; /**< Enables interrupts when a backpressure subtract has an illegal value. */ uint64_t prc_par3 : 1; /**< Enable parity error interrupts for bits [127:96] of the PBM memory. */ uint64_t prc_par2 : 1; /**< Enable parity error interrupts for bits [95:64] of the PBM memory. */ uint64_t prc_par1 : 1; /**< Enable parity error interrupts for bits [63:32] of the PBM memory. */ uint64_t prc_par0 : 1; /**< Enable parity error interrupts for bits [31:0] of the PBM memory. */ #else uint64_t prc_par0 : 1; uint64_t prc_par1 : 1; uint64_t prc_par2 : 1; uint64_t prc_par3 : 1; uint64_t bp_sub : 1; uint64_t dc_ovr : 1; uint64_t cc_ovr : 1; uint64_t c_coll : 1; uint64_t d_coll : 1; uint64_t bc_ovr : 1; uint64_t pq_add : 1; uint64_t pq_sub : 1; uint64_t reserved_12_63 : 52; #endif } s; struct cvmx_ipd_int_enb_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t bp_sub : 1; /**< Enables interrupts when a backpressure subtract has an illegal value. */ uint64_t prc_par3 : 1; /**< Enable parity error interrupts for bits [127:96] of the PBM memory. */ uint64_t prc_par2 : 1; /**< Enable parity error interrupts for bits [95:64] of the PBM memory. */ uint64_t prc_par1 : 1; /**< Enable parity error interrupts for bits [63:32] of the PBM memory. */ uint64_t prc_par0 : 1; /**< Enable parity error interrupts for bits [31:0] of the PBM memory. */ #else uint64_t prc_par0 : 1; uint64_t prc_par1 : 1; uint64_t prc_par2 : 1; uint64_t prc_par3 : 1; uint64_t bp_sub : 1; uint64_t reserved_5_63 : 59; #endif } cn30xx; struct cvmx_ipd_int_enb_cn30xx cn31xx; struct cvmx_ipd_int_enb_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_10_63 : 54; uint64_t bc_ovr : 1; /**< Allows an interrupt to be sent when the corresponding bit in the IPD_INT_SUM is set. This is a PASS-3 Field. */ uint64_t d_coll : 1; /**< Allows an interrupt to be sent when the corresponding bit in the IPD_INT_SUM is set. This is a PASS-3 Field. */ uint64_t c_coll : 1; /**< Allows an interrupt to be sent when the corresponding bit in the IPD_INT_SUM is set. This is a PASS-3 Field. */ uint64_t cc_ovr : 1; /**< Allows an interrupt to be sent when the corresponding bit in the IPD_INT_SUM is set. This is a PASS-3 Field. */ uint64_t dc_ovr : 1; /**< Allows an interrupt to be sent when the corresponding bit in the IPD_INT_SUM is set. This is a PASS-3 Field. */ uint64_t bp_sub : 1; /**< Enables interrupts when a backpressure subtract has an illegal value. */ uint64_t prc_par3 : 1; /**< Enable parity error interrupts for bits [127:96] of the PBM memory. */ uint64_t prc_par2 : 1; /**< Enable parity error interrupts for bits [95:64] of the PBM memory. */ uint64_t prc_par1 : 1; /**< Enable parity error interrupts for bits [63:32] of the PBM memory. */ uint64_t prc_par0 : 1; /**< Enable parity error interrupts for bits [31:0] of the PBM memory. 
*/ #else uint64_t prc_par0 : 1; uint64_t prc_par1 : 1; uint64_t prc_par2 : 1; uint64_t prc_par3 : 1; uint64_t bp_sub : 1; uint64_t dc_ovr : 1; uint64_t cc_ovr : 1; uint64_t c_coll : 1; uint64_t d_coll : 1; uint64_t bc_ovr : 1; uint64_t reserved_10_63 : 54; #endif } cn38xx; struct cvmx_ipd_int_enb_cn30xx cn38xxp2; struct cvmx_ipd_int_enb_cn38xx cn50xx; struct cvmx_ipd_int_enb_s cn52xx; struct cvmx_ipd_int_enb_s cn52xxp1; struct cvmx_ipd_int_enb_s cn56xx; struct cvmx_ipd_int_enb_s cn56xxp1; struct cvmx_ipd_int_enb_cn38xx cn58xx; struct cvmx_ipd_int_enb_cn38xx cn58xxp1; } cvmx_ipd_int_enb_t; /** * cvmx_ipd_int_sum * * IPD_INTERRUPT_SUM = IPD Interrupt Summary Register * * Set when an interrupt condition occurs, write '1' to clear. */ typedef union { uint64_t u64; struct cvmx_ipd_int_sum_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t pq_sub : 1; /**< Set when a port-qos does a subtract to the count that causes the counter to wrap. */ uint64_t pq_add : 1; /**< Set when a port-qos does an add to the count that causes the counter to wrap. */ uint64_t bc_ovr : 1; /**< Set when the byte-count to send to IOB overflows. This is a PASS-3 Field. */ uint64_t d_coll : 1; /**< Set when the packet/WQE data to be sent to IOB collides. This is a PASS-3 Field. */ uint64_t c_coll : 1; /**< Set when the packet/WQE commands to be sent to IOB collides. This is a PASS-3 Field. */ uint64_t cc_ovr : 1; /**< Set when the command credits to the IOB overflow. This is a PASS-3 Field. */ uint64_t dc_ovr : 1; /**< Set when the data credits to the IOB overflow. This is a PASS-3 Field. */ uint64_t bp_sub : 1; /**< Set when a backpressure subtract is done with a supplied illegal value. */ uint64_t prc_par3 : 1; /**< Set when a parity error is detected for bits [127:96] of the PBM memory. */ uint64_t prc_par2 : 1; /**< Set when a parity error is detected for bits [95:64] of the PBM memory. */ uint64_t prc_par1 : 1; /**< Set when a parity error is detected for bits [63:32] of the PBM memory. */ uint64_t prc_par0 : 1; /**< Set when a parity error is detected for bits [31:0] of the PBM memory. */ #else uint64_t prc_par0 : 1; uint64_t prc_par1 : 1; uint64_t prc_par2 : 1; uint64_t prc_par3 : 1; uint64_t bp_sub : 1; uint64_t dc_ovr : 1; uint64_t cc_ovr : 1; uint64_t c_coll : 1; uint64_t d_coll : 1; uint64_t bc_ovr : 1; uint64_t pq_add : 1; uint64_t pq_sub : 1; uint64_t reserved_12_63 : 52; #endif } s; struct cvmx_ipd_int_sum_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t bp_sub : 1; /**< Set when a backpressure subtract is done with a supplied illegal value. */ uint64_t prc_par3 : 1; /**< Set when a parity error is detected for bits [127:96] of the PBM memory. */ uint64_t prc_par2 : 1; /**< Set when a parity error is detected for bits [95:64] of the PBM memory. */ uint64_t prc_par1 : 1; /**< Set when a parity error is detected for bits [63:32] of the PBM memory. */ uint64_t prc_par0 : 1; /**< Set when a parity error is detected for bits [31:0] of the PBM memory. */ #else uint64_t prc_par0 : 1; uint64_t prc_par1 : 1; uint64_t prc_par2 : 1; uint64_t prc_par3 : 1; uint64_t bp_sub : 1; uint64_t reserved_5_63 : 59; #endif } cn30xx; struct cvmx_ipd_int_sum_cn30xx cn31xx; struct cvmx_ipd_int_sum_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_10_63 : 54; uint64_t bc_ovr : 1; /**< Set when the byte-count to send to IOB overflows. This is a PASS-3 Field. */ uint64_t d_coll : 1; /**< Set when the packet/WQE data to be sent to IOB collides. This is a PASS-3 Field.
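(Illustrative sketch, not part of the generated definitions: since these summary bits are write-1-to-clear, a handler typically reads the register and writes the observed value back to acknowledge it. The cvmx_read_csr()/cvmx_write_csr() accessors and the CVMX_IPD_INT_SUM address macro are assumed to come from the companion cvmx headers; handle_pbm_parity_error() is a hypothetical handler.)

    cvmx_ipd_int_sum_t sum;
    sum.u64 = cvmx_read_csr(CVMX_IPD_INT_SUM);
    if (sum.s.prc_par0 || sum.s.prc_par1 || sum.s.prc_par2 || sum.s.prc_par3)
        handle_pbm_parity_error();              // hypothetical
    cvmx_write_csr(CVMX_IPD_INT_SUM, sum.u64);  // write 1s back to clear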
*/ uint64_t c_coll : 1; /**< Set when the packet/WQE commands to be sent to IOB collides. This is a PASS-3 Field. */ uint64_t cc_ovr : 1; /**< Set when the command credits to the IOB overflow. This is a PASS-3 Field. */ uint64_t dc_ovr : 1; /**< Set when the data credits to the IOB overflow. This is a PASS-3 Field. */ uint64_t bp_sub : 1; /**< Set when a backpressure subtract is done with a supplied illegal value. */ uint64_t prc_par3 : 1; /**< Set when a parity error is detected for bits [127:96] of the PBM memory. */ uint64_t prc_par2 : 1; /**< Set when a parity error is detected for bits [95:64] of the PBM memory. */ uint64_t prc_par1 : 1; /**< Set when a parity error is detected for bits [63:32] of the PBM memory. */ uint64_t prc_par0 : 1; /**< Set when a parity error is detected for bits [31:0] of the PBM memory. */ #else uint64_t prc_par0 : 1; uint64_t prc_par1 : 1; uint64_t prc_par2 : 1; uint64_t prc_par3 : 1; uint64_t bp_sub : 1; uint64_t dc_ovr : 1; uint64_t cc_ovr : 1; uint64_t c_coll : 1; uint64_t d_coll : 1; uint64_t bc_ovr : 1; uint64_t reserved_10_63 : 54; #endif } cn38xx; struct cvmx_ipd_int_sum_cn30xx cn38xxp2; struct cvmx_ipd_int_sum_cn38xx cn50xx; struct cvmx_ipd_int_sum_s cn52xx; struct cvmx_ipd_int_sum_s cn52xxp1; struct cvmx_ipd_int_sum_s cn56xx; struct cvmx_ipd_int_sum_s cn56xxp1; struct cvmx_ipd_int_sum_cn38xx cn58xx; struct cvmx_ipd_int_sum_cn38xx cn58xxp1; } cvmx_ipd_int_sum_t; /** * cvmx_ipd_not_1st_mbuff_skip * * IPD_NOT_1ST_MBUFF_SKIP = IPD Not First MBUFF Word Skip Size * * The number of words that the IPD will skip when writing any MBUFF that is not the first. */ typedef union { uint64_t u64; struct cvmx_ipd_not_1st_mbuff_skip_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_6_63 : 58; uint64_t skip_sz : 6; /**< The number of 8-byte words from the top of any MBUFF, that is not the 1st MBUFF, at which the IPD will write the next-pointer. Legal values are 0 to 32, where the MAX value is also limited to: IPD_PACKET_MBUFF_SIZE[MB_SIZE] - 16. */ #else uint64_t skip_sz : 6; uint64_t reserved_6_63 : 58; #endif } s; struct cvmx_ipd_not_1st_mbuff_skip_s cn30xx; struct cvmx_ipd_not_1st_mbuff_skip_s cn31xx; struct cvmx_ipd_not_1st_mbuff_skip_s cn38xx; struct cvmx_ipd_not_1st_mbuff_skip_s cn38xxp2; struct cvmx_ipd_not_1st_mbuff_skip_s cn50xx; struct cvmx_ipd_not_1st_mbuff_skip_s cn52xx; struct cvmx_ipd_not_1st_mbuff_skip_s cn52xxp1; struct cvmx_ipd_not_1st_mbuff_skip_s cn56xx; struct cvmx_ipd_not_1st_mbuff_skip_s cn56xxp1; struct cvmx_ipd_not_1st_mbuff_skip_s cn58xx; struct cvmx_ipd_not_1st_mbuff_skip_s cn58xxp1; } cvmx_ipd_not_1st_mbuff_skip_t; /** * cvmx_ipd_packet_mbuff_size * * IPD_PACKET_MBUFF_SIZE = IPD's PACKET MBUF Size In Words * * The number of words in a MBUFF used for packet data store. */ typedef union { uint64_t u64; struct cvmx_ipd_packet_mbuff_size_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t mb_size : 12; /**< The number of 8-byte words in a MBUF. This must be a number in the range of 32 to 2048. This is also the size of the FPA's Queue-0 Free-Page.
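(Illustrative sketch, not part of the generated definitions: MB_SIZE is expressed in 8-byte words and must agree with the FPA Queue-0 free-page size, so a 2048-byte packet buffer corresponds to 2048 / 8 = 256 words. The cvmx_write_csr() accessor and the CVMX_IPD_PACKET_MBUFF_SIZE address macro are assumed to come from the companion cvmx headers.)

    cvmx_ipd_packet_mbuff_size_t mbuff;
    mbuff.u64 = 0;
    mbuff.s.mb_size = 2048 / 8;  // 256 words; legal range is 32 to 2048
    cvmx_write_csr(CVMX_IPD_PACKET_MBUFF_SIZE, mbuff.u64);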
*/ #else uint64_t mb_size : 12; uint64_t reserved_12_63 : 52; #endif } s; struct cvmx_ipd_packet_mbuff_size_s cn30xx; struct cvmx_ipd_packet_mbuff_size_s cn31xx; struct cvmx_ipd_packet_mbuff_size_s cn38xx; struct cvmx_ipd_packet_mbuff_size_s cn38xxp2; struct cvmx_ipd_packet_mbuff_size_s cn50xx; struct cvmx_ipd_packet_mbuff_size_s cn52xx; struct cvmx_ipd_packet_mbuff_size_s cn52xxp1; struct cvmx_ipd_packet_mbuff_size_s cn56xx; struct cvmx_ipd_packet_mbuff_size_s cn56xxp1; struct cvmx_ipd_packet_mbuff_size_s cn58xx; struct cvmx_ipd_packet_mbuff_size_s cn58xxp1; } cvmx_ipd_packet_mbuff_size_t; /** * cvmx_ipd_pkt_ptr_valid * * IPD_PKT_PTR_VALID = IPD's Packet Pointer Valid * * The value of the packet-pointer that has been fetched and is held in the valid register. */ typedef union { uint64_t u64; struct cvmx_ipd_pkt_ptr_valid_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_29_63 : 35; uint64_t ptr : 29; /**< Pointer value. */ #else uint64_t ptr : 29; uint64_t reserved_29_63 : 35; #endif } s; struct cvmx_ipd_pkt_ptr_valid_s cn30xx; struct cvmx_ipd_pkt_ptr_valid_s cn31xx; struct cvmx_ipd_pkt_ptr_valid_s cn38xx; struct cvmx_ipd_pkt_ptr_valid_s cn50xx; struct cvmx_ipd_pkt_ptr_valid_s cn52xx; struct cvmx_ipd_pkt_ptr_valid_s cn52xxp1; struct cvmx_ipd_pkt_ptr_valid_s cn56xx; struct cvmx_ipd_pkt_ptr_valid_s cn56xxp1; struct cvmx_ipd_pkt_ptr_valid_s cn58xx; struct cvmx_ipd_pkt_ptr_valid_s cn58xxp1; } cvmx_ipd_pkt_ptr_valid_t; /** * cvmx_ipd_port#_bp_page_cnt * * IPD_PORTX_BP_PAGE_CNT = IPD Port Backpressure Page Count * * The number of pages in use by the port that, when exceeded, causes backpressure to be applied to the port. */ typedef union { uint64_t u64; struct cvmx_ipd_portx_bp_page_cnt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_18_63 : 46; uint64_t bp_enb : 1; /**< When set '1' BP will be applied, if '0' BP will not be applied to port. */ uint64_t page_cnt : 17; /**< The number of page pointers assigned to the port that, when exceeded, will cause back-pressure to be applied to the port. This value is in 256 page-pointer increments, (i.e. 0 = 0-page-ptrs, 1 = 256-page-ptrs,..) */ #else uint64_t page_cnt : 17; uint64_t bp_enb : 1; uint64_t reserved_18_63 : 46; #endif } s; struct cvmx_ipd_portx_bp_page_cnt_s cn30xx; struct cvmx_ipd_portx_bp_page_cnt_s cn31xx; struct cvmx_ipd_portx_bp_page_cnt_s cn38xx; struct cvmx_ipd_portx_bp_page_cnt_s cn38xxp2; struct cvmx_ipd_portx_bp_page_cnt_s cn50xx; struct cvmx_ipd_portx_bp_page_cnt_s cn52xx; struct cvmx_ipd_portx_bp_page_cnt_s cn52xxp1; struct cvmx_ipd_portx_bp_page_cnt_s cn56xx; struct cvmx_ipd_portx_bp_page_cnt_s cn56xxp1; struct cvmx_ipd_portx_bp_page_cnt_s cn58xx; struct cvmx_ipd_portx_bp_page_cnt_s cn58xxp1; } cvmx_ipd_portx_bp_page_cnt_t; /** * cvmx_ipd_port#_bp_page_cnt2 * * IPD_PORTX_BP_PAGE_CNT2 = IPD Port Backpressure Page Count * * The number of pages in use by the port that, when exceeded, causes backpressure to be applied to the port. */ typedef union { uint64_t u64; struct cvmx_ipd_portx_bp_page_cnt2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_18_63 : 46; uint64_t bp_enb : 1; /**< When set '1' BP will be applied, if '0' BP will not be applied to port. */ uint64_t page_cnt : 17; /**< The number of page pointers assigned to the port that, when exceeded, will cause back-pressure to be applied to the port. This value is in 256 page-pointer increments, (i.e. 0 = 0-page-ptrs, 1 = 256-page-ptrs,..)
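(Illustrative sketch, not part of the generated definitions: because PAGE_CNT is expressed in 256 page-pointer increments, a desired threshold of 4096 pages is programmed as 4096 / 256 = 16. This sketch programs the analogous IPD_PORTX_BP_PAGE_CNT register for port 0; the cvmx_write_csr() accessor and the CVMX_IPD_PORTX_BP_PAGE_CNT(port) address macro are assumed to come from the companion cvmx headers.)

    cvmx_ipd_portx_bp_page_cnt_t bp;
    bp.u64 = 0;
    bp.s.page_cnt = 4096 / 256;  // back-pressure the port beyond ~4096 pages
    bp.s.bp_enb = 1;             // enable backpressure for this port
    cvmx_write_csr(CVMX_IPD_PORTX_BP_PAGE_CNT(0), bp.u64);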
*/ #else uint64_t page_cnt : 17; uint64_t bp_enb : 1; uint64_t reserved_18_63 : 46; #endif } s; struct cvmx_ipd_portx_bp_page_cnt2_s cn52xx; struct cvmx_ipd_portx_bp_page_cnt2_s cn52xxp1; struct cvmx_ipd_portx_bp_page_cnt2_s cn56xx; struct cvmx_ipd_portx_bp_page_cnt2_s cn56xxp1; } cvmx_ipd_portx_bp_page_cnt2_t; /** * cvmx_ipd_port_bp_counters2_pair# * * IPD_PORT_BP_COUNTERS2_PAIRX = Per-port MBUF counters used to generate Back Pressure. * */ typedef union { uint64_t u64; struct cvmx_ipd_port_bp_counters2_pairx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_25_63 : 39; uint64_t cnt_val : 25; /**< Number of MBUFs being used by data on this port. */ #else uint64_t cnt_val : 25; uint64_t reserved_25_63 : 39; #endif } s; struct cvmx_ipd_port_bp_counters2_pairx_s cn52xx; struct cvmx_ipd_port_bp_counters2_pairx_s cn52xxp1; struct cvmx_ipd_port_bp_counters2_pairx_s cn56xx; struct cvmx_ipd_port_bp_counters2_pairx_s cn56xxp1; } cvmx_ipd_port_bp_counters2_pairx_t; /** * cvmx_ipd_port_bp_counters_pair# * * IPD_PORT_BP_COUNTERS_PAIRX = Per-port MBUF counters used to generate Back Pressure. * */ typedef union { uint64_t u64; struct cvmx_ipd_port_bp_counters_pairx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_25_63 : 39; uint64_t cnt_val : 25; /**< Number of MBUFs being used by data on this port. */ #else uint64_t cnt_val : 25; uint64_t reserved_25_63 : 39; #endif } s; struct cvmx_ipd_port_bp_counters_pairx_s cn30xx; struct cvmx_ipd_port_bp_counters_pairx_s cn31xx; struct cvmx_ipd_port_bp_counters_pairx_s cn38xx; struct cvmx_ipd_port_bp_counters_pairx_s cn38xxp2; struct cvmx_ipd_port_bp_counters_pairx_s cn50xx; struct cvmx_ipd_port_bp_counters_pairx_s cn52xx; struct cvmx_ipd_port_bp_counters_pairx_s cn52xxp1; struct cvmx_ipd_port_bp_counters_pairx_s cn56xx; struct cvmx_ipd_port_bp_counters_pairx_s cn56xxp1; struct cvmx_ipd_port_bp_counters_pairx_s cn58xx; struct cvmx_ipd_port_bp_counters_pairx_s cn58xxp1; } cvmx_ipd_port_bp_counters_pairx_t; /** * cvmx_ipd_port_qos_#_cnt * * IPD_PORT_QOS_X_CNT = IPD PortX QOS-0 Count * * A counter per port/QOS. Counters are organized in sequence: the first 8 counters (0-7) belong to Port-0 * QOS 0-7 respectively, followed by Port-1 at (8-15), etc * Ports 0-3, 36-39 */ typedef union { uint64_t u64; struct cvmx_ipd_port_qos_x_cnt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t wmark : 32; /**< When the field CNT after being modified is equal to or crosses this value (i.e. the value was greater than and becomes less than, or was less than and becomes greater than) the corresponding bit in IPD_PORT_QOS_INTX is set. */ uint64_t cnt : 32; /**< The packet related count that is incremented as specified by IPD_SUB_PORT_QOS_CNT. */ #else uint64_t cnt : 32; uint64_t wmark : 32; #endif } s; struct cvmx_ipd_port_qos_x_cnt_s cn52xx; struct cvmx_ipd_port_qos_x_cnt_s cn52xxp1; struct cvmx_ipd_port_qos_x_cnt_s cn56xx; struct cvmx_ipd_port_qos_x_cnt_s cn56xxp1; } cvmx_ipd_port_qos_x_cnt_t; /** * cvmx_ipd_port_qos_int# * * IPD_PORT_QOS_INTX = IPD PORT-QOS Interrupt * * See the description for IPD_PORT_QOS_X_CNT * * 0=P0-7; 1=P8-15; 2=P16-23; 3=P24-31; 4=P32-39; 5=P40-47; 6=P48-55; 7=P56-63 * Only ports used are: P0-3, and P32-39. Therefore only IPD_PORT_QOS_INT0[31:0] and IPD_PORT_QOS_INT4[63:0] exist. * Unused registers and register fields are reserved. */ typedef union { uint64_t u64; struct cvmx_ipd_port_qos_intx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t intr : 64; /**< Interrupt bits.
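(Illustrative sketch, not part of the generated definitions: arming the per-port/QOS watermark described above for port 0, QOS 0. The cvmx_read_csr()/cvmx_write_csr() accessors and the CVMX_IPD_PORT_QOS_X_CNT(counter) address macro are assumed to come from the companion cvmx headers.)

    cvmx_ipd_port_qos_x_cnt_t qcnt;
    qcnt.u64 = cvmx_read_csr(CVMX_IPD_PORT_QOS_X_CNT(0));  // port 0, QOS 0
    qcnt.s.wmark = 1024;  // flag IPD_PORT_QOS_INT0 when CNT crosses 1024
    cvmx_write_csr(CVMX_IPD_PORT_QOS_X_CNT(0), qcnt.u64);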
*/ #else uint64_t intr : 64; #endif } s; struct cvmx_ipd_port_qos_intx_s cn52xx; struct cvmx_ipd_port_qos_intx_s cn52xxp1; struct cvmx_ipd_port_qos_intx_s cn56xx; struct cvmx_ipd_port_qos_intx_s cn56xxp1; } cvmx_ipd_port_qos_intx_t; /** * cvmx_ipd_port_qos_int_enb# * * IPD_PORT_QOS_INT_ENBX = IPD PORT-QOS Interrupt Enable * * When the IPD_PORT_QOS_INTX[\#] is '1' and IPD_PORT_QOS_INT_ENBX[\#] is '1' an interrupt will be generated. */ typedef union { uint64_t u64; struct cvmx_ipd_port_qos_int_enbx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t enb : 64; /**< Enable bits. */ #else uint64_t enb : 64; #endif } s; struct cvmx_ipd_port_qos_int_enbx_s cn52xx; struct cvmx_ipd_port_qos_int_enbx_s cn52xxp1; struct cvmx_ipd_port_qos_int_enbx_s cn56xx; struct cvmx_ipd_port_qos_int_enbx_s cn56xxp1; } cvmx_ipd_port_qos_int_enbx_t; /** * cvmx_ipd_prc_hold_ptr_fifo_ctl * * IPD_PRC_HOLD_PTR_FIFO_CTL = IPD's PRC Holding Pointer FIFO Control * * Allows reading of the Page-Pointers stored in the IPD's PRC Holding Fifo. */ typedef union { uint64_t u64; struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_39_63 : 25; uint64_t max_pkt : 3; /**< Maximum number of Packet-Pointers that COULD be in the FIFO. */ uint64_t praddr : 3; /**< Present Packet-Pointer read address. */ uint64_t ptr : 29; /**< The output of the prc-holding-fifo. */ uint64_t cena : 1; /**< Active low Chip Enable that controls the MUX-select that steers [RADDR] to the fifo. *WARNING - Setting this field to '0' will allow reading of the memories through the PTR field, but will cause unpredictable operation of the IPD under normal operation. */ uint64_t raddr : 3; /**< Sets the address to read from in the holding fifo in the PRC. This FIFO holds Packet-Pointers to be used for packet data storage. */ #else uint64_t raddr : 3; uint64_t cena : 1; uint64_t ptr : 29; uint64_t praddr : 3; uint64_t max_pkt : 3; uint64_t reserved_39_63 : 25; #endif } s; struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn30xx; struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn31xx; struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn38xx; struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn50xx; struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn52xx; struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn52xxp1; struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn56xx; struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn56xxp1; struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn58xx; struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn58xxp1; } cvmx_ipd_prc_hold_ptr_fifo_ctl_t; /** * cvmx_ipd_prc_port_ptr_fifo_ctl * * IPD_PRC_PORT_PTR_FIFO_CTL = IPD's PRC PORT Pointer FIFO Control * * Allows reading of the Page-Pointers stored in the IPD's PRC PORT Fifo. */ typedef union { uint64_t u64; struct cvmx_ipd_prc_port_ptr_fifo_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_44_63 : 20; uint64_t max_pkt : 7; /**< Maximum number of Packet-Pointers that are in the FIFO. */ uint64_t ptr : 29; /**< The output of the prc-port-ptr-fifo. */ uint64_t cena : 1; /**< Active low Chip Enable to the read port of the pwp_fifo. This bit also controls the MUX-select that steers [RADDR] to the pwp_fifo. *WARNING - Setting this field to '0' will allow reading of the memories through the PTR field, but will cause unpredictable operation of the IPD under normal operation. */ uint64_t raddr : 7; /**< Sets the address to read from in the port fifo in the PRC. This FIFO holds Packet-Pointers to be used for packet data storage.
*/ #else uint64_t raddr : 7; uint64_t cena : 1; uint64_t ptr : 29; uint64_t max_pkt : 7; uint64_t reserved_44_63 : 20; #endif } s; struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn30xx; struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn31xx; struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn38xx; struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn50xx; struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn52xx; struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn52xxp1; struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn56xx; struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn56xxp1; struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn58xx; struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn58xxp1; } cvmx_ipd_prc_port_ptr_fifo_ctl_t; /** * cvmx_ipd_ptr_count * * IPD_PTR_COUNT = IPD Page Pointer Count * * Shows the number of WQE and Packet Page Pointers stored in the IPD. */ typedef union { uint64_t u64; struct cvmx_ipd_ptr_count_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_19_63 : 45; uint64_t pktv_cnt : 1; /**< PKT Ptr Valid. PASS2 Field */ uint64_t wqev_cnt : 1; /**< WQE Ptr Valid. This value is '1' when a WQE is being held for use by the IPD. The value of this field should be added to the value of the WQE_PCNT field, of this register, for a total count of the WQE Page Pointers being held by IPD. PASS2 Field. */ uint64_t pfif_cnt : 3; /**< See PKT_PCNT. */ uint64_t pkt_pcnt : 7; /**< This value plus PFIF_CNT plus 36 is the number of PKT Page Pointers in IPD. */ uint64_t wqe_pcnt : 7; /**< Number of page pointers for WQE storage that are buffered in the IPD. The total count is the value of this field plus the field [WQEV_CNT]. For PASS-1 (which does not have the WQEV_CNT field) when the value of this register is '0' there still may be 1 pointer being held by IPD. */ #else uint64_t wqe_pcnt : 7; uint64_t pkt_pcnt : 7; uint64_t pfif_cnt : 3; uint64_t wqev_cnt : 1; uint64_t pktv_cnt : 1; uint64_t reserved_19_63 : 45; #endif } s; struct cvmx_ipd_ptr_count_s cn30xx; struct cvmx_ipd_ptr_count_s cn31xx; struct cvmx_ipd_ptr_count_s cn38xx; struct cvmx_ipd_ptr_count_s cn38xxp2; struct cvmx_ipd_ptr_count_s cn50xx; struct cvmx_ipd_ptr_count_s cn52xx; struct cvmx_ipd_ptr_count_s cn52xxp1; struct cvmx_ipd_ptr_count_s cn56xx; struct cvmx_ipd_ptr_count_s cn56xxp1; struct cvmx_ipd_ptr_count_s cn58xx; struct cvmx_ipd_ptr_count_s cn58xxp1; } cvmx_ipd_ptr_count_t; /** * cvmx_ipd_pwp_ptr_fifo_ctl * * IPD_PWP_PTR_FIFO_CTL = IPD's PWP Pointer FIFO Control * * Allows reading of the Page-Pointers stored in the IPD's PWP Fifo. */ typedef union { uint64_t u64; struct cvmx_ipd_pwp_ptr_fifo_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_61_63 : 3; uint64_t max_cnts : 7; /**< Maximum number of Packet-Pointers or WQE-Pointers that COULD be in the FIFO. */ uint64_t wraddr : 8; /**< Present FIFO WQE Read address. */ uint64_t praddr : 8; /**< Present FIFO Packet Read address. */ uint64_t ptr : 29; /**< The output of the pwp_fifo. */ uint64_t cena : 1; /**< Active low Chip Enable to the read port of the pwp_fifo. This bit also controls the MUX-select that steers [RADDR] to the pwp_fifo. *WARNING - Setting this field to '0' will allow reading of the memories through the PTR field, but will cause unpredictable operation of the IPD under normal operation. */ uint64_t raddr : 8; /**< Sets the address to read from in the pwp_fifo. Addresses 0 through 7 contain Packet-Pointers and addresses 8 through 15 contain WQE-Pointers.
*/ #else uint64_t raddr : 8; uint64_t cena : 1; uint64_t ptr : 29; uint64_t praddr : 8; uint64_t wraddr : 8; uint64_t max_cnts : 7; uint64_t reserved_61_63 : 3; #endif } s; struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn30xx; struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn31xx; struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn38xx; struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn50xx; struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn52xx; struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn52xxp1; struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn56xx; struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn56xxp1; struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn58xx; struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn58xxp1; } cvmx_ipd_pwp_ptr_fifo_ctl_t; /** * cvmx_ipd_qos#_red_marks * * IPD_QOS0_RED_MARKS = IPD QOS 0 Marks Red High Low * * Set the pass-drop marks for qos level. */ typedef union { uint64_t u64; struct cvmx_ipd_qosx_red_marks_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t drop : 32; /**< Packets will be dropped when the average value of IPD_QUE0_FREE_PAGE_CNT is equal to or less than this value. */ uint64_t pass : 32; /**< Packets will be passed when the average value of IPD_QUE0_FREE_PAGE_CNT is larger than this value. */ #else uint64_t pass : 32; uint64_t drop : 32; #endif } s; struct cvmx_ipd_qosx_red_marks_s cn30xx; struct cvmx_ipd_qosx_red_marks_s cn31xx; struct cvmx_ipd_qosx_red_marks_s cn38xx; struct cvmx_ipd_qosx_red_marks_s cn38xxp2; struct cvmx_ipd_qosx_red_marks_s cn50xx; struct cvmx_ipd_qosx_red_marks_s cn52xx; struct cvmx_ipd_qosx_red_marks_s cn52xxp1; struct cvmx_ipd_qosx_red_marks_s cn56xx; struct cvmx_ipd_qosx_red_marks_s cn56xxp1; struct cvmx_ipd_qosx_red_marks_s cn58xx; struct cvmx_ipd_qosx_red_marks_s cn58xxp1; } cvmx_ipd_qosx_red_marks_t; /** * cvmx_ipd_que0_free_page_cnt * * IPD_QUE0_FREE_PAGE_CNT = IPD Queue0 Free Page Count * * Number of Free-Page Pointer that are available for use in the FPA for Queue-0. */ typedef union { uint64_t u64; struct cvmx_ipd_que0_free_page_cnt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t q0_pcnt : 32; /**< Number of Queue-0 Page Pointers Available. */ #else uint64_t q0_pcnt : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_ipd_que0_free_page_cnt_s cn30xx; struct cvmx_ipd_que0_free_page_cnt_s cn31xx; struct cvmx_ipd_que0_free_page_cnt_s cn38xx; struct cvmx_ipd_que0_free_page_cnt_s cn38xxp2; struct cvmx_ipd_que0_free_page_cnt_s cn50xx; struct cvmx_ipd_que0_free_page_cnt_s cn52xx; struct cvmx_ipd_que0_free_page_cnt_s cn52xxp1; struct cvmx_ipd_que0_free_page_cnt_s cn56xx; struct cvmx_ipd_que0_free_page_cnt_s cn56xxp1; struct cvmx_ipd_que0_free_page_cnt_s cn58xx; struct cvmx_ipd_que0_free_page_cnt_s cn58xxp1; } cvmx_ipd_que0_free_page_cnt_t; /** * cvmx_ipd_red_port_enable * * IPD_RED_PORT_ENABLE = IPD RED Port Enable * * Set the pass-drop marks for qos level. */ typedef union { uint64_t u64; struct cvmx_ipd_red_port_enable_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t prb_dly : 14; /**< Number (core clocks periods + 68) * 8 to wait before caluclating the new packet drop probability for each QOS level. */ uint64_t avg_dly : 14; /**< Number (core clocks periods + 10) * 8 to wait before caluclating the moving average for wach QOS level. Larger AVG_DLY values cause the moving averages of ALL QOS levels to track changes in the actual free space more slowly. Smaller NEW_CON (and larger AVG_CON) values can have a similar effect, but only affect an individual QOS level, rather than all. 
*/ uint64_t prt_enb : 36; /**< The bit position will enable the corresponding Ports ability to have packets dropped by RED probability. */ #else uint64_t prt_enb : 36; uint64_t avg_dly : 14; uint64_t prb_dly : 14; #endif } s; struct cvmx_ipd_red_port_enable_s cn30xx; struct cvmx_ipd_red_port_enable_s cn31xx; struct cvmx_ipd_red_port_enable_s cn38xx; struct cvmx_ipd_red_port_enable_s cn38xxp2; struct cvmx_ipd_red_port_enable_s cn50xx; struct cvmx_ipd_red_port_enable_s cn52xx; struct cvmx_ipd_red_port_enable_s cn52xxp1; struct cvmx_ipd_red_port_enable_s cn56xx; struct cvmx_ipd_red_port_enable_s cn56xxp1; struct cvmx_ipd_red_port_enable_s cn58xx; struct cvmx_ipd_red_port_enable_s cn58xxp1; } cvmx_ipd_red_port_enable_t; /** * cvmx_ipd_red_port_enable2 * * IPD_RED_PORT_ENABLE2 = IPD RED Port Enable2 * * Set the pass-drop marks for qos level. */ typedef union { uint64_t u64; struct cvmx_ipd_red_port_enable2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t prt_enb : 4; /**< Bits 3-0 cooresponds to ports 39-36. These bits have the same meaning as the PRT_ENB field of IPD_RED_PORT_ENABLE. */ #else uint64_t prt_enb : 4; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_ipd_red_port_enable2_s cn52xx; struct cvmx_ipd_red_port_enable2_s cn52xxp1; struct cvmx_ipd_red_port_enable2_s cn56xx; struct cvmx_ipd_red_port_enable2_s cn56xxp1; } cvmx_ipd_red_port_enable2_t; /** * cvmx_ipd_red_que#_param * * IPD_RED_QUE0_PARAM = IPD RED Queue-0 Parameters * * Value control the Passing and Dropping of packets by the red engine for QOS Level-0. */ typedef union { uint64_t u64; struct cvmx_ipd_red_quex_param_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_49_63 : 15; uint64_t use_pcnt : 1; /**< When set '1' red will use the actual Packet-Page Count in place of the Average for RED calculations. */ uint64_t new_con : 8; /**< This value is used control how much of the present Actual Queue Size is used to calculate the new Average Queue Size. The value is a number from 0 256, which represents NEW_CON/256 of the Actual Queue Size that will be used in the calculation. The number in this field plus the value of AVG_CON must be equal to 256. Larger AVG_DLY values cause the moving averages of ALL QOS levels to track changes in the actual free space more slowly. Smaller NEW_CON (and larger AVG_CON) values can have a similar effect, but only affect an individual QOS level, rather than all. */ uint64_t avg_con : 8; /**< This value is used control how much of the present Average Queue Size is used to calculate the new Average Queue Size. The value is a number from 0 256, which represents AVG_CON/256 of the Average Queue Size that will be used in the calculation. The number in this field plus the value of NEW_CON must be equal to 256. Larger AVG_DLY values cause the moving averages of ALL QOS levels to track changes in the actual free space more slowly. Smaller NEW_CON (and larger AVG_CON) values can have a similar effect, but only affect an individual QOS level, rather than all. */ uint64_t prb_con : 32; /**< Used in computing the probability of a packet being passed or drop by the WRED engine. The field is calculated to be (255 * 2^24)/(PASS-DROP). Where PASS and DROP are the field from the IPD_QOS0_RED_MARKS CSR. 
*/ #else uint64_t prb_con : 32; uint64_t avg_con : 8; uint64_t new_con : 8; uint64_t use_pcnt : 1; uint64_t reserved_49_63 : 15; #endif } s; struct cvmx_ipd_red_quex_param_s cn30xx; struct cvmx_ipd_red_quex_param_s cn31xx; struct cvmx_ipd_red_quex_param_s cn38xx; struct cvmx_ipd_red_quex_param_s cn38xxp2; struct cvmx_ipd_red_quex_param_s cn50xx; struct cvmx_ipd_red_quex_param_s cn52xx; struct cvmx_ipd_red_quex_param_s cn52xxp1; struct cvmx_ipd_red_quex_param_s cn56xx; struct cvmx_ipd_red_quex_param_s cn56xxp1; struct cvmx_ipd_red_quex_param_s cn58xx; struct cvmx_ipd_red_quex_param_s cn58xxp1; } cvmx_ipd_red_quex_param_t; /** * cvmx_ipd_sub_port_bp_page_cnt * * IPD_SUB_PORT_BP_PAGE_CNT = IPD Subtract Port Backpressure Page Count * * Will add the value to the indicated port count register, the number of pages supplied. The value added should * be the 2's complement of the vallue that needs to be subtracted. Users would add 2's compliment values to the * port-mbuf-count register to return (lower the count) mbufs to the counter in order to avoid port-level * backpressure being applied to the port. Backpressure is applied when the MBUF used count of a port exceeds the * value in the IPD_PORTX_BP_PAGE_CNT. * * This register can't be written from the PCI via a window write. */ typedef union { uint64_t u64; struct cvmx_ipd_sub_port_bp_page_cnt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_31_63 : 33; uint64_t port : 6; /**< The port to add the PAGE_CNT field to. */ uint64_t page_cnt : 25; /**< The number of page pointers to add to the port counter pointed to by the PORT Field. */ #else uint64_t page_cnt : 25; uint64_t port : 6; uint64_t reserved_31_63 : 33; #endif } s; struct cvmx_ipd_sub_port_bp_page_cnt_s cn30xx; struct cvmx_ipd_sub_port_bp_page_cnt_s cn31xx; struct cvmx_ipd_sub_port_bp_page_cnt_s cn38xx; struct cvmx_ipd_sub_port_bp_page_cnt_s cn38xxp2; struct cvmx_ipd_sub_port_bp_page_cnt_s cn50xx; struct cvmx_ipd_sub_port_bp_page_cnt_s cn52xx; struct cvmx_ipd_sub_port_bp_page_cnt_s cn52xxp1; struct cvmx_ipd_sub_port_bp_page_cnt_s cn56xx; struct cvmx_ipd_sub_port_bp_page_cnt_s cn56xxp1; struct cvmx_ipd_sub_port_bp_page_cnt_s cn58xx; struct cvmx_ipd_sub_port_bp_page_cnt_s cn58xxp1; } cvmx_ipd_sub_port_bp_page_cnt_t; /** * cvmx_ipd_sub_port_fcs * * IPD_SUB_PORT_FCS = IPD Subtract Ports FCS Register * * When set '1' the port corresponding to the but set will subtract 4 bytes from the end of * the packet. */ typedef union { uint64_t u64; struct cvmx_ipd_sub_port_fcs_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_40_63 : 24; uint64_t port_bit2 : 4; /**< When set '1', the port corresponding to the bit position set, will subtract the FCS for packets on that port. */ uint64_t reserved_32_35 : 4; uint64_t port_bit : 32; /**< When set '1', the port corresponding to the bit position set, will subtract the FCS for packets on that port. */ #else uint64_t port_bit : 32; uint64_t reserved_32_35 : 4; uint64_t port_bit2 : 4; uint64_t reserved_40_63 : 24; #endif } s; struct cvmx_ipd_sub_port_fcs_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_3_63 : 61; uint64_t port_bit : 3; /**< When set '1', the port corresponding to the bit position set, will subtract the FCS for packets on that port. 
*/ #else uint64_t port_bit : 3; uint64_t reserved_3_63 : 61; #endif } cn30xx; struct cvmx_ipd_sub_port_fcs_cn30xx cn31xx; struct cvmx_ipd_sub_port_fcs_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t port_bit : 32; /**< When set '1', the port corresponding to the bit position set, will subtract the FCS for packets on that port. */ #else uint64_t port_bit : 32; uint64_t reserved_32_63 : 32; #endif } cn38xx; struct cvmx_ipd_sub_port_fcs_cn38xx cn38xxp2; struct cvmx_ipd_sub_port_fcs_cn30xx cn50xx; struct cvmx_ipd_sub_port_fcs_s cn52xx; struct cvmx_ipd_sub_port_fcs_s cn52xxp1; struct cvmx_ipd_sub_port_fcs_s cn56xx; struct cvmx_ipd_sub_port_fcs_s cn56xxp1; struct cvmx_ipd_sub_port_fcs_cn38xx cn58xx; struct cvmx_ipd_sub_port_fcs_cn38xx cn58xxp1; } cvmx_ipd_sub_port_fcs_t; /** * cvmx_ipd_sub_port_qos_cnt * * IPD_SUB_PORT_QOS_CNT = IPD Subtract Port QOS Count * * Will add the value (CNT) to the indicated Port-QOS register (PORT_QOS). The value added must be * be the 2's complement of the value that needs to be subtracted. */ typedef union { uint64_t u64; struct cvmx_ipd_sub_port_qos_cnt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_41_63 : 23; uint64_t port_qos : 9; /**< The port to add the CNT field to. */ uint64_t cnt : 32; /**< The value to be added to the register selected in the PORT_QOS field. */ #else uint64_t cnt : 32; uint64_t port_qos : 9; uint64_t reserved_41_63 : 23; #endif } s; struct cvmx_ipd_sub_port_qos_cnt_s cn52xx; struct cvmx_ipd_sub_port_qos_cnt_s cn52xxp1; struct cvmx_ipd_sub_port_qos_cnt_s cn56xx; struct cvmx_ipd_sub_port_qos_cnt_s cn56xxp1; } cvmx_ipd_sub_port_qos_cnt_t; /** * cvmx_ipd_wqe_fpa_queue * * IPD_WQE_FPA_QUEUE = IPD Work-Queue-Entry FPA Page Size * * Which FPA Queue (0-7) to fetch page-pointers from for WQE's */ typedef union { uint64_t u64; struct cvmx_ipd_wqe_fpa_queue_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_3_63 : 61; uint64_t wqe_pool : 3; /**< Which FPA Queue to fetch page-pointers from for WQE's. */ #else uint64_t wqe_pool : 3; uint64_t reserved_3_63 : 61; #endif } s; struct cvmx_ipd_wqe_fpa_queue_s cn30xx; struct cvmx_ipd_wqe_fpa_queue_s cn31xx; struct cvmx_ipd_wqe_fpa_queue_s cn38xx; struct cvmx_ipd_wqe_fpa_queue_s cn38xxp2; struct cvmx_ipd_wqe_fpa_queue_s cn50xx; struct cvmx_ipd_wqe_fpa_queue_s cn52xx; struct cvmx_ipd_wqe_fpa_queue_s cn52xxp1; struct cvmx_ipd_wqe_fpa_queue_s cn56xx; struct cvmx_ipd_wqe_fpa_queue_s cn56xxp1; struct cvmx_ipd_wqe_fpa_queue_s cn58xx; struct cvmx_ipd_wqe_fpa_queue_s cn58xxp1; } cvmx_ipd_wqe_fpa_queue_t; /** * cvmx_ipd_wqe_ptr_valid * * IPD_WQE_PTR_VALID = IPD's WQE Pointer Valid * * The value of the WQE-pointer fetched and in the valid register. */ typedef union { uint64_t u64; struct cvmx_ipd_wqe_ptr_valid_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_29_63 : 35; uint64_t ptr : 29; /**< Pointer value. */ #else uint64_t ptr : 29; uint64_t reserved_29_63 : 35; #endif } s; struct cvmx_ipd_wqe_ptr_valid_s cn30xx; struct cvmx_ipd_wqe_ptr_valid_s cn31xx; struct cvmx_ipd_wqe_ptr_valid_s cn38xx; struct cvmx_ipd_wqe_ptr_valid_s cn50xx; struct cvmx_ipd_wqe_ptr_valid_s cn52xx; struct cvmx_ipd_wqe_ptr_valid_s cn52xxp1; struct cvmx_ipd_wqe_ptr_valid_s cn56xx; struct cvmx_ipd_wqe_ptr_valid_s cn56xxp1; struct cvmx_ipd_wqe_ptr_valid_s cn58xx; struct cvmx_ipd_wqe_ptr_valid_s cn58xxp1; } cvmx_ipd_wqe_ptr_valid_t; /** * cvmx_key_bist_reg * * KEY_BIST_REG = KEY's BIST Status Register * * The KEY's BIST status for memories. 
*/ typedef union { uint64_t u64; struct cvmx_key_bist_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_3_63 : 61; uint64_t rrc : 1; /**< RRC bist status. */ uint64_t mem1 : 1; /**< MEM - 1 bist status. */ uint64_t mem0 : 1; /**< MEM - 0 bist status. */ #else uint64_t mem0 : 1; uint64_t mem1 : 1; uint64_t rrc : 1; uint64_t reserved_3_63 : 61; #endif } s; struct cvmx_key_bist_reg_s cn38xx; struct cvmx_key_bist_reg_s cn38xxp2; struct cvmx_key_bist_reg_s cn56xx; struct cvmx_key_bist_reg_s cn56xxp1; struct cvmx_key_bist_reg_s cn58xx; struct cvmx_key_bist_reg_s cn58xxp1; } cvmx_key_bist_reg_t; /** * cvmx_key_ctl_status * * KEY_CTL_STATUS = KEY's Control/Status Register * * The KEY's interrupt enable register. */ typedef union { uint64_t u64; struct cvmx_key_ctl_status_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_14_63 : 50; uint64_t mem1_err : 7; /**< Causes a flip of the ECC bit associated 38:32 respective to bit 13:7 of this field, for FPF FIFO 1. */ uint64_t mem0_err : 7; /**< Causes a flip of the ECC bit associated 38:32 respective to bit 6:0 of this field, for FPF FIFO 0. */ #else uint64_t mem0_err : 7; uint64_t mem1_err : 7; uint64_t reserved_14_63 : 50; #endif } s; struct cvmx_key_ctl_status_s cn38xx; struct cvmx_key_ctl_status_s cn38xxp2; struct cvmx_key_ctl_status_s cn56xx; struct cvmx_key_ctl_status_s cn56xxp1; struct cvmx_key_ctl_status_s cn58xx; struct cvmx_key_ctl_status_s cn58xxp1; } cvmx_key_ctl_status_t; /** * cvmx_key_int_enb * * KEY_INT_ENB = KEY's Interrupt Enable * * The KEY's interrupt enable register. */ typedef union { uint64_t u64; struct cvmx_key_int_enb_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t ked1_dbe : 1; /**< When set (1) and bit 3 of the KEY_INT_SUM register is asserted the KEY will assert an interrupt. */ uint64_t ked1_sbe : 1; /**< When set (1) and bit 2 of the KEY_INT_SUM register is asserted the KEY will assert an interrupt. */ uint64_t ked0_dbe : 1; /**< When set (1) and bit 1 of the KEY_INT_SUM register is asserted the KEY will assert an interrupt. */ uint64_t ked0_sbe : 1; /**< When set (1) and bit 0 of the KEY_INT_SUM register is asserted the KEY will assert an interrupt. */ #else uint64_t ked0_sbe : 1; uint64_t ked0_dbe : 1; uint64_t ked1_sbe : 1; uint64_t ked1_dbe : 1; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_key_int_enb_s cn38xx; struct cvmx_key_int_enb_s cn38xxp2; struct cvmx_key_int_enb_s cn56xx; struct cvmx_key_int_enb_s cn56xxp1; struct cvmx_key_int_enb_s cn58xx; struct cvmx_key_int_enb_s cn58xxp1; } cvmx_key_int_enb_t; /** * cvmx_key_int_sum * * KEY_INT_SUM = KEY's Interrupt Summary Register * * Contains the diffrent interrupt summary bits of the KEY. 
*/ typedef union { uint64_t u64; struct cvmx_key_int_sum_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t ked1_dbe : 1; uint64_t ked1_sbe : 1; uint64_t ked0_dbe : 1; uint64_t ked0_sbe : 1; #else uint64_t ked0_sbe : 1; uint64_t ked0_dbe : 1; uint64_t ked1_sbe : 1; uint64_t ked1_dbe : 1; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_key_int_sum_s cn38xx; struct cvmx_key_int_sum_s cn38xxp2; struct cvmx_key_int_sum_s cn56xx; struct cvmx_key_int_sum_s cn56xxp1; struct cvmx_key_int_sum_s cn58xx; struct cvmx_key_int_sum_s cn58xxp1; } cvmx_key_int_sum_t; /** * cvmx_l2c_bst0 * * L2C_BST0 = L2C BIST 0 CTL/STAT * */ typedef union { uint64_t u64; struct cvmx_l2c_bst0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_24_63 : 40; uint64_t dtbnk : 1; /**< DuTag Bank# When DT=1(BAD), this field provides additional information about which DuTag Bank (0/1) failed. *** NOTE: O9N PASS1 Addition */ uint64_t wlb_msk : 4; /**< Bist Results for WLB-MSK RAM [DP0-3] - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t dtcnt : 13; /**< DuTag BiST Counter (used to help isolate the failure) [12]: i (0=FORWARD/1=REVERSE pass) [11:10]: j (Pattern# 1 of 4) [9:4]: k (DT Index 1 of 64) [3:0]: l (DT# 1 of 16 DTs) */ uint64_t dt : 1; /**< Bist Results for DuTAG RAM(s) - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t stin_msk : 1; /**< Bist Results for STIN-MSK RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t wlb_dat : 4; /**< Bist Results for WLB-DAT RAM [DP0-3] - 0: GOOD (or bist in progress/never run) - 1: BAD */ #else uint64_t wlb_dat : 4; uint64_t stin_msk : 1; uint64_t dt : 1; uint64_t dtcnt : 13; uint64_t wlb_msk : 4; uint64_t dtbnk : 1; uint64_t reserved_24_63 : 40; #endif } s; struct cvmx_l2c_bst0_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_23_63 : 41; uint64_t wlb_msk : 4; /**< Bist Results for WLB-MSK RAM [DP0-3] - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t reserved_15_18 : 4; uint64_t dtcnt : 9; /**< DuTag BiST Counter (used to help isolate the failure) [8]: i (0=FORWARD/1=REVERSE pass) [7:6]: j (Pattern# 1 of 4) [5:0]: k (DT Index 1 of 64) */ uint64_t dt : 1; /**< Bist Results for DuTAG RAM(s) - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t reserved_4_4 : 1; uint64_t wlb_dat : 4; /**< Bist Results for WLB-DAT RAM [DP0-3] - 0: GOOD (or bist in progress/never run) - 1: BAD */ #else uint64_t wlb_dat : 4; uint64_t reserved_4_4 : 1; uint64_t dt : 1; uint64_t dtcnt : 9; uint64_t reserved_15_18 : 4; uint64_t wlb_msk : 4; uint64_t reserved_23_63 : 41; #endif } cn30xx; struct cvmx_l2c_bst0_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_23_63 : 41; uint64_t wlb_msk : 4; /**< Bist Results for WLB-MSK RAM [DP0-3] - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t reserved_16_18 : 3; uint64_t dtcnt : 10; /**< DuTag BiST Counter (used to help isolate the failure) [9]: i (0=FORWARD/1=REVERSE pass) [8:7]: j (Pattern# 1 of 4) [6:1]: k (DT Index 1 of 64) [0]: l (DT# 1 of 2 DTs) */ uint64_t dt : 1; /**< Bist Results for DuTAG RAM(s) - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t stin_msk : 1; /**< Bist Results for STIN-MSK RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t wlb_dat : 4; /**< Bist Results for WLB-DAT RAM [DP0-3] - 0: GOOD (or bist in progress/never run) - 1: BAD */ #else uint64_t wlb_dat : 4; uint64_t stin_msk : 1; uint64_t dt : 1; uint64_t dtcnt : 10; uint64_t reserved_16_18 : 3; uint64_t wlb_msk : 4; uint64_t reserved_23_63 : 41; 
#endif } cn31xx; struct cvmx_l2c_bst0_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_19_63 : 45; uint64_t dtcnt : 13; /**< DuTag BiST Counter (used to help isolate the failure) [12]: i (0=FORWARD/1=REVERSE pass) [11:10]: j (Pattern# 1 of 4) [9:4]: k (DT Index 1 of 64) [3:0]: l (DT# 1 of 16 DTs) */ uint64_t dt : 1; /**< Bist Results for DuTAG RAM(s) - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t stin_msk : 1; /**< Bist Results for STIN-MSK RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t wlb_dat : 4; /**< Bist Results for WLB-DAT RAM [DP0-3] - 0: GOOD (or bist in progress/never run) - 1: BAD */ #else uint64_t wlb_dat : 4; uint64_t stin_msk : 1; uint64_t dt : 1; uint64_t dtcnt : 13; uint64_t reserved_19_63 : 45; #endif } cn38xx; struct cvmx_l2c_bst0_cn38xx cn38xxp2; struct cvmx_l2c_bst0_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_24_63 : 40; uint64_t dtbnk : 1; /**< DuTag Bank# When DT=1(BAD), this field provides additional information about which DuTag Bank (0/1) failed. */ uint64_t wlb_msk : 4; /**< Bist Results for WLB-MSK RAM [DP0-3] - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t reserved_16_18 : 3; uint64_t dtcnt : 10; /**< DuTag BiST Counter (used to help isolate the failure) [9]: i (0=FORWARD/1=REVERSE pass) [8:7]: j (Pattern# 1 of 4) [6:1]: k (DT Index 1 of 64) [0]: l (DT# 1 of 2 DTs) */ uint64_t dt : 1; /**< Bist Results for DuTAG RAM(s) - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t stin_msk : 1; /**< Bist Results for STIN-MSK RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t wlb_dat : 4; /**< Bist Results for WLB-DAT RAM [DP0-3] - 0: GOOD (or bist in progress/never run) - 1: BAD */ #else uint64_t wlb_dat : 4; uint64_t stin_msk : 1; uint64_t dt : 1; uint64_t dtcnt : 10; uint64_t reserved_16_18 : 3; uint64_t wlb_msk : 4; uint64_t dtbnk : 1; uint64_t reserved_24_63 : 40; #endif } cn50xx; struct cvmx_l2c_bst0_cn50xx cn52xx; struct cvmx_l2c_bst0_cn50xx cn52xxp1; struct cvmx_l2c_bst0_s cn56xx; struct cvmx_l2c_bst0_s cn56xxp1; struct cvmx_l2c_bst0_s cn58xx; struct cvmx_l2c_bst0_s cn58xxp1; } cvmx_l2c_bst0_t; /** * cvmx_l2c_bst1 * * L2C_BST1 = L2C BIST 1 CTL/STAT * */ typedef union { uint64_t u64; struct cvmx_l2c_bst1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_9_63 : 55; uint64_t l2t : 9; /**< Bist Results for L2T (USE+8SET RAMs) - 0: GOOD (or bist in progress/never run) - 1: BAD */ #else uint64_t l2t : 9; uint64_t reserved_9_63 : 55; #endif } s; struct cvmx_l2c_bst1_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t vwdf : 4; /**< Bist Results for VWDF RAMs - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t lrf : 2; /**< Bist Results for LRF RAMs (PLC+ILC) - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t vab_vwcf : 1; /**< Bist Results for VAB VWCF_MEM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t reserved_5_8 : 4; uint64_t l2t : 5; /**< Bist Results for L2T (USE+4SET RAMs) - 0: GOOD (or bist in progress/never run) - 1: BAD */ #else uint64_t l2t : 5; uint64_t reserved_5_8 : 4; uint64_t vab_vwcf : 1; uint64_t lrf : 2; uint64_t vwdf : 4; uint64_t reserved_16_63 : 48; #endif } cn30xx; struct cvmx_l2c_bst1_cn30xx cn31xx; struct cvmx_l2c_bst1_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t vwdf : 4; /**< Bist Results for VWDF RAMs - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t lrf : 2; /**< Bist Results for LRF RAMs (PLC+ILC) - 0: GOOD (or bist in 
progress/never run) - 1: BAD */ uint64_t vab_vwcf : 1; /**< Bist Results for VAB VWCF_MEM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t l2t : 9; /**< Bist Results for L2T (USE+8SET RAMs) - 0: GOOD (or bist in progress/never run) - 1: BAD */ #else uint64_t l2t : 9; uint64_t vab_vwcf : 1; uint64_t lrf : 2; uint64_t vwdf : 4; uint64_t reserved_16_63 : 48; #endif } cn38xx; struct cvmx_l2c_bst1_cn38xx cn38xxp2; struct cvmx_l2c_bst1_cn38xx cn50xx; struct cvmx_l2c_bst1_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_19_63 : 45; uint64_t plc2 : 1; /**< Bist Results for PLC2 RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t plc1 : 1; /**< Bist Results for PLC1 RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t plc0 : 1; /**< Bist Results for PLC0 RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t vwdf : 4; /**< Bist Results for VWDF RAMs - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t reserved_11_11 : 1; uint64_t ilc : 1; /**< Bist Results for ILC RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t vab_vwcf : 1; /**< Bist Results for VAB VWCF_MEM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t l2t : 9; /**< Bist Results for L2T (USE+8SET RAMs) - 0: GOOD (or bist in progress/never run) - 1: BAD */ #else uint64_t l2t : 9; uint64_t vab_vwcf : 1; uint64_t ilc : 1; uint64_t reserved_11_11 : 1; uint64_t vwdf : 4; uint64_t plc0 : 1; uint64_t plc1 : 1; uint64_t plc2 : 1; uint64_t reserved_19_63 : 45; #endif } cn52xx; struct cvmx_l2c_bst1_cn52xx cn52xxp1; struct cvmx_l2c_bst1_cn56xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_24_63 : 40; uint64_t plc2 : 1; /**< Bist Results for LRF RAMs (ILC) - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t plc1 : 1; /**< Bist Results for LRF RAMs (ILC) - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t plc0 : 1; /**< Bist Results for LRF RAMs (ILC) - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t ilc : 1; /**< Bist Results for LRF RAMs (ILC) - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t vwdf1 : 4; /**< Bist Results for VWDF1 RAMs - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t vwdf0 : 4; /**< Bist Results for VWDF0 RAMs - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t vab_vwcf1 : 1; /**< Bist Results for VAB VWCF1_MEM */ uint64_t reserved_10_10 : 1; uint64_t vab_vwcf0 : 1; /**< Bist Results for VAB VWCF0_MEM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t l2t : 9; /**< Bist Results for L2T (USE+8SET RAMs) - 0: GOOD (or bist in progress/never run) - 1: BAD */ #else uint64_t l2t : 9; uint64_t vab_vwcf0 : 1; uint64_t reserved_10_10 : 1; uint64_t vab_vwcf1 : 1; uint64_t vwdf0 : 4; uint64_t vwdf1 : 4; uint64_t ilc : 1; uint64_t plc0 : 1; uint64_t plc1 : 1; uint64_t plc2 : 1; uint64_t reserved_24_63 : 40; #endif } cn56xx; struct cvmx_l2c_bst1_cn56xx cn56xxp1; struct cvmx_l2c_bst1_cn38xx cn58xx; struct cvmx_l2c_bst1_cn38xx cn58xxp1; } cvmx_l2c_bst1_t; /** * cvmx_l2c_bst2 * * L2C_BST2 = L2C BIST 2 CTL/STAT * */ typedef union { uint64_t u64; struct cvmx_l2c_bst2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t mrb : 4; /**< Bist Results for MRB RAMs - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t reserved_4_11 : 8; uint64_t ipcbst : 1; /**< Bist Results for RFB IPC RAM - 1: BAD */ uint64_t picbst : 1; /**< Bist Results for RFB PIC RAM - 1: BAD */ uint64_t xrdmsk : 1; /**< Bist Results for RFB XRD-MSK RAM - 0: 
GOOD (or bist in progress/never run) - 1: BAD */ uint64_t xrddat : 1; /**< Bist Results for RFB XRD-DAT RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ #else uint64_t xrddat : 1; uint64_t xrdmsk : 1; uint64_t picbst : 1; uint64_t ipcbst : 1; uint64_t reserved_4_11 : 8; uint64_t mrb : 4; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_l2c_bst2_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t mrb : 4; /**< Bist Results for MRB RAMs - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t rmdf : 4; /**< Bist Results for RMDF RAMs - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t reserved_4_7 : 4; uint64_t ipcbst : 1; /**< Bist Results for RFB IPC RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t reserved_2_2 : 1; uint64_t xrdmsk : 1; /**< Bist Results for RFB XRD-MSK RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t xrddat : 1; /**< Bist Results for RFB XRD-DAT RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ #else uint64_t xrddat : 1; uint64_t xrdmsk : 1; uint64_t reserved_2_2 : 1; uint64_t ipcbst : 1; uint64_t reserved_4_7 : 4; uint64_t rmdf : 4; uint64_t mrb : 4; uint64_t reserved_16_63 : 48; #endif } cn30xx; struct cvmx_l2c_bst2_cn30xx cn31xx; struct cvmx_l2c_bst2_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t mrb : 4; /**< Bist Results for MRB RAMs - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t rmdf : 4; /**< Bist Results for RMDF RAMs - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t rhdf : 4; /**< Bist Results for RHDF RAMs - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t ipcbst : 1; /**< Bist Results for RFB IPC RAM - 1: BAD */ uint64_t picbst : 1; /**< Bist Results for RFB PIC RAM - 1: BAD */ uint64_t xrdmsk : 1; /**< Bist Results for RFB XRD-MSK RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t xrddat : 1; /**< Bist Results for RFB XRD-DAT RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ #else uint64_t xrddat : 1; uint64_t xrdmsk : 1; uint64_t picbst : 1; uint64_t ipcbst : 1; uint64_t rhdf : 4; uint64_t rmdf : 4; uint64_t mrb : 4; uint64_t reserved_16_63 : 48; #endif } cn38xx; struct cvmx_l2c_bst2_cn38xx cn38xxp2; struct cvmx_l2c_bst2_cn30xx cn50xx; struct cvmx_l2c_bst2_cn30xx cn52xx; struct cvmx_l2c_bst2_cn30xx cn52xxp1; struct cvmx_l2c_bst2_cn56xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t mrb : 4; /**< Bist Results for MRB RAMs - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t rmdb : 4; /**< Bist Results for RMDB RAMs - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t rhdb : 4; /**< Bist Results for RHDB RAMs - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t ipcbst : 1; /**< Bist Results for RFB IPC RAM - 1: BAD */ uint64_t picbst : 1; /**< Bist Results for RFB PIC RAM - 1: BAD */ uint64_t xrdmsk : 1; /**< Bist Results for RFB XRD-MSK RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t xrddat : 1; /**< Bist Results for RFB XRD-DAT RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ #else uint64_t xrddat : 1; uint64_t xrdmsk : 1; uint64_t picbst : 1; uint64_t ipcbst : 1; uint64_t rhdb : 4; uint64_t rmdb : 4; uint64_t mrb : 4; uint64_t reserved_16_63 : 48; #endif } cn56xx; struct cvmx_l2c_bst2_cn56xx cn56xxp1; struct cvmx_l2c_bst2_cn56xx cn58xx; struct cvmx_l2c_bst2_cn56xx cn58xxp1; } cvmx_l2c_bst2_t; /** * cvmx_l2c_cfg * * Specify the RSL base addresses for the block * * L2C_CFG = 
L2C Configuration * * Description: */ typedef union { uint64_t u64; struct cvmx_l2c_cfg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t bstrun : 1; /**< L2 Data Store Bist Running Indicates when the L2C HW Bist sequence(short or long) is running. [L2C ECC Bist FSM is not in the RESET/DONE state] *** NOTE: O9N PASS2 Addition */ uint64_t lbist : 1; /**< L2C Data Store Long Bist Sequence When the previous state was '0' and SW writes a '1', the long bist sequence (enhanced 13N March) is performed. SW can then read the L2C_CFG[BSTRUN] which will indicate that the long bist sequence is running. When BSTRUN-=0, the state of the L2D_BST[0-3] registers contain information which reflects the status of the recent long bist sequence. NOTE: SW must never write LBIST=0 while Long Bist is running (ie: when BSTRUN=1 never write LBIST=0). NOTE: LBIST is disabled if the MIO_FUS_DAT2.BIST_DIS Fuse is blown. *** NOTE: O9N PASS2 Addition */ uint64_t xor_bank : 1; /**< L2C XOR Bank Bit When both LMC's are enabled(DPRES1=1/DPRES0=1), this bit determines how addresses are assigned to LMC port(s). XOR_BANK| LMC# ----------+--------------------------------- 0 | byte address[7] 1 | byte address[7] XOR byte address[12] Example: If both LMC ports are enabled (DPRES1=1/DPRES0=1) and XOR_BANK=1, then addr[7] XOR addr[12] is used to determine which LMC Port# a reference is directed to. *** NOTE: O56 PASS1 Addition */ uint64_t dpres1 : 1; /**< DDR1 Present/LMC1 Enable When DPRES1 is set, LMC#1 is enabled(DDR1 pins at the BOTTOM of the chip are active). NOTE: When both LMC ports are enabled(DPRES1=1/DPRES0=1), see XOR_BANK bit to determine how a reference is assigned to a DDR/LMC port. (Also, in dual-LMC configuration, the address sent to the targeted LMC port is the address shifted right by one). NOTE: For power-savings, the DPRES1 is also used to disable DDR1/LMC1 clocks. SW Constraint: When dual LMC is enabled (L2C_CFG[DPRES0/1]=1), the LMCx_DDR2_CTL[DDR_EOF] must be increased by +1 to account for an additional cycle of uncertainty. *** NOTE: O56 PASS1 Addition */ uint64_t dpres0 : 1; /**< DDR0 Present/LMC0 Enable When DPRES0 is set, LMC#0 is enabled(DDR0 pins at the BOTTOM of the chip are active). NOTE: When both LMC ports are enabled(DPRES1=1/DPRES0=1), see XOR_BANK bit to determine how a reference is assigned to a DDR/LMC port. (Also, in dual-LMC configuration, the address sent to the targeted LMC port is the address shifted right by one). NOTE: For power-savings, the DPRES0 is also used to disable DDR0/LMC0 clocks. SW Constraint: When dual LMC is enabled (L2C_CFG[DPRES0/1]=1), the LMCx_DDR2_CTL[DDR_EOF] must be increased by +1 to account for an additional cycle of uncertainty. *** NOTE: O56 PASS1 Addition */ uint64_t dfill_dis : 1; /**< L2C Dual Fill Disable When set, the L2C dual-fill performance feature is disabled. NOTE: This bit is only intended to evaluate the effectiveness of the dual-fill feature. For OPTIMAL performance, this bit should ALWAYS be zero. *** NOTE: O9N PASS1 Addition */ uint64_t fpexp : 4; /**< [CYA] Forward Progress Counter Exponent NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] When FPEN is enabled and the LFB is empty, the forward progress counter (FPCNT) is initialized to: FPCNT[24:0] = 2^(9+FPEXP) When the LFB is non-empty the FPCNT is decremented (every eclk interval). If the FPCNT reaches zero, the LFB no longer accepts new requests until either a) all of the current LFB entries have completed (to ensure forward progress). 
b) FPEMPTY=0 and another forward progress count interval timeout expires. EXAMPLE USE: If FPEXP=2, the FPCNT = 2048 eclks. (For eclk=500MHz(2ns), this would be ~4us). */ uint64_t fpempty : 1; /**< [CYA] Forward Progress Counter Empty NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] When set, if the forward progress counter expires, all new LFB-NQs are stopped UNTIL all current LFB entries have completed. When clear, if the forward progress counter expires, all new LFB-NQs are stopped UNTIL either a) all current LFB entries have completed. b) another forward progress interval expires NOTE: We may want to FREEZE/HANG the system when we encounter an LFB entry cannot complete, and there may be times when we want to allow further LFB-NQs to be permitted to help in further analyzing the source */ uint64_t fpen : 1; /**< [CYA] Forward Progress Counter Enable NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] When set, enables the Forward Progress Counter to prevent new LFB entries from enqueueing until ALL current LFB entries have completed. */ uint64_t idxalias : 1; /**< L2C Index Alias Enable When set, the L2 Tag/Data Store will alias the 11-bit index with the low order 11-bits of the tag. index[17:7] = (tag[28:18] ^ index[17:7]) NOTE: This bit must only be modified at boot time, when it can be guaranteed that no blocks have been loaded into the L2 Cache. The index aliasing is a performance enhancement feature which reduces the L2 cache thrashing experienced for regular stride references. NOTE: The index alias is stored in the LFB and VAB, and its effects are reversed for memory references (Victims, STT-Misses and Read-Misses) */ uint64_t mwf_crd : 4; /**< MWF Credit Threshold: When the remaining MWF credits become less than or equal to the MWF_CRD, the L2C will assert l2c__lmi_mwd_hiwater_a to signal the LMC to give writes (victims) higher priority. */ uint64_t rsp_arb_mode : 1; /**< RSP Arbitration Mode: - 0: Fixed Priority [HP=RFB, RMCF, RHCF, STRSP, LP=STRSC] - 1: Round Robin: [RFB(reflected I/O), RMCF(RdMiss), RHCF(RdHit), STRSP(ST RSP w/ invalidate), STRSC(ST RSP no invalidate)] */ uint64_t rfb_arb_mode : 1; /**< RFB Arbitration Mode: - 0: Fixed Priority - IOB->PP requests are higher priority than PP->IOB requests - 1: Round Robin - I/O requests from PP and IOB are serviced in round robin */ uint64_t lrf_arb_mode : 1; /**< RF Arbitration Mode: - 0: Fixed Priority - IOB memory requests are higher priority than PP memory requests. - 1: Round Robin - Memory requests from PP and IOB are serviced in round robin. */ #else uint64_t lrf_arb_mode : 1; uint64_t rfb_arb_mode : 1; uint64_t rsp_arb_mode : 1; uint64_t mwf_crd : 4; uint64_t idxalias : 1; uint64_t fpen : 1; uint64_t fpempty : 1; uint64_t fpexp : 4; uint64_t dfill_dis : 1; uint64_t dpres0 : 1; uint64_t dpres1 : 1; uint64_t xor_bank : 1; uint64_t lbist : 1; uint64_t bstrun : 1; uint64_t reserved_20_63 : 44; #endif } s; struct cvmx_l2c_cfg_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_14_63 : 50; uint64_t fpexp : 4; /**< [CYA] Forward Progress Counter Exponent NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] When FPEN is enabled and the LFB is empty, the forward progress counter (FPCNT) is initialized to: FPCNT[24:0] = 2^(9+FPEXP) When the LFB is non-empty the FPCNT is decremented (every eclk interval). If the FPCNT reaches zero, the LFB no longer accepts new requests until either a) all of the current LFB entries have completed (to ensure forward progress). 
b) FPEMPTY=0 and another forward progress count interval timeout expires. EXAMPLE USE: If FPEXP=2, the FPCNT = 2048 eclks. (For eclk=500MHz(2ns), this would be ~4us). */ uint64_t fpempty : 1; /**< [CYA] Forward Progress Counter Empty NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] When set, if the forward progress counter expires, all new LFB-NQs are stopped UNTIL all current LFB entries have completed. When clear, if the forward progress counter expires, all new LFB-NQs are stopped UNTIL either a) all current LFB entries have completed. b) another forward progress interval expires NOTE: We may want to FREEZE/HANG the system when we encounter an LFB entry cannot complete, and there may be times when we want to allow further LFB-NQs to be permitted to help in further analyzing the source */ uint64_t fpen : 1; /**< [CYA] Forward Progress Counter Enable NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] When set, enables the Forward Progress Counter to prevent new LFB entries from enqueueing until ALL current LFB entries have completed. */ uint64_t idxalias : 1; /**< L2C Index Alias Enable When set, the L2 Tag/Data Store will alias the 8-bit index with the low order 8-bits of the tag. index[14:7] = (tag[22:15] ^ index[14:7]) NOTE: This bit must only be modified at boot time, when it can be guaranteed that no blocks have been loaded into the L2 Cache. The index aliasing is a performance enhancement feature which reduces the L2 cache thrashing experienced for regular stride references. NOTE: The index alias is stored in the LFB and VAB, and its effects are reversed for memory references (Victims, STT-Misses and Read-Misses) */ uint64_t mwf_crd : 4; /**< MWF Credit Threshold: When the remaining MWF credits become less than or equal to the MWF_CRD, the L2C will assert l2c__lmi_mwd_hiwater_a to signal the LMC to give writes (victims) higher priority. */ uint64_t rsp_arb_mode : 1; /**< RSP Arbitration Mode: - 0: Fixed Priority [HP=RFB, RMCF, RHCF, STRSP, LP=STRSC] - 1: Round Robin: [RFB(reflected I/O), RMCF(RdMiss), RHCF(RdHit), STRSP(ST RSP w/ invalidate), STRSC(ST RSP no invalidate)] */ uint64_t rfb_arb_mode : 1; /**< RFB Arbitration Mode: - 0: Fixed Priority - IOB->PP requests are higher priority than PP->IOB requests - 1: Round Robin - I/O requests from PP and IOB are serviced in round robin */ uint64_t lrf_arb_mode : 1; /**< RF Arbitration Mode: - 0: Fixed Priority - IOB memory requests are higher priority than PP memory requests. - 1: Round Robin - Memory requests from PP and IOB are serviced in round robin. */ #else uint64_t lrf_arb_mode : 1; uint64_t rfb_arb_mode : 1; uint64_t rsp_arb_mode : 1; uint64_t mwf_crd : 4; uint64_t idxalias : 1; uint64_t fpen : 1; uint64_t fpempty : 1; uint64_t fpexp : 4; uint64_t reserved_14_63 : 50; #endif } cn30xx; struct cvmx_l2c_cfg_cn30xx cn31xx; struct cvmx_l2c_cfg_cn30xx cn38xx; struct cvmx_l2c_cfg_cn30xx cn38xxp2; struct cvmx_l2c_cfg_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t bstrun : 1; /**< L2 Data Store Bist Running Indicates when the L2C HW Bist sequence(short or long) is running. [L2C ECC Bist FSM is not in the RESET/DONE state] *** NOTE: O56 PASS1 Addition */ uint64_t lbist : 1; /**< L2C Data Store Long Bist Sequence When the previous state was '0' and SW writes a '1', the long bist sequence (enhanced 13N March) is performed. SW can then read the L2C_CFG[BSTRUN] which will indicate that the long bist sequence is running. 
When BSTRUN-=0, the state of the L2D_BST[0-3] registers contain information which reflects the status of the recent long bist sequence. NOTE: SW must never write LBIST=0 while Long Bist is running (ie: when BSTRUN=1 never write LBIST=0). */ uint64_t reserved_14_17 : 4; uint64_t fpexp : 4; /**< [CYA] Forward Progress Counter Exponent NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] When FPEN is enabled and the LFB is empty, the forward progress counter (FPCNT) is initialized to: FPCNT[24:0] = 2^(9+FPEXP) When the LFB is non-empty the FPCNT is decremented (every eclk interval). If the FPCNT reaches zero, the LFB no longer accepts new requests until either a) all of the current LFB entries have completed (to ensure forward progress). b) FPEMPTY=0 and another forward progress count interval timeout expires. EXAMPLE USE: If FPEXP=2, the FPCNT = 2048 eclks. (For eclk=500MHz(2ns), this would be ~4us). */ uint64_t fpempty : 1; /**< [CYA] Forward Progress Counter Empty NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] When set, if the forward progress counter expires, all new LFB-NQs are stopped UNTIL all current LFB entries have completed. When clear, if the forward progress counter expires, all new LFB-NQs are stopped UNTIL either a) all current LFB entries have completed. b) another forward progress interval expires NOTE: We may want to FREEZE/HANG the system when we encounter an LFB entry cannot complete, and there may be times when we want to allow further LFB-NQs to be permitted to help in further analyzing the source */ uint64_t fpen : 1; /**< [CYA] Forward Progress Counter Enable NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] When set, enables the Forward Progress Counter to prevent new LFB entries from enqueueing until ALL current LFB entries have completed. */ uint64_t idxalias : 1; /**< L2C Index Alias Enable When set, the L2 Tag/Data Store will alias the 7-bit index with the low order 7-bits of the tag. index[13:7] = (tag[20:14] ^ index[13:7]) NOTE: This bit must only be modified at boot time, when it can be guaranteed that no blocks have been loaded into the L2 Cache. The index aliasing is a performance enhancement feature which reduces the L2 cache thrashing experienced for regular stride references. NOTE: The index alias is stored in the LFB and VAB, and its effects are reversed for memory references (Victims, STT-Misses and Read-Misses) */ uint64_t mwf_crd : 4; /**< MWF Credit Threshold: When the remaining MWF credits become less than or equal to the MWF_CRD, the L2C will assert l2c__lmi_mwd_hiwater_a to signal the LMC to give writes (victims) higher priority. */ uint64_t rsp_arb_mode : 1; /**< RSP Arbitration Mode: - 0: Fixed Priority [HP=RFB, RMCF, RHCF, STRSP, LP=STRSC] - 1: Round Robin: [RFB(reflected I/O), RMCF(RdMiss), RHCF(RdHit), STRSP(ST RSP w/ invalidate), STRSC(ST RSP no invalidate)] */ uint64_t rfb_arb_mode : 1; /**< RFB Arbitration Mode: - 0: Fixed Priority - IOB->PP requests are higher priority than PP->IOB requests - 1: Round Robin - I/O requests from PP and IOB are serviced in round robin */ uint64_t lrf_arb_mode : 1; /**< RF Arbitration Mode: - 0: Fixed Priority - IOB memory requests are higher priority than PP memory requests. - 1: Round Robin - Memory requests from PP and IOB are serviced in round robin. 
*/ #else uint64_t lrf_arb_mode : 1; uint64_t rfb_arb_mode : 1; uint64_t rsp_arb_mode : 1; uint64_t mwf_crd : 4; uint64_t idxalias : 1; uint64_t fpen : 1; uint64_t fpempty : 1; uint64_t fpexp : 4; uint64_t reserved_14_17 : 4; uint64_t lbist : 1; uint64_t bstrun : 1; uint64_t reserved_20_63 : 44; #endif } cn50xx; struct cvmx_l2c_cfg_cn50xx cn52xx; struct cvmx_l2c_cfg_cn50xx cn52xxp1; struct cvmx_l2c_cfg_s cn56xx; struct cvmx_l2c_cfg_s cn56xxp1; struct cvmx_l2c_cfg_cn58xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t bstrun : 1; /**< L2 Data Store Bist Running Indicates when the L2C HW Bist sequence(short or long) is running. [L2C ECC Bist FSM is not in the RESET/DONE state] *** NOTE: O9N PASS2 Addition */ uint64_t lbist : 1; /**< L2C Data Store Long Bist Sequence When the previous state was '0' and SW writes a '1', the long bist sequence (enhanced 13N March) is performed. SW can then read the L2C_CFG[BSTRUN] which will indicate that the long bist sequence is running. When BSTRUN-=0, the state of the L2D_BST[0-3] registers contain information which reflects the status of the recent long bist sequence. NOTE: SW must never write LBIST=0 while Long Bist is running (ie: when BSTRUN=1 never write LBIST=0). NOTE: LBIST is disabled if the MIO_FUS_DAT2.BIST_DIS Fuse is blown. *** NOTE: O9N PASS2 Addition */ uint64_t reserved_15_17 : 3; uint64_t dfill_dis : 1; /**< L2C Dual Fill Disable When set, the L2C dual-fill performance feature is disabled. NOTE: This bit is only intended to evaluate the effectiveness of the dual-fill feature. For OPTIMAL performance, this bit should ALWAYS be zero. *** NOTE: O9N PASS1 Addition */ uint64_t fpexp : 4; /**< [CYA] Forward Progress Counter Exponent NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] When FPEN is enabled and the LFB is empty, the forward progress counter (FPCNT) is initialized to: FPCNT[24:0] = 2^(9+FPEXP) When the LFB is non-empty the FPCNT is decremented (every eclk interval). If the FPCNT reaches zero, the LFB no longer accepts new requests until either a) all of the current LFB entries have completed (to ensure forward progress). b) FPEMPTY=0 and another forward progress count interval timeout expires. EXAMPLE USE: If FPEXP=2, the FPCNT = 2048 eclks. (For eclk=500MHz(2ns), this would be ~4us). */ uint64_t fpempty : 1; /**< [CYA] Forward Progress Counter Empty NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] When set, if the forward progress counter expires, all new LFB-NQs are stopped UNTIL all current LFB entries have completed. When clear, if the forward progress counter expires, all new LFB-NQs are stopped UNTIL either a) all current LFB entries have completed. b) another forward progress interval expires NOTE: We may want to FREEZE/HANG the system when we encounter an LFB entry cannot complete, and there may be times when we want to allow further LFB-NQs to be permitted to help in further analyzing the source */ uint64_t fpen : 1; /**< [CYA] Forward Progress Counter Enable NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] When set, enables the Forward Progress Counter to prevent new LFB entries from enqueueing until ALL current LFB entries have completed. */ uint64_t idxalias : 1; /**< L2C Index Alias Enable When set, the L2 Tag/Data Store will alias the 11-bit index with the low order 11-bits of the tag. index[17:7] = (tag[28:18] ^ index[17:7]) NOTE: This bit must only be modified at boot time, when it can be guaranteed that no blocks have been loaded into the L2 Cache. 
The index aliasing is a performance enhancement feature which reduces the L2 cache thrashing experienced for regular stride references. NOTE: The index alias is stored in the LFB and VAB, and its effects are reversed for memory references (Victims, STT-Misses and Read-Misses) */ uint64_t mwf_crd : 4; /**< MWF Credit Threshold: When the remaining MWF credits become less than or equal to the MWF_CRD, the L2C will assert l2c__lmi_mwd_hiwater_a to signal the LMC to give writes (victims) higher priority. */ uint64_t rsp_arb_mode : 1; /**< RSP Arbitration Mode: - 0: Fixed Priority [HP=RFB, RMCF, RHCF, STRSP, LP=STRSC] - 1: Round Robin: [RFB(reflected I/O), RMCF(RdMiss), RHCF(RdHit), STRSP(ST RSP w/ invalidate), STRSC(ST RSP no invalidate)] */ uint64_t rfb_arb_mode : 1; /**< RFB Arbitration Mode: - 0: Fixed Priority - IOB->PP requests are higher priority than PP->IOB requests - 1: Round Robin - I/O requests from PP and IOB are serviced in round robin */ uint64_t lrf_arb_mode : 1; /**< RF Arbitration Mode: - 0: Fixed Priority - IOB memory requests are higher priority than PP memory requests. - 1: Round Robin - Memory requests from PP and IOB are serviced in round robin. */ #else uint64_t lrf_arb_mode : 1; uint64_t rfb_arb_mode : 1; uint64_t rsp_arb_mode : 1; uint64_t mwf_crd : 4; uint64_t idxalias : 1; uint64_t fpen : 1; uint64_t fpempty : 1; uint64_t fpexp : 4; uint64_t dfill_dis : 1; uint64_t reserved_15_17 : 3; uint64_t lbist : 1; uint64_t bstrun : 1; uint64_t reserved_20_63 : 44; #endif } cn58xx; struct cvmx_l2c_cfg_cn58xxp1 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_15_63 : 49; uint64_t dfill_dis : 1; /**< L2C Dual Fill Disable When set, the L2C dual-fill performance feature is disabled. NOTE: This bit is only intended to evaluate the effectiveness of the dual-fill feature. For OPTIMAL performance, this bit should ALWAYS be zero. *** NOTE: O9N PASS1 Addition */ uint64_t fpexp : 4; /**< [CYA] Forward Progress Counter Exponent NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] When FPEN is enabled and the LFB is empty, the forward progress counter (FPCNT) is initialized to: FPCNT[24:0] = 2^(9+FPEXP) When the LFB is non-empty the FPCNT is decremented (every eclk interval). If the FPCNT reaches zero, the LFB no longer accepts new requests until either a) all of the current LFB entries have completed (to ensure forward progress). b) FPEMPTY=0 and another forward progress count interval timeout expires. EXAMPLE USE: If FPEXP=2, the FPCNT = 2048 eclks. (For eclk=500MHz(2ns), this would be ~4us). */ uint64_t fpempty : 1; /**< [CYA] Forward Progress Counter Empty NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] When set, if the forward progress counter expires, all new LFB-NQs are stopped UNTIL all current LFB entries have completed. When clear, if the forward progress counter expires, all new LFB-NQs are stopped UNTIL either a) all current LFB entries have completed. b) another forward progress interval expires NOTE: We may want to FREEZE/HANG the system when we encounter an LFB entry cannot complete, and there may be times when we want to allow further LFB-NQs to be permitted to help in further analyzing the source */ uint64_t fpen : 1; /**< [CYA] Forward Progress Counter Enable NOTE: Should NOT be exposed to customer! [FOR DEBUG ONLY] When set, enables the Forward Progress Counter to prevent new LFB entries from enqueueing until ALL current LFB entries have completed. 
*/ uint64_t idxalias : 1; /**< L2C Index Alias Enable When set, the L2 Tag/Data Store will alias the 11-bit index with the low order 11-bits of the tag. index[17:7] = (tag[28:18] ^ index[17:7]) NOTE: This bit must only be modified at boot time, when it can be guaranteed that no blocks have been loaded into the L2 Cache. The index aliasing is a performance enhancement feature which reduces the L2 cache thrashing experienced for regular stride references. NOTE: The index alias is stored in the LFB and VAB, and its effects are reversed for memory references (Victims, STT-Misses and Read-Misses) */ uint64_t mwf_crd : 4; /**< MWF Credit Threshold: When the remaining MWF credits become less than or equal to the MWF_CRD, the L2C will assert l2c__lmi_mwd_hiwater_a to signal the LMC to give writes (victims) higher priority. */ uint64_t rsp_arb_mode : 1; /**< RSP Arbitration Mode: - 0: Fixed Priority [HP=RFB, RMCF, RHCF, STRSP, LP=STRSC] - 1: Round Robin: [RFB(reflected I/O), RMCF(RdMiss), RHCF(RdHit), STRSP(ST RSP w/ invalidate), STRSC(ST RSP no invalidate)] */ uint64_t rfb_arb_mode : 1; /**< RFB Arbitration Mode: - 0: Fixed Priority - IOB->PP requests are higher priority than PP->IOB requests - 1: Round Robin - I/O requests from PP and IOB are serviced in round robin */ uint64_t lrf_arb_mode : 1; /**< RF Arbitration Mode: - 0: Fixed Priority - IOB memory requests are higher priority than PP memory requests. - 1: Round Robin - Memory requests from PP and IOB are serviced in round robin. */ #else uint64_t lrf_arb_mode : 1; uint64_t rfb_arb_mode : 1; uint64_t rsp_arb_mode : 1; uint64_t mwf_crd : 4; uint64_t idxalias : 1; uint64_t fpen : 1; uint64_t fpempty : 1; uint64_t fpexp : 4; uint64_t dfill_dis : 1; uint64_t reserved_15_63 : 49; #endif } cn58xxp1; } cvmx_l2c_cfg_t; /** * cvmx_l2c_dbg * * L2C_DBG = L2C DEBUG Register * * Description: L2C Tag/Data Store Debug Register * * Notes: * (1) When using the L2T, L2D or FINV Debug probe feature, the LDD command WILL NOT update the DuTags. * (2) L2T, L2D, FINV MUST BE mutually exclusive (only one set) * (3) Force Invalidate is intended as a means for SW to invalidate the L2 Cache while also writing back * dirty data to memory to maintain coherency. * (4) L2 Cache Lock Down feature MUST BE disabled (L2C_LCKBASE[LCK_ENA]=0) if ANY of the L2C debug * features (L2T, L2D, FINV) are enabled. */ typedef union { uint64_t u64; struct cvmx_l2c_dbg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_15_63 : 49; uint64_t lfb_enum : 4; /**< Specifies the LFB Entry# which is to be captured. */ uint64_t lfb_dmp : 1; /**< LFB Dump Enable: When written(=1), the contents of the LFB specified by LFB_ENUM[3:0] are captured into the L2C_LFB(0/1/2) registers. NOTE: Some fields of the LFB entry are unpredictable and dependent on usage. This is only intended to be used for HW debug. */ uint64_t ppnum : 4; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV] is enabled, this field determines which one-of-16 PPs is selected as the diagnostic PP. */ uint64_t set : 3; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV] is enabled, this field determines 1-of-n targeted sets to act upon. NOTE: L2C_DBG[SET] must never equal a crippled or unusable set (see UMSK* registers and Cripple mode fuses). */ uint64_t finv : 1; /**< Flush-Invalidate. When flush-invalidate is enable (FINV=1), all STF (L1 store-miss) commands generated from the diagnostic PP (L2C_DBG[PPNUM]) will invalidate the specified set (L2C_DBG[SET]) at the index specified in the STF address[17:7]. 
If a dirty block is detected (D=1), it is written back to memory. The contents of the invalid L2 Cache line is also 'scrubbed' with the STF write data. NOTE: If L2C_CFG[IDXALIAS]=1, the index specified in STF address[17:7] refers to the 'aliased' address. NOTE: An STF command with write data=ZEROES can be generated by SW using the Prefetch instruction with Hint=30d "prepare for Store", followed by a SYNCW. What is seen at the L2C as an STF w/wrdcnt=0 with all of its mask bits clear (indicates zero-fill data). A flush-invalidate will 'force-hit' the L2 cache at [index,set] and invalidate the entry (V=0/D=0/L=0/U=0). If the cache block is dirty, it is also written back to memory. The DuTag state is probed/updated as normal for an STF request. TYPICAL APPLICATIONS: 1) L2 Tag/Data ECC SW Recovery 2) Cache Unlocking NOTE: If the cacheline had been previously LOCKED(L=1), a flush-invalidate operation will explicitly UNLOCK (L=0) the set/index specified. NOTE: The diagnostic PP cores can generate STF commands to the L2 Cache whenever all 128 bytes in a block are written. SW must take this into consideration to avoid 'errant' Flush-Invalidates. */ uint64_t l2d : 1; /**< When enabled (and L2C_DBG[L2T]=0), fill data is returned directly from the L2 Data Store (regardless of hit/miss) when an LDD(L1 load-miss) command is issued from a PP determined by the L2C_DBG[PPNUM] field. The selected set# is determined by the L2C_DBG[SET] field, and the index is determined from the address[17:7] associated with the LDD command. This 'force-hit' will NOT alter the current L2 Tag state OR the DuTag state. */ uint64_t l2t : 1; /**< When enabled, L2 Tag information [V,D,L,U,phys_addr[33:18]] is returned on the data bus starting at +32(and +96) bytes offset from the beginning of cacheline when an LDD (L1 load-miss) command is issued from a PP determined by the L2C_DBG[PPNUM] field. The selected L2 set# is determined by the L2C_DBG[SET] field, and the L2 index is determined from the phys_addr[17:7] associated with the LDD command. This 'L2 force-hit' will NOT alter the current L2 Tag state OR the DuTag state. NOTE: The diagnostic PP should issue a d-stream load to an aligned cacheline+0x20(+0x60) in order to have the return VDLUTAG information (in OW2/OW6) written directly into the proper PP register. The diagnostic PP should also flush it's local L1 cache after use(to ensure data coherency). NOTE: The position of the VDLUTAG data in the destination register is dependent on the endian mode(big/little). NOTE: N3K-Pass2 modification. (This bit's functionality has changed since Pass1-in the following way). NOTE: (For L2C BitMap testing of L2 Data Store OW ECC): If L2D_ERR[ECC_ENA]=0, the OW ECC from the selected half cacheline (see: L2D_ERR[BMHCLSEL] is also conditionally latched into the L2D_FSYN0/1 CSRs if an LDD command is detected from the diagnostic PP(L2C_DBG[PPNUM]). */ #else uint64_t l2t : 1; uint64_t l2d : 1; uint64_t finv : 1; uint64_t set : 3; uint64_t ppnum : 4; uint64_t lfb_dmp : 1; uint64_t lfb_enum : 4; uint64_t reserved_15_63 : 49; #endif } s; struct cvmx_l2c_dbg_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_13_63 : 51; uint64_t lfb_enum : 2; /**< Specifies the LFB Entry# which is to be captured. */ uint64_t lfb_dmp : 1; /**< LFB Dump Enable: When written(=1), the contents of the LFB specified by LFB_ENUM are captured into the L2C_LFB(0/1/2) registers. NOTE: Some fields of the LFB entry are unpredictable and dependent on usage. This is only intended to be used for HW debug. 
*/ uint64_t reserved_7_9 : 3; uint64_t ppnum : 1; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV] is enabled, this field determines which PP is selected as the diagnostic PP. NOTE: For O1P single core PPNUM=0 (MBZ) */ uint64_t reserved_5_5 : 1; uint64_t set : 2; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV] is enabled, this field determines 1-of-n targeted sets to act upon. NOTE: L2C_DBG[SET] must never equal a crippled or unusable set (see UMSK* registers and Cripple mode fuses). */ uint64_t finv : 1; /**< Flush-Invalidate. When flush-invalidate is enabled (FINV=1), all STF (L1 store-miss) commands generated from the PP will invalidate the specified set(L2C_DBG[SET]) at the index specified in the STF address[14:7]. If a dirty block is detected(D=1), it is written back to memory. The contents of the invalid L2 Cache line are also 'scrubbed' with the STF write data. NOTE: If L2C_CFG[IDXALIAS]=1, the index specified in STF address[14:7] refers to the 'aliased' address. NOTE: An STF command with write data=ZEROES can be generated by SW using the Prefetch instruction with Hint=30d "prepare for Store", followed by a SYNCW. This is seen at the L2C as an STF w/wrdcnt=0 with all of its mask bits clear (indicating zero-fill data). A flush-invalidate will 'force-hit' the L2 cache at [index,set] and invalidate the entry (V=0/D=0/L=0/U=0). If the cache block is dirty, it is also written back to memory. The DuTag state is probed/updated as normal for an STF request. TYPICAL APPLICATIONS: 1) L2 Tag/Data ECC SW Recovery 2) Cache Unlocking NOTE: If the cacheline had been previously LOCKED(L=1), a flush-invalidate operation will explicitly UNLOCK (L=0) the set/index specified. NOTE: The PP can generate STF(L1 store-miss) commands to the L2 Cache whenever all 128 bytes in a block are written. SW must take this into consideration to avoid 'errant' Flush-Invalidates. */ uint64_t l2d : 1; /**< When enabled (and L2C_DBG[L2T]=0), fill data is returned directly from the L2 Data Store (regardless of hit/miss) when an LDD(L1 load-miss) command is issued from the PP. The selected set# is determined by the L2C_DBG[SET] field, and the index is determined from the address[14:7] associated with the LDD command. This 'force-hit' will NOT alter the current L2 Tag state OR the DuTag state. */ uint64_t l2t : 1; /**< When enabled, L2 Tag information [V,D,L,U,phys_addr[33:15]] is returned on the data bus starting at +32(and +96) bytes offset from the beginning of cacheline when an LDD (L1 load-miss) command is issued from the PP. The selected L2 set# is determined by the L2C_DBG[SET] field, and the L2 index is determined from the phys_addr[14:7] associated with the LDD command. This 'L2 force-hit' will NOT alter the current L2 Tag state OR the DuTag state. NOTE: The diagnostic PP should issue a d-stream load to an aligned cacheline+0x20(+0x60) in order to have the return VDLUTAG information (in OW2/OW6) written directly into the proper PP register. The diagnostic PP should also flush its local L1 cache after use(to ensure data coherency). NOTE: The position of the VDLUTAG data in the destination register is dependent on the endian mode(big/little). NOTE: (For L2C BitMap testing of L2 Data Store OW ECC): If L2D_ERR[ECC_ENA]=0, the OW ECC from the selected half cacheline (see: L2D_ERR[BMHCLSEL]) is also conditionally latched into the L2D_FSYN0/1 CSRs if an LDD(L1 load-miss) is detected.
*/ #else uint64_t l2t : 1; uint64_t l2d : 1; uint64_t finv : 1; uint64_t set : 2; uint64_t reserved_5_5 : 1; uint64_t ppnum : 1; uint64_t reserved_7_9 : 3; uint64_t lfb_dmp : 1; uint64_t lfb_enum : 2; uint64_t reserved_13_63 : 51; #endif } cn30xx; struct cvmx_l2c_dbg_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_14_63 : 50; uint64_t lfb_enum : 3; /**< Specifies the LFB Entry# which is to be captured. */ uint64_t lfb_dmp : 1; /**< LFB Dump Enable: When written(=1), the contents of the LFB specified by LFB_ENUM are captured into the L2C_LFB(0/1/2) registers. NOTE: Some fields of the LFB entry are unpredictable and dependent on usage. This is only intended to be used for HW debug. */ uint64_t reserved_7_9 : 3; uint64_t ppnum : 1; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV] is enabled, this field determines which PP is selected as the diagnostic PP. */ uint64_t reserved_5_5 : 1; uint64_t set : 2; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV] is enabled, this field determines 1-of-n targeted sets to act upon. NOTE: L2C_DBG[SET] must never equal a crippled or unusable set (see UMSK* registers and Cripple mode fuses). */ uint64_t finv : 1; /**< Flush-Invalidate. When flush-invalidate is enabled (FINV=1), all STF (L1 store-miss) commands generated from the diagnostic PP (L2C_DBG[PPNUM]) will invalidate the specified set (L2C_DBG[SET]) at the index specified in the STF address[15:7]. If a dirty block is detected (D=1), it is written back to memory. The contents of the invalid L2 Cache line are also 'scrubbed' with the STF write data. NOTE: If L2C_CFG[IDXALIAS]=1, the index specified in STF address[15:7] refers to the 'aliased' address. NOTE: An STF command with write data=ZEROES can be generated by SW using the Prefetch instruction with Hint=30d "prepare for Store", followed by a SYNCW. This is seen at the L2C as an STF w/wrdcnt=0 with all of its mask bits clear (indicating zero-fill data). A flush-invalidate will 'force-hit' the L2 cache at [index,set] and invalidate the entry (V=0/D=0/L=0/U=0). If the cache block is dirty, it is also written back to memory. The DuTag state is probed/updated as normal for an STF request. TYPICAL APPLICATIONS: 1) L2 Tag/Data ECC SW Recovery 2) Cache Unlocking NOTE: If the cacheline had been previously LOCKED(L=1), a flush-invalidate operation will explicitly UNLOCK (L=0) the set/index specified. NOTE: The diagnostic PP cores can generate STF(L1 store-miss) commands to the L2 Cache whenever all 128 bytes in a block are written. SW must take this into consideration to avoid 'errant' Flush-Invalidates. */ uint64_t l2d : 1; /**< When enabled (and L2C_DBG[L2T]=0), fill data is returned directly from the L2 Data Store (regardless of hit/miss) when an LDD(L1 load-miss) command is issued from a PP determined by the L2C_DBG[PPNUM] field. The selected set# is determined by the L2C_DBG[SET] field, and the index is determined from the address[15:7] associated with the LDD command. This 'L2 force-hit' will NOT alter the current L2 Tag state OR the DuTag state. */ uint64_t l2t : 1; /**< When enabled, L2 Tag information [V,D,L,U,phys_addr[33:16]] is returned on the data bus starting at +32(and +96) bytes offset from the beginning of cacheline when an LDD (L1 load-miss) command is issued from a PP determined by the L2C_DBG[PPNUM] field. The selected L2 set# is determined by the L2C_DBG[SET] field, and the L2 index is determined from the phys_addr[15:7] associated with the LDD command.
This 'L2 force-hit' will NOT alter the current L2 Tag state OR the DuTag state. NOTE: The diagnostic PP should issue a d-stream load to an aligned cacheline+0x20(+0x60) in order to have the return VDLUTAG information (in OW2/OW6) written directly into the proper PP register. The diagnostic PP should also flush its local L1 cache after use(to ensure data coherency). NOTE: The position of the VDLUTAG data in the destination register is dependent on the endian mode(big/little). NOTE: (For L2C BitMap testing of L2 Data Store OW ECC): If L2D_ERR[ECC_ENA]=0, the OW ECC from the selected half cacheline (see: L2D_ERR[BMHCLSEL]) is also conditionally latched into the L2D_FSYN0/1 CSRs if an LDD(L1 load-miss) is detected from the diagnostic PP (L2C_DBG[PPNUM]). */ #else uint64_t l2t : 1; uint64_t l2d : 1; uint64_t finv : 1; uint64_t set : 2; uint64_t reserved_5_5 : 1; uint64_t ppnum : 1; uint64_t reserved_7_9 : 3; uint64_t lfb_dmp : 1; uint64_t lfb_enum : 3; uint64_t reserved_14_63 : 50; #endif } cn31xx; struct cvmx_l2c_dbg_s cn38xx; struct cvmx_l2c_dbg_s cn38xxp2; struct cvmx_l2c_dbg_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_14_63 : 50; uint64_t lfb_enum : 3; /**< Specifies the LFB Entry# which is to be captured. */ uint64_t lfb_dmp : 1; /**< LFB Dump Enable: When written(=1), the contents of the LFB specified by LFB_ENUM[2:0] are captured into the L2C_LFB(0/1/2) registers. NOTE: Some fields of the LFB entry are unpredictable and dependent on usage. This is only intended to be used for HW debug. */ uint64_t reserved_7_9 : 3; uint64_t ppnum : 1; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV] is enabled, this field determines which 1-of-2 PPs is selected as the diagnostic PP. */ uint64_t set : 3; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV] is enabled, this field determines 1-of-n targeted sets to act upon. NOTE: L2C_DBG[SET] must never equal a crippled or unusable set (see UMSK* registers and Cripple mode fuses). */ uint64_t finv : 1; /**< Flush-Invalidate. When flush-invalidate is enabled (FINV=1), all STF (L1 store-miss) commands generated from the diagnostic PP (L2C_DBG[PPNUM]) will invalidate the specified set (L2C_DBG[SET]) at the index specified in the STF address[13:7]. If a dirty block is detected (D=1), it is written back to memory. The contents of the invalid L2 Cache line are also 'scrubbed' with the STF write data. NOTE: If L2C_CFG[IDXALIAS]=1, the index specified in STF address[13:7] refers to the 'aliased' address. NOTE: An STF command with write data=ZEROES can be generated by SW using the Prefetch instruction with Hint=30d "prepare for Store", followed by a SYNCW. This is seen at the L2C as an STF w/wrdcnt=0 with all of its mask bits clear (indicating zero-fill data). A flush-invalidate will 'force-hit' the L2 cache at [index,set] and invalidate the entry (V=0/D=0/L=0/U=0). If the cache block is dirty, it is also written back to memory. The DuTag state is probed/updated as normal for an STF request. TYPICAL APPLICATIONS: 1) L2 Tag/Data ECC SW Recovery 2) Cache Unlocking NOTE: If the cacheline had been previously LOCKED(L=1), a flush-invalidate operation will explicitly UNLOCK (L=0) the set/index specified. NOTE: The diagnostic PP cores can generate STF commands to the L2 Cache whenever all 128 bytes in a block are written. SW must take this into consideration to avoid 'errant' Flush-Invalidates.
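ILLUSTRATIVE SW SEQUENCE (a sketch only, assuming the cvmx_write_csr() accessor and the CVMX_L2C_DBG address macro that the SDK provides outside this file; target_set and diag_pp are hypothetical placeholders): cvmx_l2c_dbg_t dbg; dbg.u64 = 0; dbg.s.finv = 1; dbg.s.set = target_set; dbg.s.ppnum = diag_pp; cvmx_write_csr(CVMX_L2C_DBG, dbg.u64); then, from the diagnostic PP, issue the Prefetch (Hint=30d "prepare for Store") plus SYNCW pair described above to the target index, and finally write L2C_DBG back to 0. Per the register Notes, L2C_LCKBASE[LCK_ENA] must be 0 while FINV is in use.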
*/ uint64_t l2d : 1; /**< When enabled (and L2C_DBG[L2T]=0), fill data is returned directly from the L2 Data Store (regardless of hit/miss) when an LDD(L1 load-miss) command is issued from a PP determined by the L2C_DBG[PPNUM] field. The selected set# is determined by the L2C_DBG[SET] field, and the index is determined from the address[13:7] associated with the LDD command. This 'force-hit' will NOT alter the current L2 Tag state OR the DuTag state. */ uint64_t l2t : 1; /**< When enabled, L2 Tag information [V,D,L,U,phys_addr[33:14]] is returned on the data bus starting at +32(and +96) bytes offset from the beginning of cacheline when an LDD (L1 load-miss) command is issued from a PP determined by the L2C_DBG[PPNUM] field. The selected L2 set# is determined by the L2C_DBG[SET] field, and the L2 index is determined from the phys_addr[13:7] associated with the LDD command. This 'L2 force-hit' will NOT alter the current L2 Tag state OR the DuTag state. NOTE: The diagnostic PP should issue a d-stream load to an aligned cacheline+0x20(+0x60) in order to have the return VDLUTAG information (in OW2/OW6) written directly into the proper PP register. The diagnostic PP should also flush its local L1 cache after use(to ensure data coherency). NOTE: The position of the VDLUTAG data in the destination register is dependent on the endian mode(big/little). NOTE: (For L2C BitMap testing of L2 Data Store OW ECC): If L2D_ERR[ECC_ENA]=0, the OW ECC from the selected half cacheline (see: L2D_ERR[BMHCLSEL]) is also conditionally latched into the L2D_FSYN0/1 CSRs if an LDD command is detected from the diagnostic PP(L2C_DBG[PPNUM]). */ #else uint64_t l2t : 1; uint64_t l2d : 1; uint64_t finv : 1; uint64_t set : 3; uint64_t ppnum : 1; uint64_t reserved_7_9 : 3; uint64_t lfb_dmp : 1; uint64_t lfb_enum : 3; uint64_t reserved_14_63 : 50; #endif } cn50xx; struct cvmx_l2c_dbg_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_14_63 : 50; uint64_t lfb_enum : 3; /**< Specifies the LFB Entry# which is to be captured. */ uint64_t lfb_dmp : 1; /**< LFB Dump Enable: When written(=1), the contents of the LFB specified by LFB_ENUM[2:0] are captured into the L2C_LFB(0/1/2) registers. NOTE: Some fields of the LFB entry are unpredictable and dependent on usage. This is only intended to be used for HW debug. */ uint64_t reserved_8_9 : 2; uint64_t ppnum : 2; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV] is enabled, this field determines which 1-of-4 PPs is selected as the diagnostic PP. */ uint64_t set : 3; /**< When L2C_DBG[L2T] or L2C_DBG[L2D] or L2C_DBG[FINV] is enabled, this field determines 1-of-n targeted sets to act upon. NOTE: L2C_DBG[SET] must never equal a crippled or unusable set (see UMSK* registers and Cripple mode fuses). */ uint64_t finv : 1; /**< Flush-Invalidate. When flush-invalidate is enabled (FINV=1), all STF (L1 store-miss) commands generated from the diagnostic PP (L2C_DBG[PPNUM]) will invalidate the specified set (L2C_DBG[SET]) at the index specified in the STF address[15:7]. If a dirty block is detected (D=1), it is written back to memory. The contents of the invalid L2 Cache line are also 'scrubbed' with the STF write data. NOTE: If L2C_CFG[IDXALIAS]=1, the index specified in STF address[15:7] refers to the 'aliased' address. NOTE: An STF command with write data=ZEROES can be generated by SW using the Prefetch instruction with Hint=30d "prepare for Store", followed by a SYNCW. This is seen at the L2C as an STF w/wrdcnt=0 with all of its mask bits clear (indicating zero-fill data).
A flush-invalidate will 'force-hit' the L2 cache at [index,set] and invalidate the entry (V=0/D=0/L=0/U=0). If the cache block is dirty, it is also written back to memory. The DuTag state is probed/updated as normal for an STF request. TYPICAL APPLICATIONS: 1) L2 Tag/Data ECC SW Recovery 2) Cache Unlocking NOTE: If the cacheline had been previously LOCKED(L=1), a flush-invalidate operation will explicitly UNLOCK (L=0) the set/index specified. NOTE: The diagnostic PP cores can generate STF commands to the L2 Cache whenever all 128 bytes in a block are written. SW must take this into consideration to avoid 'errant' Flush-Invalidates. */ uint64_t l2d : 1; /**< When enabled (and L2C_DBG[L2T]=0), fill data is returned directly from the L2 Data Store (regardless of hit/miss) when an LDD(L1 load-miss) command is issued from a PP determined by the L2C_DBG[PPNUM] field. The selected set# is determined by the L2C_DBG[SET] field, and the index is determined from the address[15:7] associated with the LDD command. This 'force-hit' will NOT alter the current L2 Tag state OR the DuTag state. */ uint64_t l2t : 1; /**< When enabled, L2 Tag information [V,D,L,U,phys_addr[33:16]] is returned on the data bus starting at +32(and +96) bytes offset from the beginning of cacheline when an LDD (L1 load-miss) command is issued from a PP determined by the L2C_DBG[PPNUM] field. The selected L2 set# is determined by the L2C_DBG[SET] field, and the L2 index is determined from the phys_addr[15:7] associated with the LDD command. This 'L2 force-hit' will NOT alter the current L2 Tag state OR the DuTag state. NOTE: The diagnostic PP should issue a d-stream load to an aligned cacheline+0x20(+0x60) in order to have the return VDLUTAG information (in OW2/OW6) written directly into the proper PP register. The diagnostic PP should also flush its local L1 cache after use(to ensure data coherency). NOTE: The position of the VDLUTAG data in the destination register is dependent on the endian mode(big/little). NOTE: (For L2C BitMap testing of L2 Data Store OW ECC): If L2D_ERR[ECC_ENA]=0, the OW ECC from the selected half cacheline (see: L2D_ERR[BMHCLSEL]) is also conditionally latched into the L2D_FSYN0/1 CSRs if an LDD command is detected from the diagnostic PP(L2C_DBG[PPNUM]). */ #else uint64_t l2t : 1; uint64_t l2d : 1; uint64_t finv : 1; uint64_t set : 3; uint64_t ppnum : 2; uint64_t reserved_8_9 : 2; uint64_t lfb_dmp : 1; uint64_t lfb_enum : 3; uint64_t reserved_14_63 : 50; #endif } cn52xx; struct cvmx_l2c_dbg_cn52xx cn52xxp1; struct cvmx_l2c_dbg_s cn56xx; struct cvmx_l2c_dbg_s cn56xxp1; struct cvmx_l2c_dbg_s cn58xx; struct cvmx_l2c_dbg_s cn58xxp1; } cvmx_l2c_dbg_t; /** * cvmx_l2c_dut * * L2C_DUT = L2C DUTAG Register * * Description: L2C Duplicate Tag State Register * * Notes: * (1) When using the L2T, L2D or FINV Debug probe feature, an LDD command issued by the diagnostic PP * WILL NOT update the DuTags. * (2) L2T, L2D, FINV MUST BE mutually exclusive (only one enabled at a time). * (3) Force Invalidate is intended as a means for SW to invalidate the L2 Cache while also writing back * dirty data to memory to maintain coherency. (A side effect of FINV is that an LDD L2 fill is * launched which fills data into the L2 DS). */ typedef union { uint64_t u64; struct cvmx_l2c_dut_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t dtena : 1; /**< DuTag Diagnostic read enable.
When L2C_DUT[DTENA]=1, all LDD(L1 load-miss) commands issued from the diagnostic PP (L2C_DBG[PPNUM]) will capture the DuTag state (V|L1TAG) of the PP# (specified in the LDD address[29:26]) into the L2C_DUT CSR register. This allows the diagPP to read ALL DuTags (from any PP). The DuTag Set# to capture is extracted from the LDD address[25:20]. The diagnostic PP would issue the LDD then read the L2C_DUT register (one at a time). This LDD 'L2 force-hit' will NOT alter the current L2 Tag State OR the DuTag state. NOTE: For O9N the DuTag SIZE has doubled (to 16KB) where each DuTag is organized as 2x 64-way entries. The LDD address[7] determines which 1(of-2) internal 64-ways to select. The fill data is returned directly from the L2 Data Store(regardless of hit/miss) when an LDD command is issued from a PP determined by the L2C_DBG[PPNUM] field. The selected L2 Set# is determined by the L2C_DBG[SET] field, and the index is determined from the address[17:7] associated with the LDD command. This 'L2 force-hit' will NOT alter the current L2 Tag state OR the DuTag state. NOTE: In order for the DiagPP to generate an LDD command to the L2C, it must first force an L1 Dcache flush. */ uint64_t reserved_30_30 : 1; uint64_t dt_vld : 1; /**< Duplicate L1 Tag Valid bit latched in for previous LDD(L1 load-miss) command sourced by diagnostic PP. */ uint64_t dt_tag : 29; /**< Duplicate L1 Tag[35:7] latched in for previous LDD(L1 load-miss) command sourced by diagnostic PP. */ #else uint64_t dt_tag : 29; uint64_t dt_vld : 1; uint64_t reserved_30_30 : 1; uint64_t dtena : 1; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_l2c_dut_s cn30xx; struct cvmx_l2c_dut_s cn31xx; struct cvmx_l2c_dut_s cn38xx; struct cvmx_l2c_dut_s cn38xxp2; struct cvmx_l2c_dut_s cn50xx; struct cvmx_l2c_dut_s cn52xx; struct cvmx_l2c_dut_s cn52xxp1; struct cvmx_l2c_dut_s cn56xx; struct cvmx_l2c_dut_s cn56xxp1; struct cvmx_l2c_dut_s cn58xx; struct cvmx_l2c_dut_s cn58xxp1; } cvmx_l2c_dut_t; /** * cvmx_l2c_grpwrr0 * * L2C_GRPWRR0 = L2C PP Weighted Round \#0 Register * * Description: Defines Weighted rounds(32) for Group PLC0,PLC1 * * Notes: * - Starvation of a group 'could' occur, unless SW takes the precaution to ensure that each GROUP * participates in at least 1(of 32) rounds (ie: At least 1 bit(of 32) should be clear). */ typedef union { uint64_t u64; struct cvmx_l2c_grpwrr0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t plc1rmsk : 32; /**< PLC1 Group#1 Weighted Round Mask Each bit represents 1 of 32 rounds for Group \#1's participation. When a 'round' bit is set, Group#1 is 'masked' and DOES NOT participate. When a 'round' bit is clear, Group#1 WILL participate in the arbitration for this round. */ uint64_t plc0rmsk : 32; /**< PLC Group#0 Weighted Round Mask Each bit represents 1 of 32 rounds for Group \#0's participation. When a 'round' bit is set, Group#0 is 'masked' and DOES NOT participate. When a 'round' bit is clear, Group#0 WILL participate in the arbitration for this round.
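For example (illustrative values): PLC0RMSK=0x00000000 lets Group#0 compete in all 32 rounds, while PLC0RMSK=0xAAAAAAAA masks it from every other round, roughly halving its share of the arbitration bandwidth. Per the Notes above, at least 1 of the 32 bits should remain clear so the group is never starved.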
*/ #else uint64_t plc0rmsk : 32; uint64_t plc1rmsk : 32; #endif } s; struct cvmx_l2c_grpwrr0_s cn52xx; struct cvmx_l2c_grpwrr0_s cn52xxp1; struct cvmx_l2c_grpwrr0_s cn56xx; struct cvmx_l2c_grpwrr0_s cn56xxp1; } cvmx_l2c_grpwrr0_t; /** * cvmx_l2c_grpwrr1 * * L2C_GRPWRR1 = L2C PP Weighted Round \#1 Register * * Description: Defines Weighted Rounds(32) for Group PLC2,ILC * * Notes: * - Starvation of a group 'could' occur, unless SW takes the precaution to ensure that each GROUP * participates in at least 1(of 32) rounds (ie: At least 1 bit(of 32) should be clear). */ typedef union { uint64_t u64; struct cvmx_l2c_grpwrr1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t ilcrmsk : 32; /**< ILC (IOB) Weighted Round Mask Each bit represents 1 of 32 rounds for IOB participation. When a 'round' bit is set, IOB is 'masked' and DOES NOT participate. When a 'round' bit is clear, IOB WILL participate in the arbitration for this round. */ uint64_t plc2rmsk : 32; /**< PLC Group#2 Weighted Round Mask Each bit represents 1 of 32 rounds for Group \#2's participation. When a 'round' bit is set, Group#2 is 'masked' and DOES NOT participate. When a 'round' bit is clear, Group#2 WILL participate in the arbitration for this round. */ #else uint64_t plc2rmsk : 32; uint64_t ilcrmsk : 32; #endif } s; struct cvmx_l2c_grpwrr1_s cn52xx; struct cvmx_l2c_grpwrr1_s cn52xxp1; struct cvmx_l2c_grpwrr1_s cn56xx; struct cvmx_l2c_grpwrr1_s cn56xxp1; } cvmx_l2c_grpwrr1_t; /** * cvmx_l2c_int_en * * L2C_INT_EN = L2C Global Interrupt Enable Register * * Description: */ typedef union { uint64_t u64; struct cvmx_l2c_int_en_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_9_63 : 55; uint64_t lck2ena : 1; /**< L2 Tag Lock Error2 Interrupt Enable bit NOTE: This is the 'same' bit as L2T_ERR[LCK_INTENA2] */ uint64_t lckena : 1; /**< L2 Tag Lock Error Interrupt Enable bit NOTE: This is the 'same' bit as L2T_ERR[LCK_INTENA] */ uint64_t l2ddeden : 1; /**< L2 Data ECC Double Error Detect(DED) Interrupt Enable bit When set, allows interrupts to be reported on double bit (uncorrectable) errors from the L2 Data Arrays. NOTE: This is the 'same' bit as L2D_ERR[DED_INTENA] */ uint64_t l2dsecen : 1; /**< L2 Data ECC Single Error Correct(SEC) Interrupt Enable bit When set, allows interrupts to be reported on single bit (correctable) errors from the L2 Data Arrays. NOTE: This is the 'same' bit as L2D_ERR[SEC_INTENA] */ uint64_t l2tdeden : 1; /**< L2 Tag ECC Double Error Detect(DED) Interrupt Enable bit NOTE: This is the 'same' bit as L2T_ERR[DED_INTENA] */ uint64_t l2tsecen : 1; /**< L2 Tag ECC Single Error Correct(SEC) Interrupt Enable bit. When set, allows interrupts to be reported on single bit (correctable) errors from the L2 Tag Arrays.
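An illustrative 'enable all ECC reporting' sketch (assuming the cvmx_write_csr() accessor and CVMX_L2C_INT_EN address macro provided elsewhere in the SDK; not part of the generated definitions): cvmx_l2c_int_en_t en; en.u64 = 0; en.s.l2tsecen = 1; en.s.l2tdeden = 1; en.s.l2dsecen = 1; en.s.l2ddeden = 1; cvmx_write_csr(CVMX_L2C_INT_EN, en.u64);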
NOTE: This is the 'same' bit as L2T_ERR[SEC_INTENA] */ uint64_t oob3en : 1; /**< DMA Out of Bounds Interrupt Enable Range#3 */ uint64_t oob2en : 1; /**< DMA Out of Bounds Interrupt Enable Range#2 */ uint64_t oob1en : 1; /**< DMA Out of Bounds Interrupt Enable Range#1 */ #else uint64_t oob1en : 1; uint64_t oob2en : 1; uint64_t oob3en : 1; uint64_t l2tsecen : 1; uint64_t l2tdeden : 1; uint64_t l2dsecen : 1; uint64_t l2ddeden : 1; uint64_t lckena : 1; uint64_t lck2ena : 1; uint64_t reserved_9_63 : 55; #endif } s; struct cvmx_l2c_int_en_s cn52xx; struct cvmx_l2c_int_en_s cn52xxp1; struct cvmx_l2c_int_en_s cn56xx; struct cvmx_l2c_int_en_s cn56xxp1; } cvmx_l2c_int_en_t; /** * cvmx_l2c_int_stat * * L2C_INT_STAT = L2C Global Interrupt Status Register * * Description: */ typedef union { uint64_t u64; struct cvmx_l2c_int_stat_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_9_63 : 55; uint64_t lck2 : 1; /**< HW detected a case where a Rd/Wr Miss from PP#n could not find an available/unlocked set (for replacement). Most likely, this is a result of SW mixing SET PARTITIONING with ADDRESS LOCKING. If SW allows another PP to LOCKDOWN all SETs available to PP#n, then a Rd/Wr Miss from PP#n will be unable to determine a 'valid' replacement set (since LOCKED addresses should NEVER be replaced). If such an event occurs, the HW will select the smallest available SET (specified by UMSK'x') as the replacement set, and the address is unlocked. NOTE: This is the 'same' bit as L2T_ERR[LCKERR2] */ uint64_t lck : 1; /**< SW attempted to LOCK DOWN the last available set of the INDEX (which is ignored by HW - but reported to SW). The LDD(L1 load-miss) for the LOCK operation is completed successfully; however, the address is NOT locked. NOTE: 'Available' sets take the L2C_SPAR*[UMSK*] into account. For example, if diagnostic PPx has UMSKx defined to only use SETs [1:0], and SET1 had been previously LOCKED, then an attempt to LOCK the last available SET0 would result in a LCKERR. (This is to ensure that at least 1 SET at each INDEX remains unLOCKED and available for general use by other PPs). NOTE: This is the 'same' bit as L2T_ERR[LCKERR] */ uint64_t l2dded : 1; /**< L2D Double Error detected (DED) NOTE: This is the 'same' bit as L2D_ERR[DED_ERR] */ uint64_t l2dsec : 1; /**< L2D Single Error corrected (SEC) NOTE: This is the 'same' bit as L2D_ERR[SEC_ERR] */ uint64_t l2tded : 1; /**< L2T Double Bit Error detected (DED) During every L2 Tag Probe, all 8 sets' Tags (at a given index) are checked for double bit errors(DBEs). This bit is set if ANY of the 8 sets contains a DBE. DBEs also generate an interrupt(if enabled). NOTE: This is the 'same' bit as L2T_ERR[DED_ERR] */ uint64_t l2tsec : 1; /**< L2T Single Bit Error corrected (SEC) status During every L2 Tag Probe, all 8 sets' Tags (at a given index) are checked for single bit errors(SBEs). This bit is set if ANY of the 8 sets contains an SBE. SBEs are auto corrected in HW and generate an interrupt(if enabled).
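An illustrative status poll (a sketch, assuming the cvmx_read_csr() accessor and CVMX_L2C_INT_STAT address macro from the SDK): cvmx_l2c_int_stat_t st; st.u64 = cvmx_read_csr(CVMX_L2C_INT_STAT); if (st.s.l2tded || st.s.l2dded) { handle the uncorrectable error }; SEC events report errors that HW has already corrected.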
NOTE: This is the 'same' bit as L2T_ERR[SEC_ERR] */ uint64_t oob3 : 1; /**< DMA Out of Bounds Interrupt Status Range#3 */ uint64_t oob2 : 1; /**< DMA Out of Bounds Interrupt Status Range#2 */ uint64_t oob1 : 1; /**< DMA Out of Bounds Interrupt Status Range#1 */ #else uint64_t oob1 : 1; uint64_t oob2 : 1; uint64_t oob3 : 1; uint64_t l2tsec : 1; uint64_t l2tded : 1; uint64_t l2dsec : 1; uint64_t l2dded : 1; uint64_t lck : 1; uint64_t lck2 : 1; uint64_t reserved_9_63 : 55; #endif } s; struct cvmx_l2c_int_stat_s cn52xx; struct cvmx_l2c_int_stat_s cn52xxp1; struct cvmx_l2c_int_stat_s cn56xx; struct cvmx_l2c_int_stat_s cn56xxp1; } cvmx_l2c_int_stat_t; /** * cvmx_l2c_lckbase * * L2C_LCKBASE = L2C LockDown Base Register * * Description: L2C LockDown Base Register * * Notes: * (1) SW RESTRICTION \#1: SW must manage the L2 Data Store lockdown space such that at least 1 * set per cache line remains in the 'unlocked' (normal) state to allow general caching operations. * If SW violates this restriction, a status bit is set (LCK_ERR) and an interrupt is posted. * [this limits the total lockdown space to 7/8ths of the total L2 data store = 896KB] * (2) IOB initiated LDI commands are ignored (only PP initiated LDI/LDD commands are considered * for lockdown). * (3) To 'unlock' a locked cache line, SW can use the FLUSH-INVAL CSR mechanism (see L2C_DBG[FINV]). * (4) LCK_ENA MUST only be activated when debug modes are disabled (L2C_DBG[L2T], L2C_DBG[L2D], L2C_DBG[FINV]). */ typedef union { uint64_t u64; struct cvmx_l2c_lckbase_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_31_63 : 33; uint64_t lck_base : 27; /**< Base Memory block address[33:7]. Specifies the starting address of the lockdown region. */ uint64_t reserved_1_3 : 3; uint64_t lck_ena : 1; /**< L2 Cache Lock Enable When the LCK_ENA=1, all LDI(I-stream Load) or LDD(L1 load-miss) commands issued from the diagnostic PP (specified by the L2C_DBG[PPNUM]), which fall within a predefined lockdown address range (specified by: [lck_base:lck_base+lck_offset]) are LOCKED in the L2 cache. The LOCKED state is denoted using an explicit L2 Tag bit (L=1). If the LOCK request L2-Hits (on ANY SET), then data is returned from the L2 and the hit set is updated to the LOCKED state. NOTE: If the Hit Set# is outside the available sets for a given PP (see UMSK'x'), then the LOCK bit is still SET. If the programmer's intent is to explicitly LOCK addresses into 'available' sets, care must be taken to flush-invalidate the cache first (to avoid such situations). Not following this procedure can lead to LCKERR2 interrupts. If the LOCK request L2-Misses, a replacement set is chosen (from the available sets, UMSK'x'). If the replacement set contains a dirty-victim it is written back to memory. Memory read data is then written into the replacement set, and the replacement SET is updated to the LOCKED state(L=1). NOTE: SETs that contain LOCKED addresses are excluded from the replacement set selection algorithm. NOTE: The LDD command will allocate the DuTag as normal. NOTE: If L2C_CFG[IDXALIAS]=1, the address is 'aliased' first before being checked against the lockdown address range. To ensure an 'aliased' address is properly locked, it is recommended that SW preload the 'aliased' locked address into the L2C_LCKBASE[LCK_BASE] register (while keeping L2C_LCKOFF[LCK_OFFSET]=0). NOTE: The OCTEON(N3) implementation only supports 16GB(MAX) of physical memory. Therefore, only byte address[33:0] are used (ie: address[35:34] are ignored).
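ILLUSTRATIVE LOCKDOWN SEQUENCE (a sketch only, assuming the cvmx_write_csr() accessor and the CVMX_L2C_LCKOFF/CVMX_L2C_LCKBASE address macros from the SDK; nlines and phys_addr are hypothetical placeholders): cvmx_l2c_lckoff_t off; off.u64 = 0; off.s.lck_offset = nlines - 1; cvmx_write_csr(CVMX_L2C_LCKOFF, off.u64); cvmx_l2c_lckbase_t base; base.u64 = 0; base.s.lck_base = phys_addr >> 7; base.s.lck_ena = 1; cvmx_write_csr(CVMX_L2C_LCKBASE, base.u64); the diagnostic PP then loads each 128B line in [phys_addr, phys_addr + nlines*128), and SW finally clears LCK_ENA. Per SW RESTRICTION \#1 in the Notes above, at least 1 set per index must remain unlocked.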
*/ #else uint64_t lck_ena : 1; uint64_t reserved_1_3 : 3; uint64_t lck_base : 27; uint64_t reserved_31_63 : 33; #endif } s; struct cvmx_l2c_lckbase_s cn30xx; struct cvmx_l2c_lckbase_s cn31xx; struct cvmx_l2c_lckbase_s cn38xx; struct cvmx_l2c_lckbase_s cn38xxp2; struct cvmx_l2c_lckbase_s cn50xx; struct cvmx_l2c_lckbase_s cn52xx; struct cvmx_l2c_lckbase_s cn52xxp1; struct cvmx_l2c_lckbase_s cn56xx; struct cvmx_l2c_lckbase_s cn56xxp1; struct cvmx_l2c_lckbase_s cn58xx; struct cvmx_l2c_lckbase_s cn58xxp1; } cvmx_l2c_lckbase_t; /** * cvmx_l2c_lckoff * * L2C_LCKOFF = L2C LockDown OFFSET Register * * Description: L2C LockDown OFFSET Register * * Notes: * (1) The generation of the end lockdown block address will 'wrap'. * (2) The minimum granularity for lockdown is 1 cache line (= 128B block) */ typedef union { uint64_t u64; struct cvmx_l2c_lckoff_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_10_63 : 54; uint64_t lck_offset : 10; /**< LockDown block Offset. Used in determining the ending block address of the lockdown region: End Lockdown block Address[33:7] = LCK_BASE[33:7]+LCK_OFFSET[9:0] */ #else uint64_t lck_offset : 10; uint64_t reserved_10_63 : 54; #endif } s; struct cvmx_l2c_lckoff_s cn30xx; struct cvmx_l2c_lckoff_s cn31xx; struct cvmx_l2c_lckoff_s cn38xx; struct cvmx_l2c_lckoff_s cn38xxp2; struct cvmx_l2c_lckoff_s cn50xx; struct cvmx_l2c_lckoff_s cn52xx; struct cvmx_l2c_lckoff_s cn52xxp1; struct cvmx_l2c_lckoff_s cn56xx; struct cvmx_l2c_lckoff_s cn56xxp1; struct cvmx_l2c_lckoff_s cn58xx; struct cvmx_l2c_lckoff_s cn58xxp1; } cvmx_l2c_lckoff_t; /** * cvmx_l2c_lfb0 * * L2C_LFB0 = L2C LFB DEBUG 0 Register * * Description: L2C LFB Contents (Status Bits) */ typedef union { uint64_t u64; struct cvmx_l2c_lfb0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t stcpnd : 1; /**< LFB STC Pending Status */ uint64_t stpnd : 1; /**< LFB ST* Pending Status */ uint64_t stinv : 1; /**< LFB ST* Invalidate Status */ uint64_t stcfl : 1; /**< LFB STC=FAIL Status */ uint64_t vam : 1; /**< Valid Full Address Match Status */ uint64_t inxt : 4; /**< Next LFB Pointer(invalid if ITL=1) */ uint64_t itl : 1; /**< LFB Tail of List Indicator */ uint64_t ihd : 1; /**< LFB Head of List Indicator */ uint64_t set : 3; /**< SET# used for DS-OP (hit=hset/miss=rset) */ uint64_t vabnum : 4; /**< VAB# used for LMC Miss Launch(valid only if VAM=1) */ uint64_t sid : 9; /**< LFB Source ID */ uint64_t cmd : 4; /**< LFB Command */ uint64_t vld : 1; /**< LFB Valid */ #else uint64_t vld : 1; uint64_t cmd : 4; uint64_t sid : 9; uint64_t vabnum : 4; uint64_t set : 3; uint64_t ihd : 1; uint64_t itl : 1; uint64_t inxt : 4; uint64_t vam : 1; uint64_t stcfl : 1; uint64_t stinv : 1; uint64_t stpnd : 1; uint64_t stcpnd : 1; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_l2c_lfb0_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t stcpnd : 1; /**< LFB STC Pending Status */ uint64_t stpnd : 1; /**< LFB ST* Pending Status */ uint64_t stinv : 1; /**< LFB ST* Invalidate Status */ uint64_t stcfl : 1; /**< LFB STC=FAIL Status */ uint64_t vam : 1; /**< Valid Full Address Match Status */ uint64_t reserved_25_26 : 2; uint64_t inxt : 2; /**< Next LFB Pointer(invalid if ITL=1) */ uint64_t itl : 1; /**< LFB Tail of List Indicator */ uint64_t ihd : 1; /**< LFB Head of List Indicator */ uint64_t reserved_20_20 : 1; uint64_t set : 2; /**< SET# used for DS-OP (hit=hset/miss=rset) */ uint64_t reserved_16_17 : 2; uint64_t vabnum : 2; /**< VAB# used for LMC Miss Launch(valid only if VAM=1) */ 
uint64_t sid : 9; /**< LFB Source ID */ uint64_t cmd : 4; /**< LFB Command */ uint64_t vld : 1; /**< LFB Valid */ #else uint64_t vld : 1; uint64_t cmd : 4; uint64_t sid : 9; uint64_t vabnum : 2; uint64_t reserved_16_17 : 2; uint64_t set : 2; uint64_t reserved_20_20 : 1; uint64_t ihd : 1; uint64_t itl : 1; uint64_t inxt : 2; uint64_t reserved_25_26 : 2; uint64_t vam : 1; uint64_t stcfl : 1; uint64_t stinv : 1; uint64_t stpnd : 1; uint64_t stcpnd : 1; uint64_t reserved_32_63 : 32; #endif } cn30xx; struct cvmx_l2c_lfb0_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t stcpnd : 1; /**< LFB STC Pending Status */ uint64_t stpnd : 1; /**< LFB ST* Pending Status */ uint64_t stinv : 1; /**< LFB ST* Invalidate Status */ uint64_t stcfl : 1; /**< LFB STC=FAIL Status */ uint64_t vam : 1; /**< Valid Full Address Match Status */ uint64_t reserved_26_26 : 1; uint64_t inxt : 3; /**< Next LFB Pointer(invalid if ITL=1) */ uint64_t itl : 1; /**< LFB Tail of List Indicator */ uint64_t ihd : 1; /**< LFB Head of List Indicator */ uint64_t reserved_20_20 : 1; uint64_t set : 2; /**< SET# used for DS-OP (hit=hset/miss=rset) */ uint64_t reserved_17_17 : 1; uint64_t vabnum : 3; /**< VAB# used for LMC Miss Launch(valid only if VAM=1) */ uint64_t sid : 9; /**< LFB Source ID */ uint64_t cmd : 4; /**< LFB Command */ uint64_t vld : 1; /**< LFB Valid */ #else uint64_t vld : 1; uint64_t cmd : 4; uint64_t sid : 9; uint64_t vabnum : 3; uint64_t reserved_17_17 : 1; uint64_t set : 2; uint64_t reserved_20_20 : 1; uint64_t ihd : 1; uint64_t itl : 1; uint64_t inxt : 3; uint64_t reserved_26_26 : 1; uint64_t vam : 1; uint64_t stcfl : 1; uint64_t stinv : 1; uint64_t stpnd : 1; uint64_t stcpnd : 1; uint64_t reserved_32_63 : 32; #endif } cn31xx; struct cvmx_l2c_lfb0_s cn38xx; struct cvmx_l2c_lfb0_s cn38xxp2; struct cvmx_l2c_lfb0_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t stcpnd : 1; /**< LFB STC Pending Status */ uint64_t stpnd : 1; /**< LFB ST* Pending Status */ uint64_t stinv : 1; /**< LFB ST* Invalidate Status */ uint64_t stcfl : 1; /**< LFB STC=FAIL Status */ uint64_t vam : 1; /**< Valid Full Address Match Status */ uint64_t reserved_26_26 : 1; uint64_t inxt : 3; /**< Next LFB Pointer(invalid if ITL=1) */ uint64_t itl : 1; /**< LFB Tail of List Indicator */ uint64_t ihd : 1; /**< LFB Head of List Indicator */ uint64_t set : 3; /**< SET# used for DS-OP (hit=hset/miss=rset) */ uint64_t reserved_17_17 : 1; uint64_t vabnum : 3; /**< VAB# used for LMC Miss Launch(valid only if VAM=1) */ uint64_t sid : 9; /**< LFB Source ID */ uint64_t cmd : 4; /**< LFB Command */ uint64_t vld : 1; /**< LFB Valid */ #else uint64_t vld : 1; uint64_t cmd : 4; uint64_t sid : 9; uint64_t vabnum : 3; uint64_t reserved_17_17 : 1; uint64_t set : 3; uint64_t ihd : 1; uint64_t itl : 1; uint64_t inxt : 3; uint64_t reserved_26_26 : 1; uint64_t vam : 1; uint64_t stcfl : 1; uint64_t stinv : 1; uint64_t stpnd : 1; uint64_t stcpnd : 1; uint64_t reserved_32_63 : 32; #endif } cn50xx; struct cvmx_l2c_lfb0_cn50xx cn52xx; struct cvmx_l2c_lfb0_cn50xx cn52xxp1; struct cvmx_l2c_lfb0_s cn56xx; struct cvmx_l2c_lfb0_s cn56xxp1; struct cvmx_l2c_lfb0_s cn58xx; struct cvmx_l2c_lfb0_s cn58xxp1; } cvmx_l2c_lfb0_t; /** * cvmx_l2c_lfb1 * * L2C_LFB1 = L2C LFB DEBUG 1 Register * * Description: L2C LFB Contents (Wait Bits) */ typedef union { uint64_t u64; struct cvmx_l2c_lfb1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_19_63 : 45; uint64_t dsgoing : 1; /**< LFB DS Going (in flight) */ uint64_t bid : 2; /**< LFB 
DS Bid# */ uint64_t wtrsp : 1; /**< LFB Waiting for RSC Response [FILL,STRSP] completion */ uint64_t wtdw : 1; /**< LFB Waiting for DS-WR completion */ uint64_t wtdq : 1; /**< LFB Waiting for LFB-DQ */ uint64_t wtwhp : 1; /**< LFB Waiting for Write-Hit Partial L2 DS-WR completion */ uint64_t wtwhf : 1; /**< LFB Waiting for Write-Hit Full L2 DS-WR completion */ uint64_t wtwrm : 1; /**< LFB Waiting for Write-Miss L2 DS-WR completion */ uint64_t wtstm : 1; /**< LFB Waiting for Store-Miss L2 DS-WR completion */ uint64_t wtrda : 1; /**< LFB Waiting for Read-Miss L2 DS-WR completion */ uint64_t wtstdt : 1; /**< LFB Waiting for all ST write Data to arrive on XMD bus */ uint64_t wtstrsp : 1; /**< LFB Waiting for ST RSC/RSD to be issued on RSP (with invalidates) */ uint64_t wtstrsc : 1; /**< LFB Waiting for ST RSC-Only to be issued on RSP (no-invalidates) */ uint64_t wtvtm : 1; /**< LFB Waiting for Victim Read L2 DS-RD completion */ uint64_t wtmfl : 1; /**< LFB Waiting for Memory Fill completion to MRB */ uint64_t prbrty : 1; /**< Probe-Retry Detected - waiting for probe completion */ uint64_t wtprb : 1; /**< LFB Waiting for Probe */ uint64_t vld : 1; /**< LFB Valid */ #else uint64_t vld : 1; uint64_t wtprb : 1; uint64_t prbrty : 1; uint64_t wtmfl : 1; uint64_t wtvtm : 1; uint64_t wtstrsc : 1; uint64_t wtstrsp : 1; uint64_t wtstdt : 1; uint64_t wtrda : 1; uint64_t wtstm : 1; uint64_t wtwrm : 1; uint64_t wtwhf : 1; uint64_t wtwhp : 1; uint64_t wtdq : 1; uint64_t wtdw : 1; uint64_t wtrsp : 1; uint64_t bid : 2; uint64_t dsgoing : 1; uint64_t reserved_19_63 : 45; #endif } s; struct cvmx_l2c_lfb1_s cn30xx; struct cvmx_l2c_lfb1_s cn31xx; struct cvmx_l2c_lfb1_s cn38xx; struct cvmx_l2c_lfb1_s cn38xxp2; struct cvmx_l2c_lfb1_s cn50xx; struct cvmx_l2c_lfb1_s cn52xx; struct cvmx_l2c_lfb1_s cn52xxp1; struct cvmx_l2c_lfb1_s cn56xx; struct cvmx_l2c_lfb1_s cn56xxp1; struct cvmx_l2c_lfb1_s cn58xx; struct cvmx_l2c_lfb1_s cn58xxp1; } cvmx_l2c_lfb1_t; /** * cvmx_l2c_lfb2 * * L2C_LFB2 = L2C LFB DEBUG 2 Register * * Description: L2C LFB Contents Tag/Index */ typedef union { uint64_t u64; struct cvmx_l2c_lfb2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_0_63 : 64; #else uint64_t reserved_0_63 : 64; #endif } s; struct cvmx_l2c_lfb2_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_27_63 : 37; uint64_t lfb_tag : 19; /**< LFB TAG[33:15] */ uint64_t lfb_idx : 8; /**< LFB IDX[14:7] */ #else uint64_t lfb_idx : 8; uint64_t lfb_tag : 19; uint64_t reserved_27_63 : 37; #endif } cn30xx; struct cvmx_l2c_lfb2_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_27_63 : 37; uint64_t lfb_tag : 17; /**< LFB TAG[33:17] */ uint64_t lfb_idx : 10; /**< LFB IDX[16:7] */ #else uint64_t lfb_idx : 10; uint64_t lfb_tag : 17; uint64_t reserved_27_63 : 37; #endif } cn31xx; struct cvmx_l2c_lfb2_cn31xx cn38xx; struct cvmx_l2c_lfb2_cn31xx cn38xxp2; struct cvmx_l2c_lfb2_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_27_63 : 37; uint64_t lfb_tag : 20; /**< LFB TAG[33:14] */ uint64_t lfb_idx : 7; /**< LFB IDX[13:7] */ #else uint64_t lfb_idx : 7; uint64_t lfb_tag : 20; uint64_t reserved_27_63 : 37; #endif } cn50xx; struct cvmx_l2c_lfb2_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_27_63 : 37; uint64_t lfb_tag : 18; /**< LFB TAG[33:16] */ uint64_t lfb_idx : 9; /**< LFB IDX[15:7] */ #else uint64_t lfb_idx : 9; uint64_t lfb_tag : 18; uint64_t reserved_27_63 : 37; #endif } cn52xx; struct cvmx_l2c_lfb2_cn52xx cn52xxp1; struct cvmx_l2c_lfb2_cn56xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_27_63 :
37; uint64_t lfb_tag : 16; /**< LFB TAG[33:18] */ uint64_t lfb_idx : 11; /**< LFB IDX[17:7] */ #else uint64_t lfb_idx : 11; uint64_t lfb_tag : 16; uint64_t reserved_27_63 : 37; #endif } cn56xx; struct cvmx_l2c_lfb2_cn56xx cn56xxp1; struct cvmx_l2c_lfb2_cn56xx cn58xx; struct cvmx_l2c_lfb2_cn56xx cn58xxp1; } cvmx_l2c_lfb2_t; /** * cvmx_l2c_lfb3 * * L2C_LFB3 = L2C LFB DEBUG 3 Register * * Description: LFB High Water Mark Register */ typedef union { uint64_t u64; struct cvmx_l2c_lfb3_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t stpartdis : 1; /**< STP/C Performance Enhancement Disable When clear, all STP/C(store partials) will take 2 cycles to complete (power-on default). When set, all STP/C(store partials) will take 4 cycles to complete. NOTE: It is recommended to keep this bit ALWAYS ZERO. *** NOTE: PASS2 Addition */ uint64_t lfb_hwm : 4; /**< LFB High Water Mark Determines \#of LFB Entries in use before backpressure is asserted. HWM=0: 1 LFB Entry available - ... HWM=15: 16 LFB Entries available *** NOTE: PASS2 Addition */ #else uint64_t lfb_hwm : 4; uint64_t stpartdis : 1; uint64_t reserved_5_63 : 59; #endif } s; struct cvmx_l2c_lfb3_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t stpartdis : 1; /**< STP/C Performance Enhancement Disable When clear, all STP/C(store partials) will take 2 cycles to complete (power-on default). When set, all STP/C(store partials) will take 4 cycles to complete. NOTE: It is recommended to keep this bit ALWAYS ZERO. */ uint64_t reserved_2_3 : 2; uint64_t lfb_hwm : 2; /**< LFB High Water Mark Determines \#of LFB Entries in use before backpressure is asserted. HWM=0: 1 LFB Entry available - ... HWM=3: 4 LFB Entries available */ #else uint64_t lfb_hwm : 2; uint64_t reserved_2_3 : 2; uint64_t stpartdis : 1; uint64_t reserved_5_63 : 59; #endif } cn30xx; struct cvmx_l2c_lfb3_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t stpartdis : 1; /**< STP/C Performance Enhancement Disable When clear, all STP/C(store partials) will take 2 cycles to complete (power-on default). When set, all STP/C(store partials) will take 4 cycles to complete. NOTE: It is recommended to keep this bit ALWAYS ZERO. */ uint64_t reserved_3_3 : 1; uint64_t lfb_hwm : 3; /**< LFB High Water Mark Determines \#of LFB Entries in use before backpressure is asserted. HWM=0: 1 LFB Entry available - ... HWM=7: 8 LFB Entries available */ #else uint64_t lfb_hwm : 3; uint64_t reserved_3_3 : 1; uint64_t stpartdis : 1; uint64_t reserved_5_63 : 59; #endif } cn31xx; struct cvmx_l2c_lfb3_s cn38xx; struct cvmx_l2c_lfb3_s cn38xxp2; struct cvmx_l2c_lfb3_cn31xx cn50xx; struct cvmx_l2c_lfb3_cn31xx cn52xx; struct cvmx_l2c_lfb3_cn31xx cn52xxp1; struct cvmx_l2c_lfb3_s cn56xx; struct cvmx_l2c_lfb3_s cn56xxp1; struct cvmx_l2c_lfb3_s cn58xx; struct cvmx_l2c_lfb3_s cn58xxp1; } cvmx_l2c_lfb3_t; /** * cvmx_l2c_oob * * L2C_OOB = L2C Out of Bounds Global Enables * * Description: Defines DMA "Out of Bounds" global enables. */ typedef union { uint64_t u64; struct cvmx_l2c_oob_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t dwbena : 1; /**< DMA Out of Bounds Range Checker for DMA DWB commands (Don't WriteBack). When enabled, any DMA DWB commands which hit 1-of-3 out of bounds regions will be logged into L2C_INT_STAT[OOB*] CSRs and the DMA store WILL NOT occur. If the corresponding L2C_INT_EN[OOB*] is enabled, an interrupt will also be reported. 
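An illustrative arming sequence (a sketch; register and field names per this file, CSR accessors assumed from the SDK): program L2C_OOB1[SADR,SIZE] to describe the region, then set L2C_OOB[STENA]=1 and/or L2C_OOB[DWBENA]=1, and set L2C_INT_EN[OOB1EN]=1 if an interrupt on a violation is desired.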
*/ uint64_t stena : 1; /**< DMA Out of Bounds Range Checker for DMA store commands (STF/P/T). When enabled, any DMA store commands (STF/P/T) which hit 1-of-3 out of bounds regions will be logged into L2C_INT_STAT[OOB*] CSRs and the DMA store WILL NOT occur. If the corresponding L2C_INT_EN[OOB*] is enabled, an interrupt will also be reported. */ #else uint64_t stena : 1; uint64_t dwbena : 1; uint64_t reserved_2_63 : 62; #endif } s; struct cvmx_l2c_oob_s cn52xx; struct cvmx_l2c_oob_s cn52xxp1; struct cvmx_l2c_oob_s cn56xx; struct cvmx_l2c_oob_s cn56xxp1; } cvmx_l2c_oob_t; /** * cvmx_l2c_oob1 * * L2C_OOB1 = L2C Out of Bounds Range Checker * * Description: Defines DMA "Out of Bounds" region \#1. If a DMA initiated write transaction generates an address * within the specified region, the write is 'ignored' and an interrupt is generated to alert software. */ typedef union { uint64_t u64; struct cvmx_l2c_oob1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t fadr : 27; /**< DMA initiated Memory Range Checker Failing Address When L2C_INT_STAT[OOB1]=1, this field indicates the DMA cacheline address. (addr[33:7] = full cacheline address captured) NOTE: FADR is locked down until L2C_INT_STAT[OOB1] is cleared. */ uint64_t fsrc : 1; /**< DMA Out of Bounds Failing Source Command When L2C_INT_STAT[OOB1]=1, this field indicates the type of DMA command. - 0: ST* (STF/P/T) - 1: DWB (Don't WriteBack) NOTE: FSRC is locked down until L2C_INT_STAT[OOB1] is cleared. */ uint64_t reserved_34_35 : 2; uint64_t sadr : 14; /**< DMA initiated Memory Range Checker Starting Address (1MB granularity) */ uint64_t reserved_14_19 : 6; uint64_t size : 14; /**< DMA Out of Bounds Range Checker Size (1MB granularity) Example: 0: 0MB / 1: 1MB The range check is for: (SADR<<20) <= addr[33:0] < (((SADR+SIZE) & 0x3FFF)<<20) SW NOTE: SADR+SIZE could be setup to potentially wrap the 34bit ending bounds address. */ #else uint64_t size : 14; uint64_t reserved_14_19 : 6; uint64_t sadr : 14; uint64_t reserved_34_35 : 2; uint64_t fsrc : 1; uint64_t fadr : 27; #endif } s; struct cvmx_l2c_oob1_s cn52xx; struct cvmx_l2c_oob1_s cn52xxp1; struct cvmx_l2c_oob1_s cn56xx; struct cvmx_l2c_oob1_s cn56xxp1; } cvmx_l2c_oob1_t; /** * cvmx_l2c_oob2 * * L2C_OOB2 = L2C Out of Bounds Range Checker * * Description: Defines DMA "Out of Bounds" region \#2. If a DMA initiated write transaction generates an address * within the specified region, the write is 'ignored' and an interrupt is generated to alert software. */ typedef union { uint64_t u64; struct cvmx_l2c_oob2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t fadr : 27; /**< DMA initiated Memory Range Checker Failing Address When L2C_INT_STAT[OOB2]=1, this field indicates the DMA cacheline address. (addr[33:7] = full cacheline address captured) NOTE: FADR is locked down until L2C_INT_STAT[OOB2] is cleared. */ uint64_t fsrc : 1; /**< DMA Out of Bounds Failing Source Command When L2C_INT_STAT[OOB2]=1, this field indicates the type of DMA command. - 0: ST* (STF/P/T) - 1: DWB (Don't WriteBack) NOTE: FSRC is locked down until L2C_INT_STAT[OOB2] is cleared. */ uint64_t reserved_34_35 : 2; uint64_t sadr : 14; /**< DMA initiated Memory Range Checker Starting Address (1MB granularity) */ uint64_t reserved_14_19 : 6; uint64_t size : 14; /**< DMA Out of Bounds Range Checker Size (1MB granularity) Example: 0: 0MB / 1: 1MB The range check is for: (SADR<<20) <= addr[33:0] < (((SADR+SIZE) & 0x3FFF)<<20) SW NOTE: SADR+SIZE could be setup to potentially wrap the 34bit ending bounds address.
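For example (illustrative values): SADR=0x10 with SIZE=0x20 checks 0x1000000 <= addr[33:0] < 0x3000000, i.e. the 32MB region beginning at 16MB.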
*/ #else uint64_t size : 14; uint64_t reserved_14_19 : 6; uint64_t sadr : 14; uint64_t reserved_34_35 : 2; uint64_t fsrc : 1; uint64_t fadr : 27; #endif } s; struct cvmx_l2c_oob2_s cn52xx; struct cvmx_l2c_oob2_s cn52xxp1; struct cvmx_l2c_oob2_s cn56xx; struct cvmx_l2c_oob2_s cn56xxp1; } cvmx_l2c_oob2_t; /** * cvmx_l2c_oob3 * * L2C_OOB3 = L2C Out of Bounds Range Checker * * Description: Defines DMA "Out of Bounds" region \#3. If a DMA initiated write transaction generates an address * within the specified region, the write is 'ignored' and an interrupt is generated to alert software. */ typedef union { uint64_t u64; struct cvmx_l2c_oob3_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t fadr : 27; /**< DMA initiated Memory Range Checker Failing Address When L2C_INT_STAT[OOB3]=1, this field indicates the DMA cacheline address. (addr[33:7] = full cacheline address captured) NOTE: FADR is locked down until L2C_INT_STAT[OOB3] is cleared. */ uint64_t fsrc : 1; /**< DMA Out of Bounds Failing Source Command When L2C_INT_STAT[OOB3]=1, this field indicates the type of DMA command. - 0: ST* (STF/P/T) - 1: DWB (Don't WriteBack) NOTE: FSRC is locked down until L2C_INT_STAT[OOB3] is cleared. */ uint64_t reserved_34_35 : 2; uint64_t sadr : 14; /**< DMA initiated Memory Range Checker Starting Address (1MB granularity) */ uint64_t reserved_14_19 : 6; uint64_t size : 14; /**< DMA Out of Bounds Range Checker Size (1MB granularity) Example: 0: 0MB / 1: 1MB The range check is for: (SADR<<20) <= addr[33:0] < (((SADR+SIZE) & 0x3FFF)<<20) SW NOTE: SADR+SIZE could be setup to potentially wrap the 34bit ending bounds address. */ #else uint64_t size : 14; uint64_t reserved_14_19 : 6; uint64_t sadr : 14; uint64_t reserved_34_35 : 2; uint64_t fsrc : 1; uint64_t fadr : 27; #endif } s; struct cvmx_l2c_oob3_s cn52xx; struct cvmx_l2c_oob3_s cn52xxp1; struct cvmx_l2c_oob3_s cn56xx; struct cvmx_l2c_oob3_s cn56xxp1; } cvmx_l2c_oob3_t; /** * cvmx_l2c_pfc# * * L2C_PFC0 = L2 Performance Counter \#0 * * Description: */ typedef union { uint64_t u64; struct cvmx_l2c_pfcx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t pfcnt0 : 36; /**< Performance Counter \#0 */ #else uint64_t pfcnt0 : 36; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_l2c_pfcx_s cn30xx; struct cvmx_l2c_pfcx_s cn31xx; struct cvmx_l2c_pfcx_s cn38xx; struct cvmx_l2c_pfcx_s cn38xxp2; struct cvmx_l2c_pfcx_s cn50xx; struct cvmx_l2c_pfcx_s cn52xx; struct cvmx_l2c_pfcx_s cn52xxp1; struct cvmx_l2c_pfcx_s cn56xx; struct cvmx_l2c_pfcx_s cn56xxp1; struct cvmx_l2c_pfcx_s cn58xx; struct cvmx_l2c_pfcx_s cn58xxp1; } cvmx_l2c_pfcx_t; /** * cvmx_l2c_pfctl * * L2C_PFCTL = L2 Performance Counter Control Register * * Description: Controls the actions of the 4 Performance Counters * * Notes: * - There are four 36b performance counter registers which can simultaneously count events.
* Each Counter's event is programmably selected via the corresponding CNTxSEL field: * CNTxSEL[5:0] Event * -----------------+----------------------- * 0 | Cycles * 1 | L2 Instruction Miss * 2 | L2 Instruction Hit * 3 | L2 Data Miss * 4 | L2 Data Hit * 5 | L2 Miss (I/D) * 6 | L2 Hit (I/D) * 7 | L2 Victim Buffer Hit (Retry Probe) * 8 | LFB-NQ Index Conflict * 9 | L2 Tag Probe (issued - could be VB-Retried) * 10 | L2 Tag Update (completed - note: some CMD types do not update) * 11 | L2 Tag Probe Completed (beyond VB-RTY window) * 12 | L2 Tag Dirty Victim * 13 | L2 Data Store NOP * 14 | L2 Data Store READ * 15 | L2 Data Store WRITE * 16 | Memory Fill Data valid (1 strobe/32B) * 17 | Memory Write Request * 18 | Memory Read Request * 19 | Memory Write Data valid (1 strobe/32B) * 20 | XMC NOP (XMC Bus Idle) * 21 | XMC LDT (Load-Through Request) * 22 | XMC LDI (L2 Load I-Stream Request) * 23 | XMC LDD (L2 Load D-stream Request) * 24 | XMC STF (L2 Store Full cacheline Request) * 25 | XMC STT (L2 Store Through Request) * 26 | XMC STP (L2 Store Partial Request) * 27 | XMC STC (L2 Store Conditional Request) * 28 | XMC DWB (L2 Don't WriteBack Request) * 29 | XMC PL2 (L2 Prefetch Request) * 30 | XMC PSL1 (L1 Prefetch Request) * 31 | XMC IOBLD * 32 | XMC IOBST * 33 | XMC IOBDMA * 34 | XMC IOBRSP * 35 | XMD Bus valid (all) * 36 | XMD Bus valid (DST=L2C) Memory Data * 37 | XMD Bus valid (DST=IOB) REFL Data * 38 | XMD Bus valid (DST=PP) IOBRSP Data * 39 | RSC NOP * 40 | RSC STDN * 41 | RSC FILL * 42 | RSC REFL * 43 | RSC STIN * 44 | RSC SCIN * 45 | RSC SCFL * 46 | RSC SCDN * 47 | RSD Data Valid * 48 | RSD Data Valid (FILL) * 49 | RSD Data Valid (STRSP) * 50 | RSD Data Valid (REFL) * 51 | LRF-REQ (LFB-NQ) * 52 | DT RD-ALLOC (LDD/PSL1 Commands) * 53 | DT WR-INVAL (ST* Commands) */ typedef union { uint64_t u64; struct cvmx_l2c_pfctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t cnt3rdclr : 1; /**< Performance Counter 3 Read Clear When set, all CSR reads of the L2C_PFC3 register will auto-clear the counter. This allows SW to maintain 'cumulative' counters in SW. NOTE: If the CSR read occurs in the same cycle as the 'event' to be counted, the counter will properly reflect the event. *** NOTE: PASS2 Addition */ uint64_t cnt2rdclr : 1; /**< Performance Counter 2 Read Clear When set, all CSR reads of the L2C_PFC2 register will auto-clear the counter. This allows SW to maintain 'cumulative' counters in SW. NOTE: If the CSR read occurs in the same cycle as the 'event' to be counted, the counter will properly reflect the event. *** NOTE: PASS2 Addition */ uint64_t cnt1rdclr : 1; /**< Performance Counter 1 Read Clear When set, all CSR reads of the L2C_PFC1 register will auto-clear the counter. This allows SW to maintain 'cumulative' counters in SW. NOTE: If the CSR read occurs in the same cycle as the 'event' to be counted, the counter will properly reflect the event. *** NOTE: PASS2 Addition */ uint64_t cnt0rdclr : 1; /**< Performance Counter 0 Read Clear When set, all CSR reads of the L2C_PFC0 register will 'auto-clear' the counter. This allows SW to maintain accurate 'cumulative' counters. NOTE: If the CSR read occurs in the same cycle as the 'event' to be counted, the counter will properly reflect the event. *** NOTE: PASS2 Addition */ uint64_t cnt3ena : 1; /**< Performance Counter 3 Enable When this bit is set, the performance counter is enabled. 
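An illustrative setup for counter 0 (a sketch, assuming the cvmx_read_csr()/cvmx_write_csr() accessors and the CVMX_L2C_PFCTL/CVMX_L2C_PFC0 address macros from the SDK; counters 1-3 are programmed the same way): cvmx_l2c_pfctl_t pf; pf.u64 = 0; pf.s.cnt0sel = 3; (event 3 = L2 Data Miss, per the table above) pf.s.cnt0clr = 1; pf.s.cnt0ena = 1; cvmx_write_csr(CVMX_L2C_PFCTL, pf.u64); ...run the workload... uint64_t misses = cvmx_read_csr(CVMX_L2C_PFC0);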
*/ uint64_t cnt3clr : 1; /**< Performance Counter 3 Clear When the CSR write occurs, if this bit is set, the performance counter is cleared. Otherwise, it will resume counting from its current value. */ uint64_t cnt3sel : 6; /**< Performance Counter 3 Event Selector (see list of selectable events to count in NOTES) */ uint64_t cnt2ena : 1; /**< Performance Counter 2 Enable When this bit is set, the performance counter is enabled. */ uint64_t cnt2clr : 1; /**< Performance Counter 2 Clear When the CSR write occurs, if this bit is set, the performance counter is cleared. Otherwise, it will resume counting from its current value. */ uint64_t cnt2sel : 6; /**< Performance Counter 2 Event Selector (see list of selectable events to count in NOTES) */ uint64_t cnt1ena : 1; /**< Performance Counter 1 Enable When this bit is set, the performance counter is enabled. */ uint64_t cnt1clr : 1; /**< Performance Counter 1 Clear When the CSR write occurs, if this bit is set, the performance counter is cleared. Otherwise, it will resume counting from its current value. */ uint64_t cnt1sel : 6; /**< Performance Counter 1 Event Selector (see list of selectable events to count in NOTES) */ uint64_t cnt0ena : 1; /**< Performance Counter 0 Enable When this bit is set, the performance counter is enabled. */ uint64_t cnt0clr : 1; /**< Performance Counter 0 Clear When the CSR write occurs, if this bit is set, the performance counter is cleared. Otherwise, it will resume counting from its current value. */ uint64_t cnt0sel : 6; /**< Performance Counter 0 Event Selector (see list of selectable events to count in NOTES) */ #else uint64_t cnt0sel : 6; uint64_t cnt0clr : 1; uint64_t cnt0ena : 1; uint64_t cnt1sel : 6; uint64_t cnt1clr : 1; uint64_t cnt1ena : 1; uint64_t cnt2sel : 6; uint64_t cnt2clr : 1; uint64_t cnt2ena : 1; uint64_t cnt3sel : 6; uint64_t cnt3clr : 1; uint64_t cnt3ena : 1; uint64_t cnt0rdclr : 1; uint64_t cnt1rdclr : 1; uint64_t cnt2rdclr : 1; uint64_t cnt3rdclr : 1; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_l2c_pfctl_s cn30xx; struct cvmx_l2c_pfctl_s cn31xx; struct cvmx_l2c_pfctl_s cn38xx; struct cvmx_l2c_pfctl_s cn38xxp2; struct cvmx_l2c_pfctl_s cn50xx; struct cvmx_l2c_pfctl_s cn52xx; struct cvmx_l2c_pfctl_s cn52xxp1; struct cvmx_l2c_pfctl_s cn56xx; struct cvmx_l2c_pfctl_s cn56xxp1; struct cvmx_l2c_pfctl_s cn58xx; struct cvmx_l2c_pfctl_s cn58xxp1; } cvmx_l2c_pfctl_t; /** * cvmx_l2c_ppgrp * * L2C_PPGRP = L2C PP Group Number * * Description: Defines the PP(Packet Processor) PLC Group \# (0,1,2) */ typedef union { uint64_t u64; struct cvmx_l2c_ppgrp_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_24_63 : 40; uint64_t pp11grp : 2; /**< PP11 PLC Group# (0,1,2) */ uint64_t pp10grp : 2; /**< PP10 PLC Group# (0,1,2) */ uint64_t pp9grp : 2; /**< PP9 PLC Group# (0,1,2) */ uint64_t pp8grp : 2; /**< PP8 PLC Group# (0,1,2) */ uint64_t pp7grp : 2; /**< PP7 PLC Group# (0,1,2) */ uint64_t pp6grp : 2; /**< PP6 PLC Group# (0,1,2) */ uint64_t pp5grp : 2; /**< PP5 PLC Group# (0,1,2) */ uint64_t pp4grp : 2; /**< PP4 PLC Group# (0,1,2) */ uint64_t pp3grp : 2; /**< PP3 PLC Group# (0,1,2) */ uint64_t pp2grp : 2; /**< PP2 PLC Group# (0,1,2) */ uint64_t pp1grp : 2; /**< PP1 PLC Group# (0,1,2) */ uint64_t pp0grp : 2; /**< PP0 PLC Group# (0,1,2) */ #else uint64_t pp0grp : 2; uint64_t pp1grp : 2; uint64_t pp2grp : 2; uint64_t pp3grp : 2; uint64_t pp4grp : 2; uint64_t pp5grp : 2; uint64_t pp6grp : 2; uint64_t pp7grp : 2; uint64_t pp8grp : 2; uint64_t pp9grp : 2; uint64_t pp10grp : 2; uint64_t pp11grp : 2; 
uint64_t reserved_24_63 : 40; #endif } s; struct cvmx_l2c_ppgrp_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t pp3grp : 2; /**< PP3 PLC Group# (0,1,2) */ uint64_t pp2grp : 2; /**< PP2 PLC Group# (0,1,2) */ uint64_t pp1grp : 2; /**< PP1 PLC Group# (0,1,2) */ uint64_t pp0grp : 2; /**< PP0 PLC Group# (0,1,2) */ #else uint64_t pp0grp : 2; uint64_t pp1grp : 2; uint64_t pp2grp : 2; uint64_t pp3grp : 2; uint64_t reserved_8_63 : 56; #endif } cn52xx; struct cvmx_l2c_ppgrp_cn52xx cn52xxp1; struct cvmx_l2c_ppgrp_s cn56xx; struct cvmx_l2c_ppgrp_s cn56xxp1; } cvmx_l2c_ppgrp_t; /** * cvmx_l2c_spar0 * * L2C_SPAR0 = L2 Set Partitioning Register (PP0-3) * * Description: L2 Set Partitioning Register * * Notes: * - When a bit is set in the UMSK'x' register, a memory command issued from PP='x' will NOT select that * set for replacement. * - There MUST ALWAYS BE at least 1 bit clear in each UMSK'x' register for proper L2 cache operation * - NOTES: When L2C FUSE[136] is blown(CRIP_256K), then SETS#7-4 are SET in all UMSK'x' registers * When L2C FUSE[137] is blown(CRIP_128K), then SETS#7-2 are SET in all UMSK'x' registers */ typedef union { uint64_t u64; struct cvmx_l2c_spar0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t umsk3 : 8; /**< PP[3] L2 'DO NOT USE' set partition mask */ uint64_t umsk2 : 8; /**< PP[2] L2 'DO NOT USE' set partition mask */ uint64_t umsk1 : 8; /**< PP[1] L2 'DO NOT USE' set partition mask */ uint64_t umsk0 : 8; /**< PP[0] L2 'DO NOT USE' set partition mask */ #else uint64_t umsk0 : 8; uint64_t umsk1 : 8; uint64_t umsk2 : 8; uint64_t umsk3 : 8; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_l2c_spar0_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t umsk0 : 4; /**< PP[0] L2 'DO NOT USE' set partition mask */ #else uint64_t umsk0 : 4; uint64_t reserved_4_63 : 60; #endif } cn30xx; struct cvmx_l2c_spar0_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t umsk1 : 4; /**< PP[1] L2 'DO NOT USE' set partition mask */ uint64_t reserved_4_7 : 4; uint64_t umsk0 : 4; /**< PP[0] L2 'DO NOT USE' set partition mask */ #else uint64_t umsk0 : 4; uint64_t reserved_4_7 : 4; uint64_t umsk1 : 4; uint64_t reserved_12_63 : 52; #endif } cn31xx; struct cvmx_l2c_spar0_s cn38xx; struct cvmx_l2c_spar0_s cn38xxp2; struct cvmx_l2c_spar0_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t umsk1 : 8; /**< PP[1] L2 'DO NOT USE' set partition mask */ uint64_t umsk0 : 8; /**< PP[0] L2 'DO NOT USE' set partition mask */ #else uint64_t umsk0 : 8; uint64_t umsk1 : 8; uint64_t reserved_16_63 : 48; #endif } cn50xx; struct cvmx_l2c_spar0_s cn52xx; struct cvmx_l2c_spar0_s cn52xxp1; struct cvmx_l2c_spar0_s cn56xx; struct cvmx_l2c_spar0_s cn56xxp1; struct cvmx_l2c_spar0_s cn58xx; struct cvmx_l2c_spar0_s cn58xxp1; } cvmx_l2c_spar0_t; /** * cvmx_l2c_spar1 * * L2C_SPAR1 = L2 Set Partitioning Register (PP4-7) * * Description: L2 Set Partitioning Register * * Notes: * - When a bit is set in the UMSK'x' register, a memory command issued from PP='x' will NOT select that * set for replacement. 
* - There should ALWAYS BE at least 1 bit clear in each UMSK'x' register for proper L2 cache operation * - NOTES: When L2C FUSE[136] is blown(CRIP_1024K), then SETS#7-4 are SET in all UMSK'x' registers * When L2C FUSE[137] is blown(CRIP_512K), then SETS#7-2 are SET in all UMSK'x' registers */ typedef union { uint64_t u64; struct cvmx_l2c_spar1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t umsk7 : 8; /**< PP[7] L2 'DO NOT USE' set partition mask */ uint64_t umsk6 : 8; /**< PP[6] L2 'DO NOT USE' set partition mask */ uint64_t umsk5 : 8; /**< PP[5] L2 'DO NOT USE' set partition mask */ uint64_t umsk4 : 8; /**< PP[4] L2 'DO NOT USE' set partition mask */ #else uint64_t umsk4 : 8; uint64_t umsk5 : 8; uint64_t umsk6 : 8; uint64_t umsk7 : 8; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_l2c_spar1_s cn38xx; struct cvmx_l2c_spar1_s cn38xxp2; struct cvmx_l2c_spar1_s cn56xx; struct cvmx_l2c_spar1_s cn56xxp1; struct cvmx_l2c_spar1_s cn58xx; struct cvmx_l2c_spar1_s cn58xxp1; } cvmx_l2c_spar1_t; /** * cvmx_l2c_spar2 * * L2C_SPAR2 = L2 Set Partitioning Register (PP8-11) * * Description: L2 Set Partitioning Register * * Notes: * - When a bit is set in the UMSK'x' register, a memory command issued from PP='x' will NOT select that * set for replacement. * - There should ALWAYS BE at least 1 bit clear in each UMSK'x' register for proper L2 cache operation * - NOTES: When L2C FUSE[136] is blown(CRIP_1024K), then SETS#7-4 are SET in all UMSK'x' registers * When L2C FUSE[137] is blown(CRIP_512K), then SETS#7-2 are SET in all UMSK'x' registers */ typedef union { uint64_t u64; struct cvmx_l2c_spar2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t umsk11 : 8; /**< PP[11] L2 'DO NOT USE' set partition mask */ uint64_t umsk10 : 8; /**< PP[10] L2 'DO NOT USE' set partition mask */ uint64_t umsk9 : 8; /**< PP[9] L2 'DO NOT USE' set partition mask */ uint64_t umsk8 : 8; /**< PP[8] L2 'DO NOT USE' set partition mask */ #else uint64_t umsk8 : 8; uint64_t umsk9 : 8; uint64_t umsk10 : 8; uint64_t umsk11 : 8; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_l2c_spar2_s cn38xx; struct cvmx_l2c_spar2_s cn38xxp2; struct cvmx_l2c_spar2_s cn56xx; struct cvmx_l2c_spar2_s cn56xxp1; struct cvmx_l2c_spar2_s cn58xx; struct cvmx_l2c_spar2_s cn58xxp1; } cvmx_l2c_spar2_t; /** * cvmx_l2c_spar3 * * L2C_SPAR3 = L2 Set Partitioning Register (PP12-15) * * Description: L2 Set Partitioning Register * * Notes: * - When a bit is set in the UMSK'x' register, a memory command issued from PP='x' will NOT select that * set for replacement. 
* - There should ALWAYS BE at least 1 bit clear in each UMSK'x' register for proper L2 cache operation * - NOTES: When L2C FUSE[136] is blown(CRIP_1024K), then SETS#7-4 are SET in all UMSK'x' registers * When L2C FUSE[137] is blown(CRIP_512K), then SETS#7-2 are SET in all UMSK'x' registers */ typedef union { uint64_t u64; struct cvmx_l2c_spar3_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t umsk15 : 8; /**< PP[15] L2 'DO NOT USE' set partition mask */ uint64_t umsk14 : 8; /**< PP[14] L2 'DO NOT USE' set partition mask */ uint64_t umsk13 : 8; /**< PP[13] L2 'DO NOT USE' set partition mask */ uint64_t umsk12 : 8; /**< PP[12] L2 'DO NOT USE' set partition mask */ #else uint64_t umsk12 : 8; uint64_t umsk13 : 8; uint64_t umsk14 : 8; uint64_t umsk15 : 8; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_l2c_spar3_s cn38xx; struct cvmx_l2c_spar3_s cn38xxp2; struct cvmx_l2c_spar3_s cn58xx; struct cvmx_l2c_spar3_s cn58xxp1; } cvmx_l2c_spar3_t; /** * cvmx_l2c_spar4 * * L2C_SPAR4 = L2 Set Partitioning Register (IOB) * * Description: L2 Set Partitioning Register * * Notes: * - When a bit is set in the UMSK'x' register, a memory command issued from PP='x' will NOT select that * set for replacement. * - There should ALWAYS BE at least 1 bit clear in each UMSK'x' register for proper L2 cache operation * - NOTES: When L2C FUSE[136] is blown(CRIP_256K), then SETS#7-4 are SET in all UMSK'x' registers * When L2C FUSE[137] is blown(CRIP_128K), then SETS#7-2 are SET in all UMSK'x' registers */ typedef union { uint64_t u64; struct cvmx_l2c_spar4_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t umskiob : 8; /**< IOB L2 'DO NOT USE' set partition mask */ #else uint64_t umskiob : 8; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_l2c_spar4_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t umskiob : 4; /**< IOB L2 'DO NOT USE' set partition mask */ #else uint64_t umskiob : 4; uint64_t reserved_4_63 : 60; #endif } cn30xx; struct cvmx_l2c_spar4_cn30xx cn31xx; struct cvmx_l2c_spar4_s cn38xx; struct cvmx_l2c_spar4_s cn38xxp2; struct cvmx_l2c_spar4_s cn50xx; struct cvmx_l2c_spar4_s cn52xx; struct cvmx_l2c_spar4_s cn52xxp1; struct cvmx_l2c_spar4_s cn56xx; struct cvmx_l2c_spar4_s cn56xxp1; struct cvmx_l2c_spar4_s cn58xx; struct cvmx_l2c_spar4_s cn58xxp1; } cvmx_l2c_spar4_t; /** * cvmx_l2d_bst0 * * L2D_BST0 = L2C Data Store QUAD0 BIST Status Register * */ typedef union { uint64_t u64; struct cvmx_l2d_bst0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_35_63 : 29; uint64_t ftl : 1; /**< L2C Data Store Fatal Defect(across all QUADs) 2 or more columns were detected bad across all QUADs[0-3]. Please refer to individual quad failures for bad column = 0x7e to determine which QUAD was in error. */ uint64_t q0stat : 34; /**< Bist Results for QUAD0 Failure \#1 Status [16:14] bad bank [13:7] bad high column [6:0] bad low column Failure \#2 Status [33:31] bad bank [30:24] bad high column [23:17] bad low column NOTES: For bad high/low column reporting: 0x7f: No failure 0x7e: Fatal Defect: 2 or more bad columns 0-0x45: Bad column NOTE: If there are less than 2 failures then the bad bank will be 0x7. 
*/ #else uint64_t q0stat : 34; uint64_t ftl : 1; uint64_t reserved_35_63 : 29; #endif } s; struct cvmx_l2d_bst0_s cn30xx; struct cvmx_l2d_bst0_s cn31xx; struct cvmx_l2d_bst0_s cn38xx; struct cvmx_l2d_bst0_s cn38xxp2; struct cvmx_l2d_bst0_s cn50xx; struct cvmx_l2d_bst0_s cn52xx; struct cvmx_l2d_bst0_s cn52xxp1; struct cvmx_l2d_bst0_s cn56xx; struct cvmx_l2d_bst0_s cn56xxp1; struct cvmx_l2d_bst0_s cn58xx; struct cvmx_l2d_bst0_s cn58xxp1; } cvmx_l2d_bst0_t; /** * cvmx_l2d_bst1 * * L2D_BST1 = L2C Data Store QUAD1 BIST Status Register * */ typedef union { uint64_t u64; struct cvmx_l2d_bst1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_34_63 : 30; uint64_t q1stat : 34; /**< Bist Results for QUAD1 Failure \#1 Status [16:14] bad bank [13:7] bad high column [6:0] bad low column Failure \#2 Status [33:31] bad bank [30:24] bad high column [23:17] bad low column NOTES: For bad high/low column reporting: 0x7f: No failure 0x7e: Fatal Defect: 2 or more bad columns 0-0x45: Bad column NOTE: If there are less than 2 failures then the bad bank will be 0x7. */ #else uint64_t q1stat : 34; uint64_t reserved_34_63 : 30; #endif } s; struct cvmx_l2d_bst1_s cn30xx; struct cvmx_l2d_bst1_s cn31xx; struct cvmx_l2d_bst1_s cn38xx; struct cvmx_l2d_bst1_s cn38xxp2; struct cvmx_l2d_bst1_s cn50xx; struct cvmx_l2d_bst1_s cn52xx; struct cvmx_l2d_bst1_s cn52xxp1; struct cvmx_l2d_bst1_s cn56xx; struct cvmx_l2d_bst1_s cn56xxp1; struct cvmx_l2d_bst1_s cn58xx; struct cvmx_l2d_bst1_s cn58xxp1; } cvmx_l2d_bst1_t; /** * cvmx_l2d_bst2 * * L2D_BST2 = L2C Data Store QUAD2 BIST Status Register * */ typedef union { uint64_t u64; struct cvmx_l2d_bst2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_34_63 : 30; uint64_t q2stat : 34; /**< Bist Results for QUAD2 Failure \#1 Status [16:14] bad bank [13:7] bad high column [6:0] bad low column Failure \#2 Status [33:31] bad bank [30:24] bad high column [23:17] bad low column NOTES: For bad high/low column reporting: 0x7f: No failure 0x7e: Fatal Defect: 2 or more bad columns 0-0x45: Bad column NOTE: If there are less than 2 failures then the bad bank will be 0x7. */ #else uint64_t q2stat : 34; uint64_t reserved_34_63 : 30; #endif } s; struct cvmx_l2d_bst2_s cn30xx; struct cvmx_l2d_bst2_s cn31xx; struct cvmx_l2d_bst2_s cn38xx; struct cvmx_l2d_bst2_s cn38xxp2; struct cvmx_l2d_bst2_s cn50xx; struct cvmx_l2d_bst2_s cn52xx; struct cvmx_l2d_bst2_s cn52xxp1; struct cvmx_l2d_bst2_s cn56xx; struct cvmx_l2d_bst2_s cn56xxp1; struct cvmx_l2d_bst2_s cn58xx; struct cvmx_l2d_bst2_s cn58xxp1; } cvmx_l2d_bst2_t; /** * cvmx_l2d_bst3 * * L2D_BST3 = L2C Data Store QUAD3 BIST Status Register * */ typedef union { uint64_t u64; struct cvmx_l2d_bst3_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_34_63 : 30; uint64_t q3stat : 34; /**< Bist Results for QUAD3 Failure \#1 Status [16:14] bad bank [13:7] bad high column [6:0] bad low column Failure \#2 Status [33:31] bad bank [30:24] bad high column [23:17] bad low column NOTES: For bad high/low column reporting: 0x7f: No failure 0x7e: Fatal Defect: 2 or more bad columns 0-0x45: Bad column NOTE: If there are less than 2 failures then the bad bank will be 0x7. 
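
                                                 A decode sketch (editorial addition; the bit
                                                 positions come from the mapping above, and the
                                                 CVMX_L2D_BST3 address macro is assumed from
                                                 cvmx-csr.h):

                                                   uint64_t q = cvmx_read_csr(CVMX_L2D_BST3);
                                                   unsigned bank1  = (q >> 14) & 0x7;   // Failure #1 bad bank
                                                   unsigned hicol1 = (q >> 7)  & 0x7f;  // 0x7f: none, 0x7e: fatal
                                                   unsigned locol1 = (q >> 0)  & 0x7f;
                                                   unsigned bank2  = (q >> 31) & 0x7;   // Failure #2 bad bank
                                                   unsigned hicol2 = (q >> 24) & 0x7f;
                                                   unsigned locol2 = (q >> 17) & 0x7f;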
*/ #else uint64_t q3stat : 34; uint64_t reserved_34_63 : 30; #endif } s; struct cvmx_l2d_bst3_s cn30xx; struct cvmx_l2d_bst3_s cn31xx; struct cvmx_l2d_bst3_s cn38xx; struct cvmx_l2d_bst3_s cn38xxp2; struct cvmx_l2d_bst3_s cn50xx; struct cvmx_l2d_bst3_s cn52xx; struct cvmx_l2d_bst3_s cn52xxp1; struct cvmx_l2d_bst3_s cn56xx; struct cvmx_l2d_bst3_s cn56xxp1; struct cvmx_l2d_bst3_s cn58xx; struct cvmx_l2d_bst3_s cn58xxp1; } cvmx_l2d_bst3_t; /** * cvmx_l2d_err * * L2D_ERR = L2 Data Errors * * Description: L2 Data ECC SEC/DED Errors and Interrupt Enable */ typedef union { uint64_t u64; struct cvmx_l2d_err_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_6_63 : 58; uint64_t bmhclsel : 1; /**< L2 Bit Map Half CacheLine ECC Selector *** NOTE: PASS2 Addition When L2C_DBG[L2T]=1/L2D_ERR[ECC_ENA]=0, the BMHCLSEL selects which half cacheline to conditionally latch into the L2D_FSYN0/L2D_FSYN1 registers when an LDD command is detected from the diagnostic PP (see L2C_DBG[PPNUM]). - 0: OW[0-3] ECC (from first 1/2 cacheline) is selected to be conditionally latched into the L2D_FSYN0/1 CSRs. - 1: OW[4-7] ECC (from last 1/2 cacheline) is selected to be conditionally latched into the L2D_FSYN0/1 CSRs. */ uint64_t ded_err : 1; /**< L2D Double Error detected (DED) */ uint64_t sec_err : 1; /**< L2D Single Error corrected (SEC) */ uint64_t ded_intena : 1; /**< L2 Data ECC Double Error Detect(DED) Interrupt Enable bit When set, allows interrupts to be reported on double bit (uncorrectable) errors from the L2 Data Arrays. */ uint64_t sec_intena : 1; /**< L2 Data ECC Single Error Correct(SEC) Interrupt Enable bit When set, allows interrupts to be reported on single bit (correctable) errors from the L2 Data Arrays. */ uint64_t ecc_ena : 1; /**< L2 Data ECC Enable When set, enables 10-bit SEC/DED codeword for 128bit L2 Data Arrays. */ #else uint64_t ecc_ena : 1; uint64_t sec_intena : 1; uint64_t ded_intena : 1; uint64_t sec_err : 1; uint64_t ded_err : 1; uint64_t bmhclsel : 1; uint64_t reserved_6_63 : 58; #endif } s; struct cvmx_l2d_err_s cn30xx; struct cvmx_l2d_err_s cn31xx; struct cvmx_l2d_err_s cn38xx; struct cvmx_l2d_err_s cn38xxp2; struct cvmx_l2d_err_s cn50xx; struct cvmx_l2d_err_s cn52xx; struct cvmx_l2d_err_s cn52xxp1; struct cvmx_l2d_err_s cn56xx; struct cvmx_l2d_err_s cn56xxp1; struct cvmx_l2d_err_s cn58xx; struct cvmx_l2d_err_s cn58xxp1; } cvmx_l2d_err_t; /** * cvmx_l2d_fadr * * L2D_FADR = L2 Failing Address * * Description: L2 Data ECC SEC/DED Failing Address * * Notes: * When L2D_SEC_ERR or L2D_DED_ERR are set, this field contains the failing L2 Data store index. * (A DED Error will always overwrite a SEC Error SYNDROME and FADR). */ typedef union { uint64_t u64; struct cvmx_l2d_fadr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_19_63 : 45; uint64_t fadru : 1; /**< Failing L2 Data Store Upper Index bit(MSB) */ uint64_t fowmsk : 4; /**< Failing OW Mask (which one of 4 OWs contained SEC/DED error) */ uint64_t fset : 3; /**< Failing SET# */ uint64_t fadr : 11; /**< Failing L2 Data Store Lower Index bits (NOTE: L2 Data Store Index is for each 1/2 cacheline) [FADRU, FADR[10:1]]: cacheline index[17:7] FADR[0]: 1/2 cacheline index NOTE: FADR[1] is used to select between upper/lower 1MB physical L2 Data Store banks. 
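
                                                 A decode sketch (editorial addition; the
                                                 CVMX_L2D_FADR address macro is assumed from
                                                 cvmx-csr.h):

                                                   cvmx_l2d_fadr_t fadr;
                                                   fadr.u64 = cvmx_read_csr(CVMX_L2D_FADR);
                                                   // cacheline index[17:7] = [FADRU, FADR[10:1]]
                                                   uint64_t cl = ((uint64_t)fadr.s.fadru << 10) | (fadr.s.fadr >> 1);
                                                   unsigned half = fadr.s.fadr & 1;   // which 1/2 cacheline
                                                   unsigned set  = fadr.s.fset;       // failing set#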
*/ #else uint64_t fadr : 11; uint64_t fset : 3; uint64_t fowmsk : 4; uint64_t fadru : 1; uint64_t reserved_19_63 : 45; #endif } s; struct cvmx_l2d_fadr_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_18_63 : 46; uint64_t fowmsk : 4; /**< Failing OW Mask (which one of 4 OWs contained SEC/DED error) */ uint64_t reserved_13_13 : 1; uint64_t fset : 2; /**< Failing SET# */ uint64_t reserved_9_10 : 2; uint64_t fadr : 9; /**< Failing L2 Data Store Index(1of512 = 1/2 CL address) */ #else uint64_t fadr : 9; uint64_t reserved_9_10 : 2; uint64_t fset : 2; uint64_t reserved_13_13 : 1; uint64_t fowmsk : 4; uint64_t reserved_18_63 : 46; #endif } cn30xx; struct cvmx_l2d_fadr_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_18_63 : 46; uint64_t fowmsk : 4; /**< Failing OW Mask (which one of 4 OWs contained SEC/DED error) */ uint64_t reserved_13_13 : 1; uint64_t fset : 2; /**< Failing SET# */ uint64_t reserved_10_10 : 1; uint64_t fadr : 10; /**< Failing L2 Data Store Index (1 of 1024 = half cacheline indices) */ #else uint64_t fadr : 10; uint64_t reserved_10_10 : 1; uint64_t fset : 2; uint64_t reserved_13_13 : 1; uint64_t fowmsk : 4; uint64_t reserved_18_63 : 46; #endif } cn31xx; struct cvmx_l2d_fadr_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_18_63 : 46; uint64_t fowmsk : 4; /**< Failing OW Mask (which one of 4 OWs contained SEC/DED error) */ uint64_t fset : 3; /**< Failing SET# */ uint64_t fadr : 11; /**< Failing L2 Data Store Index (1of2K = 1/2 CL address) */ #else uint64_t fadr : 11; uint64_t fset : 3; uint64_t fowmsk : 4; uint64_t reserved_18_63 : 46; #endif } cn38xx; struct cvmx_l2d_fadr_cn38xx cn38xxp2; struct cvmx_l2d_fadr_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_18_63 : 46; uint64_t fowmsk : 4; /**< Failing OW Mask (which one of 4 OWs contained SEC/DED error) */ uint64_t fset : 3; /**< Failing SET# */ uint64_t reserved_8_10 : 3; uint64_t fadr : 8; /**< Failing L2 Data Store Lower Index bits (NOTE: L2 Data Store Index is for each 1/2 cacheline) FADR[7:1]: cacheline index[13:7] FADR[0]: 1/2 cacheline index */ #else uint64_t fadr : 8; uint64_t reserved_8_10 : 3; uint64_t fset : 3; uint64_t fowmsk : 4; uint64_t reserved_18_63 : 46; #endif } cn50xx; struct cvmx_l2d_fadr_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_18_63 : 46; uint64_t fowmsk : 4; /**< Failing OW Mask (which one of 4 OWs contained SEC/DED error) */ uint64_t fset : 3; /**< Failing SET# */ uint64_t reserved_10_10 : 1; uint64_t fadr : 10; /**< Failing L2 Data Store Lower Index bits (NOTE: L2 Data Store Index is for each 1/2 cacheline) FADR[9:1]: cacheline index[15:7] FADR[0]: 1/2 cacheline index */ #else uint64_t fadr : 10; uint64_t reserved_10_10 : 1; uint64_t fset : 3; uint64_t fowmsk : 4; uint64_t reserved_18_63 : 46; #endif } cn52xx; struct cvmx_l2d_fadr_cn52xx cn52xxp1; struct cvmx_l2d_fadr_s cn56xx; struct cvmx_l2d_fadr_s cn56xxp1; struct cvmx_l2d_fadr_s cn58xx; struct cvmx_l2d_fadr_s cn58xxp1; } cvmx_l2d_fadr_t; /** * cvmx_l2d_fsyn0 * * L2D_FSYN0 = L2 Failing Syndrome [OW0,4 / OW1,5] * * Description: L2 Data ECC SEC/DED Failing Syndrome for lower cache line * * Notes: * When L2D_SEC_ERR or L2D_DED_ERR are set, this field contains the failing L2 Data ECC 10b syndrome. * (A DED Error will always overwrite a SEC Error SYNDROME and FADR). 
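 *
 * A handler sketch (editorial addition; the CVMX_L2D_ERR/CVMX_L2D_FADR/
 * CVMX_L2D_FSYN0 address macros are assumed from cvmx-csr.h, and clearing
 * SEC_ERR/DED_ERR by writing the register back is an assumption to verify
 * against the HRM):
 *
 *   cvmx_l2d_err_t err;
 *   err.u64 = cvmx_read_csr(CVMX_L2D_ERR);
 *   if (err.s.sec_err || err.s.ded_err)
 *   {
 *       uint64_t fadr = cvmx_read_csr(CVMX_L2D_FADR);   // capture before clearing
 *       uint64_t syn0 = cvmx_read_csr(CVMX_L2D_FSYN0);
 *       cvmx_write_csr(CVMX_L2D_ERR, err.u64);          // write back to clear
 *   }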
 */
typedef union
{
    uint64_t u64;
    struct cvmx_l2d_fsyn0_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_20_63          : 44;
        uint64_t fsyn_ow1                : 10; /**< Failing L2 Data Store SYNDROME OW[1,5]
                                                    When L2D_ERR[ECC_ENA]=1 and either L2D_ERR[SEC_ERR]
                                                    or L2D_ERR[DED_ERR] are set, this field represents
                                                    the failing OWECC syndrome for the half cacheline
                                                    indexed by L2D_FADR[FADR].
                                                    NOTE: The L2D_FADR[FOWMSK] further qualifies which
                                                    OW lane(1of4) detected the error.
                                                    When L2C_DBG[L2T]=1 and L2D_ERR[ECC_ENA]=0, an
                                                    LDD command from the diagnostic PP will
                                                    conditionally latch the raw OWECC for the selected
                                                    half cacheline. (see: L2D_ERR[BMHCLSEL]) */
        uint64_t fsyn_ow0                : 10; /**< Failing L2 Data Store SYNDROME OW[0,4]
                                                    When L2D_ERR[ECC_ENA]=1 and either L2D_ERR[SEC_ERR]
                                                    or L2D_ERR[DED_ERR] are set, this field represents
                                                    the failing OWECC syndrome for the half cacheline
                                                    indexed by L2D_FADR[FADR].
                                                    NOTE: The L2D_FADR[FOWMSK] further qualifies which
                                                    OW lane(1of4) detected the error.
                                                    When L2C_DBG[L2T]=1 and L2D_ERR[ECC_ENA]=0, an
                                                    LDD (L1 load-miss) from the diagnostic PP will
                                                    conditionally latch the raw OWECC for the selected
                                                    half cacheline. (see: L2D_ERR[BMHCLSEL]) */
#else
        uint64_t fsyn_ow0                : 10;
        uint64_t fsyn_ow1                : 10;
        uint64_t reserved_20_63          : 44;
#endif
    } s;
    struct cvmx_l2d_fsyn0_s              cn30xx;
    struct cvmx_l2d_fsyn0_s              cn31xx;
    struct cvmx_l2d_fsyn0_s              cn38xx;
    struct cvmx_l2d_fsyn0_s              cn38xxp2;
    struct cvmx_l2d_fsyn0_s              cn50xx;
    struct cvmx_l2d_fsyn0_s              cn52xx;
    struct cvmx_l2d_fsyn0_s              cn52xxp1;
    struct cvmx_l2d_fsyn0_s              cn56xx;
    struct cvmx_l2d_fsyn0_s              cn56xxp1;
    struct cvmx_l2d_fsyn0_s              cn58xx;
    struct cvmx_l2d_fsyn0_s              cn58xxp1;
} cvmx_l2d_fsyn0_t;


/**
 * cvmx_l2d_fsyn1
 *
 * L2D_FSYN1 = L2 Failing Syndrome [OW2,6 / OW3,7]
 *
 * Description: L2 Data ECC SEC/DED Failing Syndrome for upper cache line
 *
 * Notes:
 * When L2D_SEC_ERR or L2D_DED_ERR are set, this field contains the failing L2 Data ECC 10b syndrome.
 * (A DED Error will always overwrite a SEC Error SYNDROME and FADR).
 */
typedef union
{
    uint64_t u64;
    struct cvmx_l2d_fsyn1_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_20_63          : 44;
        uint64_t fsyn_ow3                : 10; /**< Failing L2 Data Store SYNDROME OW[3,7] */
        uint64_t fsyn_ow2                : 10; /**< Failing L2 Data Store SYNDROME OW[2,6] */
#else
        uint64_t fsyn_ow2                : 10;
        uint64_t fsyn_ow3                : 10;
        uint64_t reserved_20_63          : 44;
#endif
    } s;
    struct cvmx_l2d_fsyn1_s              cn30xx;
    struct cvmx_l2d_fsyn1_s              cn31xx;
    struct cvmx_l2d_fsyn1_s              cn38xx;
    struct cvmx_l2d_fsyn1_s              cn38xxp2;
    struct cvmx_l2d_fsyn1_s              cn50xx;
    struct cvmx_l2d_fsyn1_s              cn52xx;
    struct cvmx_l2d_fsyn1_s              cn52xxp1;
    struct cvmx_l2d_fsyn1_s              cn56xx;
    struct cvmx_l2d_fsyn1_s              cn56xxp1;
    struct cvmx_l2d_fsyn1_s              cn58xx;
    struct cvmx_l2d_fsyn1_s              cn58xxp1;
} cvmx_l2d_fsyn1_t;


/**
 * cvmx_l2d_fus0
 *
 * L2D_FUS0 = L2C Data Store QUAD0 Fuse Register
 *
 */
typedef union
{
    uint64_t u64;
    struct cvmx_l2d_fus0_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_34_63          : 30;
        uint64_t q0fus                   : 34; /**< Fuse Register for QUAD0
                                                    This is purely for debug and not needed in the
                                                    general manufacturing flow.
                                                    Note that the fuses are complementary (Assigning a
                                                    fuse to 1 will read as a zero). This means the
                                                    case where no fuses are blown result in these
                                                    csr's showing all ones.
                                                    Failure \#1 Fuse Mapping
                                                     [16:14] bad bank
                                                     [13:7]  bad high column
                                                     [6:0]   bad low column
                                                    Failure \#2 Fuse Mapping
                                                     [33:31] bad bank
                                                     [30:24] bad high column
                                                     [23:17] bad low column */
#else
        uint64_t q0fus                   : 34;
        uint64_t reserved_34_63          : 30;
#endif
    } s;
    struct cvmx_l2d_fus0_s               cn30xx;
    struct cvmx_l2d_fus0_s               cn31xx;
    struct cvmx_l2d_fus0_s               cn38xx;
    struct cvmx_l2d_fus0_s               cn38xxp2;
    struct cvmx_l2d_fus0_s               cn50xx;
    struct cvmx_l2d_fus0_s               cn52xx;
    struct cvmx_l2d_fus0_s               cn52xxp1;
    struct cvmx_l2d_fus0_s               cn56xx;
    struct cvmx_l2d_fus0_s               cn56xxp1;
    struct cvmx_l2d_fus0_s               cn58xx;
    struct cvmx_l2d_fus0_s               cn58xxp1;
} cvmx_l2d_fus0_t;


/**
 * cvmx_l2d_fus1
 *
 * L2D_FUS1 = L2C Data Store QUAD1 Fuse Register
 *
 */
typedef union
{
    uint64_t u64;
    struct cvmx_l2d_fus1_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_34_63          : 30;
        uint64_t q1fus                   : 34; /**< Fuse Register for QUAD1
                                                    This is purely for debug and not needed in the
                                                    general manufacturing flow.
                                                    Note that the fuses are complementary (Assigning a
                                                    fuse to 1 will read as a zero). This means the
                                                    case where no fuses are blown result in these
                                                    csr's showing all ones.
                                                    Failure \#1 Fuse Mapping
                                                     [16:14] bad bank
                                                     [13:7]  bad high column
                                                     [6:0]   bad low column
                                                    Failure \#2 Fuse Mapping
                                                     [33:31] bad bank
                                                     [30:24] bad high column
                                                     [23:17] bad low column */
#else
        uint64_t q1fus                   : 34;
        uint64_t reserved_34_63          : 30;
#endif
    } s;
    struct cvmx_l2d_fus1_s               cn30xx;
    struct cvmx_l2d_fus1_s               cn31xx;
    struct cvmx_l2d_fus1_s               cn38xx;
    struct cvmx_l2d_fus1_s               cn38xxp2;
    struct cvmx_l2d_fus1_s               cn50xx;
    struct cvmx_l2d_fus1_s               cn52xx;
    struct cvmx_l2d_fus1_s               cn52xxp1;
    struct cvmx_l2d_fus1_s               cn56xx;
    struct cvmx_l2d_fus1_s               cn56xxp1;
    struct cvmx_l2d_fus1_s               cn58xx;
    struct cvmx_l2d_fus1_s               cn58xxp1;
} cvmx_l2d_fus1_t;


/**
 * cvmx_l2d_fus2
 *
 * L2D_FUS2 = L2C Data Store QUAD2 Fuse Register
 *
 */
typedef union
{
    uint64_t u64;
    struct cvmx_l2d_fus2_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_34_63          : 30;
        uint64_t q2fus                   : 34; /**< Fuse Register for QUAD2
                                                    This is purely for debug and not needed in the
                                                    general manufacturing flow.
                                                    Note that the fuses are complementary (Assigning a
                                                    fuse to 1 will read as a zero). This means the
                                                    case where no fuses are blown result in these
                                                    csr's showing all ones.
                                                    Failure \#1 Fuse Mapping
                                                     [16:14] bad bank
                                                     [13:7]  bad high column
                                                     [6:0]   bad low column
                                                    Failure \#2 Fuse Mapping
                                                     [33:31] bad bank
                                                     [30:24] bad high column
                                                     [23:17] bad low column */
#else
        uint64_t q2fus                   : 34;
        uint64_t reserved_34_63          : 30;
#endif
    } s;
    struct cvmx_l2d_fus2_s               cn30xx;
    struct cvmx_l2d_fus2_s               cn31xx;
    struct cvmx_l2d_fus2_s               cn38xx;
    struct cvmx_l2d_fus2_s               cn38xxp2;
    struct cvmx_l2d_fus2_s               cn50xx;
    struct cvmx_l2d_fus2_s               cn52xx;
    struct cvmx_l2d_fus2_s               cn52xxp1;
    struct cvmx_l2d_fus2_s               cn56xx;
    struct cvmx_l2d_fus2_s               cn56xxp1;
    struct cvmx_l2d_fus2_s               cn58xx;
    struct cvmx_l2d_fus2_s               cn58xxp1;
} cvmx_l2d_fus2_t;


/**
 * cvmx_l2d_fus3
 *
 * L2D_FUS3 = L2C Data Store QUAD3 Fuse Register
 *
 */
typedef union
{
    uint64_t u64;
    struct cvmx_l2d_fus3_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_40_63          : 24;
        uint64_t ema_ctl                 : 3;  /**< L2 Data Store EMA Control
                                                    These bits are used to 'observe' the EMA[1:0]
                                                    inputs for the L2 Data Store RAMs which are
                                                    controlled by either FUSES[141:140] or by
                                                    MIO_FUSE_EMA[EMA] CSR.
                                                    From poweron (dc_ok), the EMA_CTL are driven from
                                                    FUSE[141:140]. However after the 1st CSR write
                                                    to the MIO_FUSE_EMA[EMA] bits, the EMA_CTL will
                                                    source from the MIO_FUSE_EMA[EMA] register
                                                    permanently (until dc_ok).
NOTE: O9N Addition */ uint64_t reserved_34_36 : 3; uint64_t q3fus : 34; /**< Fuse Register for QUAD3 This is purely for debug and not needed in the general manufacturing flow. Note that the fuses are complementary (Assigning a fuse to 1 will read as a zero). This means the case where no fuses are blown result in these csr's showing all ones. Failure \#1 Fuse Mapping [16:14] bad bank [13:7] bad high column [6:0] bad low column Failure \#2 Fuse Mapping [33:31] bad bank [30:24] bad high column [23:17] bad low column */ #else uint64_t q3fus : 34; uint64_t reserved_34_36 : 3; uint64_t ema_ctl : 3; uint64_t reserved_40_63 : 24; #endif } s; struct cvmx_l2d_fus3_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_35_63 : 29; uint64_t crip_64k : 1; /**< This is purely for debug and not needed in the general manufacturing flow. If the FUSE is not-blown, then this bit should read as 0. If the FUSE is blown, then this bit should read as 1. */ uint64_t q3fus : 34; /**< Fuse Register for QUAD3 This is purely for debug and not needed in the general manufacturing flow. Note that the fuses are complementary (Assigning a fuse to 1 will read as a zero). This means the case where no fuses are blown result in these csr's showing all ones. Failure \#1 Fuse Mapping [16:15] UNUSED [14] bad bank [13:7] bad high column [6:0] bad low column Failure \#2 Fuse Mapping [33:32] UNUSED [31] bad bank [30:24] bad high column [23:17] bad low column */ #else uint64_t q3fus : 34; uint64_t crip_64k : 1; uint64_t reserved_35_63 : 29; #endif } cn30xx; struct cvmx_l2d_fus3_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_35_63 : 29; uint64_t crip_128k : 1; /**< This is purely for debug and not needed in the general manufacturing flow. If the FUSE is not-blown, then this bit should read as 0. If the FUSE is blown, then this bit should read as 1. */ uint64_t q3fus : 34; /**< Fuse Register for QUAD3 This is purely for debug and not needed in the general manufacturing flow. Note that the fuses are complementary (Assigning a fuse to 1 will read as a zero). This means the case where no fuses are blown result in these csr's showing all ones. Failure \#1 Fuse Mapping [16:15] UNUSED [14] bad bank [13:7] bad high column [6:0] bad low column Failure \#2 Fuse Mapping [33:32] UNUSED [31] bad bank [30:24] bad high column [23:17] bad low column */ #else uint64_t q3fus : 34; uint64_t crip_128k : 1; uint64_t reserved_35_63 : 29; #endif } cn31xx; struct cvmx_l2d_fus3_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t crip_256k : 1; /**< This is purely for debug and not needed in the general manufacturing flow. If the FUSE is not-blown, then this bit should read as 0. If the FUSE is blown, then this bit should read as 1. *** NOTE: Pass2 Addition */ uint64_t crip_512k : 1; /**< This is purely for debug and not needed in the general manufacturing flow. If the FUSE is not-blown, then this bit should read as 0. If the FUSE is blown, then this bit should read as 1. *** NOTE: Pass2 Addition */ uint64_t q3fus : 34; /**< Fuse Register for QUAD3 This is purely for debug and not needed in the general manufacturing flow. Note that the fuses are complementary (Assigning a fuse to 1 will read as a zero). This means the case where no fuses are blown result in these csr's showing all ones. 
Failure \#1 Fuse Mapping [16:14] bad bank [13:7] bad high column [6:0] bad low column Failure \#2 Fuse Mapping [33:31] bad bank [30:24] bad high column [23:17] bad low column */ #else uint64_t q3fus : 34; uint64_t crip_512k : 1; uint64_t crip_256k : 1; uint64_t reserved_36_63 : 28; #endif } cn38xx; struct cvmx_l2d_fus3_cn38xx cn38xxp2; struct cvmx_l2d_fus3_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_40_63 : 24; uint64_t ema_ctl : 3; /**< L2 Data Store EMA Control These bits are used to 'observe' the EMA[2:0] inputs for the L2 Data Store RAMs which are controlled by either FUSES[142:140] or by MIO_FUSE_EMA[EMA] CSR. From poweron (dc_ok), the EMA_CTL are driven from FUSE[141:140]. However after the 1st CSR write to the MIO_FUSE_EMA[EMA] bits, the EMA_CTL will source from the MIO_FUSE_EMA[EMA] register permanently (until dc_ok). */ uint64_t reserved_36_36 : 1; uint64_t crip_32k : 1; /**< This is purely for debug and not needed in the general manufacturing flow. If the FUSE is not-blown, then this bit should read as 0. If the FUSE is blown, then this bit should read as 1. */ uint64_t crip_64k : 1; /**< This is purely for debug and not needed in the general manufacturing flow. If the FUSE is not-blown, then this bit should read as 0. If the FUSE is blown, then this bit should read as 1. */ uint64_t q3fus : 34; /**< Fuse Register for QUAD3 This is purely for debug and not needed in the general manufacturing flow. Note that the fuses are complementary (Assigning a fuse to 1 will read as a zero). This means the case where no fuses are blown result in these csr's showing all ones. Failure \#1 Fuse Mapping [16:14] UNUSED (5020 uses single physical bank per quad) [13:7] bad high column [6:0] bad low column Failure \#2 Fuse Mapping [33:31] UNUSED (5020 uses single physical bank per quad) [30:24] bad high column [23:17] bad low column */ #else uint64_t q3fus : 34; uint64_t crip_64k : 1; uint64_t crip_32k : 1; uint64_t reserved_36_36 : 1; uint64_t ema_ctl : 3; uint64_t reserved_40_63 : 24; #endif } cn50xx; struct cvmx_l2d_fus3_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_40_63 : 24; uint64_t ema_ctl : 3; /**< L2 Data Store EMA Control These bits are used to 'observe' the EMA[2:0] inputs for the L2 Data Store RAMs which are controlled by either FUSES[142:140] or by MIO_FUSE_EMA[EMA] CSR. From poweron (dc_ok), the EMA_CTL are driven from FUSE[141:140]. However after the 1st CSR write to the MIO_FUSE_EMA[EMA] bits, the EMA_CTL will source from the MIO_FUSE_EMA[EMA] register permanently (until dc_ok). */ uint64_t reserved_36_36 : 1; uint64_t crip_128k : 1; /**< This is purely for debug and not needed in the general manufacturing flow. If the FUSE is not-blown, then this bit should read as 0. If the FUSE is blown, then this bit should read as 1. */ uint64_t crip_256k : 1; /**< This is purely for debug and not needed in the general manufacturing flow. If the FUSE is not-blown, then this bit should read as 0. If the FUSE is blown, then this bit should read as 1. */ uint64_t q3fus : 34; /**< Fuse Register for QUAD3 This is purely for debug and not needed in the general manufacturing flow. Note that the fuses are complementary (Assigning a fuse to 1 will read as a zero). This means the case where no fuses are blown result in these csr's showing all ones. 
Failure \#1 Fuse Mapping [16:14] UNUSED (5020 uses single physical bank per quad) [13:7] bad high column [6:0] bad low column Failure \#2 Fuse Mapping [33:31] UNUSED (5020 uses single physical bank per quad) [30:24] bad high column [23:17] bad low column */ #else uint64_t q3fus : 34; uint64_t crip_256k : 1; uint64_t crip_128k : 1; uint64_t reserved_36_36 : 1; uint64_t ema_ctl : 3; uint64_t reserved_40_63 : 24; #endif } cn52xx; struct cvmx_l2d_fus3_cn52xx cn52xxp1; struct cvmx_l2d_fus3_cn56xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_40_63 : 24; uint64_t ema_ctl : 3; /**< L2 Data Store EMA Control These bits are used to 'observe' the EMA[2:0] inputs for the L2 Data Store RAMs which are controlled by either FUSES[142:140] or by MIO_FUSE_EMA[EMA] CSR. From poweron (dc_ok), the EMA_CTL are driven from FUSE[141:140]. However after the 1st CSR write to the MIO_FUSE_EMA[EMA] bits, the EMA_CTL will source from the MIO_FUSE_EMA[EMA] register permanently (until dc_ok). NOTE: O9N Addition */ uint64_t reserved_36_36 : 1; uint64_t crip_512k : 1; /**< This is purely for debug and not needed in the general manufacturing flow. If the FUSE is not-blown, then this bit should read as 0. If the FUSE is blown, then this bit should read as 1. *** NOTE: Pass2 Addition */ uint64_t crip_1024k : 1; /**< This is purely for debug and not needed in the general manufacturing flow. If the FUSE is not-blown, then this bit should read as 0. If the FUSE is blown, then this bit should read as 1. *** NOTE: Pass2 Addition */ uint64_t q3fus : 34; /**< Fuse Register for QUAD3 This is purely for debug and not needed in the general manufacturing flow. Note that the fuses are complementary (Assigning a fuse to 1 will read as a zero). This means the case where no fuses are blown result in these csr's showing all ones. Failure \#1 Fuse Mapping [16:14] bad bank [13:7] bad high column [6:0] bad low column Failure \#2 Fuse Mapping [33:31] bad bank [30:24] bad high column [23:17] bad low column */ #else uint64_t q3fus : 34; uint64_t crip_1024k : 1; uint64_t crip_512k : 1; uint64_t reserved_36_36 : 1; uint64_t ema_ctl : 3; uint64_t reserved_40_63 : 24; #endif } cn56xx; struct cvmx_l2d_fus3_cn56xx cn56xxp1; struct cvmx_l2d_fus3_cn58xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_39_63 : 25; uint64_t ema_ctl : 2; /**< L2 Data Store EMA Control These bits are used to 'observe' the EMA[1:0] inputs for the L2 Data Store RAMs which are controlled by either FUSES[141:140] or by MIO_FUSE_EMA[EMA] CSR. From poweron (dc_ok), the EMA_CTL are driven from FUSE[141:140]. However after the 1st CSR write to the MIO_FUSE_EMA[EMA] bits, the EMA_CTL will source from the MIO_FUSE_EMA[EMA] register permanently (until dc_ok). NOTE: O9N Addition */ uint64_t reserved_36_36 : 1; uint64_t crip_512k : 1; /**< This is purely for debug and not needed in the general manufacturing flow. If the FUSE is not-blown, then this bit should read as 0. If the FUSE is blown, then this bit should read as 1. *** NOTE: Pass2 Addition */ uint64_t crip_1024k : 1; /**< This is purely for debug and not needed in the general manufacturing flow. If the FUSE is not-blown, then this bit should read as 0. If the FUSE is blown, then this bit should read as 1. *** NOTE: Pass2 Addition */ uint64_t q3fus : 34; /**< Fuse Register for QUAD3 This is purely for debug and not needed in the general manufacturing flow. Note that the fuses are complementary (Assigning a fuse to 1 will read as a zero). 
This means the case where no fuses are blown result in these csr's showing all ones. Failure \#1 Fuse Mapping [16:14] bad bank [13:7] bad high column [6:0] bad low column Failure \#2 Fuse Mapping [33:31] bad bank [30:24] bad high column [23:17] bad low column */ #else uint64_t q3fus : 34; uint64_t crip_1024k : 1; uint64_t crip_512k : 1; uint64_t reserved_36_36 : 1; uint64_t ema_ctl : 2; uint64_t reserved_39_63 : 25; #endif } cn58xx; struct cvmx_l2d_fus3_cn58xx cn58xxp1; } cvmx_l2d_fus3_t; /** * cvmx_l2t_err * * L2T_ERR = L2 Tag Errors * * Description: L2 Tag ECC SEC/DED Errors and Interrupt Enable */ typedef union { uint64_t u64; struct cvmx_l2t_err_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_29_63 : 35; uint64_t fadru : 1; /**< Failing L2 Tag Upper Address Bit (Index[10]) When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set, the FADRU contains the upper(MSB bit) cacheline index into the L2 Tag Store. */ uint64_t lck_intena2 : 1; /**< L2 Tag Lock Error2 Interrupt Enable bit *** NOTE: PASS2 Addition */ uint64_t lckerr2 : 1; /**< HW detected a case where a Rd/Wr Miss from PP#n could not find an available/unlocked set (for replacement). Most likely, this is a result of SW mixing SET PARTITIONING with ADDRESS LOCKING. If SW allows another PP to LOCKDOWN all SETs available to PP#n, then a Rd/Wr Miss from PP#n will be unable to determine a 'valid' replacement set (since LOCKED addresses should NEVER be replaced). If such an event occurs, the HW will select the smallest available SET(specified by UMSK'x)' as the replacement set, and the address is unlocked. *** NOTE: PASS2 Addition */ uint64_t lck_intena : 1; /**< L2 Tag Lock Error Interrupt Enable bit */ uint64_t lckerr : 1; /**< SW attempted to LOCK DOWN the last available set of the INDEX (which is ignored by HW - but reported to SW). The LDD(L1 load-miss) for the LOCK operation is completed successfully, however the address is NOT locked. NOTE: 'Available' sets takes the L2C_SPAR*[UMSK*] into account. For example, if diagnostic PPx has UMSKx defined to only use SETs [1:0], and SET1 had been previously LOCKED, then an attempt to LOCK the last available SET0 would result in a LCKERR. (This is to ensure that at least 1 SET at each INDEX is not LOCKED for general use by other PPs). */ uint64_t fset : 3; /**< Failing L2 Tag Hit Set# (1-of-8) When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set and (FSYN != 0), the FSET specifies the failing hit-set. NOTE: During a force-hit (L2T/L2D/L2T=1), the hit-set is specified by the L2C_DBG[SET]. */ uint64_t fadr : 10; /**< Failing L2 Tag Address (10-bit Index) When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set, the FADR contains the lower 10bit cacheline index into the L2 Tag Store. */ uint64_t fsyn : 6; /**< When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set, the contents of this register contain the 6-bit syndrome for the hit set only. If (FSYN = 0), the SBE or DBE reported was for one of the "non-hit" sets at the failing index(FADR). NOTE: During a force-hit (L2T/L2D/L2T=1), the hit set is specified by the L2C_DBG[SET]. If (FSYN != 0), the SBE or DBE reported was for the hit set at the failing index(FADR) and failing set(FSET). SW NOTE: To determine which "non-hit" set was in error, SW can use the L2C_DBG[L2T] debug feature to explicitly read the other sets at the failing index(FADR). When (FSYN !=0), then the FSET contains the failing hit-set. NOTE: A DED Error will always overwrite a SEC Error SYNDROME and FADR). 
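
                                                 An interpretation sketch (editorial addition;
                                                 the CVMX_L2T_ERR address macro is assumed from
                                                 cvmx-csr.h):

                                                   cvmx_l2t_err_t terr;
                                                   terr.u64 = cvmx_read_csr(CVMX_L2T_ERR);
                                                   if ((terr.s.sec_err || terr.s.ded_err) && terr.s.fsyn != 0)
                                                   {
                                                       // error was in the hit set
                                                       uint64_t index = ((uint64_t)terr.s.fadru << 10) | terr.s.fadr;
                                                       unsigned set   = terr.s.fset;
                                                   }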
*/ uint64_t ded_err : 1; /**< L2T Double Bit Error detected (DED) During every L2 Tag Probe, all 8 sets Tag's (at a given index) are checked for double bit errors(DBEs). This bit is set if ANY of the 8 sets contains a DBE. DBEs also generated an interrupt(if enabled). */ uint64_t sec_err : 1; /**< L2T Single Bit Error corrected (SEC) During every L2 Tag Probe, all 8 sets Tag's (at a given index) are checked for single bit errors(SBEs). This bit is set if ANY of the 8 sets contains an SBE. SBEs are auto corrected in HW and generate an interrupt(if enabled). */ uint64_t ded_intena : 1; /**< L2 Tag ECC Double Error Detect(DED) Interrupt Enable bit. When set, allows interrupts to be reported on double bit (uncorrectable) errors from the L2 Tag Arrays. */ uint64_t sec_intena : 1; /**< L2 Tag ECC Single Error Correct(SEC) Interrupt Enable bit. When set, allows interrupts to be reported on single bit (correctable) errors from the L2 Tag Arrays. */ uint64_t ecc_ena : 1; /**< L2 Tag ECC Enable When set, enables 6-bit SEC/DED codeword for 19-bit L2 Tag Arrays [V,D,L,TAG[33:18]] */ #else uint64_t ecc_ena : 1; uint64_t sec_intena : 1; uint64_t ded_intena : 1; uint64_t sec_err : 1; uint64_t ded_err : 1; uint64_t fsyn : 6; uint64_t fadr : 10; uint64_t fset : 3; uint64_t lckerr : 1; uint64_t lck_intena : 1; uint64_t lckerr2 : 1; uint64_t lck_intena2 : 1; uint64_t fadru : 1; uint64_t reserved_29_63 : 35; #endif } s; struct cvmx_l2t_err_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_28_63 : 36; uint64_t lck_intena2 : 1; /**< L2 Tag Lock Error2 Interrupt Enable bit */ uint64_t lckerr2 : 1; /**< HW detected a case where a Rd/Wr Miss from PP#n could not find an available/unlocked set (for replacement). Most likely, this is a result of SW mixing SET PARTITIONING with ADDRESS LOCKING. If SW allows another PP to LOCKDOWN all SETs available to PP#n, then a Rd/Wr Miss from PP#n will be unable to determine a 'valid' replacement set (since LOCKED addresses should NEVER be replaced). If such an event occurs, the HW will select the smallest available SET(specified by UMSK'x)' as the replacement set, and the address is unlocked. */ uint64_t lck_intena : 1; /**< L2 Tag Lock Error Interrupt Enable bit */ uint64_t lckerr : 1; /**< SW attempted to LOCK DOWN the last available set of the INDEX (which is ignored by HW - but reported to SW). The LDD(L1 load-miss) for the LOCK operation is completed successfully, however the address is NOT locked. NOTE: 'Available' sets takes the L2C_SPAR*[UMSK*] into account. For example, if diagnostic PPx has UMSKx defined to only use SETs [1:0], and SET1 had been previously LOCKED, then an attempt to LOCK the last available SET0 would result in a LCKERR. (This is to ensure that at least 1 SET at each INDEX is not LOCKED for general use by other PPs). */ uint64_t reserved_23_23 : 1; uint64_t fset : 2; /**< Failing L2 Tag Hit Set# (1-of-4) When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set and (FSYN != 0), the FSET specifies the failing hit-set. NOTE: During a force-hit (L2T/L2D/L2T=1), the hit-set is specified by the L2C_DBG[SET]. */ uint64_t reserved_19_20 : 2; uint64_t fadr : 8; /**< Failing L2 Tag Store Index (8-bit) When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set, the FADR contains the 8bit cacheline index into the L2 Tag Store. */ uint64_t fsyn : 6; /**< When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set, the contents of this register contain the 6-bit syndrome for the hit set only. 
If (FSYN = 0), the SBE or DBE reported was for one of the "non-hit" sets at the failing index(FADR). NOTE: During a force-hit (L2T/L2D/L2T=1), the hit set is specified by the L2C_DBG[SET]. If (FSYN != 0), the SBE or DBE reported was for the hit set at the failing index(FADR) and failing set(FSET). SW NOTE: To determine which "non-hit" set was in error, SW can use the L2C_DBG[L2T] debug feature to explicitly read the other sets at the failing index(FADR). When (FSYN !=0), then the FSET contains the failing hit-set. NOTE: A DED Error will always overwrite a SEC Error SYNDROME and FADR). */ uint64_t ded_err : 1; /**< L2T Double Bit Error detected (DED) During every L2 Tag Probe, all 8 sets Tag's (at a given index) are checked for double bit errors(DBEs). This bit is set if ANY of the 8 sets contains a DBE. DBEs also generated an interrupt(if enabled). */ uint64_t sec_err : 1; /**< L2T Single Bit Error corrected (SEC) During every L2 Tag Probe, all 8 sets Tag's (at a given index) are checked for single bit errors(SBEs). This bit is set if ANY of the 8 sets contains an SBE. SBEs are auto corrected in HW and generate an interrupt(if enabled). */ uint64_t ded_intena : 1; /**< L2 Tag ECC Double Error Detect(DED) Interrupt Enable bit. When set, allows interrupts to be reported on double bit (uncorrectable) errors from the L2 Tag Arrays. */ uint64_t sec_intena : 1; /**< L2 Tag ECC Single Error Correct(SEC) Interrupt Enable bit. When set, allows interrupts to be reported on single bit (correctable) errors from the L2 Tag Arrays. */ uint64_t ecc_ena : 1; /**< L2 Tag ECC Enable When set, enables 6-bit SEC/DED codeword for 22-bit L2 Tag Arrays [V,D,L,TAG[33:15]] */ #else uint64_t ecc_ena : 1; uint64_t sec_intena : 1; uint64_t ded_intena : 1; uint64_t sec_err : 1; uint64_t ded_err : 1; uint64_t fsyn : 6; uint64_t fadr : 8; uint64_t reserved_19_20 : 2; uint64_t fset : 2; uint64_t reserved_23_23 : 1; uint64_t lckerr : 1; uint64_t lck_intena : 1; uint64_t lckerr2 : 1; uint64_t lck_intena2 : 1; uint64_t reserved_28_63 : 36; #endif } cn30xx; struct cvmx_l2t_err_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_28_63 : 36; uint64_t lck_intena2 : 1; /**< L2 Tag Lock Error2 Interrupt Enable bit */ uint64_t lckerr2 : 1; /**< HW detected a case where a Rd/Wr Miss from PP#n could not find an available/unlocked set (for replacement). Most likely, this is a result of SW mixing SET PARTITIONING with ADDRESS LOCKING. If SW allows another PP to LOCKDOWN all SETs available to PP#n, then a Rd/Wr Miss from PP#n will be unable to determine a 'valid' replacement set (since LOCKED addresses should NEVER be replaced). If such an event occurs, the HW will select the smallest available SET(specified by UMSK'x)' as the replacement set, and the address is unlocked. */ uint64_t lck_intena : 1; /**< L2 Tag Lock Error Interrupt Enable bit */ uint64_t lckerr : 1; /**< SW attempted to LOCK DOWN the last available set of the INDEX (which is ignored by HW - but reported to SW). The LDD(L1 load-miss) for the LOCK operation is completed successfully, however the address is NOT locked. NOTE: 'Available' sets takes the L2C_SPAR*[UMSK*] into account. For example, if diagnostic PPx has UMSKx defined to only use SETs [1:0], and SET1 had been previously LOCKED, then an attempt to LOCK the last available SET0 would result in a LCKERR. (This is to ensure that at least 1 SET at each INDEX is not LOCKED for general use by other PPs). 
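
                                                 A polling sketch (editorial addition): after SW
                                                 attempts an L2 lockdown it can check whether the
                                                 lock was refused (CVMX_L2T_ERR address macro
                                                 assumed from cvmx-csr.h):

                                                   cvmx_l2t_err_t terr;
                                                   terr.u64 = cvmx_read_csr(CVMX_L2T_ERR);
                                                   if (terr.s.lckerr || terr.s.lckerr2)
                                                   {
                                                       // address was NOT locked; see description above
                                                   }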
*/ uint64_t reserved_23_23 : 1; uint64_t fset : 2; /**< Failing L2 Tag Hit Set# (1-of-4) When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set and (FSYN != 0), the FSET specifies the failing hit-set. NOTE: During a force-hit (L2T/L2D/L2T=1), the hit-set is specified by the L2C_DBG[SET]. */ uint64_t reserved_20_20 : 1; uint64_t fadr : 9; /**< Failing L2 Tag Address (9-bit Index) When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set, the FADR contains the 9-bit cacheline index into the L2 Tag Store. */ uint64_t fsyn : 6; /**< When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set, the contents of this register contain the 6-bit syndrome for the hit set only. If (FSYN = 0), the SBE or DBE reported was for one of the "non-hit" sets at the failing index(FADR). NOTE: During a force-hit (L2T/L2D/L2T=1), the hit set is specified by the L2C_DBG[SET]. If (FSYN != 0), the SBE or DBE reported was for the hit set at the failing index(FADR) and failing set(FSET). SW NOTE: To determine which "non-hit" set was in error, SW can use the L2C_DBG[L2T] debug feature to explicitly read the other sets at the failing index(FADR). When (FSYN !=0), then the FSET contains the failing hit-set. NOTE: A DED Error will always overwrite a SEC Error SYNDROME and FADR). */ uint64_t ded_err : 1; /**< L2T Double Bit Error detected (DED) During every L2 Tag Probe, all 8 sets Tag's (at a given index) are checked for double bit errors(DBEs). This bit is set if ANY of the 8 sets contains a DBE. DBEs also generated an interrupt(if enabled). */ uint64_t sec_err : 1; /**< L2T Single Bit Error corrected (SEC) During every L2 Tag Probe, all 8 sets Tag's (at a given index) are checked for single bit errors(SBEs). This bit is set if ANY of the 8 sets contains an SBE. SBEs are auto corrected in HW and generate an interrupt(if enabled). */ uint64_t ded_intena : 1; /**< L2 Tag ECC Double Error Detect(DED) Interrupt Enable bit. When set, allows interrupts to be reported on double bit (uncorrectable) errors from the L2 Tag Arrays. */ uint64_t sec_intena : 1; /**< L2 Tag ECC Single Error Correct(SEC) Interrupt Enable bit. When set, allows interrupts to be reported on single bit (correctable) errors from the L2 Tag Arrays. */ uint64_t ecc_ena : 1; /**< L2 Tag ECC Enable When set, enables 6-bit SEC/DED codeword for 21-bit L2 Tag Arrays [V,D,L,TAG[33:16]] */ #else uint64_t ecc_ena : 1; uint64_t sec_intena : 1; uint64_t ded_intena : 1; uint64_t sec_err : 1; uint64_t ded_err : 1; uint64_t fsyn : 6; uint64_t fadr : 9; uint64_t reserved_20_20 : 1; uint64_t fset : 2; uint64_t reserved_23_23 : 1; uint64_t lckerr : 1; uint64_t lck_intena : 1; uint64_t lckerr2 : 1; uint64_t lck_intena2 : 1; uint64_t reserved_28_63 : 36; #endif } cn31xx; struct cvmx_l2t_err_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_28_63 : 36; uint64_t lck_intena2 : 1; /**< L2 Tag Lock Error2 Interrupt Enable bit *** NOTE: PASS2 Addition */ uint64_t lckerr2 : 1; /**< HW detected a case where a Rd/Wr Miss from PP#n could not find an available/unlocked set (for replacement). Most likely, this is a result of SW mixing SET PARTITIONING with ADDRESS LOCKING. If SW allows another PP to LOCKDOWN all SETs available to PP#n, then a Rd/Wr Miss from PP#n will be unable to determine a 'valid' replacement set (since LOCKED addresses should NEVER be replaced). If such an event occurs, the HW will select the smallest available SET(specified by UMSK'x)' as the replacement set, and the address is unlocked. 
*** NOTE: PASS2 Addition */ uint64_t lck_intena : 1; /**< L2 Tag Lock Error Interrupt Enable bit */ uint64_t lckerr : 1; /**< SW attempted to LOCK DOWN the last available set of the INDEX (which is ignored by HW - but reported to SW). The LDD(L1 load-miss) for the LOCK operation is completed successfully, however the address is NOT locked. NOTE: 'Available' sets takes the L2C_SPAR*[UMSK*] into account. For example, if diagnostic PPx has UMSKx defined to only use SETs [1:0], and SET1 had been previously LOCKED, then an attempt to LOCK the last available SET0 would result in a LCKERR. (This is to ensure that at least 1 SET at each INDEX is not LOCKED for general use by other PPs). */ uint64_t fset : 3; /**< Failing L2 Tag Hit Set# (1-of-8) When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set and (FSYN != 0), the FSET specifies the failing hit-set. NOTE: During a force-hit (L2T/L2D/L2T=1), the hit-set is specified by the L2C_DBG[SET]. */ uint64_t fadr : 10; /**< Failing L2 Tag Address (10-bit Index) When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set, the FADR contains the 10bit cacheline index into the L2 Tag Store. */ uint64_t fsyn : 6; /**< When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set, the contents of this register contain the 6-bit syndrome for the hit set only. If (FSYN = 0), the SBE or DBE reported was for one of the "non-hit" sets at the failing index(FADR). NOTE: During a force-hit (L2T/L2D/L2T=1), the hit set is specified by the L2C_DBG[SET]. If (FSYN != 0), the SBE or DBE reported was for the hit set at the failing index(FADR) and failing set(FSET). SW NOTE: To determine which "non-hit" set was in error, SW can use the L2C_DBG[L2T] debug feature to explicitly read the other sets at the failing index(FADR). When (FSYN !=0), then the FSET contains the failing hit-set. NOTE: A DED Error will always overwrite a SEC Error SYNDROME and FADR). */ uint64_t ded_err : 1; /**< L2T Double Bit Error detected (DED) During every L2 Tag Probe, all 8 sets Tag's (at a given index) are checked for double bit errors(DBEs). This bit is set if ANY of the 8 sets contains a DBE. DBEs also generated an interrupt(if enabled). */ uint64_t sec_err : 1; /**< L2T Single Bit Error corrected (SEC) During every L2 Tag Probe, all 8 sets Tag's (at a given index) are checked for single bit errors(SBEs). This bit is set if ANY of the 8 sets contains an SBE. SBEs are auto corrected in HW and generate an interrupt(if enabled). */ uint64_t ded_intena : 1; /**< L2 Tag ECC Double Error Detect(DED) Interrupt Enable bit. When set, allows interrupts to be reported on double bit (uncorrectable) errors from the L2 Tag Arrays. */ uint64_t sec_intena : 1; /**< L2 Tag ECC Single Error Correct(SEC) Interrupt Enable bit. When set, allows interrupts to be reported on single bit (correctable) errors from the L2 Tag Arrays. 
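
                                                 An enable sketch (editorial addition; the
                                                 CVMX_L2T_ERR address macro is assumed from
                                                 cvmx-csr.h):

                                                   cvmx_l2t_err_t terr;
                                                   terr.u64 = cvmx_read_csr(CVMX_L2T_ERR);
                                                   terr.s.ecc_ena    = 1;  // protect tags with SEC/DED
                                                   terr.s.sec_intena = 1;  // interrupt on corrected errors
                                                   terr.s.ded_intena = 1;  // interrupt on uncorrectable errors
                                                   cvmx_write_csr(CVMX_L2T_ERR, terr.u64);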
*/ uint64_t ecc_ena : 1; /**< L2 Tag ECC Enable When set, enables 6-bit SEC/DED codeword for 20-bit L2 Tag Arrays [V,D,L,TAG[33:17]] */ #else uint64_t ecc_ena : 1; uint64_t sec_intena : 1; uint64_t ded_intena : 1; uint64_t sec_err : 1; uint64_t ded_err : 1; uint64_t fsyn : 6; uint64_t fadr : 10; uint64_t fset : 3; uint64_t lckerr : 1; uint64_t lck_intena : 1; uint64_t lckerr2 : 1; uint64_t lck_intena2 : 1; uint64_t reserved_28_63 : 36; #endif } cn38xx; struct cvmx_l2t_err_cn38xx cn38xxp2; struct cvmx_l2t_err_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_28_63 : 36; uint64_t lck_intena2 : 1; /**< L2 Tag Lock Error2 Interrupt Enable bit */ uint64_t lckerr2 : 1; /**< HW detected a case where a Rd/Wr Miss from PP#n could not find an available/unlocked set (for replacement). Most likely, this is a result of SW mixing SET PARTITIONING with ADDRESS LOCKING. If SW allows another PP to LOCKDOWN all SETs available to PP#n, then a Rd/Wr Miss from PP#n will be unable to determine a 'valid' replacement set (since LOCKED addresses should NEVER be replaced). If such an event occurs, the HW will select the smallest available SET (specified by UMSKx) as the replacement set, and the address is unlocked. */ uint64_t lck_intena : 1; /**< L2 Tag Lock Error Interrupt Enable bit */ uint64_t lckerr : 1; /**< SW attempted to LOCK DOWN the last available set of the INDEX (which is ignored by HW - but reported to SW). The LDD (L1 load-miss) for the LOCK operation completes successfully; however, the address is NOT locked. NOTE: 'Available' sets take the L2C_SPAR*[UMSK*] into account. For example, if diagnostic PPx has UMSKx defined to only use SETs [1:0], and SET1 had been previously LOCKED, then an attempt to LOCK the last available SET0 would result in a LCKERR. (This is to ensure that at least 1 SET at each INDEX is not LOCKED for general use by other PPs.) */ uint64_t fset : 3; /**< Failing L2 Tag Hit Set# (1-of-8) When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set and (FSYN != 0), the FSET specifies the failing hit-set. NOTE: During a force-hit (L2T/L2D/L2T=1), the hit-set is specified by the L2C_DBG[SET]. */ uint64_t reserved_18_20 : 3; uint64_t fadr : 7; /**< Failing L2 Tag Address (7-bit Index) When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set, the FADR contains the lower 7-bit cacheline index into the L2 Tag Store. */ uint64_t fsyn : 6; /**< When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set, the contents of this register contain the 6-bit syndrome for the hit set only. If (FSYN = 0), the SBE or DBE reported was for one of the "non-hit" sets at the failing index (FADR). NOTE: During a force-hit (L2T/L2D/L2T=1), the hit set is specified by the L2C_DBG[SET]. If (FSYN != 0), the SBE or DBE reported was for the hit set at the failing index (FADR) and failing set (FSET). SW NOTE: To determine which "non-hit" set was in error, SW can use the L2C_DBG[L2T] debug feature to explicitly read the other sets at the failing index (FADR). When (FSYN != 0), the FSET contains the failing hit-set. NOTE: A DED Error will always overwrite a SEC Error (SYNDROME and FADR). */ uint64_t ded_err : 1; /**< L2T Double Bit Error detected (DED) During every L2 Tag Probe, the Tags of all 8 sets (at a given index) are checked for double bit errors (DBEs). This bit is set if ANY of the 8 sets contains a DBE. DBEs also generate an interrupt (if enabled). */ uint64_t sec_err : 1; /**< L2T Single Bit Error corrected (SEC) During every L2 Tag Probe, the Tags of all 8 sets (at a given index) are checked for single bit errors (SBEs). This bit is set if ANY of the 8 sets contains an SBE. SBEs are auto-corrected in HW and generate an interrupt (if enabled). */ uint64_t ded_intena : 1; /**< L2 Tag ECC Double Error Detect (DED) Interrupt Enable bit. When set, allows interrupts to be reported on double bit (uncorrectable) errors from the L2 Tag Arrays. */ uint64_t sec_intena : 1; /**< L2 Tag ECC Single Error Correct (SEC) Interrupt Enable bit. When set, allows interrupts to be reported on single bit (correctable) errors from the L2 Tag Arrays. */ uint64_t ecc_ena : 1; /**< L2 Tag ECC Enable When set, enables 6-bit SEC/DED codeword for 23-bit L2 Tag Arrays [V,D,L,TAG[33:14]] */ #else uint64_t ecc_ena : 1; uint64_t sec_intena : 1; uint64_t ded_intena : 1; uint64_t sec_err : 1; uint64_t ded_err : 1; uint64_t fsyn : 6; uint64_t fadr : 7; uint64_t reserved_18_20 : 3; uint64_t fset : 3; uint64_t lckerr : 1; uint64_t lck_intena : 1; uint64_t lckerr2 : 1; uint64_t lck_intena2 : 1; uint64_t reserved_28_63 : 36; #endif } cn50xx; struct cvmx_l2t_err_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_28_63 : 36; uint64_t lck_intena2 : 1; /**< L2 Tag Lock Error2 Interrupt Enable bit */ uint64_t lckerr2 : 1; /**< HW detected a case where a Rd/Wr Miss from PP#n could not find an available/unlocked set (for replacement). Most likely, this is a result of SW mixing SET PARTITIONING with ADDRESS LOCKING. If SW allows another PP to LOCKDOWN all SETs available to PP#n, then a Rd/Wr Miss from PP#n will be unable to determine a 'valid' replacement set (since LOCKED addresses should NEVER be replaced). If such an event occurs, the HW will select the smallest available SET (specified by UMSKx) as the replacement set, and the address is unlocked. */ uint64_t lck_intena : 1; /**< L2 Tag Lock Error Interrupt Enable bit */ uint64_t lckerr : 1; /**< SW attempted to LOCK DOWN the last available set of the INDEX (which is ignored by HW - but reported to SW). The LDD (L1 load-miss) for the LOCK operation completes successfully; however, the address is NOT locked. NOTE: 'Available' sets take the L2C_SPAR*[UMSK*] into account. For example, if diagnostic PPx has UMSKx defined to only use SETs [1:0], and SET1 had been previously LOCKED, then an attempt to LOCK the last available SET0 would result in a LCKERR. (This is to ensure that at least 1 SET at each INDEX is not LOCKED for general use by other PPs.) */ uint64_t fset : 3; /**< Failing L2 Tag Hit Set# (1-of-8) When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set and (FSYN != 0), the FSET specifies the failing hit-set. NOTE: During a force-hit (L2T/L2D/L2T=1), the hit-set is specified by the L2C_DBG[SET]. */ uint64_t reserved_20_20 : 1; uint64_t fadr : 9; /**< Failing L2 Tag Address (9-bit Index) When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set, the FADR contains the lower 9-bit cacheline index into the L2 Tag Store. */ uint64_t fsyn : 6; /**< When L2T_ERR[SEC_ERR] or L2T_ERR[DED_ERR] are set, the contents of this register contain the 6-bit syndrome for the hit set only. If (FSYN = 0), the SBE or DBE reported was for one of the "non-hit" sets at the failing index (FADR). NOTE: During a force-hit (L2T/L2D/L2T=1), the hit set is specified by the L2C_DBG[SET]. If (FSYN != 0), the SBE or DBE reported was for the hit set at the failing index (FADR) and failing set (FSET). SW NOTE: To determine which "non-hit" set was in error, SW can use the L2C_DBG[L2T] debug feature to explicitly read the other sets at the failing index (FADR). When (FSYN != 0), the FSET contains the failing hit-set. NOTE: A DED Error will always overwrite a SEC Error (SYNDROME and FADR). */ uint64_t ded_err : 1; /**< L2T Double Bit Error detected (DED) During every L2 Tag Probe, the Tags of all 8 sets (at a given index) are checked for double bit errors (DBEs). This bit is set if ANY of the 8 sets contains a DBE. DBEs also generate an interrupt (if enabled). */ uint64_t sec_err : 1; /**< L2T Single Bit Error corrected (SEC) During every L2 Tag Probe, the Tags of all 8 sets (at a given index) are checked for single bit errors (SBEs). This bit is set if ANY of the 8 sets contains an SBE. SBEs are auto-corrected in HW and generate an interrupt (if enabled). */ uint64_t ded_intena : 1; /**< L2 Tag ECC Double Error Detect (DED) Interrupt Enable bit. When set, allows interrupts to be reported on double bit (uncorrectable) errors from the L2 Tag Arrays. */ uint64_t sec_intena : 1; /**< L2 Tag ECC Single Error Correct (SEC) Interrupt Enable bit. When set, allows interrupts to be reported on single bit (correctable) errors from the L2 Tag Arrays. */ uint64_t ecc_ena : 1; /**< L2 Tag ECC Enable When set, enables 6-bit SEC/DED codeword for 21-bit L2 Tag Arrays [V,D,L,TAG[33:16]] */ #else uint64_t ecc_ena : 1; uint64_t sec_intena : 1; uint64_t ded_intena : 1; uint64_t sec_err : 1; uint64_t ded_err : 1; uint64_t fsyn : 6; uint64_t fadr : 9; uint64_t reserved_20_20 : 1; uint64_t fset : 3; uint64_t lckerr : 1; uint64_t lck_intena : 1; uint64_t lckerr2 : 1; uint64_t lck_intena2 : 1; uint64_t reserved_28_63 : 36; #endif } cn52xx; struct cvmx_l2t_err_cn52xx cn52xxp1; struct cvmx_l2t_err_s cn56xx; struct cvmx_l2t_err_s cn56xxp1; struct cvmx_l2t_err_s cn58xx; struct cvmx_l2t_err_s cn58xxp1; } cvmx_l2t_err_t; /** * cvmx_led_blink * * LED_BLINK = LED Blink Rate (in led_clks) * */ typedef union { uint64_t u64; struct cvmx_led_blink_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t rate : 8; /**< LED Blink rate in led_latch clks. RATE must be > 0 */ #else uint64_t rate : 8; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_led_blink_s cn38xx; struct cvmx_led_blink_s cn38xxp2; struct cvmx_led_blink_s cn56xx; struct cvmx_led_blink_s cn56xxp1; struct cvmx_led_blink_s cn58xx; struct cvmx_led_blink_s cn58xxp1; } cvmx_led_blink_t; /** * cvmx_led_clk_phase * * LED_CLK_PHASE = LED Clock Phase (in 64 eclks) * * * Notes: * Example: * Given a 2ns eclk, an LED_CLK_PHASE[PHASE] = 1 indicates that each * led_clk phase is 64 eclks, or 128ns. The led_clk period is 2*phase, * or 256ns, which is 3.9MHz. The default value of 4 yields an led_clk * period of 64*4*2ns*2 = 1024ns or ~1MHz (977KHz).
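*
* The helper below is an illustrative sketch only (it is not part of the
* auto-generated definitions, and the name is hypothetical): it evaluates
* the relationship described above, period = 2 * PHASE * 64 * eclk_period.
*/

static inline uint64_t led_clk_period_ps(uint64_t phase, uint64_t eclk_period_ps)
{
    /* Each led_clk phase lasts PHASE*64 eclks; a full led_clk period is
       two phases (high + low). */
    return 2ull * 64ull * phase * eclk_period_ps;
}

/* Example: PHASE=4 with a 2ns (2000ps) eclk gives 1024000ps = 1024ns (~977KHz).
   The auto-generated definition of LED_CLK_PHASE resumes below.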
*/ typedef union { uint64_t u64; struct cvmx_led_clk_phase_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_7_63 : 57; uint64_t phase : 7; /**< Number of 64 eclks in order to create the led_clk */ #else uint64_t phase : 7; uint64_t reserved_7_63 : 57; #endif } s; struct cvmx_led_clk_phase_s cn38xx; struct cvmx_led_clk_phase_s cn38xxp2; struct cvmx_led_clk_phase_s cn56xx; struct cvmx_led_clk_phase_s cn56xxp1; struct cvmx_led_clk_phase_s cn58xx; struct cvmx_led_clk_phase_s cn58xxp1; } cvmx_led_clk_phase_t; /** * cvmx_led_cylon * * LED_CYLON = LED CYLON Effect (should remain undocumented) * */ typedef union { uint64_t u64; struct cvmx_led_cylon_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t rate : 16; /**< LED Cylon Effect when RATE!=0 Changes at RATE*LATCH period */ #else uint64_t rate : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_led_cylon_s cn38xx; struct cvmx_led_cylon_s cn38xxp2; struct cvmx_led_cylon_s cn56xx; struct cvmx_led_cylon_s cn56xxp1; struct cvmx_led_cylon_s cn58xx; struct cvmx_led_cylon_s cn58xxp1; } cvmx_led_cylon_t; /** * cvmx_led_dbg * * LED_DBG = LED Debug Port information * */ typedef union { uint64_t u64; struct cvmx_led_dbg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t dbg_en : 1; /**< Add Debug Port Data to the LED shift chain Debug Data is shifted out LSB to MSB */ #else uint64_t dbg_en : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_led_dbg_s cn38xx; struct cvmx_led_dbg_s cn38xxp2; struct cvmx_led_dbg_s cn56xx; struct cvmx_led_dbg_s cn56xxp1; struct cvmx_led_dbg_s cn58xx; struct cvmx_led_dbg_s cn58xxp1; } cvmx_led_dbg_t; /** * cvmx_led_en * * LED_EN = LED Interface Enable * * * Notes: * The LED interface consists of a shift chain with a parallel latch. LED * data is shifted out on each falling edge of led_clk and then captured by * led_lat. * * The LED shift chain consists of the following... * * 32 - UDD header * 6x8 - per port status * 17 - debug port * 32 - UDD trailer * * for a total of 129 bits. * * UDD header is programmable from 0-32 bits (LED_UDD_CNT0) and will shift out * LSB to MSB (LED_UDD_DAT0[0], LED_UDD_DAT0[1], * ... LED_UDD_DAT0[LED_UDD_CNT0-1]). * * The per port status is also variable. Systems can control which ports send * data (LED_PRT) as well as the status content (LED_PRT_FMT and * LED_PRT_STATUS*). When multiple ports are enabled, they come out in lowest * port to highest port (prt0, prt1, ...). * * The debug port data can also be added to the LED chain (LED_DBG). When * enabled, the debug data shifts out LSB to MSB. * * The UDD trailer data is identical to the header data, but uses LED_UDD_CNT1 * and LED_UDD_DAT1.
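*
* As an illustrative aside (a hypothetical helper, not part of this header):
* the chain length implied by the breakdown above is simply the sum of the
* programmed pieces, with a maximum of 32+48+17+32 = 129 bits.
*/

static inline unsigned led_chain_bits(unsigned udd_cnt0, unsigned nports,
                                      unsigned bits_per_port, int dbg_en,
                                      unsigned udd_cnt1)
{
    /* UDD header + per-port status (width set by LED_PRT_FMT, up to 6 bits
       per port) + the fixed 17-bit debug port when LED_DBG[DBG_EN]=1 +
       UDD trailer. */
    return udd_cnt0 + nports * bits_per_port + (dbg_en ? 17u : 0u) + udd_cnt1;
}

/* The auto-generated definition of LED_EN resumes below.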
*/ typedef union { uint64_t u64; struct cvmx_led_en_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t en : 1; /**< Enable the LED interface shift-chain */ #else uint64_t en : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_led_en_s cn38xx; struct cvmx_led_en_s cn38xxp2; struct cvmx_led_en_s cn56xx; struct cvmx_led_en_s cn56xxp1; struct cvmx_led_en_s cn58xx; struct cvmx_led_en_s cn58xxp1; } cvmx_led_en_t; /** * cvmx_led_polarity * * LED_POLARITY = LED Polarity * */ typedef union { uint64_t u64; struct cvmx_led_polarity_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t polarity : 1; /**< LED active polarity 0 = active HIGH LED 1 = active LOW LED (invert led_dat) */ #else uint64_t polarity : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_led_polarity_s cn38xx; struct cvmx_led_polarity_s cn38xxp2; struct cvmx_led_polarity_s cn56xx; struct cvmx_led_polarity_s cn56xxp1; struct cvmx_led_polarity_s cn58xx; struct cvmx_led_polarity_s cn58xxp1; } cvmx_led_polarity_t; /** * cvmx_led_prt * * LED_PRT = LED Port status information * * * Notes: * The PRT vector enables status information for the 8 RGMII ports connected to * Octane. It does not reflect the actual programmed PHY addresses. */ typedef union { uint64_t u64; struct cvmx_led_prt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t prt_en : 8; /**< Which ports are enabled to display status PRT_EN<3:0> corresponds to RGMII ports 3-0 on int0 PRT_EN<7:4> corresponds to RGMII ports 7-4 on int1 Only applies when interface is in RGMII mode The status format is defined by LED_PRT_FMT */ #else uint64_t prt_en : 8; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_led_prt_s cn38xx; struct cvmx_led_prt_s cn38xxp2; struct cvmx_led_prt_s cn56xx; struct cvmx_led_prt_s cn56xxp1; struct cvmx_led_prt_s cn58xx; struct cvmx_led_prt_s cn58xxp1; } cvmx_led_prt_t; /** * cvmx_led_prt_fmt * * LED_PRT_FMT = LED Port Status Information Format * * * Notes: * TX: RGMII TX block is sending packet data or extends on the port * RX: RGMII RX block has received a non-idle cycle * * For short transfers, LEDs will remain on for at least one blink cycle */ typedef union { uint64_t u64; struct cvmx_led_prt_fmt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t format : 4; /**< Port Status Information for each enabled port in LED_PRT.
The formats are below: 0x0: [ LED_PRT_STATUS[0] ] 0x1: [ LED_PRT_STATUS[1:0] ] 0x2: [ LED_PRT_STATUS[3:0] ] 0x3: [ LED_PRT_STATUS[5:0] ] 0x4: [ (RX|TX), LED_PRT_STATUS[0] ] 0x5: [ (RX|TX), LED_PRT_STATUS[1:0] ] 0x6: [ (RX|TX), LED_PRT_STATUS[3:0] ] 0x8: [ TX, RX, LED_PRT_STATUS[0] ] 0x9: [ TX, RX, LED_PRT_STATUS[1:0] ] 0xa: [ TX, RX, LED_PRT_STATUS[3:0] ] */ #else uint64_t format : 4; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_led_prt_fmt_s cn38xx; struct cvmx_led_prt_fmt_s cn38xxp2; struct cvmx_led_prt_fmt_s cn56xx; struct cvmx_led_prt_fmt_s cn56xxp1; struct cvmx_led_prt_fmt_s cn58xx; struct cvmx_led_prt_fmt_s cn58xxp1; } cvmx_led_prt_fmt_t; /** * cvmx_led_prt_status# * * LED_PRT_STATUS = LED Port Status information * */ typedef union { uint64_t u64; struct cvmx_led_prt_statusx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_6_63 : 58; uint64_t status : 6; /**< Bits that software can set to be added to the LED shift chain - depending on LED_PRT_FMT LED_PRT_STATUS(3..0) correspond to RGMII ports 3-0 on interface0 LED_PRT_STATUS(7..4) correspond to RGMII ports 7-4 on interface1 Only applies when interface is in RGMII mode */ #else uint64_t status : 6; uint64_t reserved_6_63 : 58; #endif } s; struct cvmx_led_prt_statusx_s cn38xx; struct cvmx_led_prt_statusx_s cn38xxp2; struct cvmx_led_prt_statusx_s cn56xx; struct cvmx_led_prt_statusx_s cn56xxp1; struct cvmx_led_prt_statusx_s cn58xx; struct cvmx_led_prt_statusx_s cn58xxp1; } cvmx_led_prt_statusx_t; /** * cvmx_led_udd_cnt# * * LED_UDD_CNT = LED UDD Counts * */ typedef union { uint64_t u64; struct cvmx_led_udd_cntx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_6_63 : 58; uint64_t cnt : 6; /**< Number of bits of user-defined data to include in the LED shift chain. Legal values: 0-32. */ #else uint64_t cnt : 6; uint64_t reserved_6_63 : 58; #endif } s; struct cvmx_led_udd_cntx_s cn38xx; struct cvmx_led_udd_cntx_s cn38xxp2; struct cvmx_led_udd_cntx_s cn56xx; struct cvmx_led_udd_cntx_s cn56xxp1; struct cvmx_led_udd_cntx_s cn58xx; struct cvmx_led_udd_cntx_s cn58xxp1; } cvmx_led_udd_cntx_t; /** * cvmx_led_udd_dat# * * LED_UDD_DAT = User defined data (header or trailer) * * * Notes: * Bits come out LSB to MSB on the shift chain. If LED_UDD_CNT is set to 4 * then the bits come out LED_UDD_DAT[0], LED_UDD_DAT[1], LED_UDD_DAT[2], * LED_UDD_DAT[3]. */ typedef union { uint64_t u64; struct cvmx_led_udd_datx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t dat : 32; /**< Header or trailer UDD data to be displayed on the LED shift chain. Number of bits to include is controlled by LED_UDD_CNT */ #else uint64_t dat : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_led_udd_datx_s cn38xx; struct cvmx_led_udd_datx_s cn38xxp2; struct cvmx_led_udd_datx_s cn56xx; struct cvmx_led_udd_datx_s cn56xxp1; struct cvmx_led_udd_datx_s cn58xx; struct cvmx_led_udd_datx_s cn58xxp1; } cvmx_led_udd_datx_t; /** * cvmx_led_udd_dat_clr# * * LED_UDD_DAT_CLR = User defined data (header or trailer) * */ typedef union { uint64_t u64; struct cvmx_led_udd_dat_clrx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t clr : 32; /**< Bitwise clear for the Header or trailer UDD data to be displayed on the LED shift chain.
*/ #else uint64_t clr : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_led_udd_dat_clrx_s cn38xx; struct cvmx_led_udd_dat_clrx_s cn38xxp2; struct cvmx_led_udd_dat_clrx_s cn56xx; struct cvmx_led_udd_dat_clrx_s cn56xxp1; struct cvmx_led_udd_dat_clrx_s cn58xx; struct cvmx_led_udd_dat_clrx_s cn58xxp1; } cvmx_led_udd_dat_clrx_t; /** * cvmx_led_udd_dat_set# * * LED_UDD_DAT_SET = User defined data (header or trailer) * */ typedef union { uint64_t u64; struct cvmx_led_udd_dat_setx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t set : 32; /**< Bitwise set for the Header or trailer UDD data to be displayed on the LED shift chain. */ #else uint64_t set : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_led_udd_dat_setx_s cn38xx; struct cvmx_led_udd_dat_setx_s cn38xxp2; struct cvmx_led_udd_dat_setx_s cn56xx; struct cvmx_led_udd_dat_setx_s cn56xxp1; struct cvmx_led_udd_dat_setx_s cn58xx; struct cvmx_led_udd_dat_setx_s cn58xxp1; } cvmx_led_udd_dat_setx_t; /** * cvmx_lmc#_bist_ctl * * Notes: * This controls BiST only for the memories that operate on DCLK. The normal, chip-wide BiST flow * controls BiST for the memories that operate on ECLK. */ typedef union { uint64_t u64; struct cvmx_lmcx_bist_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t start : 1; /**< A 0->1 transition causes BiST to run. */ #else uint64_t start : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_lmcx_bist_ctl_s cn50xx; struct cvmx_lmcx_bist_ctl_s cn52xx; struct cvmx_lmcx_bist_ctl_s cn52xxp1; struct cvmx_lmcx_bist_ctl_s cn56xx; struct cvmx_lmcx_bist_ctl_s cn56xxp1; } cvmx_lmcx_bist_ctl_t; /** * cvmx_lmc#_bist_result * * Notes: * Access to the internal BiST results * Each bit is the BiST result of an individual memory (per bit, 0=pass and 1=fail). 
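*
* A minimal usage sketch (illustrative; it assumes the Octeon SDK's
* cvmx_read_csr()/cvmx_write_csr() helpers and the CVMX_LMCX_BIST_*
* address macros, all of which live outside this header):
*/

static inline int lmcx_dclk_bist_failed(int lmc)
{
    /* LMC#_BIST_CTL[START] must see a 0->1 transition to launch BiST. */
    cvmx_write_csr(CVMX_LMCX_BIST_CTL(lmc), 0);
    cvmx_write_csr(CVMX_LMCX_BIST_CTL(lmc), 1);
    /* A real caller would wait for BiST to complete before reading the
       result; any bit set means the corresponding memory failed. */
    return cvmx_read_csr(CVMX_LMCX_BIST_RESULT(lmc)) != 0;
}

/* The auto-generated definition of LMC#_BIST_RESULT resumes below.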
*/ typedef union { uint64_t u64; struct cvmx_lmcx_bist_result_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_11_63 : 53; uint64_t csrd2e : 1; /**< BiST result of CSRD2E memory (0=pass, !0=fail) */ uint64_t csre2d : 1; /**< BiST result of CSRE2D memory (0=pass, !0=fail) */ uint64_t mwf : 1; /**< BiST result of MWF memories (0=pass, !0=fail) */ uint64_t mwd : 3; /**< BiST result of MWD memories (0=pass, !0=fail) */ uint64_t mwc : 1; /**< BiST result of MWC memories (0=pass, !0=fail) */ uint64_t mrf : 1; /**< BiST result of MRF memories (0=pass, !0=fail) */ uint64_t mrd : 3; /**< BiST result of MRD memories (0=pass, !0=fail) */ #else uint64_t mrd : 3; uint64_t mrf : 1; uint64_t mwc : 1; uint64_t mwd : 3; uint64_t mwf : 1; uint64_t csre2d : 1; uint64_t csrd2e : 1; uint64_t reserved_11_63 : 53; #endif } s; struct cvmx_lmcx_bist_result_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_9_63 : 55; uint64_t mwf : 1; /**< BiST result of MWF memories (0=pass, !0=fail) */ uint64_t mwd : 3; /**< BiST result of MWD memories (0=pass, !0=fail) */ uint64_t mwc : 1; /**< BiST result of MWC memories (0=pass, !0=fail) */ uint64_t mrf : 1; /**< BiST result of MRF memories (0=pass, !0=fail) */ uint64_t mrd : 3; /**< BiST result of MRD memories (0=pass, !0=fail) */ #else uint64_t mrd : 3; uint64_t mrf : 1; uint64_t mwc : 1; uint64_t mwd : 3; uint64_t mwf : 1; uint64_t reserved_9_63 : 55; #endif } cn50xx; struct cvmx_lmcx_bist_result_s cn52xx; struct cvmx_lmcx_bist_result_s cn52xxp1; struct cvmx_lmcx_bist_result_s cn56xx; struct cvmx_lmcx_bist_result_s cn56xxp1; } cvmx_lmcx_bist_result_t; /** * cvmx_lmc#_comp_ctl * * LMC_COMP_CTL = LMC Compensation control * */ typedef union { uint64_t u64; struct cvmx_lmcx_comp_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t nctl_csr : 4; /**< Compensation control bits */ uint64_t nctl_clk : 4; /**< Compensation control bits */ uint64_t nctl_cmd : 4; /**< Compensation control bits */ uint64_t nctl_dat : 4; /**< Compensation control bits */ uint64_t pctl_csr : 4; /**< Compensation control bits */ uint64_t pctl_clk : 4; /**< Compensation control bits */ uint64_t reserved_0_7 : 8; #else uint64_t reserved_0_7 : 8; uint64_t pctl_clk : 4; uint64_t pctl_csr : 4; uint64_t nctl_dat : 4; uint64_t nctl_cmd : 4; uint64_t nctl_clk : 4; uint64_t nctl_csr : 4; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_lmcx_comp_ctl_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t nctl_csr : 4; /**< Compensation control bits */ uint64_t nctl_clk : 4; /**< Compensation control bits */ uint64_t nctl_cmd : 4; /**< Compensation control bits */ uint64_t nctl_dat : 4; /**< Compensation control bits */ uint64_t pctl_csr : 4; /**< Compensation control bits */ uint64_t pctl_clk : 4; /**< Compensation control bits */ uint64_t pctl_cmd : 4; /**< Compensation control bits */ uint64_t pctl_dat : 4; /**< Compensation control bits */ #else uint64_t pctl_dat : 4; uint64_t pctl_cmd : 4; uint64_t pctl_clk : 4; uint64_t pctl_csr : 4; uint64_t nctl_dat : 4; uint64_t nctl_cmd : 4; uint64_t nctl_clk : 4; uint64_t nctl_csr : 4; uint64_t reserved_32_63 : 32; #endif } cn30xx; struct cvmx_lmcx_comp_ctl_cn30xx cn31xx; struct cvmx_lmcx_comp_ctl_cn30xx cn38xx; struct cvmx_lmcx_comp_ctl_cn30xx cn38xxp2; struct cvmx_lmcx_comp_ctl_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t nctl_csr : 4; /**< Compensation control bits */ uint64_t reserved_20_27 : 8; uint64_t nctl_dat : 4; /**< Compensation control bits */ uint64_t 
pctl_csr : 4; /**< Compensation control bits */ uint64_t reserved_5_11 : 7; uint64_t pctl_dat : 5; /**< Compensation control bits */ #else uint64_t pctl_dat : 5; uint64_t reserved_5_11 : 7; uint64_t pctl_csr : 4; uint64_t nctl_dat : 4; uint64_t reserved_20_27 : 8; uint64_t nctl_csr : 4; uint64_t reserved_32_63 : 32; #endif } cn50xx; struct cvmx_lmcx_comp_ctl_cn50xx cn52xx; struct cvmx_lmcx_comp_ctl_cn50xx cn52xxp1; struct cvmx_lmcx_comp_ctl_cn50xx cn56xx; struct cvmx_lmcx_comp_ctl_cn50xx cn56xxp1; struct cvmx_lmcx_comp_ctl_cn50xx cn58xx; struct cvmx_lmcx_comp_ctl_cn58xxp1 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t nctl_csr : 4; /**< Compensation control bits */ uint64_t reserved_20_27 : 8; uint64_t nctl_dat : 4; /**< Compensation control bits */ uint64_t pctl_csr : 4; /**< Compensation control bits */ uint64_t reserved_4_11 : 8; uint64_t pctl_dat : 4; /**< Compensation control bits */ #else uint64_t pctl_dat : 4; uint64_t reserved_4_11 : 8; uint64_t pctl_csr : 4; uint64_t nctl_dat : 4; uint64_t reserved_20_27 : 8; uint64_t nctl_csr : 4; uint64_t reserved_32_63 : 32; #endif } cn58xxp1; } cvmx_lmcx_comp_ctl_t; /** * cvmx_lmc#_ctl * * LMC_CTL = LMC Control * This register is an assortment of various control fields needed by the memory controller */ typedef union { uint64_t u64; struct cvmx_lmcx_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t ddr__nctl : 4; /**< DDR nctl from compensation circuit The encoded value on this will adjust the drive strength of the DDR DQ pulldns. */ uint64_t ddr__pctl : 4; /**< DDR pctl from compensation circuit The encoded value on this will adjust the drive strength of the DDR DQ pullup. */ uint64_t slow_scf : 1; /**< Should be cleared to zero */ uint64_t xor_bank : 1; /**< If (XOR_BANK == 1), then bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5] else bank[n:0]=address[n+7:7] where n=1 for a 4 bank part and n=2 for an 8 bank part */ uint64_t max_write_batch : 4; /**< Maximum number of consecutive writes to service before allowing reads to interrupt. */ uint64_t pll_div2 : 1; /**< PLL Div2. */ uint64_t pll_bypass : 1; /**< PLL Bypass. */ uint64_t rdimm_ena : 1; /**< Registered DIMM Enable - When set allows the use of JEDEC Registered DIMMs which require Write data to be registered in the controller. */ uint64_t r2r_slot : 1; /**< R2R Slot Enable: When set, all read-to-read trans will slot an additional 1 cycle data bus bubble to avoid DQ/DQS bus contention. This is only a CYA bit, in case the "built-in" DIMM and RANK crossing logic which should auto-detect and perfectly slot read-to-reads to the same DIMM/RANK. */ uint64_t inorder_mwf : 1; /**< Reads as zero */ uint64_t inorder_mrf : 1; /**< Always clear to zero */ uint64_t reserved_10_11 : 2; uint64_t fprch2 : 1; /**< Front Porch Enable: When set, the turn-off time for the DDR_DQ/DQS drivers is 1 dclk earlier. This bit should typically be set. */ uint64_t bprch : 1; /**< Back Porch Enable: When set, the turn-on time for the DDR_DQ/DQS drivers is delayed an additional DCLK cycle. This should be set to one whenever both SILO_HC and SILO_QC are set. */ uint64_t sil_lat : 2; /**< SILO Latency: On reads, determines how many additional dclks to wait (on top of TCL+1+TSKW) before pulling data out of the pad silos. - 00: illegal - 01: 1 dclks - 10: 2 dclks - 11: illegal This should always be set to 1. 
*/ uint64_t tskw : 2; /**< This component is a representation of total BOARD DELAY on DQ (used in the controller to determine the R->W spacing to avoid DQS/DQ bus conflicts). Enter the largest of the per byte Board delay - 00: 0 dclk - 01: 1 dclks - 10: 2 dclks - 11: 3 dclks */ uint64_t qs_dic : 2; /**< DDR2 Termination Resistor Setting A non Zero value in this register enables the On Die Termination (ODT) in DDR parts. These two bits are loaded into the RTT portion of the EMRS register bits A6 & A2. If DDR2's termination (for the memory's DQ/DQS/DM pads) is not desired, set it to 00. If it is, chose between 01 for 75 ohm and 10 for 150 ohm termination. 00 = ODT Disabled 01 = 75 ohm Termination 10 = 150 ohm Termination 11 = 50 ohm Termination Octeon, on writes, by default, drives the 4/8 ODT pins (64/128b mode) based on what the masks (LMC_WODT_CTL) are programmed to. LMC_DDR2_CTL->ODT_ENA enables Octeon to drive ODT pins for READS. LMC_RODT_CTL needs to be programmed based on the system's needs for ODT. */ uint64_t dic : 2; /**< Drive Strength Control: DIC[0] is loaded into the Extended Mode Register (EMRS) A1 bit during initialization. 0 = Normal 1 = Reduced DIC[1] is used to load into EMRS bit 10 - DQSN Enable/Disable field. By default, we program the DDR's to drive the DQSN also. Set it to 1 if DQSN should be Hi-Z. 0 - DQSN Enable 1 - DQSN Disable */ #else uint64_t dic : 2; uint64_t qs_dic : 2; uint64_t tskw : 2; uint64_t sil_lat : 2; uint64_t bprch : 1; uint64_t fprch2 : 1; uint64_t reserved_10_11 : 2; uint64_t inorder_mrf : 1; uint64_t inorder_mwf : 1; uint64_t r2r_slot : 1; uint64_t rdimm_ena : 1; uint64_t pll_bypass : 1; uint64_t pll_div2 : 1; uint64_t max_write_batch : 4; uint64_t xor_bank : 1; uint64_t slow_scf : 1; uint64_t ddr__pctl : 4; uint64_t ddr__nctl : 4; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_lmcx_ctl_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t ddr__nctl : 4; /**< DDR nctl from compensation circuit The encoded value on this will adjust the drive strength of the DDR DQ pulldns. */ uint64_t ddr__pctl : 4; /**< DDR pctl from compensation circuit The encoded value on this will adjust the drive strength of the DDR DQ pullup. */ uint64_t slow_scf : 1; /**< 1=SCF has pass1 latency, 0=SCF has 1 cycle lower latency when compared to pass1 */ uint64_t xor_bank : 1; /**< If (XOR_BANK == 1), then bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5] else bank[n:0]=address[n+7:7] where n=1 for a 4 bank part and n=2 for an 8 bank part */ uint64_t max_write_batch : 4; /**< Maximum number of consecutive writes to service before allowing reads to interrupt. */ uint64_t pll_div2 : 1; /**< PLL Div2. */ uint64_t pll_bypass : 1; /**< PLL Bypass. */ uint64_t rdimm_ena : 1; /**< Registered DIMM Enable - When set allows the use of JEDEC Registered DIMMs which require Write data to be registered in the controller. */ uint64_t r2r_slot : 1; /**< R2R Slot Enable: When set, all read-to-read trans will slot an additional 1 cycle data bus bubble to avoid DQ/DQS bus contention. This is only a CYA bit, in case the "built-in" DIMM and RANK crossing logic which should auto-detect and perfectly slot read-to-reads to the same DIMM/RANK. */ uint64_t inorder_mwf : 1; /**< Reads as zero */ uint64_t inorder_mrf : 1; /**< Always set to zero */ uint64_t dreset : 1; /**< Dclk domain reset. The reset signal that is used by the Dclk domain is (DRESET || ECLK_RESET). */ uint64_t mode32b : 1; /**< 32b data Path Mode Set to 1 if we use only 32 DQ pins 0 for 16b DQ mode. 
*/ uint64_t fprch2 : 1; /**< Front Porch Enable: When set, the turn-off time for the DDR_DQ/DQS drivers is 1 dclk earlier. This bit should typically be set. */ uint64_t bprch : 1; /**< Back Porch Enable: When set, the turn-on time for the DDR_DQ/DQS drivers is delayed an additional DCLK cycle. This should be set to one whenever both SILO_HC and SILO_QC are set. */ uint64_t sil_lat : 2; /**< SILO Latency: On reads, determines how many additional dclks to wait (on top of TCL+1+TSKW) before pulling data out of the pad silos. - 00: illegal - 01: 1 dclks - 10: 2 dclks - 11: illegal This should always be set to 1. */ uint64_t tskw : 2; /**< This component is a representation of total BOARD DELAY on DQ (used in the controller to determine the R->W spacing to avoid DQS/DQ bus conflicts). Enter the largest of the per byte Board delay - 00: 0 dclk - 01: 1 dclks - 10: 2 dclks - 11: 3 dclks */ uint64_t qs_dic : 2; /**< QS Drive Strength Control (DDR1): & DDR2 Termination Resistor Setting When in DDR2, a non Zero value in this register enables the On Die Termination (ODT) in DDR parts. These two bits are loaded into the RTT portion of the EMRS register bits A6 & A2. If DDR2's termination (for the memory's DQ/DQS/DM pads) is not desired, set it to 00. If it is, chose between 01 for 75 ohm and 10 for 150 ohm termination. 00 = ODT Disabled 01 = 75 ohm Termination 10 = 150 ohm Termination 11 = 50 ohm Termination Octeon, on writes, by default, drives the 8 ODT pins based on what the masks (LMC_WODT_CTL1 & 2) are programmed to. LMC_DDR2_CTL->ODT_ENA enables Octeon to drive ODT pins for READS. LMC_RODT_CTL needs to be programmed based on the system's needs for ODT. */ uint64_t dic : 2; /**< Drive Strength Control: For DDR-I/II Mode, DIC[0] is loaded into the Extended Mode Register (EMRS) A1 bit during initialization. (see DDR-I data sheet EMRS description) 0 = Normal 1 = Reduced For DDR-II Mode, DIC[1] is used to load into EMRS bit 10 - DQSN Enable/Disable field. By default, we program the DDR's to drive the DQSN also. Set it to 1 if DQSN should be Hi-Z. 0 - DQSN Enable 1 - DQSN Disable */ #else uint64_t dic : 2; uint64_t qs_dic : 2; uint64_t tskw : 2; uint64_t sil_lat : 2; uint64_t bprch : 1; uint64_t fprch2 : 1; uint64_t mode32b : 1; uint64_t dreset : 1; uint64_t inorder_mrf : 1; uint64_t inorder_mwf : 1; uint64_t r2r_slot : 1; uint64_t rdimm_ena : 1; uint64_t pll_bypass : 1; uint64_t pll_div2 : 1; uint64_t max_write_batch : 4; uint64_t xor_bank : 1; uint64_t slow_scf : 1; uint64_t ddr__pctl : 4; uint64_t ddr__nctl : 4; uint64_t reserved_32_63 : 32; #endif } cn30xx; struct cvmx_lmcx_ctl_cn30xx cn31xx; struct cvmx_lmcx_ctl_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t ddr__nctl : 4; /**< DDR nctl from compensation circuit The encoded value on this will adjust the drive strength of the DDR DQ pulldns. */ uint64_t ddr__pctl : 4; /**< DDR pctl from compensation circuit The encoded value on this will adjust the drive strength of the DDR DQ pullup. */ uint64_t slow_scf : 1; /**< 1=SCF has pass1 latency, 0=SCF has 1 cycle lower latency when compared to pass1 NOTE - This bit has NO effect in PASS1 */ uint64_t xor_bank : 1; /**< If (XOR_BANK == 1), then bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5] else bank[n:0]=address[n+7:7] where n=1 for a 4 bank part and n=2 for an 8 bank part */ uint64_t max_write_batch : 4; /**< Maximum number of consecutive writes to service before allowing reads to interrupt. 
*/ uint64_t reserved_16_17 : 2; uint64_t rdimm_ena : 1; /**< Registered DIMM Enable - When set allows the use of JEDEC Registered DIMMs which require Write data to be registered in the controller. */ uint64_t r2r_slot : 1; /**< R2R Slot Enable: When set, all read-to-read trans will slot an additional 1 cycle data bus bubble to avoid DQ/DQS bus contention. This is only a CYA bit, in case the "built-in" DIMM and RANK crossing logic which should auto-detect and perfectly slot read-to-reads to the same DIMM/RANK. */ uint64_t inorder_mwf : 1; /**< When set, forces LMC_MWF (writes) into strict, in-order mode. When clear, writes may be serviced out of order (optimized to keep multiple banks active). This bit is ONLY to be set at power-on and should not be set for normal use. NOTE: For PASS1, set as follows: DDR-I -> 1 DDR-II -> 0 For Pass2, this bit is RA0, write ignore (this feature is permanently disabled) */ uint64_t inorder_mrf : 1; /**< When set, forces LMC_MRF (reads) into strict, in-order mode. When clear, reads may be serviced out of order (optimized to keep multiple banks active). This bit is ONLY to be set at power-on and should not be set for normal use. NOTE: For PASS1, set as follows: DDR-I -> 1 DDR-II -> 0 For Pass2, this bit should be written ZERO for DDR I & II */ uint64_t set_zero : 1; /**< Reserved. Always Set this Bit to Zero */ uint64_t mode128b : 1; /**< 128b data Path Mode Set to 1 if we use all 128 DQ pins 0 for 64b DQ mode. */ uint64_t fprch2 : 1; /**< Front Porch Enable: When set, the turn-off time for the DDR_DQ/DQS drivers is 1 dclk earlier. This bit should typically be set. */ uint64_t bprch : 1; /**< Back Porch Enable: When set, the turn-on time for the DDR_DQ/DQS drivers is delayed an additional DCLK cycle. This should be set to one whenever both SILO_HC and SILO_QC are set. */ uint64_t sil_lat : 2; /**< SILO Latency: On reads, determines how many additional dclks to wait (on top of TCL+1+TSKW) before pulling data out of the pad silos. - 00: illegal - 01: 1 dclks - 10: 2 dclks - 11: illegal This should always be set to 1. */ uint64_t tskw : 2; /**< This component is a representation of total BOARD DELAY on DQ (used in the controller to determine the R->W spacing to avoid DQS/DQ bus conflicts). Enter the largest of the per byte Board delay - 00: 0 dclk - 01: 1 dclks - 10: 2 dclks - 11: 3 dclks */ uint64_t qs_dic : 2; /**< QS Drive Strength Control (DDR1): & DDR2 Termination Resistor Setting When in DDR2, a non Zero value in this register enables the On Die Termination (ODT) in DDR parts. These two bits are loaded into the RTT portion of the EMRS register bits A6 & A2. If DDR2's termination (for the memory's DQ/DQS/DM pads) is not desired, set it to 00. If it is, chose between 01 for 75 ohm and 10 for 150 ohm termination. 00 = ODT Disabled 01 = 75 ohm Termination 10 = 150 ohm Termination 11 = 50 ohm Termination Octeon, on writes, by default, drives the 4/8 ODT pins (64/128b mode) based on what the masks (LMC_WODT_CTL) are programmed to. LMC_DDR2_CTL->ODT_ENA enables Octeon to drive ODT pins for READS. LMC_RODT_CTL needs to be programmed based on the system's needs for ODT. */ uint64_t dic : 2; /**< Drive Strength Control: For DDR-I/II Mode, DIC[0] is loaded into the Extended Mode Register (EMRS) A1 bit during initialization. (see DDR-I data sheet EMRS description) 0 = Normal 1 = Reduced For DDR-II Mode, DIC[1] is used to load into EMRS bit 10 - DQSN Enable/Disable field. By default, we program the DDR's to drive the DQSN also. Set it to 1 if DQSN should be Hi-Z. 
0 - DQSN Enable 1 - DQSN Disable */ #else uint64_t dic : 2; uint64_t qs_dic : 2; uint64_t tskw : 2; uint64_t sil_lat : 2; uint64_t bprch : 1; uint64_t fprch2 : 1; uint64_t mode128b : 1; uint64_t set_zero : 1; uint64_t inorder_mrf : 1; uint64_t inorder_mwf : 1; uint64_t r2r_slot : 1; uint64_t rdimm_ena : 1; uint64_t reserved_16_17 : 2; uint64_t max_write_batch : 4; uint64_t xor_bank : 1; uint64_t slow_scf : 1; uint64_t ddr__pctl : 4; uint64_t ddr__nctl : 4; uint64_t reserved_32_63 : 32; #endif } cn38xx; struct cvmx_lmcx_ctl_cn38xx cn38xxp2; struct cvmx_lmcx_ctl_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t ddr__nctl : 4; /**< DDR nctl from compensation circuit The encoded value on this will adjust the drive strength of the DDR DQ pulldns. */ uint64_t ddr__pctl : 4; /**< DDR pctl from compensation circuit The encoded value on this will adjust the drive strength of the DDR DQ pullup. */ uint64_t slow_scf : 1; /**< Should be cleared to zero */ uint64_t xor_bank : 1; /**< If (XOR_BANK == 1), then bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5] else bank[n:0]=address[n+7:7] where n=1 for a 4 bank part and n=2 for an 8 bank part */ uint64_t max_write_batch : 4; /**< Maximum number of consecutive writes to service before allowing reads to interrupt. */ uint64_t reserved_17_17 : 1; uint64_t pll_bypass : 1; /**< PLL Bypass. */ uint64_t rdimm_ena : 1; /**< Registered DIMM Enable - When set allows the use of JEDEC Registered DIMMs which require Write data to be registered in the controller. */ uint64_t r2r_slot : 1; /**< R2R Slot Enable: When set, all read-to-read trans will slot an additional 1 cycle data bus bubble to avoid DQ/DQS bus contention. This is only a CYA bit, in case the "built-in" DIMM and RANK crossing logic which should auto-detect and perfectly slot read-to-reads to the same DIMM/RANK. */ uint64_t inorder_mwf : 1; /**< Reads as zero */ uint64_t inorder_mrf : 1; /**< Always clear to zero */ uint64_t dreset : 1; /**< Dclk domain reset. The reset signal that is used by the Dclk domain is (DRESET || ECLK_RESET). */ uint64_t mode32b : 1; /**< 32b data Path Mode Set to 1 if we use 32 DQ pins 0 for 16b DQ mode. */ uint64_t fprch2 : 1; /**< Front Porch Enable: When set, the turn-off time for the DDR_DQ/DQS drivers is 1 dclk earlier. This bit should typically be set. */ uint64_t bprch : 1; /**< Back Porch Enable: When set, the turn-on time for the DDR_DQ/DQS drivers is delayed an additional DCLK cycle. This should be set to one whenever both SILO_HC and SILO_QC are set. */ uint64_t sil_lat : 2; /**< SILO Latency: On reads, determines how many additional dclks to wait (on top of TCL+1+TSKW) before pulling data out of the pad silos. - 00: illegal - 01: 1 dclks - 10: 2 dclks - 11: illegal This should always be set to 1. */ uint64_t tskw : 2; /**< This component is a representation of total BOARD DELAY on DQ (used in the controller to determine the R->W spacing to avoid DQS/DQ bus conflicts). Enter the largest of the per byte Board delay - 00: 0 dclk - 01: 1 dclks - 10: 2 dclks - 11: 3 dclks */ uint64_t qs_dic : 2; /**< DDR2 Termination Resistor Setting When in DDR2, a non Zero value in this register enables the On Die Termination (ODT) in DDR parts. These two bits are loaded into the RTT portion of the EMRS register bits A6 & A2. If DDR2's termination (for the memory's DQ/DQS/DM pads) is not desired, set it to 00. If it is, chose between 01 for 75 ohm and 10 for 150 ohm termination. 
00 = ODT Disabled 01 = 75 ohm Termination 10 = 150 ohm Termination 11 = 50 ohm Termination Octeon, on writes, by default, drives the ODT pins based on what the masks (LMC_WODT_CTL) are programmed to. LMC_DDR2_CTL->ODT_ENA enables Octeon to drive ODT pins for READS. LMC_RODT_CTL needs to be programmed based on the system's needs for ODT. */ uint64_t dic : 2; /**< Drive Strength Control: DIC[0] is loaded into the Extended Mode Register (EMRS) A1 bit during initialization. 0 = Normal 1 = Reduced DIC[1] is used to load into EMRS bit 10 - DQSN Enable/Disable field. By default, we program the DDR's to drive the DQSN also. Set it to 1 if DQSN should be Hi-Z. 0 - DQSN Enable 1 - DQSN Disable */ #else uint64_t dic : 2; uint64_t qs_dic : 2; uint64_t tskw : 2; uint64_t sil_lat : 2; uint64_t bprch : 1; uint64_t fprch2 : 1; uint64_t mode32b : 1; uint64_t dreset : 1; uint64_t inorder_mrf : 1; uint64_t inorder_mwf : 1; uint64_t r2r_slot : 1; uint64_t rdimm_ena : 1; uint64_t pll_bypass : 1; uint64_t reserved_17_17 : 1; uint64_t max_write_batch : 4; uint64_t xor_bank : 1; uint64_t slow_scf : 1; uint64_t ddr__pctl : 4; uint64_t ddr__nctl : 4; uint64_t reserved_32_63 : 32; #endif } cn50xx; struct cvmx_lmcx_ctl_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t ddr__nctl : 4; /**< DDR nctl from compensation circuit The encoded value on this will adjust the drive strength of the DDR DQ pulldns. */ uint64_t ddr__pctl : 4; /**< DDR pctl from compensation circuit The encoded value on this will adjust the drive strength of the DDR DQ pullup. */ uint64_t slow_scf : 1; /**< Always clear to zero */ uint64_t xor_bank : 1; /**< If (XOR_BANK == 1), then bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5] else bank[n:0]=address[n+7:7] where n=1 for a 4 bank part and n=2 for an 8 bank part */ uint64_t max_write_batch : 4; /**< Maximum number of consecutive writes to service before allowing reads to interrupt. */ uint64_t reserved_16_17 : 2; uint64_t rdimm_ena : 1; /**< Registered DIMM Enable - When set allows the use of JEDEC Registered DIMMs which require Write data to be registered in the controller. */ uint64_t r2r_slot : 1; /**< R2R Slot Enable: When set, all read-to-read trans will slot an additional 1 cycle data bus bubble to avoid DQ/DQS bus contention. This is only a CYA bit, in case the "built-in" DIMM and RANK crossing logic which should auto-detect and perfectly slot read-to-reads to the same DIMM/RANK. */ uint64_t inorder_mwf : 1; /**< Reads as zero */ uint64_t inorder_mrf : 1; /**< Always set to zero */ uint64_t dreset : 1; /**< MBZ THIS IS OBSOLETE. Use LMC_DLL_CTL[DRESET] instead. */ uint64_t mode32b : 1; /**< 32b data Path Mode Set to 1 if we use only 32 DQ pins 0 for 64b DQ mode. */ uint64_t fprch2 : 1; /**< Front Porch Enable: When set, the turn-off time for the DDR_DQ/DQS drivers is 1 dclk earlier. This bit should typically be set. */ uint64_t bprch : 1; /**< Back Porch Enable: When set, the turn-on time for the DDR_DQ/DQS drivers is delayed an additional DCLK cycle. This should be set to one whenever both SILO_HC and SILO_QC are set. */ uint64_t sil_lat : 2; /**< SILO Latency: On reads, determines how many additional dclks to wait (on top of TCL+1+TSKW) before pulling data out of the pad silos. - 00: illegal - 01: 1 dclks - 10: 2 dclks - 11: illegal This should always be set to 1. THIS IS OBSOLETE. Use READ_LEVEL_RANK instead. 
*/ uint64_t tskw : 2; /**< This component is a representation of total BOARD DELAY on DQ (used in the controller to determine the R->W spacing to avoid DQS/DQ bus conflicts). Enter the largest of the per byte Board delay - 00: 0 dclk - 01: 1 dclks - 10: 2 dclks - 11: 3 dclks THIS IS OBSOLETE. Use READ_LEVEL_RANK instead. */ uint64_t qs_dic : 2; /**< DDR2 Termination Resistor Setting When in DDR2, a non Zero value in this register enables the On Die Termination (ODT) in DDR parts. These two bits are loaded into the RTT portion of the EMRS register bits A6 & A2. If DDR2's termination (for the memory's DQ/DQS/DM pads) is not desired, set it to 00. If it is, chose between 01 for 75 ohm and 10 for 150 ohm termination. 00 = ODT Disabled 01 = 75 ohm Termination 10 = 150 ohm Termination 11 = 50 ohm Termination Octeon, on writes, by default, drives the 4/8 ODT pins (64/128b mode) based on what the masks (LMC_WODT_CTL0 & 1) are programmed to. LMC_DDR2_CTL->ODT_ENA enables Octeon to drive ODT pins for READS. LMC_RODT_CTL needs to be programmed based on the system's needs for ODT. */ uint64_t dic : 2; /**< Drive Strength Control: DIC[0] is loaded into the Extended Mode Register (EMRS) A1 bit during initialization. 0 = Normal 1 = Reduced DIC[1] is used to load into EMRS bit 10 - DQSN Enable/Disable field. By default, we program the DDR's to drive the DQSN also. Set it to 1 if DQSN should be Hi-Z. 0 - DQSN Enable 1 - DQSN Disable */ #else uint64_t dic : 2; uint64_t qs_dic : 2; uint64_t tskw : 2; uint64_t sil_lat : 2; uint64_t bprch : 1; uint64_t fprch2 : 1; uint64_t mode32b : 1; uint64_t dreset : 1; uint64_t inorder_mrf : 1; uint64_t inorder_mwf : 1; uint64_t r2r_slot : 1; uint64_t rdimm_ena : 1; uint64_t reserved_16_17 : 2; uint64_t max_write_batch : 4; uint64_t xor_bank : 1; uint64_t slow_scf : 1; uint64_t ddr__pctl : 4; uint64_t ddr__nctl : 4; uint64_t reserved_32_63 : 32; #endif } cn52xx; struct cvmx_lmcx_ctl_cn52xx cn52xxp1; struct cvmx_lmcx_ctl_cn52xx cn56xx; struct cvmx_lmcx_ctl_cn52xx cn56xxp1; struct cvmx_lmcx_ctl_cn58xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t ddr__nctl : 4; /**< DDR nctl from compensation circuit The encoded value on this will adjust the drive strength of the DDR DQ pulldns. */ uint64_t ddr__pctl : 4; /**< DDR pctl from compensation circuit The encoded value on this will adjust the drive strength of the DDR DQ pullup. */ uint64_t slow_scf : 1; /**< Should be cleared to zero */ uint64_t xor_bank : 1; /**< If (XOR_BANK == 1), then bank[n:0]=address[n+7:7] ^ address[n+7+5:7+5] else bank[n:0]=address[n+7:7] where n=1 for a 4 bank part and n=2 for an 8 bank part */ uint64_t max_write_batch : 4; /**< Maximum number of consecutive writes to service before allowing reads to interrupt. */ uint64_t reserved_16_17 : 2; uint64_t rdimm_ena : 1; /**< Registered DIMM Enable - When set allows the use of JEDEC Registered DIMMs which require Write data to be registered in the controller. */ uint64_t r2r_slot : 1; /**< R2R Slot Enable: When set, all read-to-read trans will slot an additional 1 cycle data bus bubble to avoid DQ/DQS bus contention. This is only a CYA bit, in case the "built-in" DIMM and RANK crossing logic which should auto-detect and perfectly slot read-to-reads to the same DIMM/RANK. */ uint64_t inorder_mwf : 1; /**< Reads as zero */ uint64_t inorder_mrf : 1; /**< Always clear to zero */ uint64_t dreset : 1; /**< Dclk domain reset. The reset signal that is used by the Dclk domain is (DRESET || ECLK_RESET). 
*/ uint64_t mode128b : 1; /**< 128b data Path Mode Set to 1 if we use all 128 DQ pins 0 for 64b DQ mode. */ uint64_t fprch2 : 1; /**< Front Porch Enable: When set, the turn-off time for the DDR_DQ/DQS drivers is 1 dclk earlier. This bit should typically be set. */ uint64_t bprch : 1; /**< Back Porch Enable: When set, the turn-on time for the DDR_DQ/DQS drivers is delayed an additional DCLK cycle. This should be set to one whenever both SILO_HC and SILO_QC are set. */ uint64_t sil_lat : 2; /**< SILO Latency: On reads, determines how many additional dclks to wait (on top of TCL+1+TSKW) before pulling data out of the pad silos. - 00: illegal - 01: 1 dclks - 10: 2 dclks - 11: illegal This should always be set to 1. */ uint64_t tskw : 2; /**< This component is a representation of total BOARD DELAY on DQ (used in the controller to determine the R->W spacing to avoid DQS/DQ bus conflicts). Enter the largest of the per byte Board delay - 00: 0 dclk - 01: 1 dclks - 10: 2 dclks - 11: 3 dclks */ uint64_t qs_dic : 2; /**< DDR2 Termination Resistor Setting A non Zero value in this register enables the On Die Termination (ODT) in DDR parts. These two bits are loaded into the RTT portion of the EMRS register bits A6 & A2. If DDR2's termination (for the memory's DQ/DQS/DM pads) is not desired, set it to 00. If it is, chose between 01 for 75 ohm and 10 for 150 ohm termination. 00 = ODT Disabled 01 = 75 ohm Termination 10 = 150 ohm Termination 11 = 50 ohm Termination Octeon, on writes, by default, drives the 4/8 ODT pins (64/128b mode) based on what the masks (LMC_WODT_CTL) are programmed to. LMC_DDR2_CTL->ODT_ENA enables Octeon to drive ODT pins for READS. LMC_RODT_CTL needs to be programmed based on the system's needs for ODT. */ uint64_t dic : 2; /**< Drive Strength Control: DIC[0] is loaded into the Extended Mode Register (EMRS) A1 bit during initialization. 0 = Normal 1 = Reduced DIC[1] is used to load into EMRS bit 10 - DQSN Enable/Disable field. By default, we program the DDR's to drive the DQSN also. Set it to 1 if DQSN should be Hi-Z. 0 - DQSN Enable 1 - DQSN Disable */ #else uint64_t dic : 2; uint64_t qs_dic : 2; uint64_t tskw : 2; uint64_t sil_lat : 2; uint64_t bprch : 1; uint64_t fprch2 : 1; uint64_t mode128b : 1; uint64_t dreset : 1; uint64_t inorder_mrf : 1; uint64_t inorder_mwf : 1; uint64_t r2r_slot : 1; uint64_t rdimm_ena : 1; uint64_t reserved_16_17 : 2; uint64_t max_write_batch : 4; uint64_t xor_bank : 1; uint64_t slow_scf : 1; uint64_t ddr__pctl : 4; uint64_t ddr__nctl : 4; uint64_t reserved_32_63 : 32; #endif } cn58xx; struct cvmx_lmcx_ctl_cn58xx cn58xxp1; } cvmx_lmcx_ctl_t; /** * cvmx_lmc#_ctl1 * * LMC_CTL1 = LMC Control1 * This register is an assortment of various control fields needed by the memory controller */ typedef union { uint64_t u64; struct cvmx_lmcx_ctl1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_21_63 : 43; uint64_t ecc_adr : 1; /**< Include memory reference address in the ECC calculation 0=disabled, 1=enabled */ uint64_t forcewrite : 4; /**< Force the oldest outstanding write to complete after having waited for 2^FORCEWRITE cycles. 0=disabled. */ uint64_t idlepower : 3; /**< Enter power-down mode after the memory controller has been idle for 2^(2+IDLEPOWER) cycles. 0=disabled. */ uint64_t sequence : 3; /**< Instruction sequence that is run after a 0->1 transition on LMC_MEM_CFG0[INIT_START]. 
0=DDR2 power-up/init, 1=read-leveling 2=self-refresh entry, 3=self-refresh exit, 4=power-down entry, 5=power-down exit, 6=7=illegal */ uint64_t sil_mode : 1; /**< Read Silo mode. 0=envelope, 1=self-timed. */ uint64_t dcc_enable : 1; /**< Duty Cycle Corrector Enable. 0=disable, 1=enable If the memory part does not support DCC, then this bit must be set to 0. */ uint64_t reserved_2_7 : 6; uint64_t data_layout : 2; /**< Logical data layout per DQ byte lane: In 32b mode, this setting has no effect and the data layout DQ[35:0] is the following: [E[3:0], D[31:24], D[23:16], D[15:8], D[7:0]] In 16b mode, the DQ[35:0] layouts are the following: 0 - [0[3:0], 0[7:0], [0[7:2], E[1:0]], D[15:8], D[7:0]] 1 - [0[3:0], [0[7:2], E[1:0]], D[15:8], D[7:0], 0[7:0]] 2 - [[0[1:0], E[1:0]], D[15:8], D[7:0], 0[7:0], 0[7:0]] where E means ecc, D means data, and 0 means unused (ignored on reads and written as 0 on writes) */ #else uint64_t data_layout : 2; uint64_t reserved_2_7 : 6; uint64_t dcc_enable : 1; uint64_t sil_mode : 1; uint64_t sequence : 3; uint64_t idlepower : 3; uint64_t forcewrite : 4; uint64_t ecc_adr : 1; uint64_t reserved_21_63 : 43; #endif } s; struct cvmx_lmcx_ctl1_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t data_layout : 2; /**< Logical data layout per DQ byte lane: In 32b mode, this setting has no effect and the data layout DQ[35:0] is the following: [E[3:0], D[31:24], D[23:16], D[15:8], D[7:0]] In 16b mode, the DQ[35:0] layouts are the following: 0 - [0[3:0], 0[7:0], [0[7:2], E[1:0]], D[15:8], D[7:0]] 1 - [0[3:0], [0[7:2], E[1:0]], D[15:8], D[7:0], 0[7:0]] 2 - [[0[1:0], E[1:0]], D[15:8], D[7:0], 0[7:0], 0[7:0]] where E means ecc, D means data, and 0 means unused (ignored on reads and written as 0 on writes) */ #else uint64_t data_layout : 2; uint64_t reserved_2_63 : 62; #endif } cn30xx; struct cvmx_lmcx_ctl1_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_10_63 : 54; uint64_t sil_mode : 1; /**< Read Silo mode. 0=envelope, 1=self-timed. */ uint64_t dcc_enable : 1; /**< Duty Cycle Corrector Enable. 0=disable, 1=enable If the memory part does not support DCC, then this bit must be set to 0. */ uint64_t reserved_2_7 : 6; uint64_t data_layout : 2; /**< Logical data layout per DQ byte lane: In 32b mode, this setting has no effect and the data layout DQ[35:0] is the following: [E[3:0], D[31:24], D[23:16], D[15:8], D[7:0]] In 16b mode, the DQ[35:0] layouts are the following: 0 - [0[3:0], 0[7:0], [0[7:2], E[1:0]], D[15:8], D[7:0]] 1 - [0[3:0], [0[7:2], E[1:0]], D[15:8], D[7:0], 0[7:0]] 2 - [[0[1:0], E[1:0]], D[15:8], D[7:0], 0[7:0], 0[7:0]] where E means ecc, D means data, and 0 means unused (ignored on reads and written as 0 on writes) */ #else uint64_t data_layout : 2; uint64_t reserved_2_7 : 6; uint64_t dcc_enable : 1; uint64_t sil_mode : 1; uint64_t reserved_10_63 : 54; #endif } cn50xx; struct cvmx_lmcx_ctl1_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_21_63 : 43; uint64_t ecc_adr : 1; /**< Include memory reference address in the ECC calculation 0=disabled, 1=enabled */ uint64_t forcewrite : 4; /**< Force the oldest outstanding write to complete after having waited for 2^FORCEWRITE cycles. 0=disabled. */ uint64_t idlepower : 3; /**< Enter power-down mode after the memory controller has been idle for 2^(2+IDLEPOWER) cycles. 0=disabled. */ uint64_t sequence : 3; /**< Instruction sequence that is run after a 0->1 transition on LMC_MEM_CFG0[INIT_START]. 
0=DDR2 power-up/init, 1=read-leveling 2=self-refresh entry, 3=self-refresh exit, 4=power-down entry, 5=power-down exit, 6=7=illegal */ uint64_t sil_mode : 1; /**< Read Silo mode. 0=envelope, 1=self-timed. */ uint64_t dcc_enable : 1; /**< Duty Cycle Corrector Enable. 0=disable, 1=enable If the memory part does not support DCC, then this bit must be set to 0. */ uint64_t reserved_0_7 : 8; #else uint64_t reserved_0_7 : 8; uint64_t dcc_enable : 1; uint64_t sil_mode : 1; uint64_t sequence : 3; uint64_t idlepower : 3; uint64_t forcewrite : 4; uint64_t ecc_adr : 1; uint64_t reserved_21_63 : 43; #endif } cn52xx; struct cvmx_lmcx_ctl1_cn52xx cn52xxp1; struct cvmx_lmcx_ctl1_cn52xx cn56xx; struct cvmx_lmcx_ctl1_cn52xx cn56xxp1; struct cvmx_lmcx_ctl1_cn58xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_10_63 : 54; uint64_t sil_mode : 1; /**< Read Silo mode. 0=envelope, 1=self-timed. */ uint64_t dcc_enable : 1; /**< Duty Cycle Corrector Enable. 0=disable, 1=enable If the memory part does not support DCC, then this bit must be set to 0. */ uint64_t reserved_0_7 : 8; #else uint64_t reserved_0_7 : 8; uint64_t dcc_enable : 1; uint64_t sil_mode : 1; uint64_t reserved_10_63 : 54; #endif } cn58xx; struct cvmx_lmcx_ctl1_cn58xx cn58xxp1; } cvmx_lmcx_ctl1_t; /** * cvmx_lmc#_dclk_cnt_hi * * LMC_DCLK_CNT_HI = Performance Counters * */ typedef union { uint64_t u64; struct cvmx_lmcx_dclk_cnt_hi_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t dclkcnt_hi : 32; /**< Performance Counter that counts dclks Upper 32-bits of a 64-bit counter. */ #else uint64_t dclkcnt_hi : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_lmcx_dclk_cnt_hi_s cn30xx; struct cvmx_lmcx_dclk_cnt_hi_s cn31xx; struct cvmx_lmcx_dclk_cnt_hi_s cn38xx; struct cvmx_lmcx_dclk_cnt_hi_s cn38xxp2; struct cvmx_lmcx_dclk_cnt_hi_s cn50xx; struct cvmx_lmcx_dclk_cnt_hi_s cn52xx; struct cvmx_lmcx_dclk_cnt_hi_s cn52xxp1; struct cvmx_lmcx_dclk_cnt_hi_s cn56xx; struct cvmx_lmcx_dclk_cnt_hi_s cn56xxp1; struct cvmx_lmcx_dclk_cnt_hi_s cn58xx; struct cvmx_lmcx_dclk_cnt_hi_s cn58xxp1; } cvmx_lmcx_dclk_cnt_hi_t; /** * cvmx_lmc#_dclk_cnt_lo * * LMC_DCLK_CNT_LO = Performance Counters * */ typedef union { uint64_t u64; struct cvmx_lmcx_dclk_cnt_lo_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t dclkcnt_lo : 32; /**< Performance Counter that counts dclks Lower 32-bits of a 64-bit counter. */ #else uint64_t dclkcnt_lo : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_lmcx_dclk_cnt_lo_s cn30xx; struct cvmx_lmcx_dclk_cnt_lo_s cn31xx; struct cvmx_lmcx_dclk_cnt_lo_s cn38xx; struct cvmx_lmcx_dclk_cnt_lo_s cn38xxp2; struct cvmx_lmcx_dclk_cnt_lo_s cn50xx; struct cvmx_lmcx_dclk_cnt_lo_s cn52xx; struct cvmx_lmcx_dclk_cnt_lo_s cn52xxp1; struct cvmx_lmcx_dclk_cnt_lo_s cn56xx; struct cvmx_lmcx_dclk_cnt_lo_s cn56xxp1; struct cvmx_lmcx_dclk_cnt_lo_s cn58xx; struct cvmx_lmcx_dclk_cnt_lo_s cn58xxp1; } cvmx_lmcx_dclk_cnt_lo_t; /** * cvmx_lmc#_dclk_ctl * * LMC_DCLK_CTL = LMC DCLK generation control * * * Notes: * This CSR is only relevant for LMC1. LMC0_DCLK_CTL is not used. * */ typedef union { uint64_t u64; struct cvmx_lmcx_dclk_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t off90_ena : 1; /**< 0=use global DCLK (i.e. the PLL) directly for LMC1 1=use the 90 degree DCLK DLL to offset LMC1 DCLK */ uint64_t dclk90_byp : 1; /**< 0=90 degree DCLK DLL uses sampled delay from LMC0 1=90 degree DCLK DLL uses DCLK90_VLU See DCLK90_VLU. 
*/ uint64_t dclk90_ld : 1; /**< The 90 degree DCLK DLL samples the delay setting from LMC0's DLL when this field transitions 0->1 */ uint64_t dclk90_vlu : 5; /**< Manual open-loop delay setting. The LMC1 90 degree DCLK DLL uses DCLK90_VLU rather than the delay setting sampled from LMC0 when DCLK90_BYP=1. */ #else uint64_t dclk90_vlu : 5; uint64_t dclk90_ld : 1; uint64_t dclk90_byp : 1; uint64_t off90_ena : 1; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_lmcx_dclk_ctl_s cn56xx; struct cvmx_lmcx_dclk_ctl_s cn56xxp1; } cvmx_lmcx_dclk_ctl_t; /** * cvmx_lmc#_ddr2_ctl * * LMC_DDR2_CTL = LMC DDR2 & DLL Control Register * */ typedef union { uint64_t u64; struct cvmx_lmcx_ddr2_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t bank8 : 1; /**< For 8 bank DDR2 parts 1 - DDR2 parts have 8 internal banks (BA is 3 bits wide). 0 - DDR2 parts have 4 internal banks (BA is 2 bits wide). */ uint64_t burst8 : 1; /**< 8-burst mode. 1 - DDR data transfer happens in burst of 8 0 - DDR data transfer happens in burst of 4 BURST8 should be set when DDR2T is set to minimize the command bandwidth loss. */ uint64_t addlat : 3; /**< Additional Latency for posted CAS When Posted CAS is on, this configures the additional latency. This should be set to 1 .. LMC_MEM_CFG1[TRCD]-2 (Note the implication that posted CAS should not be used when tRCD is two.) */ uint64_t pocas : 1; /**< Enable the Posted CAS feature of DDR2. */ uint64_t bwcnt : 1; /**< Bus utilization counter clear. Clears the LMC_OPS_CNT_*, LMC_IFB_CNT_*, and LMC_DCLK_CNT_* registers. SW should first write this field to a one, then write this field to a zero to clear the CSRs. */ uint64_t twr : 3; /**< DDR Write Recovery time (tWR). Last Wr Brst to Pre delay This is not a direct encoding of the value. It is programmed as below per the DDR2 spec. The decimal number on the right is RNDUP(tWR(ns) / tCYC(ns)) TYP=15ns - 000: RESERVED - 001: 2 - 010: 3 - 011: 4 - 100: 5 - 101: 6 - 110: 7 - 111: 8 */ uint64_t silo_hc : 1; /**< Delays the read sample window by a Half Cycle. */ uint64_t ddr_eof : 4; /**< Early Fill Counter Init. L2 needs to know a few cycles before a fill completes so it can get its Control pipe started (for better overall performance). This counter contains an init value which is a function of the Eclk/Dclk ratio to account for the asynchronous boundary between the L2 cache and the DRAM controller. This init value will determine when to safely let the L2 know that a fill termination is coming up. Set DDR_EOF according to the following rule: eclkFreq/dclkFreq = dclkPeriod/eclkPeriod = RATIO RATIO < 6/6 -> illegal 6/6 <= RATIO < 6/5 -> DDR_EOF=3 6/5 <= RATIO < 6/4 -> DDR_EOF=3 6/4 <= RATIO < 6/3 -> DDR_EOF=2 6/3 <= RATIO < 6/2 -> DDR_EOF=1 6/2 <= RATIO < 6/1 -> DDR_EOF=0 6/1 <= RATIO -> DDR_EOF=0 */ uint64_t tfaw : 5; /**< tFAW - Cycles = RNDUP[tFAW(ns)/tcyc(ns)] - 1 Four Access Window time. Relevant only in DDR2 AND in 8-bank parts. tFAW = 5'b0 in DDR2-4bank tFAW = RNDUP[tFAW(ns)/tcyc(ns)] - 1 in DDR2-8bank */ uint64_t crip_mode : 1; /**< Cripple Mode - When set, the LMC allows only 1 inflight transaction (vs. 8 in normal mode). This bit is ONLY to be set at power-on and should not be set for normal use. */ uint64_t ddr2t : 1; /**< Turn on the DDR 2T mode. 2 cycle window for CMD and address. This mode helps relieve setup time pressure on the Address and command bus which nominally have a very large fanout. Please refer to Micron's tech note tn_47_01 titled "DDR2-533 Memory Design Guide for Two Dimm Unbuffered Systems" for physical details. BURST8 should be set when DDR2T is set to minimize add/cmd loss. */ uint64_t odt_ena : 1; /**< Enable Obsolete ODT on Reads Obsolete Read ODT wiggles DDR_ODT_* pins on reads. Should normally be cleared to zero. When this is on, the following fields must also be programmed: LMC_CTL->QS_DIC - programs the termination value LMC_RODT_CTL - programs the ODT I/O mask for Reads */ uint64_t qdll_ena : 1; /**< DDR Quad DLL Enable: A 0->1 transition on this bit after the DCLK init sequence will reset the DDR 90 DLL. Should happen at startup before any activity in DDR. DRESET should be asserted before and for 10 usec following the 0->1 transition on QDLL_ENA. */ uint64_t dll90_vlu : 5; /**< Contains the open loop setting value for the DDR90 delay line. */ uint64_t dll90_byp : 1; /**< DDR DLL90 Bypass: When set, the DDR90 DLL is to be bypassed and the setting is defined by DLL90_VLU */ uint64_t rdqs : 1; /**< DDR2 RDQS mode. When set, configures memory subsystem to use unidirectional DQS pins. RDQS/DM - Rcv & DQS - Xmit */ uint64_t ddr2 : 1; /**< Should be set */ #else uint64_t ddr2 : 1; uint64_t rdqs : 1; uint64_t dll90_byp : 1; uint64_t dll90_vlu : 5; uint64_t qdll_ena : 1; uint64_t odt_ena : 1; uint64_t ddr2t : 1; uint64_t crip_mode : 1; uint64_t tfaw : 5; uint64_t ddr_eof : 4; uint64_t silo_hc : 1; uint64_t twr : 3; uint64_t bwcnt : 1; uint64_t pocas : 1; uint64_t addlat : 3; uint64_t burst8 : 1; uint64_t bank8 : 1; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_lmcx_ddr2_ctl_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t bank8 : 1; /**< For 8 bank DDR2 parts 1 - DDR2 parts have 8 internal banks (BA is 3 bits wide). 0 - DDR2 parts have 4 internal banks (BA is 2 bits wide). */ uint64_t burst8 : 1; /**< 8-burst mode. 1 - DDR data transfer happens in burst of 8 0 - DDR data transfer happens in burst of 4 BURST8 should be set when DDR2T is set to minimize add/cmd bandwidth loss. */ uint64_t addlat : 3; /**< Additional Latency for posted CAS When Posted CAS is on, this configures the additional latency. This should be set to 1 .. LMC_MEM_CFG1[TRCD]-2 (Note the implication that posted CAS should not be used when tRCD is two.) */ uint64_t pocas : 1; /**< Enable the Posted CAS feature of DDR2. */ uint64_t bwcnt : 1; /**< Bus utilization counter clear. Clears the LMC_OPS_CNT_*, LMC_IFB_CNT_*, and LMC_DCLK_CNT_* registers. SW should first write this field to a one, then write this field to a zero to clear the CSRs. */ uint64_t twr : 3; /**< DDR Write Recovery time (tWR). Last Wr Brst to Pre delay This is not a direct encoding of the value. It is programmed as below per the DDR2 spec. The decimal number on the right is RNDUP(tWR(ns) / tCYC(ns)) TYP=15ns - 000: RESERVED - 001: 2 - 010: 3 - 011: 4 - 100: 5 - 101: 6 - 110-111: RESERVED */ uint64_t silo_hc : 1; /**< Delays the read sample window by a Half Cycle. */ uint64_t ddr_eof : 4; /**< Early Fill Counter Init. L2 needs to know a few cycles before a fill completes so it can get its Control pipe started (for better overall performance). This counter contains an init value which is a function of the Eclk/Dclk ratio to account for the asynchronous boundary between the L2 cache and the DRAM controller. This init value will determine when to safely let the L2 know that a fill termination is coming up. DDR_EOF = RNDUP (DCLK period/Eclk Period). If the ratio is above 3, set DDR_EOF to 3.
DCLK/ECLK period ratio -> DDR_EOF: less than 1 -> 1; less than 2 -> 2; more than 2 -> 3 */ uint64_t tfaw : 5; /**< tFAW - Cycles = RNDUP[tFAW(ns)/tcyc(ns)] - 1 Four Access Window time. Relevant only in 8-bank parts. TFAW = 5'b0 for DDR2-4bank TFAW = RNDUP[tFAW(ns)/tcyc(ns)] - 1 in DDR2-8bank */ uint64_t crip_mode : 1; /**< Cripple Mode - When set, the LMC allows only 1 inflight transaction (vs. 8 in normal mode). This bit is ONLY to be set at power-on and should not be set for normal use. */ uint64_t ddr2t : 1; /**< Turn on the DDR 2T mode. 2 cycle window for CMD and address. This mode helps relieve setup time pressure on the Address and command bus which nominally have a very large fanout. Please refer to Micron's tech note tn_47_01 titled "DDR2-533 Memory Design Guide for Two Dimm Unbuffered Systems" for physical details. BURST8 should be used when DDR2T is set to minimize add/cmd bandwidth loss. */ uint64_t odt_ena : 1; /**< Enable ODT for DDR2 on Reads. When this is on, the following fields must also be programmed: LMC_CTL->QS_DIC - programs the termination value LMC_RODT_CTL - programs the ODT I/O mask for Reads Program as 0 for DDR1 mode and ODT needs to be off on Octeon Reads */ uint64_t qdll_ena : 1; /**< DDR Quad DLL Enable: A 0->1 transition on this bit after erst deassertion will reset the DDR 90 DLL. Should happen at startup before any activity in DDR. */ uint64_t dll90_vlu : 5; /**< Contains the open loop setting value for the DDR90 delay line. */ uint64_t dll90_byp : 1; /**< DDR DLL90 Bypass: When set, the DDR90 DLL is to be bypassed and the setting is defined by DLL90_VLU */ uint64_t reserved_1_1 : 1; uint64_t ddr2 : 1; /**< DDR2 Enable: When set, configures memory subsystem for DDR-II SDRAMs. */ #else uint64_t ddr2 : 1; uint64_t reserved_1_1 : 1; uint64_t dll90_byp : 1; uint64_t dll90_vlu : 5; uint64_t qdll_ena : 1; uint64_t odt_ena : 1; uint64_t ddr2t : 1; uint64_t crip_mode : 1; uint64_t tfaw : 5; uint64_t ddr_eof : 4; uint64_t silo_hc : 1; uint64_t twr : 3; uint64_t bwcnt : 1; uint64_t pocas : 1; uint64_t addlat : 3; uint64_t burst8 : 1; uint64_t bank8 : 1; uint64_t reserved_32_63 : 32; #endif } cn30xx; struct cvmx_lmcx_ddr2_ctl_cn30xx cn31xx; struct cvmx_lmcx_ddr2_ctl_s cn38xx; struct cvmx_lmcx_ddr2_ctl_s cn38xxp2; struct cvmx_lmcx_ddr2_ctl_s cn50xx; struct cvmx_lmcx_ddr2_ctl_s cn52xx; struct cvmx_lmcx_ddr2_ctl_s cn52xxp1; struct cvmx_lmcx_ddr2_ctl_s cn56xx; struct cvmx_lmcx_ddr2_ctl_s cn56xxp1; struct cvmx_lmcx_ddr2_ctl_s cn58xx; struct cvmx_lmcx_ddr2_ctl_s cn58xxp1; } cvmx_lmcx_ddr2_ctl_t; /** * cvmx_lmc#_delay_cfg * * LMC_DELAY_CFG = Open-loop delay line settings * * * Notes: * The DQ bits add OUTGOING delay only to dq, dqs_[p,n], cb, cbs_[p,n], dqm. Delay is approximately * 50-80ps per setting depending on process/voltage. There is no need to add incoming delay since by * default all strobe bits are delayed internally by 90 degrees (as was always the case in previous * passes and past chips). * * The CMD bits add delay to all command bits DDR_RAS, DDR_CAS, DDR_A<15:0>, DDR_BA<2:0>, DDR_n_CS<1:0>_L, * DDR_WE, DDR_CKE and DDR_ODT_<7:0>. Again, delay is 50-80ps per tap. * * The CLK bits add delay to all clock signals DDR_CK_<5:0>_P and DDR_CK_<5:0>_N. Again, delay is * 50-80ps per tap. * * The usage scenario is the following: There is too much delay on command signals and setup on command * is not met. The user can then delay the clock until setup is met. * * At the same time though, dq/dqs should be delayed because there is also a DDR spec tying dqs with * clock.
If clock is too much delayed with respect to dqs, writes will start to fail. * * This scheme should eliminate the need to add board routing delay to clock signals to make high * frequencies work. */ typedef union { uint64_t u64; struct cvmx_lmcx_delay_cfg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_15_63 : 49; uint64_t dq : 5; /**< Setting for DQ delay line */ uint64_t cmd : 5; /**< Setting for CMD delay line */ uint64_t clk : 5; /**< Setting for CLK delay line */ #else uint64_t clk : 5; uint64_t cmd : 5; uint64_t dq : 5; uint64_t reserved_15_63 : 49; #endif } s; struct cvmx_lmcx_delay_cfg_s cn30xx; struct cvmx_lmcx_delay_cfg_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_14_63 : 50; uint64_t dq : 4; /**< Setting for DQ delay line */ uint64_t reserved_9_9 : 1; uint64_t cmd : 4; /**< Setting for CMD delay line */ uint64_t reserved_4_4 : 1; uint64_t clk : 4; /**< Setting for CLK delay line */ #else uint64_t clk : 4; uint64_t reserved_4_4 : 1; uint64_t cmd : 4; uint64_t reserved_9_9 : 1; uint64_t dq : 4; uint64_t reserved_14_63 : 50; #endif } cn38xx; struct cvmx_lmcx_delay_cfg_cn38xx cn50xx; struct cvmx_lmcx_delay_cfg_cn38xx cn52xx; struct cvmx_lmcx_delay_cfg_cn38xx cn52xxp1; struct cvmx_lmcx_delay_cfg_cn38xx cn56xx; struct cvmx_lmcx_delay_cfg_cn38xx cn56xxp1; struct cvmx_lmcx_delay_cfg_cn38xx cn58xx; struct cvmx_lmcx_delay_cfg_cn38xx cn58xxp1; } cvmx_lmcx_delay_cfg_t; /** * cvmx_lmc#_dll_ctl * * LMC_DLL_CTL = LMC DLL control and DCLK reset * */ typedef union { uint64_t u64; struct cvmx_lmcx_dll_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t dreset : 1; /**< Dclk domain reset. The reset signal that is used by the Dclk domain is (DRESET || ECLK_RESET). */ uint64_t dll90_byp : 1; /**< DDR DLL90 Bypass: When set, the DDR90 DLL is to be bypassed and the setting is defined by DLL90_VLU */ uint64_t dll90_ena : 1; /**< DDR Quad DLL Enable: A 0->1 transition on this bit after the DCLK init sequence resets the DDR 90 DLL. Should happen at startup before any activity in DDR. QDLL_ENA must not transition 1->0 outside of a DRESET sequence (i.e. it must remain 1 until the next DRESET). DRESET should be asserted before and for 10 usec following the 0->1 transition on QDLL_ENA. */ uint64_t dll90_vlu : 5; /**< Contains the open loop setting value for the DDR90 delay line. */ #else uint64_t dll90_vlu : 5; uint64_t dll90_ena : 1; uint64_t dll90_byp : 1; uint64_t dreset : 1; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_lmcx_dll_ctl_s cn52xx; struct cvmx_lmcx_dll_ctl_s cn52xxp1; struct cvmx_lmcx_dll_ctl_s cn56xx; struct cvmx_lmcx_dll_ctl_s cn56xxp1; } cvmx_lmcx_dll_ctl_t; /** * cvmx_lmc#_dual_memcfg * * LMC_DUAL_MEMCFG = LMC Dual Memory Configuration Register * * This register controls certain parameters of Dual Memory Configuration * * Notes: * This register enables the design to have two separate memory configurations, selected dynamically * by the reference address. Note, however, that both configurations share LMC_CTL[MODE128b], * LMC_CTL[XOR_BANK], LMC_MEM_CFG0[PBANK_LSB], LMC_MEM_CFG0[BUNK_ENA], and all timing parameters. * In this description, "config0" refers to the normal memory configuration that is defined by the * LMC_MEM_CFG0[ROW_LSB] and LMC_DDR2_CTL[BANK8] parameters and "config1" refers to the dual (or second) * memory configuration that is defined by this register. * * Memory config0 must be programmed for the part with the most strict timing requirements.
If a mix of * 4 bank and 8 bank parts is used, then config0 must be used for the 8 bank part (because the timing * requirements of tFAW and tRP are more strict for 8 bank parts than they are for 4 bank parts). * * Enable mask to chip select mapping is shown below: * CS_MASK[7] -> DDR_3_CS_<1> * CS_MASK[6] -> DDR_3_CS_<0> * * CS_MASK[5] -> DDR_2_CS_<1> * CS_MASK[4] -> DDR_2_CS_<0> * * CS_MASK[3] -> DDR_1_CS_<1> * CS_MASK[2] -> DDR_1_CS_<0> * * CS_MASK[1] -> DDR_0_CS_<1> * CS_MASK[0] -> DDR_0_CS_<0> *
 * The DIMMs are arranged in one of the following arrangements:
 *
 *   LMC_CTL[MODE128b] == 1                      LMC_CTL[MODE128b] == 0
 *
 *   DIMM3_RANK1 | DIMM1_RANK1  highest address  DIMM3_RANK1  highest address
 *   DIMM3_RANK0 | DIMM1_RANK0                   DIMM3_RANK0
 *
 *   DIMM2_RANK1 | DIMM0_RANK1                   DIMM2_RANK1
 *   DIMM2_RANK0 | DIMM0_RANK0  lowest address   DIMM2_RANK0
 *
 *   data[127:64] | data_[63:0]                  DIMM1_RANK1
 *                                               DIMM1_RANK0
 *
 *                                               DIMM0_RANK1
 *                                               DIMM0_RANK0  lowest address
 *
 *                                               data_[63:0]
 *
 * DIMM n uses the pair of chip selects DDR_n_CS_<1:0>. When LMC_CTL[BUNK_ENA] == 1, each * chip select in the pair asserts independently. When LMC_CTL[BUNK_ENA] == 0, both chip * selects in the pair assert together. * * Programming restrictions for CS_MASK: * when LMC_CTL[BUNK_ENA] == 0, CS_MASK[2n + 1] = CS_MASK[2n], where 0 <= n <= 3 * when LMC_CTL[MODE128b] == 1, CS_MASK[ n + 4] = CS_MASK[ n], where 0 <= n <= 3 */ typedef union { uint64_t u64; struct cvmx_lmcx_dual_memcfg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t bank8 : 1; /**< See LMC_DDR2_CTL[BANK8] */ uint64_t row_lsb : 3; /**< See LMC_MEM_CFG0[ROW_LSB] */ uint64_t reserved_8_15 : 8; uint64_t cs_mask : 8; /**< Chip select mask. This mask corresponds to the 8 chip selects for a memory configuration. Each reference address will assert one of the chip selects. If that chip select has its corresponding CS_MASK bit set, then the "config1" parameters are used, otherwise the "config0" parameters are used. See additional notes below.
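Illustrative example (editor's sketch, not from the hardware spec): if only DIMM1 carries the "config1" parts, then CS_MASK = 0x0C (bits 3:2 set) applies the config1 parameters to references that assert DDR_1_CS_<1:0>, and the config0 parameters everywhere else.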
*/ #else uint64_t cs_mask : 8; uint64_t reserved_8_15 : 8; uint64_t row_lsb : 3; uint64_t bank8 : 1; uint64_t reserved_20_63 : 44; #endif } s; struct cvmx_lmcx_dual_memcfg_s cn50xx; struct cvmx_lmcx_dual_memcfg_s cn52xx; struct cvmx_lmcx_dual_memcfg_s cn52xxp1; struct cvmx_lmcx_dual_memcfg_s cn56xx; struct cvmx_lmcx_dual_memcfg_s cn56xxp1; struct cvmx_lmcx_dual_memcfg_s cn58xx; struct cvmx_lmcx_dual_memcfg_s cn58xxp1; } cvmx_lmcx_dual_memcfg_t; /** * cvmx_lmc#_ecc_synd * * LMC_ECC_SYND = MRD ECC Syndromes * */ typedef union { uint64_t u64; struct cvmx_lmcx_ecc_synd_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t mrdsyn3 : 8; /**< MRD ECC Syndrome Quad3 128b mode - corresponds to DQ[127:64], Phase1 64b mode - corresponds to DQ[127:64], Phase1, cycle1 */ uint64_t mrdsyn2 : 8; /**< MRD ECC Syndrome Quad2 128b mode - corresponds to DQ[63:0], Phase1 64b mode - corresponds to DQ[63:0], Phase1, cycle0 */ uint64_t mrdsyn1 : 8; /**< MRD ECC Syndrome Quad1 128b mode - corresponds to DQ[127:64], Phase0 64b mode - corresponds to DQ[127:64], Phase0, cycle1 */ uint64_t mrdsyn0 : 8; /**< MRD ECC Syndrome Quad0 In 128b mode, ECC is calculated on 1 cycle's worth of data SYND0 corresponds to DQ[63:0], Phase0 In 64b mode, ECC is calculated on 2 cycles' worth of data SYND0 corresponds to DQ[63:0], Phase0, cycle0 */ #else uint64_t mrdsyn0 : 8; uint64_t mrdsyn1 : 8; uint64_t mrdsyn2 : 8; uint64_t mrdsyn3 : 8; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_lmcx_ecc_synd_s cn30xx; struct cvmx_lmcx_ecc_synd_s cn31xx; struct cvmx_lmcx_ecc_synd_s cn38xx; struct cvmx_lmcx_ecc_synd_s cn38xxp2; struct cvmx_lmcx_ecc_synd_s cn50xx; struct cvmx_lmcx_ecc_synd_s cn52xx; struct cvmx_lmcx_ecc_synd_s cn52xxp1; struct cvmx_lmcx_ecc_synd_s cn56xx; struct cvmx_lmcx_ecc_synd_s cn56xxp1; struct cvmx_lmcx_ecc_synd_s cn58xx; struct cvmx_lmcx_ecc_synd_s cn58xxp1; } cvmx_lmcx_ecc_synd_t; /** * cvmx_lmc#_fadr * * LMC_FADR = LMC Failing Address Register (SEC/DED) * * This register only captures the first transaction with ECC errors. A DBE error can * over-write this register with its failing address. Writing * LMC_MEM_CFG0->SEC_ERR/DED_ERR clears the error bits and lets the register capture the * next failing address. * The physical mapping is a function of the \# of column bits and the \# of row bits. * * If the failing DIMM is 2, the error is in the DIMM carrying the upper data bits.
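 *
 * A minimal decode sketch (editor's illustration; the SDK's cvmx_read_csr() accessor and
 * the generated CVMX_LMCX_FADR(0) address macro are assumed to be available):
 *
 *   cvmx_lmcx_fadr_t fadr;
 *   fadr.u64 = cvmx_read_csr(CVMX_LMCX_FADR(0));
 *   printf("ECC fail: dimm=%u rank=%u bank=%u row=0x%x col=0x%x\n",
 *          (unsigned)fadr.s.fdimm, (unsigned)fadr.s.fbunk, (unsigned)fadr.s.fbank,
 *          (unsigned)fadr.s.frow, (unsigned)fadr.s.fcol);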
*/ typedef union { uint64_t u64; struct cvmx_lmcx_fadr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t fdimm : 2; /**< Failing DIMM# */ uint64_t fbunk : 1; /**< Failing Rank */ uint64_t fbank : 3; /**< Failing Bank[2:0] */ uint64_t frow : 14; /**< Failing Row Address[13:0] */ uint64_t fcol : 12; /**< Failing Column Start Address[11:0] Represents the Failing read's starting column address (and not the exact column address in which the SEC/DED was detected) */ #else uint64_t fcol : 12; uint64_t frow : 14; uint64_t fbank : 3; uint64_t fbunk : 1; uint64_t fdimm : 2; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_lmcx_fadr_s cn30xx; struct cvmx_lmcx_fadr_s cn31xx; struct cvmx_lmcx_fadr_s cn38xx; struct cvmx_lmcx_fadr_s cn38xxp2; struct cvmx_lmcx_fadr_s cn50xx; struct cvmx_lmcx_fadr_s cn52xx; struct cvmx_lmcx_fadr_s cn52xxp1; struct cvmx_lmcx_fadr_s cn56xx; struct cvmx_lmcx_fadr_s cn56xxp1; struct cvmx_lmcx_fadr_s cn58xx; struct cvmx_lmcx_fadr_s cn58xxp1; } cvmx_lmcx_fadr_t; /** * cvmx_lmc#_ifb_cnt_hi * * LMC_IFB_CNT_HI = Performance Counters * */ typedef union { uint64_t u64; struct cvmx_lmcx_ifb_cnt_hi_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t ifbcnt_hi : 32; /**< Performance Counter to measure Bus Utilization Upper 32-bits of 64-bit counter that increments every cycle there is something in the in-flight buffer. */ #else uint64_t ifbcnt_hi : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_lmcx_ifb_cnt_hi_s cn30xx; struct cvmx_lmcx_ifb_cnt_hi_s cn31xx; struct cvmx_lmcx_ifb_cnt_hi_s cn38xx; struct cvmx_lmcx_ifb_cnt_hi_s cn38xxp2; struct cvmx_lmcx_ifb_cnt_hi_s cn50xx; struct cvmx_lmcx_ifb_cnt_hi_s cn52xx; struct cvmx_lmcx_ifb_cnt_hi_s cn52xxp1; struct cvmx_lmcx_ifb_cnt_hi_s cn56xx; struct cvmx_lmcx_ifb_cnt_hi_s cn56xxp1; struct cvmx_lmcx_ifb_cnt_hi_s cn58xx; struct cvmx_lmcx_ifb_cnt_hi_s cn58xxp1; } cvmx_lmcx_ifb_cnt_hi_t; /** * cvmx_lmc#_ifb_cnt_lo * * LMC_IFB_CNT_LO = Performance Counters * */ typedef union { uint64_t u64; struct cvmx_lmcx_ifb_cnt_lo_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t ifbcnt_lo : 32; /**< Performance Counter Low 32-bits of 64-bit counter that increments every cycle there is something in the in-flight buffer. */ #else uint64_t ifbcnt_lo : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_lmcx_ifb_cnt_lo_s cn30xx; struct cvmx_lmcx_ifb_cnt_lo_s cn31xx; struct cvmx_lmcx_ifb_cnt_lo_s cn38xx; struct cvmx_lmcx_ifb_cnt_lo_s cn38xxp2; struct cvmx_lmcx_ifb_cnt_lo_s cn50xx; struct cvmx_lmcx_ifb_cnt_lo_s cn52xx; struct cvmx_lmcx_ifb_cnt_lo_s cn52xxp1; struct cvmx_lmcx_ifb_cnt_lo_s cn56xx; struct cvmx_lmcx_ifb_cnt_lo_s cn56xxp1; struct cvmx_lmcx_ifb_cnt_lo_s cn58xx; struct cvmx_lmcx_ifb_cnt_lo_s cn58xxp1; } cvmx_lmcx_ifb_cnt_lo_t; /** * cvmx_lmc#_mem_cfg0 * * Specify the RSL base addresses for the block * * LMC_MEM_CFG0 = LMC Memory Configuration Register0 * * This register controls certain parameters of Memory Configuration */ typedef union { uint64_t u64; struct cvmx_lmcx_mem_cfg0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t reset : 1; /**< Reset oneshot pulse for refresh counter, and LMC_OPS_CNT_*, LMC_IFB_CNT_*, and LMC_DCLK_CNT_* CSR's. SW should write this to a one, then re-write it to a zero to cause the reset. */ uint64_t silo_qc : 1; /**< Adds a Quarter Cycle granularity to generate dqs pulse generation for silo. Combination of Silo_HC and Silo_QC gives the ability to position the read enable with quarter cycle resolution. 
This is applied on all the bytes uniformly. */ uint64_t bunk_ena : 1; /**< Bunk Enable aka RANK ena (for use with dual-rank DIMMs) For dual-rank DIMMs, the bunk_ena bit will enable the drive of the CS_N[1:0] pins based on the (pbank_lsb-1) address bit. Write 0 for single-ranked DIMMs. */ uint64_t ded_err : 4; /**< Double Error detected (DED) of Rd Data In 128b mode, ECC is calculated on 1 cycle's worth of data [25] corresponds to DQ[63:0], Phase0 [26] corresponds to DQ[127:64], Phase0 [27] corresponds to DQ[63:0], Phase1 [28] corresponds to DQ[127:64], Phase1 In 64b mode, ECC is calculated on 2 cycles' worth of data [25] corresponds to DQ[63:0], Phase0, cycle0 [26] corresponds to DQ[63:0], Phase0, cycle1 [27] corresponds to DQ[63:0], Phase1, cycle0 [28] corresponds to DQ[63:0], Phase1, cycle1 Write of 1 will clear the corresponding error bit */ uint64_t sec_err : 4; /**< Single Error (corrected) of Rd Data In 128b mode, ECC is calculated on 1 cycle's worth of data [21] corresponds to DQ[63:0], Phase0 [22] corresponds to DQ[127:64], Phase0 [23] corresponds to DQ[63:0], Phase1 [24] corresponds to DQ[127:64], Phase1 In 64b mode, ECC is calculated on 2 cycles' worth of data [21] corresponds to DQ[63:0], Phase0, cycle0 [22] corresponds to DQ[63:0], Phase0, cycle1 [23] corresponds to DQ[63:0], Phase1, cycle0 [24] corresponds to DQ[63:0], Phase1, cycle1 Write of 1 will clear the corresponding error bit */ uint64_t intr_ded_ena : 1; /**< ECC Double Error Detect(DED) Interrupt Enable bit When set, the memory controller raises a processor interrupt on detecting an uncorrectable Dbl Bit ECC error. */ uint64_t intr_sec_ena : 1; /**< ECC Single Error Correct(SEC) Interrupt Enable bit When set, the memory controller raises a processor interrupt on detecting a correctable Single Bit ECC error. */ uint64_t tcl : 4; /**< This register is not used */ uint64_t ref_int : 6; /**< Refresh interval represented in \# of 512 dclk increments. Program this to RND-DN(tREFI/clkPeriod/512) - 000000: RESERVED - 000001: 1 * 512 = 512 dclks - ... - 111111: 63 * 512 = 32256 dclks */ uint64_t pbank_lsb : 4; /**< Physical Bank address select Referring back to the explanation for ROW_LSB, PBank_LSB would be Row_LSB bit + \#rowbits + \#rankbits In the 512MB DIMM example, assuming no rank bits: pbank_lsb=mem_addr[15+13] for 64b mode =mem_addr[16+13] for 128b mode Hence the parameter 0000:pbank[1:0] = mem_adr[28:27] / rank = mem_adr[26] (if bunk_ena) 0001:pbank[1:0] = mem_adr[29:28] / rank = mem_adr[27] " 0010:pbank[1:0] = mem_adr[30:29] / rank = mem_adr[28] " 0011:pbank[1:0] = mem_adr[31:30] / rank = mem_adr[29] " 0100:pbank[1:0] = mem_adr[32:31] / rank = mem_adr[30] " 0101:pbank[1:0] = mem_adr[33:32] / rank = mem_adr[31] " 0110:pbank[1:0] =[1'b0,mem_adr[33]] / rank = mem_adr[32] " 0111:pbank[1:0] =[2'b0] / rank = mem_adr[33] " 1000-1111: RESERVED */ uint64_t row_lsb : 3; /**< Encoding used to determine which memory address bit position represents the low order DDR ROW address. The processor's memory address[33:7] needs to be translated to DRAM addresses (bnk,row,col,rank and dimm) and that is a function of the following: 1. \# Banks (4 or 8) - spec'd by BANK8 2. Datapath Width (64 or 128) - MODE128b 3. \# Ranks in a DIMM - spec'd by BUNK_ENA 4. \# DIMMs in the system 5. \# Column Bits of the memory part - spec'd indirectly by this register. 6. \# Row Bits of the memory part - spec'd indirectly by the register below (PBANK_LSB).
Illustration: For Micron's MT18HTF6472A, 512MB DDR2 Unbuffered DIMM which uses 256Mb parts (8M x 8 x 4), \# Banks = 4 -> 2 bits of BA \# Columns = 1K -> 10 bits of Col \# Rows = 8K -> 13 bits of Row Assuming that the total Data width is 128, this is how we arrive at row_lsb: Col Address starts from mem_addr[4] for 128b (16Bytes) dq width or from mem_addr[3] for 64b (8Bytes) dq width \# col + \# bank = 12. Hence row_lsb is mem_adr[15] for 64b mode or mem_adr[16] for 128b mode. Hence the row_lsb parameter should be set to 001 (64b) or 010 (128b). - 000: row_lsb = mem_adr[14] - 001: row_lsb = mem_adr[15] - 010: row_lsb = mem_adr[16] - 011: row_lsb = mem_adr[17] - 100: row_lsb = mem_adr[18] - 101-111: row_lsb = RESERVED */ uint64_t ecc_ena : 1; /**< ECC Enable: When set, enables the 8b ECC check/correct logic. Should be 1 when used with DIMMs with ECC; 0 otherwise. When this mode is turned on, DQ[71:64] and DQ[143:136] on writes will contain the ECC code generated for the lower 64 and upper 64 bits of data, which will be written to the memory and then used on later reads to check for Single bit errors (which will be auto-corrected) and Double Bit errors (which will be reported). When not turned on, DQ[71:64] and DQ[143:136] are driven to 0. Please refer to the SEC_ERR, DED_ERR, LMC_FADR, and LMC_ECC_SYND registers for diagnostics information when there is an error. */ uint64_t init_start : 1; /**< A 0->1 transition starts the DDR memory initialization sequence. */ #else uint64_t init_start : 1; uint64_t ecc_ena : 1; uint64_t row_lsb : 3; uint64_t pbank_lsb : 4; uint64_t ref_int : 6; uint64_t tcl : 4; uint64_t intr_sec_ena : 1; uint64_t intr_ded_ena : 1; uint64_t sec_err : 4; uint64_t ded_err : 4; uint64_t bunk_ena : 1; uint64_t silo_qc : 1; uint64_t reset : 1; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_lmcx_mem_cfg0_s cn30xx; struct cvmx_lmcx_mem_cfg0_s cn31xx; struct cvmx_lmcx_mem_cfg0_s cn38xx; struct cvmx_lmcx_mem_cfg0_s cn38xxp2; struct cvmx_lmcx_mem_cfg0_s cn50xx; struct cvmx_lmcx_mem_cfg0_s cn52xx; struct cvmx_lmcx_mem_cfg0_s cn52xxp1; struct cvmx_lmcx_mem_cfg0_s cn56xx; struct cvmx_lmcx_mem_cfg0_s cn56xxp1; struct cvmx_lmcx_mem_cfg0_s cn58xx; struct cvmx_lmcx_mem_cfg0_s cn58xxp1; } cvmx_lmcx_mem_cfg0_t; /** * cvmx_lmc#_mem_cfg1 * * LMC_MEM_CFG1 = LMC Memory Configuration Register1 * * This register controls the External Memory Configuration Timing Parameters. Please refer to the * appropriate DDR part spec from your memory vendor for the various values in this CSR. * The details of each of these timing parameters can be found in the JEDEC spec or the vendor * spec of the memory parts. */ typedef union { uint64_t u64; struct cvmx_lmcx_mem_cfg1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t comp_bypass : 1; /**< Compensation bypass. */ uint64_t trrd : 3; /**< tRRD cycles: ACT-ACT timing parameter for different banks. (Represented in tCYC cycles == 1dclks) TYP=15ns (66MHz=1,167MHz=3,200MHz=3) For DDR2, TYP=7.5ns - 000: RESERVED - 001: 1 tCYC - 010: 2 tCYC - 011: 3 tCYC - 100: 4 tCYC - 101: 5 tCYC - 110: 6 tCYC - 111: 7 tCYC */ uint64_t caslat : 3; /**< CAS Latency Encoding which is loaded into each DDR SDRAM device (MRS[6:4]) upon power-up (INIT_START=1). (Represented in tCYC cycles == 1 dclks) - 000: RESERVED - 001: RESERVED - 010: 2.0 tCYC - 011: 3.0 tCYC - 100: 4.0 tCYC - 101: 5.0 tCYC - 110: 6.0 tCYC - 111: RESERVED e.g., the parameters TSKW, SILO_HC, and SILO_QC can account for 1/4 cycle granularity in board/etch delays.
*/ uint64_t tmrd : 3; /**< tMRD Cycles (Represented in dclk tCYC) For DDR2, it is typically 2*tCYC - 000: RESERVED - 001: 1 - 010: 2 - 011: 3 - 100: 4 - 101-111: RESERVED */ uint64_t trfc : 5; /**< 1/4 tRFC Cycles = RNDUP[tRFC(ns)/4*tcyc(ns)] (Represented in tCYC cycles == 1dclks) For 2Gb, DDR2-667 parts, typ=195ns (TRFC = 195/3/4 = 5'd17 = 0x11) - 00000-00001: RESERVED - 00010: 8 - 00011: 12 - 00100: 16 - ... - 11110: 120 - 11111: 124 */ uint64_t trp : 4; /**< tRP Cycles = RNDUP[tRP(ns)/tcyc(ns)] (Represented in tCYC cycles == 1dclk) TYP=15ns (66MHz=1,167MHz=3,400MHz=6 for TYP) - 0000: RESERVED - 0001: 1 - ... - 1001: 9 - 1010-1111: RESERVED When using parts with 8 banks (LMC_DDR2_CTL->BANK8 is 1), load tRP cycles + 1 into this register. */ uint64_t twtr : 4; /**< tWTR Cycles = RNDUP[tWTR(ns)/tcyc(ns)] Last Wr Data to Rd Command time. (Represented in tCYC cycles == 1dclks) TYP=15ns (66MHz=1,167MHz=3,400MHz=6, for TYP) - 0000: RESERVED - 0001: 1 - ... - 0111: 7 - 1000-1111: RESERVED */ uint64_t trcd : 4; /**< tRCD Cycles = RNDUP[tRCD(ns)/tcyc(ns)] (Represented in tCYC cycles == 1dclk) TYP=15ns (66MHz=1,167MHz=3,400MHz=6 for TYP) - 0000: RESERVED - 0001: 2 (2 is the smallest value allowed) - 0010: 2 - ... - 1001: 9 - 1010-1111: RESERVED In 2T mode, make this register TRCD-1, not going below 2. */ uint64_t tras : 5; /**< tRAS Cycles = RNDUP[tRAS(ns)/tcyc(ns)] (Represented in tCYC cycles == 1 dclk) - 00000-00001: RESERVED - 00010: 2 - ... - 11111: 31 */ #else uint64_t tras : 5; uint64_t trcd : 4; uint64_t twtr : 4; uint64_t trp : 4; uint64_t trfc : 5; uint64_t tmrd : 3; uint64_t caslat : 3; uint64_t trrd : 3; uint64_t comp_bypass : 1; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_lmcx_mem_cfg1_s cn30xx; struct cvmx_lmcx_mem_cfg1_s cn31xx; struct cvmx_lmcx_mem_cfg1_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_31_63 : 33; uint64_t trrd : 3; /**< tRRD cycles: ACT-ACT timing parameter for different banks. (Represented in tCYC cycles == 1dclks) TYP=15ns (66MHz=1,167MHz=3,200MHz=3) For DDR2, TYP=7.5ns - 000: RESERVED - 001: 1 tCYC - 010: 2 tCYC - 011: 3 tCYC - 100: 4 tCYC - 101: 5 tCYC - 110-111: RESERVED */ uint64_t caslat : 3; /**< CAS Latency Encoding which is loaded into each DDR SDRAM device (MRS[6:4]) upon power-up (INIT_START=1). (Represented in tCYC cycles == 1 dclks) - 000: RESERVED - 001: RESERVED - 010: 2.0 tCYC - 011: 3.0 tCYC - 100: 4.0 tCYC - 101: 5.0 tCYC - 110: 6.0 tCYC (DDR2), 2.5 tCYC (DDR1) - 111: RESERVED e.g., the parameters TSKW, SILO_HC, and SILO_QC can account for 1/4 cycle granularity in board/etch delays. */ uint64_t tmrd : 3; /**< tMRD Cycles (Represented in dclk tCYC) For DDR2, it is typically 2*tCYC - 000: RESERVED - 001: 1 - 010: 2 - 011: 3 - 100: 4 - 101-111: RESERVED */ uint64_t trfc : 5; /**< 1/4 tRFC Cycles = RNDUP[tRFC(ns)/4*tcyc(ns)] (Represented in tCYC cycles == 1dclks) For DDR-I, the following encodings are used TYP=70ns (133MHz - 3; 333MHz - 6) For 2Gb, DDR2-667 parts, typ=195ns (TRFC = 195/3/4 = 5'd17 = 0x11) - 00000-00001: RESERVED - 00010: 8 - 00011: 12 - 00100: 16 - ... - 11110: 120 - 11111: 124 */ uint64_t trp : 4; /**< tRP Cycles = RNDUP[tRP(ns)/tcyc(ns)] (Represented in tCYC cycles == 1dclk) TYP=15ns (66MHz=1,167MHz=3,400MHz=6 for TYP) - 0000: RESERVED - 0001: 1 - ... - 0111: 7 - 1000-1111: RESERVED When using parts with 8 banks (LMC_DDR2_CTL->BANK8 is 1), load tRP cycles + 1 into this register. */ uint64_t twtr : 4; /**< tWTR Cycles = RNDUP[tWTR(ns)/tcyc(ns)] Last Wr Data to Rd Command time.
(Represented in tCYC cycles == 1dclks) TYP=15ns (66MHz=1,167MHz=3,400MHz=6, for TYP) - 0000: RESERVED - 0001: 1 - ... - 0111: 7 - 1000-1111: RESERVED */ uint64_t trcd : 4; /**< tRCD Cycles = RNDUP[tRCD(ns)/tcyc(ns)] (Represented in tCYC cycles == 1dclk) TYP=15ns (66MHz=1,167MHz=3,400MHz=6 for TYP) - 0000: RESERVED - 0001: 2 (2 is the smallest value allowed) - 0010: 2 - ... - 0111: 7 - 1000-1111: RESERVED In 2T mode, make this register TRCD-1, not going below 2. */ uint64_t tras : 5; /**< tRAS Cycles = RNDUP[tRAS(ns)/tcyc(ns)] (Represented in tCYC cycles == 1 dclk) For DDR-I mode: TYP=45ns (66MHz=3,167MHz=8,400MHz=18) - 00000-00001: RESERVED - 00010: 2 - ... - 10100: 20 - 10101-11111: RESERVED */ #else uint64_t tras : 5; uint64_t trcd : 4; uint64_t twtr : 4; uint64_t trp : 4; uint64_t trfc : 5; uint64_t tmrd : 3; uint64_t caslat : 3; uint64_t trrd : 3; uint64_t reserved_31_63 : 33; #endif } cn38xx; struct cvmx_lmcx_mem_cfg1_cn38xx cn38xxp2; struct cvmx_lmcx_mem_cfg1_s cn50xx; struct cvmx_lmcx_mem_cfg1_cn38xx cn52xx; struct cvmx_lmcx_mem_cfg1_cn38xx cn52xxp1; struct cvmx_lmcx_mem_cfg1_cn38xx cn56xx; struct cvmx_lmcx_mem_cfg1_cn38xx cn56xxp1; struct cvmx_lmcx_mem_cfg1_cn38xx cn58xx; struct cvmx_lmcx_mem_cfg1_cn38xx cn58xxp1; } cvmx_lmcx_mem_cfg1_t; /** * cvmx_lmc#_nxm * * LMC_NXM = LMC non-existent memory * * * Notes: * This CSR was introduced in pass2. * */ typedef union { uint64_t u64; struct cvmx_lmcx_nxm_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t cs_mask : 8; /**< Chip select mask. This mask corresponds to the 8 chip selects for a memory configuration. If LMC_MEM_CFG0[BUNK_ENA]==0 then this mask must be set in pairs because each reference address will assert a pair of chip selects. If the chip select(s) have a corresponding CS_MASK bit set, then the reference is to non-existent memory. LMC will alias the reference to use the lowest, legal chip select(s) in that case.
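Illustrative example (editor's sketch): on a board with a single dual-rank DIMM0, chip selects 2..7 are unpopulated, so CS_MASK = 0xFC marks them as non-existent and LMC aliases any reference to them onto the legal chip selects.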
*/ #else uint64_t cs_mask : 8; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_lmcx_nxm_s cn52xx; struct cvmx_lmcx_nxm_s cn56xx; struct cvmx_lmcx_nxm_s cn58xx; } cvmx_lmcx_nxm_t; /** * cvmx_lmc#_ops_cnt_hi * * LMC_OPS_CNT_HI = Performance Counters * */ typedef union { uint64_t u64; struct cvmx_lmcx_ops_cnt_hi_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t opscnt_hi : 32; /**< Performance Counter to measure Bus Utilization Upper 32-bits of 64-bit counter DRAM bus utilization = LMC_OPS_CNT_* /LMC_DCLK_CNT_* */ #else uint64_t opscnt_hi : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_lmcx_ops_cnt_hi_s cn30xx; struct cvmx_lmcx_ops_cnt_hi_s cn31xx; struct cvmx_lmcx_ops_cnt_hi_s cn38xx; struct cvmx_lmcx_ops_cnt_hi_s cn38xxp2; struct cvmx_lmcx_ops_cnt_hi_s cn50xx; struct cvmx_lmcx_ops_cnt_hi_s cn52xx; struct cvmx_lmcx_ops_cnt_hi_s cn52xxp1; struct cvmx_lmcx_ops_cnt_hi_s cn56xx; struct cvmx_lmcx_ops_cnt_hi_s cn56xxp1; struct cvmx_lmcx_ops_cnt_hi_s cn58xx; struct cvmx_lmcx_ops_cnt_hi_s cn58xxp1; } cvmx_lmcx_ops_cnt_hi_t; /** * cvmx_lmc#_ops_cnt_lo * * LMC_OPS_CNT_LO = Performance Counters * */ typedef union { uint64_t u64; struct cvmx_lmcx_ops_cnt_lo_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t opscnt_lo : 32; /**< Performance Counter Low 32-bits of 64-bit counter DRAM bus utilization = LMC_OPS_CNT_* /LMC_DCLK_CNT_* */ #else uint64_t opscnt_lo : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_lmcx_ops_cnt_lo_s cn30xx; struct cvmx_lmcx_ops_cnt_lo_s cn31xx; struct cvmx_lmcx_ops_cnt_lo_s cn38xx; struct cvmx_lmcx_ops_cnt_lo_s cn38xxp2; struct cvmx_lmcx_ops_cnt_lo_s cn50xx; struct cvmx_lmcx_ops_cnt_lo_s cn52xx; struct cvmx_lmcx_ops_cnt_lo_s cn52xxp1; struct cvmx_lmcx_ops_cnt_lo_s cn56xx; struct cvmx_lmcx_ops_cnt_lo_s cn56xxp1; struct cvmx_lmcx_ops_cnt_lo_s cn58xx; struct cvmx_lmcx_ops_cnt_lo_s cn58xxp1; } cvmx_lmcx_ops_cnt_lo_t; /** * cvmx_lmc#_pll_bwctl * * LMC_PLL_BWCTL = DDR PLL Bandwidth Control Register * */ typedef union { uint64_t u64; struct cvmx_lmcx_pll_bwctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t bwupd : 1; /**< Load this Bandwidth Register value into the PLL */ uint64_t bwctl : 4; /**< Bandwidth Control Register for DDR PLL */ #else uint64_t bwctl : 4; uint64_t bwupd : 1; uint64_t reserved_5_63 : 59; #endif } s; struct cvmx_lmcx_pll_bwctl_s cn30xx; struct cvmx_lmcx_pll_bwctl_s cn31xx; struct cvmx_lmcx_pll_bwctl_s cn38xx; struct cvmx_lmcx_pll_bwctl_s cn38xxp2; } cvmx_lmcx_pll_bwctl_t; /** * cvmx_lmc#_pll_ctl * * LMC_PLL_CTL = LMC pll control * * * Notes: * This CSR is only relevant for LMC0. LMC1_PLL_CTL is not used. * * Exactly one of EN2, EN4, EN6, EN8, EN12, EN16 must be set. * * The resultant DDR_CK frequency is the DDR2_REF_CLK * frequency multiplied by: * * (CLKF + 1) / ((CLKR + 1) * EN(2,4,6,8,12,16)) * * The PLL frequency, which is: * * (DDR2_REF_CLK freq) * ((CLKF + 1) / (CLKR + 1)) * * must reside between 1.2 and 2.5 GHz. A faster PLL frequency is desirable if there is a choice. */ typedef union { uint64_t u64; struct cvmx_lmcx_pll_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_30_63 : 34; uint64_t bypass : 1; /**< PLL Bypass */ uint64_t fasten_n : 1; /**< Should be set, especially when CLKF > ~80 */ uint64_t div_reset : 1; /**< Analog pll divider reset De-assert at least 500*(CLKR+1) reference clock cycles following RESET_N de-assertion. */ uint64_t reset_n : 1; /**< Analog pll reset De-assert at least 5 usec after CLKF, CLKR, and EN* are set up. 
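Worked example of the frequency formula in the register description above (arithmetic only): with a 200 MHz DDR2_REF_CLK, CLKF=11, CLKR=0, and EN4=1, the PLL frequency is 200*12/1 = 2400 MHz (legal, since it lies between 1.2 and 2.5 GHz) and DDR_CK is 2400/4 = 600 MHz.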
*/ uint64_t clkf : 12; /**< Multiply reference by CLKF + 1 CLKF must be <= 128 */ uint64_t clkr : 6; /**< Divide reference by CLKR + 1 */ uint64_t reserved_6_7 : 2; uint64_t en16 : 1; /**< Divide output by 16 */ uint64_t en12 : 1; /**< Divide output by 12 */ uint64_t en8 : 1; /**< Divide output by 8 */ uint64_t en6 : 1; /**< Divide output by 6 */ uint64_t en4 : 1; /**< Divide output by 4 */ uint64_t en2 : 1; /**< Divide output by 2 */ #else uint64_t en2 : 1; uint64_t en4 : 1; uint64_t en6 : 1; uint64_t en8 : 1; uint64_t en12 : 1; uint64_t en16 : 1; uint64_t reserved_6_7 : 2; uint64_t clkr : 6; uint64_t clkf : 12; uint64_t reset_n : 1; uint64_t div_reset : 1; uint64_t fasten_n : 1; uint64_t bypass : 1; uint64_t reserved_30_63 : 34; #endif } s; struct cvmx_lmcx_pll_ctl_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_29_63 : 35; uint64_t fasten_n : 1; /**< Should be set, especially when CLKF > ~80 */ uint64_t div_reset : 1; /**< Analog pll divider reset De-assert at least 500*(CLKR+1) reference clock cycles following RESET_N de-assertion. */ uint64_t reset_n : 1; /**< Analog pll reset De-assert at least 5 usec after CLKF, CLKR, and EN* are set up. */ uint64_t clkf : 12; /**< Multiply reference by CLKF + 1 CLKF must be <= 256 */ uint64_t clkr : 6; /**< Divide reference by CLKR + 1 */ uint64_t reserved_6_7 : 2; uint64_t en16 : 1; /**< Divide output by 16 */ uint64_t en12 : 1; /**< Divide output by 12 */ uint64_t en8 : 1; /**< Divide output by 8 */ uint64_t en6 : 1; /**< Divide output by 6 */ uint64_t en4 : 1; /**< Divide output by 4 */ uint64_t en2 : 1; /**< Divide output by 2 */ #else uint64_t en2 : 1; uint64_t en4 : 1; uint64_t en6 : 1; uint64_t en8 : 1; uint64_t en12 : 1; uint64_t en16 : 1; uint64_t reserved_6_7 : 2; uint64_t clkr : 6; uint64_t clkf : 12; uint64_t reset_n : 1; uint64_t div_reset : 1; uint64_t fasten_n : 1; uint64_t reserved_29_63 : 35; #endif } cn50xx; struct cvmx_lmcx_pll_ctl_s cn52xx; struct cvmx_lmcx_pll_ctl_s cn52xxp1; struct cvmx_lmcx_pll_ctl_cn50xx cn56xx; struct cvmx_lmcx_pll_ctl_cn56xxp1 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_28_63 : 36; uint64_t div_reset : 1; /**< Analog pll divider reset De-assert at least 500*(CLKR+1) reference clock cycles following RESET_N de-assertion. */ uint64_t reset_n : 1; /**< Analog pll reset De-assert at least 5 usec after CLKF, CLKR, and EN* are set up. 
*/ uint64_t clkf : 12; /**< Multiply reference by CLKF + 1 CLKF must be <= 128 */ uint64_t clkr : 6; /**< Divide reference by CLKR + 1 */ uint64_t reserved_6_7 : 2; uint64_t en16 : 1; /**< Divide output by 16 */ uint64_t en12 : 1; /**< Divide output by 12 */ uint64_t en8 : 1; /**< Divide output by 8 */ uint64_t en6 : 1; /**< Divide output by 6 */ uint64_t en4 : 1; /**< Divide output by 4 */ uint64_t en2 : 1; /**< Divide output by 2 */ #else uint64_t en2 : 1; uint64_t en4 : 1; uint64_t en6 : 1; uint64_t en8 : 1; uint64_t en12 : 1; uint64_t en16 : 1; uint64_t reserved_6_7 : 2; uint64_t clkr : 6; uint64_t clkf : 12; uint64_t reset_n : 1; uint64_t div_reset : 1; uint64_t reserved_28_63 : 36; #endif } cn56xxp1; struct cvmx_lmcx_pll_ctl_cn56xxp1 cn58xx; struct cvmx_lmcx_pll_ctl_cn56xxp1 cn58xxp1; } cvmx_lmcx_pll_ctl_t; /** * cvmx_lmc#_pll_status * * LMC_PLL_STATUS = LMC pll status * */ typedef union { uint64_t u64; struct cvmx_lmcx_pll_status_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t ddr__nctl : 5; /**< DDR nctl from compensation circuit */ uint64_t ddr__pctl : 5; /**< DDR pctl from compensation circuit */ uint64_t reserved_2_21 : 20; uint64_t rfslip : 1; /**< Reference clock slip */ uint64_t fbslip : 1; /**< Feedback clock slip */ #else uint64_t fbslip : 1; uint64_t rfslip : 1; uint64_t reserved_2_21 : 20; uint64_t ddr__pctl : 5; uint64_t ddr__nctl : 5; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_lmcx_pll_status_s cn50xx; struct cvmx_lmcx_pll_status_s cn52xx; struct cvmx_lmcx_pll_status_s cn52xxp1; struct cvmx_lmcx_pll_status_s cn56xx; struct cvmx_lmcx_pll_status_s cn56xxp1; struct cvmx_lmcx_pll_status_s cn58xx; struct cvmx_lmcx_pll_status_cn58xxp1 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t rfslip : 1; /**< Reference clock slip */ uint64_t fbslip : 1; /**< Feedback clock slip */ #else uint64_t fbslip : 1; uint64_t rfslip : 1; uint64_t reserved_2_63 : 62; #endif } cn58xxp1; } cvmx_lmcx_pll_status_t; /** * cvmx_lmc#_read_level_ctl * * Notes: * The HW writes and reads the cache block selected by ROW, COL, BNK and the rank as part of a read-leveling sequence for a rank. * A cache block write is 16 72-bit words. PATTERN selects the write value. For the first 8 * words, the write value is the bit PATTERN duplicated into a 72-bit vector. The write value of * the last 8 words is the inverse of the write value of the first 8 words. * See LMC*_READ_LEVEL_RANK*. 
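 *
 * A sketch of the expected pattern expansion (editor's illustration; the 72-bit words are
 * truncated here to their 64 data bits, ECC omitted):
 *
 *   uint64_t pattern = 0xaa;                     // example PATTERN value
 *   uint64_t word[16];
 *   for (int burst = 0; burst < 16; burst++) {
 *       int bit = (pattern >> (burst & 7)) & 1;  // PATTERN[burst mod 8]
 *       if (burst >= 8)
 *           bit ^= 1;                            // last 8 words are inverted
 *       word[burst] = bit ? ~0ull : 0ull;        // bit duplicated across all DQ
 *   }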
*/ typedef union { uint64_t u64; struct cvmx_lmcx_read_level_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_44_63 : 20; uint64_t rankmask : 4; /**< Selects ranks to be leveled to read-level rank i, set RANKMASK */ uint64_t pattern : 8; /**< All DQ driven to PATTERN[burst], 0 <= burst <= 7 All DQ driven to ~PATTERN[burst-8], 8 <= burst <= 15 */ uint64_t row : 16; /**< Row address used to write/read data pattern */ uint64_t col : 12; /**< Column address used to write/read data pattern */ uint64_t reserved_3_3 : 1; uint64_t bnk : 3; /**< Bank address used to write/read data pattern */ #else uint64_t bnk : 3; uint64_t reserved_3_3 : 1; uint64_t col : 12; uint64_t row : 16; uint64_t pattern : 8; uint64_t rankmask : 4; uint64_t reserved_44_63 : 20; #endif } s; struct cvmx_lmcx_read_level_ctl_s cn52xx; struct cvmx_lmcx_read_level_ctl_s cn52xxp1; struct cvmx_lmcx_read_level_ctl_s cn56xx; struct cvmx_lmcx_read_level_ctl_s cn56xxp1; } cvmx_lmcx_read_level_ctl_t; /** * cvmx_lmc#_read_level_dbg * * Notes: * A given read of LMC*_READ_LEVEL_DBG returns the read-leveling pass/fail results for all possible * delay settings (i.e. the BITMASK) for only one byte in the last rank that the HW read-leveled. * LMC*_READ_LEVEL_DBG[BYTE] selects the particular byte. * To get these pass/fail results for another different rank, you must run the hardware read-leveling * again. For example, it is possible to get the BITMASK results for every byte of every rank * if you run read-leveling separately for each rank, probing LMC*_READ_LEVEL_DBG between each * read-leveling. */ typedef union { uint64_t u64; struct cvmx_lmcx_read_level_dbg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t bitmask : 16; /**< Bitmask generated during deskew settings sweep BITMASK[n]=0 means deskew setting n failed BITMASK[n]=1 means deskew setting n passed for 0 <= n <= 15 */ uint64_t reserved_4_15 : 12; uint64_t byte : 4; /**< 0 <= BYTE <= 8 */ #else uint64_t byte : 4; uint64_t reserved_4_15 : 12; uint64_t bitmask : 16; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_lmcx_read_level_dbg_s cn52xx; struct cvmx_lmcx_read_level_dbg_s cn52xxp1; struct cvmx_lmcx_read_level_dbg_s cn56xx; struct cvmx_lmcx_read_level_dbg_s cn56xxp1; } cvmx_lmcx_read_level_dbg_t; /** * cvmx_lmc#_read_level_rank# * * Notes: * This is four CSRs per LMC, one per each rank. * Each CSR is written by HW during a read-leveling sequence for the rank. (HW sets STATUS==3 after HW read-leveling completes for the rank.) * Each CSR may also be written by SW, but not while a read-leveling sequence is in progress. (HW sets STATUS==1 after a CSR write.) * Deskew setting is measured in units of 1/4 DCLK, so the above BYTE* values can range over 4 DCLKs. * SW initiates a HW read-leveling sequence by programming LMC*_READ_LEVEL_CTL and writing INIT_START=1 with SEQUENCE=1. * See LMC*_READ_LEVEL_CTL. 
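 *
 * Readback sketch (editor's illustration; the generated CVMX_LMCX_READ_LEVEL_RANKX()
 * address macro and the cvmx_read_csr() accessor are assumed, here for rank 0 on LMC0):
 *
 *   cvmx_lmcx_read_level_rankx_t rl;
 *   rl.u64 = cvmx_read_csr(CVMX_LMCX_READ_LEVEL_RANKX(0, 0));
 *   if (rl.s.status == 3)                        // HW read-leveling completed
 *       printf("byte0 deskew = %u quarter-DCLKs\n", (unsigned)rl.s.byte0);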
*/ typedef union { uint64_t u64; struct cvmx_lmcx_read_level_rankx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_38_63 : 26; uint64_t status : 2; /**< Indicates status of the read-levelling and where the BYTE* programmings in <35:0> came from: 0 = BYTE* values are their reset value 1 = BYTE* values were set via a CSR write to this register 2 = read-leveling sequence currently in progress (BYTE* values are unpredictable) 3 = BYTE* values came from a complete read-leveling sequence */ uint64_t byte8 : 4; /**< Deskew setting */ uint64_t byte7 : 4; /**< Deskew setting */ uint64_t byte6 : 4; /**< Deskew setting */ uint64_t byte5 : 4; /**< Deskew setting */ uint64_t byte4 : 4; /**< Deskew setting */ uint64_t byte3 : 4; /**< Deskew setting */ uint64_t byte2 : 4; /**< Deskew setting */ uint64_t byte1 : 4; /**< Deskew setting */ uint64_t byte0 : 4; /**< Deskew setting */ #else uint64_t byte0 : 4; uint64_t byte1 : 4; uint64_t byte2 : 4; uint64_t byte3 : 4; uint64_t byte4 : 4; uint64_t byte5 : 4; uint64_t byte6 : 4; uint64_t byte7 : 4; uint64_t byte8 : 4; uint64_t status : 2; uint64_t reserved_38_63 : 26; #endif } s; struct cvmx_lmcx_read_level_rankx_s cn52xx; struct cvmx_lmcx_read_level_rankx_s cn52xxp1; struct cvmx_lmcx_read_level_rankx_s cn56xx; struct cvmx_lmcx_read_level_rankx_s cn56xxp1; } cvmx_lmcx_read_level_rankx_t; /** * cvmx_lmc#_rodt_comp_ctl * * LMC_RODT_COMP_CTL = LMC Compensation control * */ typedef union { uint64_t u64; struct cvmx_lmcx_rodt_comp_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_17_63 : 47; uint64_t enable : 1; /**< 0=not enabled, 1=enable */ uint64_t reserved_12_15 : 4; uint64_t nctl : 4; /**< Compensation control bits */ uint64_t reserved_5_7 : 3; uint64_t pctl : 5; /**< Compensation control bits */ #else uint64_t pctl : 5; uint64_t reserved_5_7 : 3; uint64_t nctl : 4; uint64_t reserved_12_15 : 4; uint64_t enable : 1; uint64_t reserved_17_63 : 47; #endif } s; struct cvmx_lmcx_rodt_comp_ctl_s cn50xx; struct cvmx_lmcx_rodt_comp_ctl_s cn52xx; struct cvmx_lmcx_rodt_comp_ctl_s cn52xxp1; struct cvmx_lmcx_rodt_comp_ctl_s cn56xx; struct cvmx_lmcx_rodt_comp_ctl_s cn56xxp1; struct cvmx_lmcx_rodt_comp_ctl_s cn58xx; struct cvmx_lmcx_rodt_comp_ctl_s cn58xxp1; } cvmx_lmcx_rodt_comp_ctl_t; /** * cvmx_lmc#_rodt_ctl * * LMC_RODT_CTL = Obsolete LMC Read OnDieTermination control * See the description in LMC_WODT_CTL1. On Reads, Octeon only supports turning on ODT's in * the lower 2 DIMM's with the masks as below. * * Notes: * When a given RANK in position N is selected, the RODT _HI and _LO masks for that position are used. * Mask[3:0] is used for RODT control of the RANKs in positions 3, 2, 1, and 0, respectively. 
* In 64b mode, DIMMs are assumed to be ordered in the following order: * position 3: [unused , DIMM1_RANK1_LO] * position 2: [unused , DIMM1_RANK0_LO] * position 1: [unused , DIMM0_RANK1_LO] * position 0: [unused , DIMM0_RANK0_LO] * In 128b mode, DIMMs are assumed to be ordered in the following order: * position 3: [DIMM3_RANK1_HI, DIMM1_RANK1_LO] * position 2: [DIMM3_RANK0_HI, DIMM1_RANK0_LO] * position 1: [DIMM2_RANK1_HI, DIMM0_RANK1_LO] * position 0: [DIMM2_RANK0_HI, DIMM0_RANK0_LO] */ typedef union { uint64_t u64; struct cvmx_lmcx_rodt_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t rodt_hi3 : 4; /**< Read ODT mask for position 3, data[127:64] */ uint64_t rodt_hi2 : 4; /**< Read ODT mask for position 2, data[127:64] */ uint64_t rodt_hi1 : 4; /**< Read ODT mask for position 1, data[127:64] */ uint64_t rodt_hi0 : 4; /**< Read ODT mask for position 0, data[127:64] */ uint64_t rodt_lo3 : 4; /**< Read ODT mask for position 3, data[ 63: 0] */ uint64_t rodt_lo2 : 4; /**< Read ODT mask for position 2, data[ 63: 0] */ uint64_t rodt_lo1 : 4; /**< Read ODT mask for position 1, data[ 63: 0] */ uint64_t rodt_lo0 : 4; /**< Read ODT mask for position 0, data[ 63: 0] */ #else uint64_t rodt_lo0 : 4; uint64_t rodt_lo1 : 4; uint64_t rodt_lo2 : 4; uint64_t rodt_lo3 : 4; uint64_t rodt_hi0 : 4; uint64_t rodt_hi1 : 4; uint64_t rodt_hi2 : 4; uint64_t rodt_hi3 : 4; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_lmcx_rodt_ctl_s cn30xx; struct cvmx_lmcx_rodt_ctl_s cn31xx; struct cvmx_lmcx_rodt_ctl_s cn38xx; struct cvmx_lmcx_rodt_ctl_s cn38xxp2; struct cvmx_lmcx_rodt_ctl_s cn50xx; struct cvmx_lmcx_rodt_ctl_s cn52xx; struct cvmx_lmcx_rodt_ctl_s cn52xxp1; struct cvmx_lmcx_rodt_ctl_s cn56xx; struct cvmx_lmcx_rodt_ctl_s cn56xxp1; struct cvmx_lmcx_rodt_ctl_s cn58xx; struct cvmx_lmcx_rodt_ctl_s cn58xxp1; } cvmx_lmcx_rodt_ctl_t; /** * cvmx_lmc#_wodt_ctl0 * * LMC_WODT_CTL0 = LMC Write OnDieTermination control * See the description in LMC_WODT_CTL1. * * Notes: * Together, the LMC_WODT_CTL1 and LMC_WODT_CTL0 CSRs control the write ODT mask. See LMC_WODT_CTL1. 
* */ typedef union { uint64_t u64; struct cvmx_lmcx_wodt_ctl0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_0_63 : 64; #else uint64_t reserved_0_63 : 64; #endif } s; struct cvmx_lmcx_wodt_ctl0_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t wodt_d1_r1 : 8; /**< Write ODT mask DIMM1, RANK1 */ uint64_t wodt_d1_r0 : 8; /**< Write ODT mask DIMM1, RANK0 */ uint64_t wodt_d0_r1 : 8; /**< Write ODT mask DIMM0, RANK1 */ uint64_t wodt_d0_r0 : 8; /**< Write ODT mask DIMM0, RANK0 */ #else uint64_t wodt_d0_r0 : 8; uint64_t wodt_d0_r1 : 8; uint64_t wodt_d1_r0 : 8; uint64_t wodt_d1_r1 : 8; uint64_t reserved_32_63 : 32; #endif } cn30xx; struct cvmx_lmcx_wodt_ctl0_cn30xx cn31xx; struct cvmx_lmcx_wodt_ctl0_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t wodt_hi3 : 4; /**< Write ODT mask for position 3, data[127:64] */ uint64_t wodt_hi2 : 4; /**< Write ODT mask for position 2, data[127:64] */ uint64_t wodt_hi1 : 4; /**< Write ODT mask for position 1, data[127:64] */ uint64_t wodt_hi0 : 4; /**< Write ODT mask for position 0, data[127:64] */ uint64_t wodt_lo3 : 4; /**< Write ODT mask for position 3, data[ 63: 0] */ uint64_t wodt_lo2 : 4; /**< Write ODT mask for position 2, data[ 63: 0] */ uint64_t wodt_lo1 : 4; /**< Write ODT mask for position 1, data[ 63: 0] */ uint64_t wodt_lo0 : 4; /**< Write ODT mask for position 0, data[ 63: 0] */ #else uint64_t wodt_lo0 : 4; uint64_t wodt_lo1 : 4; uint64_t wodt_lo2 : 4; uint64_t wodt_lo3 : 4; uint64_t wodt_hi0 : 4; uint64_t wodt_hi1 : 4; uint64_t wodt_hi2 : 4; uint64_t wodt_hi3 : 4; uint64_t reserved_32_63 : 32; #endif } cn38xx; struct cvmx_lmcx_wodt_ctl0_cn38xx cn38xxp2; struct cvmx_lmcx_wodt_ctl0_cn38xx cn50xx; struct cvmx_lmcx_wodt_ctl0_cn30xx cn52xx; struct cvmx_lmcx_wodt_ctl0_cn30xx cn52xxp1; struct cvmx_lmcx_wodt_ctl0_cn30xx cn56xx; struct cvmx_lmcx_wodt_ctl0_cn30xx cn56xxp1; struct cvmx_lmcx_wodt_ctl0_cn38xx cn58xx; struct cvmx_lmcx_wodt_ctl0_cn38xx cn58xxp1; } cvmx_lmcx_wodt_ctl0_t; /** * cvmx_lmc#_wodt_ctl1 * * LMC_WODT_CTL1 = LMC Write OnDieTermination control * System designers may desire to terminate DQ/DQS/DM lines for higher frequency DDR operations * (667MHz and faster), especially on a multi-rank system. DDR2 DQ/DM/DQS I/O's have built in * Termination resistor that can be turned on or off by the controller, after meeting tAOND and tAOF * timing requirements. Each Rank has its own ODT pin that fans out to all the memory parts * in that DIMM. System designers may prefer different combinations of ODT ON's for read and write * into different ranks. Octeon supports full programmability by way of the mask register below. * Each Rank position has its own 8-bit programmable field. * When the controller does a write to that rank, it sets the 8 ODT pins to the MASK pins below. * For eg., When doing a write into Rank0, a system designer may desire to terminate the lines * with the resistor on Dimm0/Rank1. The mask WODT_D0_R0 would then be [00000010]. * If ODT feature is not desired, the DDR parts can be programmed to not look at these pins by * writing 0 in QS_DIC. Octeon drives the appropriate mask values on the ODT pins by default. * If this feature is not required, write 0 in this register. * * Notes: * Together, the LMC_WODT_CTL1 and LMC_WODT_CTL0 CSRs control the write ODT mask. * When a given RANK is selected, the WODT mask for that RANK is used. 
The resulting WODT mask is * driven to the DIMMs in the following manner: * BUNK_ENA=1 BUNK_ENA=0 * Mask[7] -> DIMM3, RANK1 DIMM3 * Mask[6] -> DIMM3, RANK0 * Mask[5] -> DIMM2, RANK1 DIMM2 * Mask[4] -> DIMM2, RANK0 * Mask[3] -> DIMM1, RANK1 DIMM1 * Mask[2] -> DIMM1, RANK0 * Mask[1] -> DIMM0, RANK1 DIMM0 * Mask[0] -> DIMM0, RANK0 */ typedef union { uint64_t u64; struct cvmx_lmcx_wodt_ctl1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t wodt_d3_r1 : 8; /**< Write ODT mask DIMM3, RANK1/DIMM3 in SingleRanked */ uint64_t wodt_d3_r0 : 8; /**< Write ODT mask DIMM3, RANK0 */ uint64_t wodt_d2_r1 : 8; /**< Write ODT mask DIMM2, RANK1/DIMM2 in SingleRanked */ uint64_t wodt_d2_r0 : 8; /**< Write ODT mask DIMM2, RANK0 */ #else uint64_t wodt_d2_r0 : 8; uint64_t wodt_d2_r1 : 8; uint64_t wodt_d3_r0 : 8; uint64_t wodt_d3_r1 : 8; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_lmcx_wodt_ctl1_s cn30xx; struct cvmx_lmcx_wodt_ctl1_s cn31xx; struct cvmx_lmcx_wodt_ctl1_s cn52xx; struct cvmx_lmcx_wodt_ctl1_s cn52xxp1; struct cvmx_lmcx_wodt_ctl1_s cn56xx; struct cvmx_lmcx_wodt_ctl1_s cn56xxp1; } cvmx_lmcx_wodt_ctl1_t; /** * cvmx_mio_boot_bist_stat * * MIO_BOOT_BIST_STAT = MIO Boot BIST Status Register * * Contains the BIST status for the MIO boot memories. '0' = pass, '1' = fail. */ typedef union { uint64_t u64; struct cvmx_mio_boot_bist_stat_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t loc : 1; /**< Local memory BIST status */ uint64_t ncbi : 1; /**< NCB input FIFO BIST status */ #else uint64_t ncbi : 1; uint64_t loc : 1; uint64_t reserved_2_63 : 62; #endif } s; struct cvmx_mio_boot_bist_stat_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t ncbo_1 : 1; /**< NCB output FIFO 1 BIST status */ uint64_t ncbo_0 : 1; /**< NCB output FIFO 0 BIST status */ uint64_t loc : 1; /**< Local memory BIST status */ uint64_t ncbi : 1; /**< NCB input FIFO BIST status */ #else uint64_t ncbi : 1; uint64_t loc : 1; uint64_t ncbo_0 : 1; uint64_t ncbo_1 : 1; uint64_t reserved_4_63 : 60; #endif } cn30xx; struct cvmx_mio_boot_bist_stat_cn30xx cn31xx; struct cvmx_mio_boot_bist_stat_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_3_63 : 61; uint64_t ncbo_0 : 1; /**< NCB output FIFO BIST status */ uint64_t loc : 1; /**< Local memory BIST status */ uint64_t ncbi : 1; /**< NCB input FIFO BIST status */ #else uint64_t ncbi : 1; uint64_t loc : 1; uint64_t ncbo_0 : 1; uint64_t reserved_3_63 : 61; #endif } cn38xx; struct cvmx_mio_boot_bist_stat_cn38xx cn38xxp2; struct cvmx_mio_boot_bist_stat_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_6_63 : 58; uint64_t pcm_1 : 1; /**< PCM memory 1 BIST status */ uint64_t pcm_0 : 1; /**< PCM memory 0 BIST status */ uint64_t ncbo_1 : 1; /**< NCB output FIFO 1 BIST status */ uint64_t ncbo_0 : 1; /**< NCB output FIFO 0 BIST status */ uint64_t loc : 1; /**< Local memory BIST status */ uint64_t ncbi : 1; /**< NCB input FIFO BIST status */ #else uint64_t ncbi : 1; uint64_t loc : 1; uint64_t ncbo_0 : 1; uint64_t ncbo_1 : 1; uint64_t pcm_0 : 1; uint64_t pcm_1 : 1; uint64_t reserved_6_63 : 58; #endif } cn50xx; struct cvmx_mio_boot_bist_stat_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_6_63 : 58; uint64_t ndf : 2; /**< NAND flash BIST status */ uint64_t ncbo_0 : 1; /**< NCB output FIFO BIST status */ uint64_t dma : 1; /**< DMA memory BIST status */ uint64_t loc : 1; /**< Local memory BIST status */ uint64_t ncbi : 1; /**< NCB input FIFO BIST status */ #else uint64_t ncbi : 1; uint64_t loc 
: 1; uint64_t dma : 1; uint64_t ncbo_0 : 1; uint64_t ndf : 2; uint64_t reserved_6_63 : 58; #endif } cn52xx; struct cvmx_mio_boot_bist_stat_cn52xxp1 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t ncbo_0 : 1; /**< NCB output FIFO BIST status */ uint64_t dma : 1; /**< DMA memory BIST status */ uint64_t loc : 1; /**< Local memory BIST status */ uint64_t ncbi : 1; /**< NCB input FIFO BIST status */ #else uint64_t ncbi : 1; uint64_t loc : 1; uint64_t dma : 1; uint64_t ncbo_0 : 1; uint64_t reserved_4_63 : 60; #endif } cn52xxp1; struct cvmx_mio_boot_bist_stat_cn52xxp1 cn56xx; struct cvmx_mio_boot_bist_stat_cn52xxp1 cn56xxp1; struct cvmx_mio_boot_bist_stat_cn38xx cn58xx; struct cvmx_mio_boot_bist_stat_cn38xx cn58xxp1; } cvmx_mio_boot_bist_stat_t; /** * cvmx_mio_boot_comp * * MIO_BOOT_COMP = MIO Boot Compensation Register * * Reset value is as follows: * * no pullups, PCTL=0x1f, NCTL=0x1f * pullup on boot_ad[9], PCTL=0x1b, NCTL=0x1b (20 ohm termination) * pullup on boot_ad[10], PCTL=0x07, NCTL=0x08 (50 ohm termination) * pullups on boot_ad[10:9], PCTL=0x06, NCTL=0x04 (60 ohm termination) */ typedef union { uint64_t u64; struct cvmx_mio_boot_comp_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_10_63 : 54; uint64_t pctl : 5; /**< Boot bus PCTL */ uint64_t nctl : 5; /**< Boot bus NCTL */ #else uint64_t nctl : 5; uint64_t pctl : 5; uint64_t reserved_10_63 : 54; #endif } s; struct cvmx_mio_boot_comp_s cn50xx; struct cvmx_mio_boot_comp_s cn52xx; struct cvmx_mio_boot_comp_s cn52xxp1; struct cvmx_mio_boot_comp_s cn56xx; struct cvmx_mio_boot_comp_s cn56xxp1; } cvmx_mio_boot_comp_t; /** * cvmx_mio_boot_dma_cfg# * * MIO_BOOT_DMA_CFG = MIO Boot DMA Config Register (1 per engine * 2 engines) * * SIZE is specified in number of bus transfers, where one transfer is equal to the following number * of bytes dependent on MIO_BOOT_DMA_TIMn[WIDTH] and MIO_BOOT_DMA_TIMn[DDR]: * * WIDTH DDR Transfer Size (bytes) * ---------------------------------------- * 0 0 2 * 0 1 4 * 1 0 4 * 1 1 8 * * Note: ADR must be aligned to the bus width (i.e. 16 bit aligned if WIDTH=0, 32 bit aligned if WIDTH=1). 
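 *
 * Worked example of the table above (editor's sketch; the variable names are illustrative
 * only):
 *
 *   int bytes_per_xfer = (width ? 4 : 2) << ddr;            // table above
 *   uint64_t total_bytes = (uint64_t)size * bytes_per_xfer;
 *
 * e.g. WIDTH=1 and DDR=1 give 8 bytes per transfer, so SIZE=512 describes a 4 KB DMA.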
*/ typedef union { uint64_t u64; struct cvmx_mio_boot_dma_cfgx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t en : 1; /**< DMA Engine X enable */ uint64_t rw : 1; /**< DMA Engine X R/W bit (0 = read, 1 = write) */ uint64_t clr : 1; /**< DMA Engine X clear EN on device terminated burst */ uint64_t reserved_60_60 : 1; uint64_t swap32 : 1; /**< DMA Engine X 32 bit swap */ uint64_t swap16 : 1; /**< DMA Engine X 16 bit swap */ uint64_t swap8 : 1; /**< DMA Engine X 8 bit swap */ uint64_t endian : 1; /**< DMA Engine X NCB endian mode (0 = big, 1 = little) */ uint64_t size : 20; /**< DMA Engine X size */ uint64_t adr : 36; /**< DMA Engine X address */ #else uint64_t adr : 36; uint64_t size : 20; uint64_t endian : 1; uint64_t swap8 : 1; uint64_t swap16 : 1; uint64_t swap32 : 1; uint64_t reserved_60_60 : 1; uint64_t clr : 1; uint64_t rw : 1; uint64_t en : 1; #endif } s; struct cvmx_mio_boot_dma_cfgx_s cn52xx; struct cvmx_mio_boot_dma_cfgx_s cn52xxp1; struct cvmx_mio_boot_dma_cfgx_s cn56xx; struct cvmx_mio_boot_dma_cfgx_s cn56xxp1; } cvmx_mio_boot_dma_cfgx_t; /** * cvmx_mio_boot_dma_int# * * MIO_BOOT_DMA_INT = MIO Boot DMA Interrupt Register (1 per engine * 2 engines) * */ typedef union { uint64_t u64; struct cvmx_mio_boot_dma_intx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t dmarq : 1; /**< DMA Engine X DMARQ asserted interrupt */ uint64_t done : 1; /**< DMA Engine X request completion interrupt */ #else uint64_t done : 1; uint64_t dmarq : 1; uint64_t reserved_2_63 : 62; #endif } s; struct cvmx_mio_boot_dma_intx_s cn52xx; struct cvmx_mio_boot_dma_intx_s cn52xxp1; struct cvmx_mio_boot_dma_intx_s cn56xx; struct cvmx_mio_boot_dma_intx_s cn56xxp1; } cvmx_mio_boot_dma_intx_t; /** * cvmx_mio_boot_dma_int_en# * * MIO_BOOT_DMA_INT_EN = MIO Boot DMA Interrupt Enable Register (1 per engine * 2 engines) * */ typedef union { uint64_t u64; struct cvmx_mio_boot_dma_int_enx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t dmarq : 1; /**< DMA Engine X DMARQ asserted interrupt enable */ uint64_t done : 1; /**< DMA Engine X request completion interrupt enable */ #else uint64_t done : 1; uint64_t dmarq : 1; uint64_t reserved_2_63 : 62; #endif } s; struct cvmx_mio_boot_dma_int_enx_s cn52xx; struct cvmx_mio_boot_dma_int_enx_s cn52xxp1; struct cvmx_mio_boot_dma_int_enx_s cn56xx; struct cvmx_mio_boot_dma_int_enx_s cn56xxp1; } cvmx_mio_boot_dma_int_enx_t; /** * cvmx_mio_boot_dma_tim# * * MIO_BOOT_DMA_TIM = MIO Boot DMA Timing Register (1 per engine * 2 engines) * * DMACK_PI inverts the assertion level of boot_dmack[n]. The default polarity of boot_dmack[1:0] is * selected on the first de-assertion of reset by the values on boot_ad[12:11], where 0 is active high * and 1 is active low (see MIO_BOOT_PIN_DEFS for a read-only copy of the default polarity). * boot_ad[12:11] have internal pulldowns, so place a pullup on boot_ad[n+11] for active low default * polarity on engine n. To interface with CF cards in True IDE Mode, either a pullup should be placed * on boot_ad[n+11] OR the corresponding DMACK_PI[n] should be set. * * DMARQ_PI inverts the assertion level of boot_dmarq[n]. The default polarity of boot_dmarq[1:0] is * active high, thus setting the polarity inversion bits changes the polarity to active low. To * interface with CF cards in True IDE Mode, the corresponding DMARQ_PI[n] should be clear. * * TIM_MULT specifies the timing multiplier for an engine. The timing multiplier applies to all timing * parameters, except for DMARQ and RD_DLY, which simply count eclks. 
TIM_MULT is encoded as follows: * 0 = 4x, 1 = 1x, 2 = 2x, 3 = 8x. * * RD_DLY specifies the read sample delay in eclk cycles for an engine. For reads, the data bus is * normally sampled on the same eclk edge that drives boot_oe_n high (and also low in DDR mode). * This parameter can delay that sampling edge by up to 7 eclks. Note: the number of eclk cycles * counted by the OE_A and DMACK_H + PAUSE timing parameters must be greater than RD_DLY. * * If DDR is set, then WE_N must be less than WE_A. */ typedef union { uint64_t u64; struct cvmx_mio_boot_dma_timx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t dmack_pi : 1; /**< DMA Engine X DMA ack polarity inversion */ uint64_t dmarq_pi : 1; /**< DMA Engine X DMA request polarity inversion */ uint64_t tim_mult : 2; /**< DMA Engine X timing multiplier */ uint64_t rd_dly : 3; /**< DMA Engine X read sample delay */ uint64_t ddr : 1; /**< DMA Engine X DDR mode */ uint64_t width : 1; /**< DMA Engine X bus width (0 = 16 bits, 1 = 32 bits) */ uint64_t reserved_48_54 : 7; uint64_t pause : 6; /**< DMA Engine X pause count */ uint64_t dmack_h : 6; /**< DMA Engine X DMA ack hold count */ uint64_t we_n : 6; /**< DMA Engine X write enable negated count */ uint64_t we_a : 6; /**< DMA Engine X write enable asserted count */ uint64_t oe_n : 6; /**< DMA Engine X output enable negated count */ uint64_t oe_a : 6; /**< DMA Engine X output enable asserted count */ uint64_t dmack_s : 6; /**< DMA Engine X DMA ack setup count */ uint64_t dmarq : 6; /**< DMA Engine X DMA request count (must be non-zero) */ #else uint64_t dmarq : 6; uint64_t dmack_s : 6; uint64_t oe_a : 6; uint64_t oe_n : 6; uint64_t we_a : 6; uint64_t we_n : 6; uint64_t dmack_h : 6; uint64_t pause : 6; uint64_t reserved_48_54 : 7; uint64_t width : 1; uint64_t ddr : 1; uint64_t rd_dly : 3; uint64_t tim_mult : 2; uint64_t dmarq_pi : 1; uint64_t dmack_pi : 1; #endif } s; struct cvmx_mio_boot_dma_timx_s cn52xx; struct cvmx_mio_boot_dma_timx_s cn52xxp1; struct cvmx_mio_boot_dma_timx_s cn56xx; struct cvmx_mio_boot_dma_timx_s cn56xxp1; } cvmx_mio_boot_dma_timx_t; /** * cvmx_mio_boot_err * * MIO_BOOT_ERR = MIO Boot Error Register * * Contains the address decode error and wait mode error bits. Address decode error is set when a * boot bus access does not hit in any of the 8 remote regions or 2 local regions. Wait mode error is * set when wait mode is enabled and the external wait signal is not de-asserted after 32k eclk cycles. */ typedef union { uint64_t u64; struct cvmx_mio_boot_err_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t wait_err : 1; /**< Wait mode error */ uint64_t adr_err : 1; /**< Address decode error */ #else uint64_t adr_err : 1; uint64_t wait_err : 1; uint64_t reserved_2_63 : 62; #endif } s; struct cvmx_mio_boot_err_s cn30xx; struct cvmx_mio_boot_err_s cn31xx; struct cvmx_mio_boot_err_s cn38xx; struct cvmx_mio_boot_err_s cn38xxp2; struct cvmx_mio_boot_err_s cn50xx; struct cvmx_mio_boot_err_s cn52xx; struct cvmx_mio_boot_err_s cn52xxp1; struct cvmx_mio_boot_err_s cn56xx; struct cvmx_mio_boot_err_s cn56xxp1; struct cvmx_mio_boot_err_s cn58xx; struct cvmx_mio_boot_err_s cn58xxp1; } cvmx_mio_boot_err_t; /** * cvmx_mio_boot_int * * MIO_BOOT_INT = MIO Boot Interrupt Register * * Contains the interrupt enable bits for address decode error and wait mode error. 
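 *
 * Enable sketch (editor's illustration; the cvmx_write_csr() accessor and the generated
 * CVMX_MIO_BOOT_INT address macro are assumed):
 *
 *   cvmx_mio_boot_int_t en;
 *   en.u64 = 0;
 *   en.s.adr_int = 1;                            // interrupt on address decode error
 *   en.s.wait_int = 1;                           // interrupt on wait mode error
 *   cvmx_write_csr(CVMX_MIO_BOOT_INT, en.u64);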
*/ typedef union { uint64_t u64; struct cvmx_mio_boot_int_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t wait_int : 1; /**< Wait mode error interrupt enable */ uint64_t adr_int : 1; /**< Address decode error interrupt enable */ #else uint64_t adr_int : 1; uint64_t wait_int : 1; uint64_t reserved_2_63 : 62; #endif } s; struct cvmx_mio_boot_int_s cn30xx; struct cvmx_mio_boot_int_s cn31xx; struct cvmx_mio_boot_int_s cn38xx; struct cvmx_mio_boot_int_s cn38xxp2; struct cvmx_mio_boot_int_s cn50xx; struct cvmx_mio_boot_int_s cn52xx; struct cvmx_mio_boot_int_s cn52xxp1; struct cvmx_mio_boot_int_s cn56xx; struct cvmx_mio_boot_int_s cn56xxp1; struct cvmx_mio_boot_int_s cn58xx; struct cvmx_mio_boot_int_s cn58xxp1; } cvmx_mio_boot_int_t; /** * cvmx_mio_boot_loc_adr * * MIO_BOOT_LOC_ADR = MIO Boot Local Memory Address Register * * Specifies the address for reading or writing the local memory. This address will post-increment * following an access to the MIO Boot Local Memory Data Register (MIO_BOOT_LOC_DAT). * * Local memory region 0 exists from addresses 0x00 - 0x78. * Local memory region 1 exists from addresses 0x80 - 0xf8. */ typedef union { uint64_t u64; struct cvmx_mio_boot_loc_adr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t adr : 5; /**< Local memory address */ uint64_t reserved_0_2 : 3; #else uint64_t reserved_0_2 : 3; uint64_t adr : 5; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_mio_boot_loc_adr_s cn30xx; struct cvmx_mio_boot_loc_adr_s cn31xx; struct cvmx_mio_boot_loc_adr_s cn38xx; struct cvmx_mio_boot_loc_adr_s cn38xxp2; struct cvmx_mio_boot_loc_adr_s cn50xx; struct cvmx_mio_boot_loc_adr_s cn52xx; struct cvmx_mio_boot_loc_adr_s cn52xxp1; struct cvmx_mio_boot_loc_adr_s cn56xx; struct cvmx_mio_boot_loc_adr_s cn56xxp1; struct cvmx_mio_boot_loc_adr_s cn58xx; struct cvmx_mio_boot_loc_adr_s cn58xxp1; } cvmx_mio_boot_loc_adr_t; /** * cvmx_mio_boot_loc_cfg# * * MIO_BOOT_LOC_CFG = MIO Boot Local Region Config Register (1 per region * 2 regions) * * Contains local region enable and local region base address parameters. Each local region is 128 * bytes organized as 16 entries x 8 bytes. * * Base address specifies address bits [31:7] of the region. */ typedef union { uint64_t u64; struct cvmx_mio_boot_loc_cfgx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t en : 1; /**< Local region X enable */ uint64_t reserved_28_30 : 3; uint64_t base : 25; /**< Local region X base address */ uint64_t reserved_0_2 : 3; #else uint64_t reserved_0_2 : 3; uint64_t base : 25; uint64_t reserved_28_30 : 3; uint64_t en : 1; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_mio_boot_loc_cfgx_s cn30xx; struct cvmx_mio_boot_loc_cfgx_s cn31xx; struct cvmx_mio_boot_loc_cfgx_s cn38xx; struct cvmx_mio_boot_loc_cfgx_s cn38xxp2; struct cvmx_mio_boot_loc_cfgx_s cn50xx; struct cvmx_mio_boot_loc_cfgx_s cn52xx; struct cvmx_mio_boot_loc_cfgx_s cn52xxp1; struct cvmx_mio_boot_loc_cfgx_s cn56xx; struct cvmx_mio_boot_loc_cfgx_s cn56xxp1; struct cvmx_mio_boot_loc_cfgx_s cn58xx; struct cvmx_mio_boot_loc_cfgx_s cn58xxp1; } cvmx_mio_boot_loc_cfgx_t; /** * cvmx_mio_boot_loc_dat * * MIO_BOOT_LOC_DAT = MIO Boot Local Memory Data Register * * This is a pseudo-register that will read/write the local memory at the address specified by the MIO * Boot Local Address Register (MIO_BOOT_LOC_ADR) when accessed. 
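*
* A read sketch that relies on the post-increment (illustrative only; the
* cvmx_read_csr()/cvmx_write_csr() accessors and the CVMX_MIO_BOOT_LOC_*
* address macros are assumed from the companion SDK headers):
*
*   uint64_t entry[16];                  // local region 0 is 16 entries x 8 bytes
*   cvmx_mio_boot_loc_adr_t loc_adr;
*   loc_adr.u64 = 0;                     // ADR = 0 -> first entry of region 0
*   cvmx_write_csr(CVMX_MIO_BOOT_LOC_ADR, loc_adr.u64);
*   for (int i = 0; i < 16; i++)         // each access post-increments the address
*       entry[i] = cvmx_read_csr(CVMX_MIO_BOOT_LOC_DAT);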
*/ typedef union { uint64_t u64; struct cvmx_mio_boot_loc_dat_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t data : 64; /**< Local memory data */ #else uint64_t data : 64; #endif } s; struct cvmx_mio_boot_loc_dat_s cn30xx; struct cvmx_mio_boot_loc_dat_s cn31xx; struct cvmx_mio_boot_loc_dat_s cn38xx; struct cvmx_mio_boot_loc_dat_s cn38xxp2; struct cvmx_mio_boot_loc_dat_s cn50xx; struct cvmx_mio_boot_loc_dat_s cn52xx; struct cvmx_mio_boot_loc_dat_s cn52xxp1; struct cvmx_mio_boot_loc_dat_s cn56xx; struct cvmx_mio_boot_loc_dat_s cn56xxp1; struct cvmx_mio_boot_loc_dat_s cn58xx; struct cvmx_mio_boot_loc_dat_s cn58xxp1; } cvmx_mio_boot_loc_dat_t; /** * cvmx_mio_boot_pin_defs * * MIO_BOOT_PIN_DEFS = MIO Boot Pin Defaults Register * */ typedef union { uint64_t u64; struct cvmx_mio_boot_pin_defs_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t ale : 1; /**< Region 0 default ALE mode */ uint64_t width : 1; /**< Region 0 default bus width */ uint64_t dmack_p2 : 1; /**< boot_dmack[2] default polarity */ uint64_t dmack_p1 : 1; /**< boot_dmack[1] default polarity */ uint64_t dmack_p0 : 1; /**< boot_dmack[0] default polarity */ uint64_t term : 2; /**< Selects default driver termination */ uint64_t nand : 1; /**< Region 0 is NAND flash */ uint64_t reserved_0_7 : 8; #else uint64_t reserved_0_7 : 8; uint64_t nand : 1; uint64_t term : 2; uint64_t dmack_p0 : 1; uint64_t dmack_p1 : 1; uint64_t dmack_p2 : 1; uint64_t width : 1; uint64_t ale : 1; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_mio_boot_pin_defs_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t ale : 1; /**< Region 0 default ALE mode */ uint64_t width : 1; /**< Region 0 default bus width */ uint64_t reserved_13_13 : 1; uint64_t dmack_p1 : 1; /**< boot_dmack[1] default polarity */ uint64_t dmack_p0 : 1; /**< boot_dmack[0] default polarity */ uint64_t term : 2; /**< Selects default driver termination */ uint64_t nand : 1; /**< Region 0 is NAND flash */ uint64_t reserved_0_7 : 8; #else uint64_t reserved_0_7 : 8; uint64_t nand : 1; uint64_t term : 2; uint64_t dmack_p0 : 1; uint64_t dmack_p1 : 1; uint64_t reserved_13_13 : 1; uint64_t width : 1; uint64_t ale : 1; uint64_t reserved_16_63 : 48; #endif } cn52xx; struct cvmx_mio_boot_pin_defs_cn56xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t ale : 1; /**< Region 0 default ALE mode */ uint64_t width : 1; /**< Region 0 default bus width */ uint64_t dmack_p2 : 1; /**< boot_dmack[2] default polarity */ uint64_t dmack_p1 : 1; /**< boot_dmack[1] default polarity */ uint64_t dmack_p0 : 1; /**< boot_dmack[0] default polarity */ uint64_t term : 2; /**< Selects default driver termination */ uint64_t reserved_0_8 : 9; #else uint64_t reserved_0_8 : 9; uint64_t term : 2; uint64_t dmack_p0 : 1; uint64_t dmack_p1 : 1; uint64_t dmack_p2 : 1; uint64_t width : 1; uint64_t ale : 1; uint64_t reserved_16_63 : 48; #endif } cn56xx; } cvmx_mio_boot_pin_defs_t; /** * cvmx_mio_boot_reg_cfg# */ typedef union { uint64_t u64; struct cvmx_mio_boot_reg_cfgx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_44_63 : 20; uint64_t dmack : 2; /**< Region X DMACK */ uint64_t tim_mult : 2; /**< Region X timing multiplier */ uint64_t rd_dly : 3; /**< Region X read sample delay */ uint64_t sam : 1; /**< Region X SAM mode */ uint64_t we_ext : 2; /**< Region X write enable count extension */ uint64_t oe_ext : 2; /**< Region X output enable count extension */ uint64_t en : 1; /**< Region X enable */ uint64_t orbit : 1; /**< Region X or bit */ uint64_t 
ale : 1; /**< Region X ALE mode */ uint64_t width : 1; /**< Region X bus width */ uint64_t size : 12; /**< Region X size */ uint64_t base : 16; /**< Region X base address */ #else uint64_t base : 16; uint64_t size : 12; uint64_t width : 1; uint64_t ale : 1; uint64_t orbit : 1; uint64_t en : 1; uint64_t oe_ext : 2; uint64_t we_ext : 2; uint64_t sam : 1; uint64_t rd_dly : 3; uint64_t tim_mult : 2; uint64_t dmack : 2; uint64_t reserved_44_63 : 20; #endif } s; struct cvmx_mio_boot_reg_cfgx_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_37_63 : 27; uint64_t sam : 1; /**< Region X SAM mode */ uint64_t we_ext : 2; /**< Region X write enable count extension */ uint64_t oe_ext : 2; /**< Region X output enable count extension */ uint64_t en : 1; /**< Region X enable */ uint64_t orbit : 1; /**< Region X or bit */ uint64_t ale : 1; /**< Region X ALE mode */ uint64_t width : 1; /**< Region X bus width */ uint64_t size : 12; /**< Region X size */ uint64_t base : 16; /**< Region X base address */ #else uint64_t base : 16; uint64_t size : 12; uint64_t width : 1; uint64_t ale : 1; uint64_t orbit : 1; uint64_t en : 1; uint64_t oe_ext : 2; uint64_t we_ext : 2; uint64_t sam : 1; uint64_t reserved_37_63 : 27; #endif } cn30xx; struct cvmx_mio_boot_reg_cfgx_cn30xx cn31xx; struct cvmx_mio_boot_reg_cfgx_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t en : 1; /**< Region X enable */ uint64_t orbit : 1; /**< Region X or bit */ uint64_t reserved_28_29 : 2; uint64_t size : 12; /**< Region X size */ uint64_t base : 16; /**< Region X base address */ #else uint64_t base : 16; uint64_t size : 12; uint64_t reserved_28_29 : 2; uint64_t orbit : 1; uint64_t en : 1; uint64_t reserved_32_63 : 32; #endif } cn38xx; struct cvmx_mio_boot_reg_cfgx_cn38xx cn38xxp2; struct cvmx_mio_boot_reg_cfgx_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_42_63 : 22; uint64_t tim_mult : 2; /**< Region X timing multiplier */ uint64_t rd_dly : 3; /**< Region X read sample delay */ uint64_t sam : 1; /**< Region X SAM mode */ uint64_t we_ext : 2; /**< Region X write enable count extension */ uint64_t oe_ext : 2; /**< Region X output enable count extension */ uint64_t en : 1; /**< Region X enable */ uint64_t orbit : 1; /**< Region X or bit */ uint64_t ale : 1; /**< Region X ALE mode */ uint64_t width : 1; /**< Region X bus width */ uint64_t size : 12; /**< Region X size */ uint64_t base : 16; /**< Region X base address */ #else uint64_t base : 16; uint64_t size : 12; uint64_t width : 1; uint64_t ale : 1; uint64_t orbit : 1; uint64_t en : 1; uint64_t oe_ext : 2; uint64_t we_ext : 2; uint64_t sam : 1; uint64_t rd_dly : 3; uint64_t tim_mult : 2; uint64_t reserved_42_63 : 22; #endif } cn50xx; struct cvmx_mio_boot_reg_cfgx_s cn52xx; struct cvmx_mio_boot_reg_cfgx_s cn52xxp1; struct cvmx_mio_boot_reg_cfgx_s cn56xx; struct cvmx_mio_boot_reg_cfgx_s cn56xxp1; struct cvmx_mio_boot_reg_cfgx_cn30xx cn58xx; struct cvmx_mio_boot_reg_cfgx_cn30xx cn58xxp1; } cvmx_mio_boot_reg_cfgx_t; /** * cvmx_mio_boot_reg_tim# */ typedef union { uint64_t u64; struct cvmx_mio_boot_reg_timx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t pagem : 1; /**< Region X page mode */ uint64_t waitm : 1; /**< Region X wait mode */ uint64_t pages : 2; /**< Region X page size */ uint64_t ale : 6; /**< Region X ALE count */ uint64_t page : 6; /**< Region X page count */ uint64_t wait : 6; /**< Region X wait count */ uint64_t pause : 6; /**< Region X pause count */ uint64_t wr_hld : 6; /**< Region X write hold count */ uint64_t rd_hld : 6; /**< 
Region X read hold count */ uint64_t we : 6; /**< Region X write enable count */ uint64_t oe : 6; /**< Region X output enable count */ uint64_t ce : 6; /**< Region X chip enable count */ uint64_t adr : 6; /**< Region X address count */ #else uint64_t adr : 6; uint64_t ce : 6; uint64_t oe : 6; uint64_t we : 6; uint64_t rd_hld : 6; uint64_t wr_hld : 6; uint64_t pause : 6; uint64_t wait : 6; uint64_t page : 6; uint64_t ale : 6; uint64_t pages : 2; uint64_t waitm : 1; uint64_t pagem : 1; #endif } s; struct cvmx_mio_boot_reg_timx_s cn30xx; struct cvmx_mio_boot_reg_timx_s cn31xx; struct cvmx_mio_boot_reg_timx_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t pagem : 1; /**< Region X page mode */ uint64_t waitm : 1; /**< Region X wait mode */ uint64_t pages : 2; /**< Region X page size (NOT IN PASS 1) */ uint64_t reserved_54_59 : 6; uint64_t page : 6; /**< Region X page count */ uint64_t wait : 6; /**< Region X wait count */ uint64_t pause : 6; /**< Region X pause count */ uint64_t wr_hld : 6; /**< Region X write hold count */ uint64_t rd_hld : 6; /**< Region X read hold count */ uint64_t we : 6; /**< Region X write enable count */ uint64_t oe : 6; /**< Region X output enable count */ uint64_t ce : 6; /**< Region X chip enable count */ uint64_t adr : 6; /**< Region X address count */ #else uint64_t adr : 6; uint64_t ce : 6; uint64_t oe : 6; uint64_t we : 6; uint64_t rd_hld : 6; uint64_t wr_hld : 6; uint64_t pause : 6; uint64_t wait : 6; uint64_t page : 6; uint64_t reserved_54_59 : 6; uint64_t pages : 2; uint64_t waitm : 1; uint64_t pagem : 1; #endif } cn38xx; struct cvmx_mio_boot_reg_timx_cn38xx cn38xxp2; struct cvmx_mio_boot_reg_timx_s cn50xx; struct cvmx_mio_boot_reg_timx_s cn52xx; struct cvmx_mio_boot_reg_timx_s cn52xxp1; struct cvmx_mio_boot_reg_timx_s cn56xx; struct cvmx_mio_boot_reg_timx_s cn56xxp1; struct cvmx_mio_boot_reg_timx_s cn58xx; struct cvmx_mio_boot_reg_timx_s cn58xxp1; } cvmx_mio_boot_reg_timx_t; /** * cvmx_mio_boot_thr * * MIO_BOOT_THR = MIO Boot Threshold Register * * Contains MIO Boot threshold values: * * FIF_THR = Assert ncb__busy when the Boot NCB input FIFO reaches this level (not typically for * customer use). * * DMA_THR = When non-DMA accesses are pending, perform a DMA access after this value of non-DMA * accesses have completed. If set to zero, only perform a DMA access when non-DMA * accesses are not pending. 
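*
* A read-modify-write sketch for DMA_THR (illustrative only; the accessors and
* the CVMX_MIO_BOOT_THR address macro are assumed from the companion SDK
* headers):
*
*   cvmx_mio_boot_thr_t thr;
*   thr.u64 = cvmx_read_csr(CVMX_MIO_BOOT_THR);  // keep FIF_THR as-is
*   thr.s.dma_thr = 16;                  // DMA access after 16 completed non-DMA accesses
*   cvmx_write_csr(CVMX_MIO_BOOT_THR, thr.u64);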
*/ typedef union { uint64_t u64; struct cvmx_mio_boot_thr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_22_63 : 42; uint64_t dma_thr : 6; /**< DMA threshold */ uint64_t reserved_14_15 : 2; uint64_t fif_cnt : 6; /**< Current NCB FIFO count */ uint64_t reserved_6_7 : 2; uint64_t fif_thr : 6; /**< NCB busy threshold */ #else uint64_t fif_thr : 6; uint64_t reserved_6_7 : 2; uint64_t fif_cnt : 6; uint64_t reserved_14_15 : 2; uint64_t dma_thr : 6; uint64_t reserved_22_63 : 42; #endif } s; struct cvmx_mio_boot_thr_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_14_63 : 50; uint64_t fif_cnt : 6; /**< Current NCB FIFO count */ uint64_t reserved_6_7 : 2; uint64_t fif_thr : 6; /**< NCB busy threshold */ #else uint64_t fif_thr : 6; uint64_t reserved_6_7 : 2; uint64_t fif_cnt : 6; uint64_t reserved_14_63 : 50; #endif } cn30xx; struct cvmx_mio_boot_thr_cn30xx cn31xx; struct cvmx_mio_boot_thr_cn30xx cn38xx; struct cvmx_mio_boot_thr_cn30xx cn38xxp2; struct cvmx_mio_boot_thr_cn30xx cn50xx; struct cvmx_mio_boot_thr_s cn52xx; struct cvmx_mio_boot_thr_s cn52xxp1; struct cvmx_mio_boot_thr_s cn56xx; struct cvmx_mio_boot_thr_s cn56xxp1; struct cvmx_mio_boot_thr_cn30xx cn58xx; struct cvmx_mio_boot_thr_cn30xx cn58xxp1; } cvmx_mio_boot_thr_t; /** * cvmx_mio_fus_bnk_dat# * * Notes: * The initial state of MIO_FUS_BNK_DAT* is as if bank1 was just read, i.e. DAT* = fus[511:256] * */ typedef union { uint64_t u64; struct cvmx_mio_fus_bnk_datx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t dat : 64; /**< Efuse bank store. For reads, the DAT gets the fuse bank last read. For writes, the DAT determines which fuses to blow */ #else uint64_t dat : 64; #endif } s; struct cvmx_mio_fus_bnk_datx_s cn50xx; struct cvmx_mio_fus_bnk_datx_s cn52xx; struct cvmx_mio_fus_bnk_datx_s cn52xxp1; struct cvmx_mio_fus_bnk_datx_s cn56xx; struct cvmx_mio_fus_bnk_datx_s cn56xxp1; struct cvmx_mio_fus_bnk_datx_s cn58xx; struct cvmx_mio_fus_bnk_datx_s cn58xxp1; } cvmx_mio_fus_bnk_datx_t; /** * cvmx_mio_fus_dat0 */ typedef union { uint64_t u64; struct cvmx_mio_fus_dat0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t man_info : 32; /**< Fuse information - manufacturing info [31:0] */ #else uint64_t man_info : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_mio_fus_dat0_s cn30xx; struct cvmx_mio_fus_dat0_s cn31xx; struct cvmx_mio_fus_dat0_s cn38xx; struct cvmx_mio_fus_dat0_s cn38xxp2; struct cvmx_mio_fus_dat0_s cn50xx; struct cvmx_mio_fus_dat0_s cn52xx; struct cvmx_mio_fus_dat0_s cn52xxp1; struct cvmx_mio_fus_dat0_s cn56xx; struct cvmx_mio_fus_dat0_s cn56xxp1; struct cvmx_mio_fus_dat0_s cn58xx; struct cvmx_mio_fus_dat0_s cn58xxp1; } cvmx_mio_fus_dat0_t; /** * cvmx_mio_fus_dat1 */ typedef union { uint64_t u64; struct cvmx_mio_fus_dat1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t man_info : 32; /**< Fuse information - manufacturing info [63:32] */ #else uint64_t man_info : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_mio_fus_dat1_s cn30xx; struct cvmx_mio_fus_dat1_s cn31xx; struct cvmx_mio_fus_dat1_s cn38xx; struct cvmx_mio_fus_dat1_s cn38xxp2; struct cvmx_mio_fus_dat1_s cn50xx; struct cvmx_mio_fus_dat1_s cn52xx; struct cvmx_mio_fus_dat1_s cn52xxp1; struct cvmx_mio_fus_dat1_s cn56xx; struct cvmx_mio_fus_dat1_s cn56xxp1; struct cvmx_mio_fus_dat1_s cn58xx; struct cvmx_mio_fus_dat1_s cn58xxp1; } cvmx_mio_fus_dat1_t; /** * cvmx_mio_fus_dat2 * * Notes: * CHIP_ID is consumed in several places within Octeon.
* * * Core COP0 ProcessorIdentification[Revision] * * Core EJTAG DeviceIdentification[Version] * * PCI_CFG02[RID] * * JTAG controller * * Note: The JTAG controller gets CHIP_ID[3:0] solely from the laser fuses. * Modification to the efuses will not change what the JTAG controller reports * for CHIP_ID. */ typedef union { uint64_t u64; struct cvmx_mio_fus_dat2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_34_63 : 30; uint64_t fus318 : 1; /**< Fuse information - a copy of fuse318 */ uint64_t raid_en : 1; /**< Fuse information - RAID enabled */ uint64_t reserved_30_31 : 2; uint64_t nokasu : 1; /**< Fuse information - Disable Kasumi */ uint64_t nodfa_cp2 : 1; /**< Fuse information - DFA Disable (CP2) */ uint64_t nomul : 1; /**< Fuse information - VMUL disable */ uint64_t nocrypto : 1; /**< Fuse information - AES/DES/HASH disable */ uint64_t rst_sht : 1; /**< Fuse information - When set, use short reset count */ uint64_t bist_dis : 1; /**< Fuse information - BIST Disable */ uint64_t chip_id : 8; /**< Fuse information - CHIP_ID */ uint64_t reserved_0_15 : 16; #else uint64_t reserved_0_15 : 16; uint64_t chip_id : 8; uint64_t bist_dis : 1; uint64_t rst_sht : 1; uint64_t nocrypto : 1; uint64_t nomul : 1; uint64_t nodfa_cp2 : 1; uint64_t nokasu : 1; uint64_t reserved_30_31 : 2; uint64_t raid_en : 1; uint64_t fus318 : 1; uint64_t reserved_34_63 : 30; #endif } s; struct cvmx_mio_fus_dat2_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_29_63 : 35; uint64_t nodfa_cp2 : 1; /**< Fuse information - DFA Disable (CP2) */ uint64_t nomul : 1; /**< Fuse information - VMUL disable */ uint64_t nocrypto : 1; /**< Fuse information - AES/DES/HASH disable */ uint64_t rst_sht : 1; /**< Fuse information - When set, use short reset count */ uint64_t bist_dis : 1; /**< Fuse information - BIST Disable */ uint64_t chip_id : 8; /**< Fuse information - CHIP_ID */ uint64_t pll_off : 4; /**< Fuse information - core pll offset Used to compute the base offset for the core pll. the offset will be (PLL_OFF ^ 8) Note, these fuses can only be set from laser fuse */ uint64_t reserved_1_11 : 11; uint64_t pp_dis : 1; /**< Fuse information - PP_DISABLES */ #else uint64_t pp_dis : 1; uint64_t reserved_1_11 : 11; uint64_t pll_off : 4; uint64_t chip_id : 8; uint64_t bist_dis : 1; uint64_t rst_sht : 1; uint64_t nocrypto : 1; uint64_t nomul : 1; uint64_t nodfa_cp2 : 1; uint64_t reserved_29_63 : 35; #endif } cn30xx; struct cvmx_mio_fus_dat2_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_29_63 : 35; uint64_t nodfa_cp2 : 1; /**< Fuse information - DFA Disable (CP2) */ uint64_t nomul : 1; /**< Fuse information - VMUL disable */ uint64_t nocrypto : 1; /**< Fuse information - AES/DES/HASH disable */ uint64_t rst_sht : 1; /**< Fuse information - When set, use short reset count */ uint64_t bist_dis : 1; /**< Fuse information - BIST Disable */ uint64_t chip_id : 8; /**< Fuse information - CHIP_ID */ uint64_t pll_off : 4; /**< Fuse information - core pll offset Used to compute the base offset for the core pll. 
the offset will be (PLL_OFF ^ 8) Note, these fuses can only be set from laser fuse */ uint64_t reserved_2_11 : 10; uint64_t pp_dis : 2; /**< Fuse information - PP_DISABLES */ #else uint64_t pp_dis : 2; uint64_t reserved_2_11 : 10; uint64_t pll_off : 4; uint64_t chip_id : 8; uint64_t bist_dis : 1; uint64_t rst_sht : 1; uint64_t nocrypto : 1; uint64_t nomul : 1; uint64_t nodfa_cp2 : 1; uint64_t reserved_29_63 : 35; #endif } cn31xx; struct cvmx_mio_fus_dat2_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_29_63 : 35; uint64_t nodfa_cp2 : 1; /**< Fuse information - DFA Disable (CP2) (PASS2 Only) */ uint64_t nomul : 1; /**< Fuse information - VMUL disable (PASS2 Only) */ uint64_t nocrypto : 1; /**< Fuse information - AES/DES/HASH disable (PASS2 Only) */ uint64_t rst_sht : 1; /**< Fuse information - When set, use short reset count */ uint64_t bist_dis : 1; /**< Fuse information - BIST Disable */ uint64_t chip_id : 8; /**< Fuse information - CHIP_ID */ uint64_t pp_dis : 16; /**< Fuse information - PP_DISABLES */ #else uint64_t pp_dis : 16; uint64_t chip_id : 8; uint64_t bist_dis : 1; uint64_t rst_sht : 1; uint64_t nocrypto : 1; uint64_t nomul : 1; uint64_t nodfa_cp2 : 1; uint64_t reserved_29_63 : 35; #endif } cn38xx; struct cvmx_mio_fus_dat2_cn38xx cn38xxp2; struct cvmx_mio_fus_dat2_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_34_63 : 30; uint64_t fus318 : 1; /**< Fuse information - a copy of fuse318 */ uint64_t raid_en : 1; /**< Fuse information - RAID enabled (5020 does not have RAID co-processor) */ uint64_t reserved_30_31 : 2; uint64_t nokasu : 1; /**< Fuse information - Disable Kasumi */ uint64_t nodfa_cp2 : 1; /**< Fuse information - DFA Disable (CP2) (5020 does not have DFA co-processor) */ uint64_t nomul : 1; /**< Fuse information - VMUL disable */ uint64_t nocrypto : 1; /**< Fuse information - AES/DES/HASH disable */ uint64_t rst_sht : 1; /**< Fuse information - When set, use short reset count */ uint64_t bist_dis : 1; /**< Fuse information - BIST Disable */ uint64_t chip_id : 8; /**< Fuse information - CHIP_ID */ uint64_t reserved_2_15 : 14; uint64_t pp_dis : 2; /**< Fuse information - PP_DISABLES */ #else uint64_t pp_dis : 2; uint64_t reserved_2_15 : 14; uint64_t chip_id : 8; uint64_t bist_dis : 1; uint64_t rst_sht : 1; uint64_t nocrypto : 1; uint64_t nomul : 1; uint64_t nodfa_cp2 : 1; uint64_t nokasu : 1; uint64_t reserved_30_31 : 2; uint64_t raid_en : 1; uint64_t fus318 : 1; uint64_t reserved_34_63 : 30; #endif } cn50xx; struct cvmx_mio_fus_dat2_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_34_63 : 30; uint64_t fus318 : 1; /**< Fuse information - a copy of fuse318 */ uint64_t raid_en : 1; /**< Fuse information - RAID enabled */ uint64_t reserved_30_31 : 2; uint64_t nokasu : 1; /**< Fuse information - Disable Kasumi */ uint64_t nodfa_cp2 : 1; /**< Fuse information - DFA Disable (CP2) */ uint64_t nomul : 1; /**< Fuse information - VMUL disable */ uint64_t nocrypto : 1; /**< Fuse information - AES/DES/HASH disable */ uint64_t rst_sht : 1; /**< Fuse information - When set, use short reset count */ uint64_t bist_dis : 1; /**< Fuse information - BIST Disable */ uint64_t chip_id : 8; /**< Fuse information - CHIP_ID */ uint64_t reserved_4_15 : 12; uint64_t pp_dis : 4; /**< Fuse information - PP_DISABLES */ #else uint64_t pp_dis : 4; uint64_t reserved_4_15 : 12; uint64_t chip_id : 8; uint64_t bist_dis : 1; uint64_t rst_sht : 1; uint64_t nocrypto : 1; uint64_t nomul : 1; uint64_t nodfa_cp2 : 1; uint64_t nokasu : 1; uint64_t reserved_30_31 : 2; uint64_t 
raid_en : 1; uint64_t fus318 : 1; uint64_t reserved_34_63 : 30; #endif } cn52xx; struct cvmx_mio_fus_dat2_cn52xx cn52xxp1; struct cvmx_mio_fus_dat2_cn56xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_34_63 : 30; uint64_t fus318 : 1; /**< Fuse information - a copy of fuse318 */ uint64_t raid_en : 1; /**< Fuse information - RAID enabled */ uint64_t reserved_30_31 : 2; uint64_t nokasu : 1; /**< Fuse information - Disable Kasumi */ uint64_t nodfa_cp2 : 1; /**< Fuse information - DFA Disable (CP2) */ uint64_t nomul : 1; /**< Fuse information - VMUL disable */ uint64_t nocrypto : 1; /**< Fuse information - AES/DES/HASH disable */ uint64_t rst_sht : 1; /**< Fuse information - When set, use short reset count */ uint64_t bist_dis : 1; /**< Fuse information - BIST Disable */ uint64_t chip_id : 8; /**< Fuse information - CHIP_ID */ uint64_t reserved_12_15 : 4; uint64_t pp_dis : 12; /**< Fuse information - PP_DISABLES */ #else uint64_t pp_dis : 12; uint64_t reserved_12_15 : 4; uint64_t chip_id : 8; uint64_t bist_dis : 1; uint64_t rst_sht : 1; uint64_t nocrypto : 1; uint64_t nomul : 1; uint64_t nodfa_cp2 : 1; uint64_t nokasu : 1; uint64_t reserved_30_31 : 2; uint64_t raid_en : 1; uint64_t fus318 : 1; uint64_t reserved_34_63 : 30; #endif } cn56xx; struct cvmx_mio_fus_dat2_cn56xx cn56xxp1; struct cvmx_mio_fus_dat2_cn58xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_30_63 : 34; uint64_t nokasu : 1; /**< Fuse information - Disable Kasumi */ uint64_t nodfa_cp2 : 1; /**< Fuse information - DFA Disable (CP2) */ uint64_t nomul : 1; /**< Fuse information - VMUL disable */ uint64_t nocrypto : 1; /**< Fuse information - AES/DES/HASH disable */ uint64_t rst_sht : 1; /**< Fuse information - When set, use short reset count */ uint64_t bist_dis : 1; /**< Fuse information - BIST Disable */ uint64_t chip_id : 8; /**< Fuse information - CHIP_ID */ uint64_t pp_dis : 16; /**< Fuse information - PP_DISABLES */ #else uint64_t pp_dis : 16; uint64_t chip_id : 8; uint64_t bist_dis : 1; uint64_t rst_sht : 1; uint64_t nocrypto : 1; uint64_t nomul : 1; uint64_t nodfa_cp2 : 1; uint64_t nokasu : 1; uint64_t reserved_30_63 : 34; #endif } cn58xx; struct cvmx_mio_fus_dat2_cn58xx cn58xxp1; } cvmx_mio_fus_dat2_t; /** * cvmx_mio_fus_dat3 */ typedef union { uint64_t u64; struct cvmx_mio_fus_dat3_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t pll_div4 : 1; /**< Fuse information - PLL DIV4 mode (laser fuse only) */ uint64_t zip_crip : 2; /**< Fuse information - Zip Cripple */ uint64_t bar2_en : 1; /**< Fuse information - BAR2 Enable (when blown '1') */ uint64_t efus_lck : 1; /**< Fuse information - efuse lockdown */ uint64_t efus_ign : 1; /**< Fuse information - efuse ignore This bit only has side effects when blown in the laser fuses. It is ignored if only set in efuse store. */ uint64_t nozip : 1; /**< Fuse information - ZIP disable */ uint64_t nodfa_dte : 1; /**< Fuse information - DFA Disable (DTE) */ uint64_t icache : 24; /**< Fuse information - ICACHE Hard Repair Data */ #else uint64_t icache : 24; uint64_t nodfa_dte : 1; uint64_t nozip : 1; uint64_t efus_ign : 1; uint64_t efus_lck : 1; uint64_t bar2_en : 1; uint64_t zip_crip : 2; uint64_t pll_div4 : 1; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_mio_fus_dat3_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t pll_div4 : 1; /**< Fuse information - PLL DIV4 mode (laser fuse only) */ uint64_t reserved_29_30 : 2; uint64_t bar2_en : 1; /**< Fuse information - BAR2 Enable (when blown '1') */ uint64_t efus_lck : 1; /**< Fuse information - efuse lockdown */ uint64_t efus_ign : 1; /**< Fuse information - efuse ignore This bit only has side effects when blown in the laser fuses. It is ignored if only set in efuse store. */ uint64_t nozip : 1; /**< Fuse information - ZIP disable */ uint64_t nodfa_dte : 1; /**< Fuse information - DFA Disable (DTE) */ uint64_t icache : 24; /**< Fuse information - ICACHE Hard Repair Data */ #else uint64_t icache : 24; uint64_t nodfa_dte : 1; uint64_t nozip : 1; uint64_t efus_ign : 1; uint64_t efus_lck : 1; uint64_t bar2_en : 1; uint64_t reserved_29_30 : 2; uint64_t pll_div4 : 1; uint64_t reserved_32_63 : 32; #endif } cn30xx; struct cvmx_mio_fus_dat3_s cn31xx; struct cvmx_mio_fus_dat3_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_31_63 : 33; uint64_t zip_crip : 2; /**< Fuse information - Zip Cripple (PASS3 Only) */ uint64_t bar2_en : 1; /**< Fuse information - BAR2 Enable (when blown '1') (PASS2 Only) */ uint64_t efus_lck : 1; /**< Fuse information - efuse lockdown (PASS2 Only) */ uint64_t efus_ign : 1; /**< Fuse information - efuse ignore This bit only has side effects when blown in the laser fuses. It is ignored if only set in efuse store. (PASS2 Only) */ uint64_t nozip : 1; /**< Fuse information - ZIP disable (PASS2 Only) */ uint64_t nodfa_dte : 1; /**< Fuse information - DFA Disable (DTE) (PASS2 Only) */ uint64_t icache : 24; /**< Fuse information - ICACHE Hard Repair Data */ #else uint64_t icache : 24; uint64_t nodfa_dte : 1; uint64_t nozip : 1; uint64_t efus_ign : 1; uint64_t efus_lck : 1; uint64_t bar2_en : 1; uint64_t zip_crip : 2; uint64_t reserved_31_63 : 33; #endif } cn38xx; struct cvmx_mio_fus_dat3_cn38xxp2 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_29_63 : 35; uint64_t bar2_en : 1; /**< Fuse information - BAR2 Enable (when blown '1') (PASS2 Only) */ uint64_t efus_lck : 1; /**< Fuse information - efuse lockdown (PASS2 Only) */ uint64_t efus_ign : 1; /**< Fuse information - efuse ignore This bit only has side effects when blown in the laser fuses. It is ignored if only set in efuse store. (PASS2 Only) */ uint64_t nozip : 1; /**< Fuse information - ZIP disable (PASS2 Only) */ uint64_t nodfa_dte : 1; /**< Fuse information - DFA Disable (DTE) (PASS2 Only) */ uint64_t icache : 24; /**< Fuse information - ICACHE Hard Repair Data */ #else uint64_t icache : 24; uint64_t nodfa_dte : 1; uint64_t nozip : 1; uint64_t efus_ign : 1; uint64_t efus_lck : 1; uint64_t bar2_en : 1; uint64_t reserved_29_63 : 35; #endif } cn38xxp2; struct cvmx_mio_fus_dat3_cn38xx cn50xx; struct cvmx_mio_fus_dat3_cn38xx cn52xx; struct cvmx_mio_fus_dat3_cn38xx cn52xxp1; struct cvmx_mio_fus_dat3_cn38xx cn56xx; struct cvmx_mio_fus_dat3_cn38xx cn56xxp1; struct cvmx_mio_fus_dat3_cn38xx cn58xx; struct cvmx_mio_fus_dat3_cn38xx cn58xxp1; } cvmx_mio_fus_dat3_t; /** * cvmx_mio_fus_ema */ typedef union { uint64_t u64; struct cvmx_mio_fus_ema_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_7_63 : 57; uint64_t eff_ema : 3; /**< Effective EMA value */ uint64_t reserved_3_3 : 1; uint64_t ema : 3; /**< EMA Settings */ #else uint64_t ema : 3; uint64_t reserved_3_3 : 1; uint64_t eff_ema : 3; uint64_t reserved_7_63 : 57; #endif } s; struct cvmx_mio_fus_ema_s cn50xx; struct cvmx_mio_fus_ema_s cn52xx; struct cvmx_mio_fus_ema_s cn52xxp1; struct cvmx_mio_fus_ema_s cn56xx; struct cvmx_mio_fus_ema_s cn56xxp1; struct cvmx_mio_fus_ema_cn58xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t ema : 2; /**< EMA Settings */ #else uint64_t ema : 2; uint64_t reserved_2_63 : 62; #endif } cn58xx; struct cvmx_mio_fus_ema_cn58xx cn58xxp1; } cvmx_mio_fus_ema_t; /** * cvmx_mio_fus_pdf */ typedef union { uint64_t u64; struct cvmx_mio_fus_pdf_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t pdf : 64; /**< Fuse information - Product Definition Field */ #else uint64_t pdf : 64; #endif } s; struct cvmx_mio_fus_pdf_s cn50xx; struct cvmx_mio_fus_pdf_s cn52xx; struct cvmx_mio_fus_pdf_s cn52xxp1; struct cvmx_mio_fus_pdf_s cn56xx; struct cvmx_mio_fus_pdf_s cn56xxp1; struct cvmx_mio_fus_pdf_s cn58xx; } cvmx_mio_fus_pdf_t; /** * cvmx_mio_fus_pll */ typedef union { uint64_t u64; struct cvmx_mio_fus_pll_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t rfslip : 1; /**< PLL reference clock slip */ uint64_t fbslip : 1; /**< PLL feedback clock slip */ #else uint64_t fbslip : 1; uint64_t rfslip : 1; uint64_t reserved_2_63 : 62; #endif } s; struct cvmx_mio_fus_pll_s cn50xx; struct cvmx_mio_fus_pll_s cn52xx; struct cvmx_mio_fus_pll_s cn52xxp1; struct cvmx_mio_fus_pll_s cn56xx; struct cvmx_mio_fus_pll_s cn56xxp1; struct cvmx_mio_fus_pll_s cn58xx; struct cvmx_mio_fus_pll_s cn58xxp1; } cvmx_mio_fus_pll_t; /** * cvmx_mio_fus_prog * * Notes: * To write a bank of fuses, SW must set MIO_FUS_WADR[ADDR] to the bank to be * programmed and then set each bit within MIO_FUS_BNK_DATX to indicate which * fuses to blow. Once ADDR and DAT are set up, SW can write to * MIO_FUS_PROG[PROG] to start the bank write and poll on PROG. Once PROG is * clear, the bank write is complete.
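*
* A sketch of that sequence (illustrative only; the cvmx_read_csr()/cvmx_write_csr()
* accessors and the CVMX_MIO_FUS_* address macros are assumed from the companion
* SDK headers, and bank/fuse_word[] stand in for caller-supplied values):
*
*   cvmx_mio_fus_wadr_t wadr;
*   cvmx_mio_fus_prog_t prog;
*   wadr.u64 = 0;
*   wadr.s.addr = bank;                  // select the bank to program
*   cvmx_write_csr(CVMX_MIO_FUS_WADR, wadr.u64);
*   for (int i = 0; i < 4; i++)          // 4 x 64 bits = 256 fuses per bank
*       cvmx_write_csr(CVMX_MIO_FUS_BNK_DATX(i), fuse_word[i]);
*   prog.u64 = 0;
*   prog.s.prog = 1;                     // start the bank write
*   cvmx_write_csr(CVMX_MIO_FUS_PROG, prog.u64);
*   do
*       prog.u64 = cvmx_read_csr(CVMX_MIO_FUS_PROG);
*   while (prog.s.prog);                 // HW clears PROG when the write completes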
*/ typedef union { uint64_t u64; struct cvmx_mio_fus_prog_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t prog : 1; /**< Blow the fuse bank SW will set PROG, and then the HW will clear when the PROG bank is complete */ #else uint64_t prog : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_mio_fus_prog_s cn30xx; struct cvmx_mio_fus_prog_s cn31xx; struct cvmx_mio_fus_prog_s cn38xx; struct cvmx_mio_fus_prog_s cn38xxp2; struct cvmx_mio_fus_prog_s cn50xx; struct cvmx_mio_fus_prog_s cn52xx; struct cvmx_mio_fus_prog_s cn52xxp1; struct cvmx_mio_fus_prog_s cn56xx; struct cvmx_mio_fus_prog_s cn56xxp1; struct cvmx_mio_fus_prog_s cn58xx; struct cvmx_mio_fus_prog_s cn58xxp1; } cvmx_mio_fus_prog_t; /** * cvmx_mio_fus_prog_times * * Notes: * All values must be > 0 for correct electrical operation. * * The reset values are a conservative version for a 50MHz ref_clk. */ typedef union { uint64_t u64; struct cvmx_mio_fus_prog_times_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_33_63 : 31; uint64_t prog_pin : 1; /**< efuse program pin */ uint64_t out : 8; /**< efuse timing param (ref_clks to delay 10ns) */ uint64_t sclk_lo : 4; /**< efuse timing param (ref_clks to delay 5ns) */ uint64_t sclk_hi : 12; /**< efuse timing param (ref_clks to delay 1000ns) */ uint64_t setup : 8; /**< efuse timing param (ref_clks to delay 10ns) */ #else uint64_t setup : 8; uint64_t sclk_hi : 12; uint64_t sclk_lo : 4; uint64_t out : 8; uint64_t prog_pin : 1; uint64_t reserved_33_63 : 31; #endif } s; struct cvmx_mio_fus_prog_times_s cn50xx; struct cvmx_mio_fus_prog_times_s cn52xx; struct cvmx_mio_fus_prog_times_s cn52xxp1; struct cvmx_mio_fus_prog_times_s cn56xx; struct cvmx_mio_fus_prog_times_s cn56xxp1; struct cvmx_mio_fus_prog_times_s cn58xx; struct cvmx_mio_fus_prog_times_s cn58xxp1; } cvmx_mio_fus_prog_times_t; /** * cvmx_mio_fus_rcmd * * Notes: * To read an efuse, SW writes MIO_FUS_RCMD[ADDR,PEND] with the byte address of * the fuse in question, then SW can poll MIO_FUS_RCMD[PEND]. When PEND is * clear, then MIO_FUS_RCMD[DAT] is valid. In addition, if the efuse read went * to the efuse banks (e.g. ADDR > (320/8) || EFUSE is set) SW can read * MIO_FUS_BNK_DATX which contains all 256 fuses in the bank associated in * ADDR. */ typedef union { uint64_t u64; struct cvmx_mio_fus_rcmd_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_24_63 : 40; uint64_t dat : 8; /**< 8bits of fuse data */ uint64_t reserved_13_15 : 3; uint64_t pend : 1; /**< SW sets this bit on a write to start FUSE read operation. HW clears when read is complete and the DAT is valid */ uint64_t reserved_9_11 : 3; uint64_t efuse : 1; /**< When set, return data from the efuse storage rather than the local storage for the 320 HW fuses */ uint64_t addr : 8; /**< The byte address of the fuse to read */ #else uint64_t addr : 8; uint64_t efuse : 1; uint64_t reserved_9_11 : 3; uint64_t pend : 1; uint64_t reserved_13_15 : 3; uint64_t dat : 8; uint64_t reserved_24_63 : 40; #endif } s; struct cvmx_mio_fus_rcmd_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_24_63 : 40; uint64_t dat : 8; /**< 8bits of fuse data */ uint64_t reserved_13_15 : 3; uint64_t pend : 1; /**< SW sets this bit on a write to start FUSE read operation. 
HW clears when read is complete and the DAT is valid */ uint64_t reserved_9_11 : 3; uint64_t efuse : 1; /**< When set, return data from the efuse storage rather than the local storage for the 320 HW fuses */ uint64_t reserved_7_7 : 1; uint64_t addr : 7; /**< The byte address of the fuse to read */ #else uint64_t addr : 7; uint64_t reserved_7_7 : 1; uint64_t efuse : 1; uint64_t reserved_9_11 : 3; uint64_t pend : 1; uint64_t reserved_13_15 : 3; uint64_t dat : 8; uint64_t reserved_24_63 : 40; #endif } cn30xx; struct cvmx_mio_fus_rcmd_cn30xx cn31xx; struct cvmx_mio_fus_rcmd_cn30xx cn38xx; struct cvmx_mio_fus_rcmd_cn30xx cn38xxp2; struct cvmx_mio_fus_rcmd_cn30xx cn50xx; struct cvmx_mio_fus_rcmd_s cn52xx; struct cvmx_mio_fus_rcmd_s cn52xxp1; struct cvmx_mio_fus_rcmd_s cn56xx; struct cvmx_mio_fus_rcmd_s cn56xxp1; struct cvmx_mio_fus_rcmd_cn30xx cn58xx; struct cvmx_mio_fus_rcmd_cn30xx cn58xxp1; } cvmx_mio_fus_rcmd_t; /** * cvmx_mio_fus_spr_repair_res * * Notes: * Pass3 Only * */ typedef union { uint64_t u64; struct cvmx_mio_fus_spr_repair_res_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_42_63 : 22; uint64_t repair2 : 14; /**< SPR BISR Results */ uint64_t repair1 : 14; /**< SPR BISR Results */ uint64_t repair0 : 14; /**< SPR BISR Results */ #else uint64_t repair0 : 14; uint64_t repair1 : 14; uint64_t repair2 : 14; uint64_t reserved_42_63 : 22; #endif } s; struct cvmx_mio_fus_spr_repair_res_s cn30xx; struct cvmx_mio_fus_spr_repair_res_s cn31xx; struct cvmx_mio_fus_spr_repair_res_s cn38xx; struct cvmx_mio_fus_spr_repair_res_s cn50xx; struct cvmx_mio_fus_spr_repair_res_s cn52xx; struct cvmx_mio_fus_spr_repair_res_s cn52xxp1; struct cvmx_mio_fus_spr_repair_res_s cn56xx; struct cvmx_mio_fus_spr_repair_res_s cn56xxp1; struct cvmx_mio_fus_spr_repair_res_s cn58xx; struct cvmx_mio_fus_spr_repair_res_s cn58xxp1; } cvmx_mio_fus_spr_repair_res_t; /** * cvmx_mio_fus_spr_repair_sum * * Notes: * Pass3 Only * */ typedef union { uint64_t u64; struct cvmx_mio_fus_spr_repair_sum_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t too_many : 1; /**< Too Many Defects - cannot repair - bad part */ #else uint64_t too_many : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_mio_fus_spr_repair_sum_s cn30xx; struct cvmx_mio_fus_spr_repair_sum_s cn31xx; struct cvmx_mio_fus_spr_repair_sum_s cn38xx; struct cvmx_mio_fus_spr_repair_sum_s cn50xx; struct cvmx_mio_fus_spr_repair_sum_s cn52xx; struct cvmx_mio_fus_spr_repair_sum_s cn52xxp1; struct cvmx_mio_fus_spr_repair_sum_s cn56xx; struct cvmx_mio_fus_spr_repair_sum_s cn56xxp1; struct cvmx_mio_fus_spr_repair_sum_s cn58xx; struct cvmx_mio_fus_spr_repair_sum_s cn58xxp1; } cvmx_mio_fus_spr_repair_sum_t; /** * cvmx_mio_fus_unlock */ typedef union { uint64_t u64; struct cvmx_mio_fus_unlock_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_24_63 : 40; uint64_t key : 24; /**< When set to the typical value, allows SW to program the efuses */ #else uint64_t key : 24; uint64_t reserved_24_63 : 40; #endif } s; struct cvmx_mio_fus_unlock_s cn30xx; struct cvmx_mio_fus_unlock_s cn31xx; } cvmx_mio_fus_unlock_t; /** * cvmx_mio_fus_wadr */ typedef union { uint64_t u64; struct cvmx_mio_fus_wadr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_10_63 : 54; uint64_t addr : 10; /**< Which of the four banks of 256 fuses to blow */ #else uint64_t addr : 10; uint64_t reserved_10_63 : 54; #endif } s; struct cvmx_mio_fus_wadr_s cn30xx; struct cvmx_mio_fus_wadr_s cn31xx; struct cvmx_mio_fus_wadr_s cn38xx; struct cvmx_mio_fus_wadr_s cn38xxp2; struct 
cvmx_mio_fus_wadr_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t addr : 2; /**< Which of the four banks of 256 fuses to blow */ #else uint64_t addr : 2; uint64_t reserved_2_63 : 62; #endif } cn50xx; struct cvmx_mio_fus_wadr_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_3_63 : 61; uint64_t addr : 3; /**< Which of the four banks of 256 fuses to blow */ #else uint64_t addr : 3; uint64_t reserved_3_63 : 61; #endif } cn52xx; struct cvmx_mio_fus_wadr_cn52xx cn52xxp1; struct cvmx_mio_fus_wadr_cn52xx cn56xx; struct cvmx_mio_fus_wadr_cn52xx cn56xxp1; struct cvmx_mio_fus_wadr_cn50xx cn58xx; struct cvmx_mio_fus_wadr_cn50xx cn58xxp1; } cvmx_mio_fus_wadr_t; /** * cvmx_mio_ndf_dma_cfg * * MIO_NDF_DMA_CFG = MIO NAND Flash DMA Config Register * * SIZE is specified in number of 64 bit transfers (encoded in -1 notation). * * ADR must be 64 bit aligned. */ typedef union { uint64_t u64; struct cvmx_mio_ndf_dma_cfg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t en : 1; /**< DMA Engine enable */ uint64_t rw : 1; /**< DMA Engine R/W bit (0 = read, 1 = write) */ uint64_t clr : 1; /**< DMA Engine clear EN on device terminated burst */ uint64_t reserved_60_60 : 1; uint64_t swap32 : 1; /**< DMA Engine 32 bit swap */ uint64_t swap16 : 1; /**< DMA Engine 16 bit swap */ uint64_t swap8 : 1; /**< DMA Engine 8 bit swap */ uint64_t endian : 1; /**< DMA Engine NCB endian mode (0 = big, 1 = little) */ uint64_t size : 20; /**< DMA Engine size */ uint64_t adr : 36; /**< DMA Engine address */ #else uint64_t adr : 36; uint64_t size : 20; uint64_t endian : 1; uint64_t swap8 : 1; uint64_t swap16 : 1; uint64_t swap32 : 1; uint64_t reserved_60_60 : 1; uint64_t clr : 1; uint64_t rw : 1; uint64_t en : 1; #endif } s; struct cvmx_mio_ndf_dma_cfg_s cn52xx; } cvmx_mio_ndf_dma_cfg_t; /** * cvmx_mio_ndf_dma_int * * MIO_NDF_DMA_INT = MIO NAND Flash DMA Interrupt Register * */ typedef union { uint64_t u64; struct cvmx_mio_ndf_dma_int_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t done : 1; /**< DMA Engine request completion interrupt */ #else uint64_t done : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_mio_ndf_dma_int_s cn52xx; } cvmx_mio_ndf_dma_int_t; /** * cvmx_mio_ndf_dma_int_en * * MIO_NDF_DMA_INT_EN = MIO NAND Flash DMA Interrupt Enable Register * */ typedef union { uint64_t u64; struct cvmx_mio_ndf_dma_int_en_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t done : 1; /**< DMA Engine request completion interrupt enable */ #else uint64_t done : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_mio_ndf_dma_int_en_s cn52xx; } cvmx_mio_ndf_dma_int_en_t; /** * cvmx_mio_pll_ctl */ typedef union { uint64_t u64; struct cvmx_mio_pll_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t bw_ctl : 5; /**< Core PLL bandwidth control */ #else uint64_t bw_ctl : 5; uint64_t reserved_5_63 : 59; #endif } s; struct cvmx_mio_pll_ctl_s cn30xx; struct cvmx_mio_pll_ctl_s cn31xx; } cvmx_mio_pll_ctl_t; /** * cvmx_mio_pll_setting */ typedef union { uint64_t u64; struct cvmx_mio_pll_setting_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_17_63 : 47; uint64_t setting : 17; /**< Core PLL setting */ #else uint64_t setting : 17; uint64_t reserved_17_63 : 47; #endif } s; struct cvmx_mio_pll_setting_s cn30xx; struct cvmx_mio_pll_setting_s cn31xx; } cvmx_mio_pll_setting_t; /** * cvmx_mio_tws#_int * * MIO_TWSX_INT = TWSX Interrupt Register * * This register contains the TWSI interrupt enable mask and the interrupt source 
bits. Note: the * interrupt source bit for the TWSI core interrupt (CORE_INT) is read-only, the appropriate sequence * must be written to the TWSI core to clear this interrupt. The other interrupt source bits are write- * one-to-clear. TS_INT is set on the update of the MIO_TWS_TWSI_SW register (i.e. when it is written * by a TWSI device). ST_INT is set whenever the valid bit of the MIO_TWS_SW_TWSI is cleared (see above * for reasons). * * Note: When using the high-level controller, CORE_EN should be clear and CORE_INT should be ignored. * Conversely, when the high-level controller is disabled, ST_EN / TS_EN should be clear and ST_INT / * TS_INT should be ignored. * * This register also contains a read-only copy of the TWSI bus (SCL and SDA) as well as control bits to * override the current state of the TWSI bus (SCL_OVR and SDA_OVR). Setting an override bit high will * result in the open drain driver being activated, thus driving the corresponding signal low. */ typedef union { uint64_t u64; struct cvmx_mio_twsx_int_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t scl : 1; /**< SCL (NOT IN PASS1 OR PASS2) */ uint64_t sda : 1; /**< SDA (NOT IN PASS1 OR PASS2) */ uint64_t scl_ovr : 1; /**< SCL override (NOT IN PASS1 OR PASS2) */ uint64_t sda_ovr : 1; /**< SDA override (NOT IN PASS1 OR PASS2) */ uint64_t reserved_7_7 : 1; uint64_t core_en : 1; /**< TWSI core interrupt enable */ uint64_t ts_en : 1; /**< MIO_TWS_TWSI_SW register update interrupt enable */ uint64_t st_en : 1; /**< MIO_TWS_SW_TWSI register update interrupt enable */ uint64_t reserved_3_3 : 1; uint64_t core_int : 1; /**< TWSI core interrupt */ uint64_t ts_int : 1; /**< MIO_TWS_TWSI_SW register update interrupt */ uint64_t st_int : 1; /**< MIO_TWS_SW_TWSI register update interrupt */ #else uint64_t st_int : 1; uint64_t ts_int : 1; uint64_t core_int : 1; uint64_t reserved_3_3 : 1; uint64_t st_en : 1; uint64_t ts_en : 1; uint64_t core_en : 1; uint64_t reserved_7_7 : 1; uint64_t sda_ovr : 1; uint64_t scl_ovr : 1; uint64_t sda : 1; uint64_t scl : 1; uint64_t reserved_12_63 : 52; #endif } s; struct cvmx_mio_twsx_int_s cn30xx; struct cvmx_mio_twsx_int_s cn31xx; struct cvmx_mio_twsx_int_s cn38xx; struct cvmx_mio_twsx_int_cn38xxp2 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_7_63 : 57; uint64_t core_en : 1; /**< TWSI core interrupt enable */ uint64_t ts_en : 1; /**< MIO_TWS_TWSI_SW register update interrupt enable */ uint64_t st_en : 1; /**< MIO_TWS_SW_TWSI register update interrupt enable */ uint64_t reserved_3_3 : 1; uint64_t core_int : 1; /**< TWSI core interrupt */ uint64_t ts_int : 1; /**< MIO_TWS_TWSI_SW register update interrupt */ uint64_t st_int : 1; /**< MIO_TWS_SW_TWSI register update interrupt */ #else uint64_t st_int : 1; uint64_t ts_int : 1; uint64_t core_int : 1; uint64_t reserved_3_3 : 1; uint64_t st_en : 1; uint64_t ts_en : 1; uint64_t core_en : 1; uint64_t reserved_7_63 : 57; #endif } cn38xxp2; struct cvmx_mio_twsx_int_s cn50xx; struct cvmx_mio_twsx_int_s cn52xx; struct cvmx_mio_twsx_int_s cn52xxp1; struct cvmx_mio_twsx_int_s cn56xx; struct cvmx_mio_twsx_int_s cn56xxp1; struct cvmx_mio_twsx_int_s cn58xx; struct cvmx_mio_twsx_int_s cn58xxp1; } cvmx_mio_twsx_int_t; /** * cvmx_mio_tws#_sw_twsi * * MIO_TWSX_SW_TWSI = TWSX Software to TWSI Register * * This register allows software to * - initiate TWSI interface master-mode operations with a write and read the result with a read * - load four bytes for later retrieval (slave mode) with a write and check validity with a read * - launch a TWSI 
controller configuration read/write with a write and read the result with a read * * This register should be read or written by software, and read by the TWSI device. The TWSI device can * use either two-byte or five-byte reads to reference this register. * * The TWSI device considers this register valid when V==1 and SLONLY==1. */ typedef union { uint64_t u64; struct cvmx_mio_twsx_sw_twsi_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t v : 1; /**< Valid bit - Set on a write (should always be written with a 1) - Cleared when a TWSI master mode op completes - Cleared when a TWSI configuration register access completes - Cleared when the TWSI device reads the register if SLONLY==1 */ uint64_t slonly : 1; /**< Slave Only Mode - No operation is initiated with a write when this bit is set - only D field is updated in this case - When clear, a write initiates either a TWSI master-mode operation or a TWSI configuration register access */ uint64_t eia : 1; /**< Extended Internal Address - send additional internal address byte (MSB of IA is from IA field of MIO_TWS_SW_TWSI_EXT) (NOT IN PASS 1) */ uint64_t op : 4; /**< Opcode field - When the register is written with SLONLY==0, initiate a read or write: 0000 => 7-bit Byte Master Mode TWSI Op 0001 => 7-bit Byte Combined Read Master Mode Op 7-bit Byte Write w/ IA Master Mode Op 0010 => 10-bit Byte Master Mode TWSI Op 0011 => 10-bit Byte Combined Read Master Mode Op 10-bit Byte Write w/ IA Master Mode Op 0100 => TWSI Master Clock Register 0110 => See EOP field 1000 => 7-bit 4-byte Master Mode TWSI Op 1001 => 7-bit 4-byte Comb. Read Master Mode Op 7-bit 4-byte Write w/ IA Master Mode Op 1010 => 10-bit 4-byte Master Mode TWSI Op 1011 => 10-bit 4-byte Comb. Read Master Mode Op 10-bit 4-byte Write w/ IA Master Mode Op */ uint64_t r : 1; /**< Read bit or result - If set on a write when SLONLY==0, the operation is a read - On a read, this bit returns the result indication for the most recent master mode operation (1 = success, 0 = fail) */ uint64_t sovr : 1; /**< Size Override - if set, use the SIZE field to determine Master Mode Op size rather than what the Opcode field specifies. For operations greater than 4 bytes, the additional data will be contained in the D field of MIO_TWS_SW_TWSI_EXT (NOT IN PASS 1) */ uint64_t size : 3; /**< Size in bytes of Master Mode Op if the Size Override bit is set. Specified in -1 notation (i.e. 0 = 1 byte, 1 = 2 bytes ... 
7 = 8 bytes) (NOT IN PASS 1) */ uint64_t scr : 2; /**< Scratch - unused, but retain state */ uint64_t a : 10; /**< Address field - the address of the remote device for a master mode operation - A<9:7> are only used for 10-bit addressing */ uint64_t ia : 5; /**< Internal Address - Used when launching a master mode combined read / write with internal address (lower 3 bits are contained in the EOP_IA field) */ uint64_t eop_ia : 3; /**< Extra opcode (when OP<3:0> == 0110 and SLONLY==0): 000 => TWSI Slave Address Register 001 => TWSI Data Register 010 => TWSI Control Register 011 => TWSI Clock Control Register (when R == 0) 011 => TWSI Status Register (when R == 1) 100 => TWSI Extended Slave Register 111 => TWSI Soft Reset Register Also the lower 3 bits of Internal Address when launching a master mode combined read / write with internal address */ uint64_t d : 32; /**< Data Field Used on a write when - initiating a master-mode write (SLONLY==0) - writing a TWSI config register (SLONLY==0) - a slave mode write (SLONLY==1) The read value is updated by - a write to this register - master mode completion (contains result or error code) - TWSI config register read (contains result) */ #else uint64_t d : 32; uint64_t eop_ia : 3; uint64_t ia : 5; uint64_t a : 10; uint64_t scr : 2; uint64_t size : 3; uint64_t sovr : 1; uint64_t r : 1; uint64_t op : 4; uint64_t eia : 1; uint64_t slonly : 1; uint64_t v : 1; #endif } s; struct cvmx_mio_twsx_sw_twsi_s cn30xx; struct cvmx_mio_twsx_sw_twsi_s cn31xx; struct cvmx_mio_twsx_sw_twsi_s cn38xx; struct cvmx_mio_twsx_sw_twsi_s cn38xxp2; struct cvmx_mio_twsx_sw_twsi_s cn50xx; struct cvmx_mio_twsx_sw_twsi_s cn52xx; struct cvmx_mio_twsx_sw_twsi_s cn52xxp1; struct cvmx_mio_twsx_sw_twsi_s cn56xx; struct cvmx_mio_twsx_sw_twsi_s cn56xxp1; struct cvmx_mio_twsx_sw_twsi_s cn58xx; struct cvmx_mio_twsx_sw_twsi_s cn58xxp1; } cvmx_mio_twsx_sw_twsi_t; /** * cvmx_mio_tws#_sw_twsi_ext * * MIO_TWSX_SW_TWSI_EXT = TWSX Software to TWSI Extension Register * * This register contains an additional byte of internal address and 4 additional bytes of data to be * used with TWSI master mode operations. IA will be sent as the first byte of internal address when * performing master mode combined read / write with internal address operations and the EIA bit of * MIO_TWS_SW_TWSI is set. D extends the data field of MIO_TWS_SW_TWSI for a total of 8 bytes (SOVR * must be set to perform operations greater than 4 bytes). */ typedef union { uint64_t u64; struct cvmx_mio_twsx_sw_twsi_ext_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_40_63 : 24; uint64_t ia : 8; /**< Extended Internal Address */ uint64_t d : 32; /**< Extended Data Field */ #else uint64_t d : 32; uint64_t ia : 8; uint64_t reserved_40_63 : 24; #endif } s; struct cvmx_mio_twsx_sw_twsi_ext_s cn30xx; struct cvmx_mio_twsx_sw_twsi_ext_s cn31xx; struct cvmx_mio_twsx_sw_twsi_ext_s cn38xx; struct cvmx_mio_twsx_sw_twsi_ext_s cn38xxp2; struct cvmx_mio_twsx_sw_twsi_ext_s cn50xx; struct cvmx_mio_twsx_sw_twsi_ext_s cn52xx; struct cvmx_mio_twsx_sw_twsi_ext_s cn52xxp1; struct cvmx_mio_twsx_sw_twsi_ext_s cn56xx; struct cvmx_mio_twsx_sw_twsi_ext_s cn56xxp1; struct cvmx_mio_twsx_sw_twsi_ext_s cn58xx; struct cvmx_mio_twsx_sw_twsi_ext_s cn58xxp1; } cvmx_mio_twsx_sw_twsi_ext_t; /** * cvmx_mio_tws#_twsi_sw * * MIO_TWSX_TWSI_SW = TWSX TWSI to Software Register * * This register allows the TWSI device to transfer data to software and later check that software has * received the information. 
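*
* A software-side polling sketch (illustrative only; the cvmx_read_csr() accessor
* and the CVMX_MIO_TWSX_TWSI_SW(bus) address macro are assumed from the companion
* SDK headers, and handle_data() is a placeholder consumer):
*
*   cvmx_mio_twsx_twsi_sw_t twsi_sw;
*   twsi_sw.u64 = cvmx_read_csr(CVMX_MIO_TWSX_TWSI_SW(0));  // TWSI bus 0
*   if (twsi_sw.s.v)                     // set whenever the TWSI device has written
*       handle_data(twsi_sw.s.d);        // V was already cleared by the read above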
* * This register should be read or written by the TWSI device, and read by software. The TWSI device can * use one-byte or four-byte payload writes, and two-byte payload reads. * * The TWSI device considers this register valid when V==1. */ typedef union { uint64_t u64; struct cvmx_mio_twsx_twsi_sw_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t v : 2; /**< Valid Bits - Not directly writable - Set to 1 on any write by the TWSI device - Cleared on any read by software */ uint64_t reserved_32_61 : 30; uint64_t d : 32; /**< Data Field - updated on a write by the TWSI device */ #else uint64_t d : 32; uint64_t reserved_32_61 : 30; uint64_t v : 2; #endif } s; struct cvmx_mio_twsx_twsi_sw_s cn30xx; struct cvmx_mio_twsx_twsi_sw_s cn31xx; struct cvmx_mio_twsx_twsi_sw_s cn38xx; struct cvmx_mio_twsx_twsi_sw_s cn38xxp2; struct cvmx_mio_twsx_twsi_sw_s cn50xx; struct cvmx_mio_twsx_twsi_sw_s cn52xx; struct cvmx_mio_twsx_twsi_sw_s cn52xxp1; struct cvmx_mio_twsx_twsi_sw_s cn56xx; struct cvmx_mio_twsx_twsi_sw_s cn56xxp1; struct cvmx_mio_twsx_twsi_sw_s cn58xx; struct cvmx_mio_twsx_twsi_sw_s cn58xxp1; } cvmx_mio_twsx_twsi_sw_t; /** * cvmx_mio_uart#_dlh * * MIO_UARTX_DLH = MIO UARTX Divisor Latch High Register * * The DLH (Divisor Latch High) register in conjunction with DLL (Divisor Latch Low) register form a * 16-bit, read/write, Divisor Latch register that contains the baud rate divisor for the UART. It is * accessed by first setting the DLAB bit (bit 7) in the Line Control Register (LCR). The output baud * rate is equal to eclk frequency divided by sixteen times the value of the baud rate divisor, as * follows: baud rate = eclk / (16 * divisor). * * Note that the BUSY bit (bit 0) of the UART Status Register (USR) must be clear before writing this * register. BUSY bit is always clear in PASS3. * * Note that with the Divisor Latch Registers (DLL and DLH) set to zero, the baud clock is disabled * and no serial communications will occur. Also, once the DLL or DLH is set, at least 8 clock cycles * of eclk should be allowed to pass before transmitting or receiving data. * * Note: The address below is an alias to simplify these CSR descriptions. It should be known that the * IER and DLH registers are the same. */ typedef union { uint64_t u64; struct cvmx_mio_uartx_dlh_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t dlh : 8; /**< Divisor Latch High Register */ #else uint64_t dlh : 8; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_mio_uartx_dlh_s cn30xx; struct cvmx_mio_uartx_dlh_s cn31xx; struct cvmx_mio_uartx_dlh_s cn38xx; struct cvmx_mio_uartx_dlh_s cn38xxp2; struct cvmx_mio_uartx_dlh_s cn50xx; struct cvmx_mio_uartx_dlh_s cn52xx; struct cvmx_mio_uartx_dlh_s cn52xxp1; struct cvmx_mio_uartx_dlh_s cn56xx; struct cvmx_mio_uartx_dlh_s cn56xxp1; struct cvmx_mio_uartx_dlh_s cn58xx; struct cvmx_mio_uartx_dlh_s cn58xxp1; } cvmx_mio_uartx_dlh_t; typedef cvmx_mio_uartx_dlh_t cvmx_uart_dlh_t; /** * cvmx_mio_uart#_dll * * MIO_UARTX_DLL = MIO UARTX Divisor Latch Low Register * * The DLH (Divisor Latch High) register in conjunction with DLL (Divisor Latch Low) register form a * 16-bit, read/write, Divisor Latch register that contains the baud rate divisor for the UART. It is * accessed by first setting the DLAB bit (bit 7) in the Line Control Register (LCR). The output baud * rate is equal to eclk frequency divided by sixteen times the value of the baud rate divisor, as * follows: baud rate = eclk / (16 * divisor). 
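*
* For example, with a 500 MHz eclk (an illustrative value; use the actual core
* clock) and a 115200 baud target: divisor = 500000000 / (16 * 115200) = 271 =
* 0x10F, so DLH = 0x01 and DLL = 0x0F, giving an actual rate of roughly 115314
* baud. A hypothetical setup sketch (the accessors, address macros, and the
* eclk_hz/baud/uart values are assumed, not defined in this file; LCR[DLAB]
* must be set around the divisor writes as described above):
*
*   uint16_t divisor = (uint16_t)(eclk_hz / (16ull * baud));
*   // ... set LCR[DLAB] = 1 ...
*   cvmx_write_csr(CVMX_MIO_UARTX_DLL(uart), divisor & 0xff);
*   cvmx_write_csr(CVMX_MIO_UARTX_DLH(uart), divisor >> 8);
*   // ... clear LCR[DLAB] ...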
* * Note that the BUSY bit (bit 0) of the UART Status Register (USR) must be clear before writing this * register. BUSY bit is always clear in PASS3. * * Note that with the Divisor Latch Registers (DLL and DLH) set to zero, the baud clock is disabled * and no serial communications will occur. Also, once the DLL or DLH is set, at least 8 clock cycles * of eclk should be allowed to pass before transmitting or receiving data. * * Note: The address below is an alias to simplify these CSR descriptions. It should be known that the * RBR, THR, and DLL registers are the same. */ typedef union { uint64_t u64; struct cvmx_mio_uartx_dll_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t dll : 8; /**< Divisor Latch Low Register */ #else uint64_t dll : 8; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_mio_uartx_dll_s cn30xx; struct cvmx_mio_uartx_dll_s cn31xx; struct cvmx_mio_uartx_dll_s cn38xx; struct cvmx_mio_uartx_dll_s cn38xxp2; struct cvmx_mio_uartx_dll_s cn50xx; struct cvmx_mio_uartx_dll_s cn52xx; struct cvmx_mio_uartx_dll_s cn52xxp1; struct cvmx_mio_uartx_dll_s cn56xx; struct cvmx_mio_uartx_dll_s cn56xxp1; struct cvmx_mio_uartx_dll_s cn58xx; struct cvmx_mio_uartx_dll_s cn58xxp1; } cvmx_mio_uartx_dll_t; typedef cvmx_mio_uartx_dll_t cvmx_uart_dll_t; /** * cvmx_mio_uart#_far * * MIO_UARTX_FAR = MIO UARTX FIFO Access Register * * The FIFO Access Register (FAR) is used to enable a FIFO access mode for testing, so that the receive * FIFO can be written by software and the transmit FIFO can be read by software when the FIFOs are * enabled. When FIFOs are not enabled, it allows the RBR to be written by software and the THR to be read * by software. Note that when the FIFO access mode is enabled/disabled, the control portion of the * receive FIFO and transmit FIFO is reset and the FIFOs are treated as empty. */ typedef union { uint64_t u64; struct cvmx_mio_uartx_far_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t far : 1; /**< FIFO Access Register */ #else uint64_t far : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_mio_uartx_far_s cn30xx; struct cvmx_mio_uartx_far_s cn31xx; struct cvmx_mio_uartx_far_s cn38xx; struct cvmx_mio_uartx_far_s cn38xxp2; struct cvmx_mio_uartx_far_s cn50xx; struct cvmx_mio_uartx_far_s cn52xx; struct cvmx_mio_uartx_far_s cn52xxp1; struct cvmx_mio_uartx_far_s cn56xx; struct cvmx_mio_uartx_far_s cn56xxp1; struct cvmx_mio_uartx_far_s cn58xx; struct cvmx_mio_uartx_far_s cn58xxp1; } cvmx_mio_uartx_far_t; typedef cvmx_mio_uartx_far_t cvmx_uart_far_t; /** * cvmx_mio_uart#_fcr * * MIO_UARTX_FCR = MIO UARTX FIFO Control Register * * The FIFO Control Register (FCR) is a write-only register that controls the read and write data FIFO * operation. When FIFOs and Programmable THRE Interrupt mode are enabled, this register also controls * the THRE Interrupt empty threshold level. * * Setting bit 0 of the FCR enables the transmit and receive FIFOs. Whenever the value of this bit is * changed, both the TX and RX FIFOs will be reset. * * Writing a '1' to bit 1 of the FCR resets and flushes data in the receive FIFO. Note that this bit is * self-clearing and it is not necessary to clear this bit. * * Writing a '1' to bit 2 of the FCR resets and flushes data in the transmit FIFO. Note that this bit is * self-clearing and it is not necessary to clear this bit. * * If the FIFOs and Programmable THRE Interrupt mode are enabled, bits 4 and 5 control the empty * threshold level at which THRE Interrupts are generated when the mode is active.
See the following * table for encodings: * * TX Trigger * ---------- * 00 = empty FIFO * 01 = 2 chars in FIFO * 10 = FIFO 1/4 full * 11 = FIFO 1/2 full * * If the FIFO mode is enabled (bit 0 of the FCR is set to '1') bits 6 and 7 are active. Bit 6 and bit 7 * set the trigger level in the receiver FIFO for the Enable Received Data Available Interrupt (ERBFI). * In auto flow control mode the trigger is used to determine when the rts_n signal will be deasserted. * See the following table for encodings: * * RX Trigger * ---------- * 00 = 1 char in FIFO * 01 = FIFO 1/4 full * 10 = FIFO 1/2 full * 11 = FIFO 2 chars less than full * * Note: The address below is an alias to simplify these CSR descriptions. It should be known that the * IIR and FCR registers are the same. */ typedef union { uint64_t u64; struct cvmx_mio_uartx_fcr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t rxtrig : 2; /**< RX Trigger */ uint64_t txtrig : 2; /**< TX Trigger */ uint64_t reserved_3_3 : 1; uint64_t txfr : 1; /**< TX FIFO reset */ uint64_t rxfr : 1; /**< RX FIFO reset */ uint64_t en : 1; /**< FIFO enable */ #else uint64_t en : 1; uint64_t rxfr : 1; uint64_t txfr : 1; uint64_t reserved_3_3 : 1; uint64_t txtrig : 2; uint64_t rxtrig : 2; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_mio_uartx_fcr_s cn30xx; struct cvmx_mio_uartx_fcr_s cn31xx; struct cvmx_mio_uartx_fcr_s cn38xx; struct cvmx_mio_uartx_fcr_s cn38xxp2; struct cvmx_mio_uartx_fcr_s cn50xx; struct cvmx_mio_uartx_fcr_s cn52xx; struct cvmx_mio_uartx_fcr_s cn52xxp1; struct cvmx_mio_uartx_fcr_s cn56xx; struct cvmx_mio_uartx_fcr_s cn56xxp1; struct cvmx_mio_uartx_fcr_s cn58xx; struct cvmx_mio_uartx_fcr_s cn58xxp1; } cvmx_mio_uartx_fcr_t; typedef cvmx_mio_uartx_fcr_t cvmx_uart_fcr_t; /** * cvmx_mio_uart#_htx * * MIO_UARTX_HTX = MIO UARTX Halt TX Register * * The Halt TX Register (HTX) is used to halt transmissions for testing, so that the transmit FIFO can be * filled by software when FIFOs are enabled. If FIFOs are not enabled, setting the HTX register will * have no effect. */ typedef union { uint64_t u64; struct cvmx_mio_uartx_htx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t htx : 1; /**< Halt TX */ #else uint64_t htx : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_mio_uartx_htx_s cn30xx; struct cvmx_mio_uartx_htx_s cn31xx; struct cvmx_mio_uartx_htx_s cn38xx; struct cvmx_mio_uartx_htx_s cn38xxp2; struct cvmx_mio_uartx_htx_s cn50xx; struct cvmx_mio_uartx_htx_s cn52xx; struct cvmx_mio_uartx_htx_s cn52xxp1; struct cvmx_mio_uartx_htx_s cn56xx; struct cvmx_mio_uartx_htx_s cn56xxp1; struct cvmx_mio_uartx_htx_s cn58xx; struct cvmx_mio_uartx_htx_s cn58xxp1; } cvmx_mio_uartx_htx_t; typedef cvmx_mio_uartx_htx_t cvmx_uart_htx_t; /** * cvmx_mio_uart#_ier * * MIO_UARTX_IER = MIO UARTX Interrupt Enable Register * * Interrupt Enable Register (IER) is a read/write register that contains four bits that enable * the generation of interrupts. These four bits are the Enable Received Data Available Interrupt * (ERBFI), the Enable Transmitter Holding Register Empty Interrupt (ETBEI), the Enable Receiver Line * Status Interrupt (ELSI), and the Enable Modem Status Interrupt (EDSSI). * * The IER also contains an enable bit (PTIME) for the Programmable THRE Interrupt mode. * * Note: The Divisor Latch Address Bit (DLAB) of the Line Control Register (LCR) must be clear to access * this register. * * Note: The address below is an alias to simplify these CSR descriptions. 
It should be known that the * IER and DLH registers are the same. */ typedef union { uint64_t u64; struct cvmx_mio_uartx_ier_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t ptime : 1; /**< Programmable THRE Interrupt mode enable */ uint64_t reserved_4_6 : 3; uint64_t edssi : 1; /**< Enable Modem Status Interrupt */ uint64_t elsi : 1; /**< Enable Receiver Line Status Interrupt */ uint64_t etbei : 1; /**< Enable Transmitter Holding Register Empty Interrupt */ uint64_t erbfi : 1; /**< Enable Received Data Available Interrupt */ #else uint64_t erbfi : 1; uint64_t etbei : 1; uint64_t elsi : 1; uint64_t edssi : 1; uint64_t reserved_4_6 : 3; uint64_t ptime : 1; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_mio_uartx_ier_s cn30xx; struct cvmx_mio_uartx_ier_s cn31xx; struct cvmx_mio_uartx_ier_s cn38xx; struct cvmx_mio_uartx_ier_s cn38xxp2; struct cvmx_mio_uartx_ier_s cn50xx; struct cvmx_mio_uartx_ier_s cn52xx; struct cvmx_mio_uartx_ier_s cn52xxp1; struct cvmx_mio_uartx_ier_s cn56xx; struct cvmx_mio_uartx_ier_s cn56xxp1; struct cvmx_mio_uartx_ier_s cn58xx; struct cvmx_mio_uartx_ier_s cn58xxp1; } cvmx_mio_uartx_ier_t; typedef cvmx_mio_uartx_ier_t cvmx_uart_ier_t; /** * cvmx_mio_uart#_iir * * MIO_UARTX_IIR = MIO UARTX Interrupt Identity Register * * The Interrupt Identity Register (IIR) is a read-only register that identifies the source of an * interrupt. The upper two bits of the register are FIFO-enabled bits. These bits are '00' if the FIFOs * are disabled, and '11' if they are enabled. The lower four bits identify the highest priority pending * interrupt. The following table defines interrupt source decoding, interrupt priority, and interrupt * reset control:
 *
 * ID 0001 (priority -): None. Source: none. Reset by: -.
 *
 * ID 0110 (highest): Receiver Line Status interrupt. Source: overrun, parity, or framing errors, or break. Reset by: reading the Line Status Register.
 *
 * ID 0100 (second): Received Data Available. Source: receiver data available (FIFOs disabled) or RX FIFO trigger level reached (FIFOs enabled). Reset by: reading the Receiver Buffer Register (FIFOs disabled) or the FIFO dropping below the trigger level (FIFOs enabled).
 *
 * ID 1100 (second): Character Timeout Indication. Source: no characters in or out of the RX FIFO during the last 4 character times while there is at least 1 character in it during this time. Reset by: reading the Receiver Buffer Register.
 *
 * ID 0010 (third): Transmitter Holding Register Empty. Source: Transmitter Holding Register Empty (Programmable THRE Mode disabled) or TX FIFO at or below threshold (Programmable THRE Mode enabled). Reset by: reading the Interrupt Identity Register (if source of interrupt), writing into the THR (FIFOs or THRE Mode disabled), or the TX FIFO rising above threshold (FIFOs and THRE Mode enabled).
 *
 * ID 0000 (fourth): Modem Status Changed. Source: Clear To Send (CTS), Data Set Ready (DSR), Ring Indicator (RI), or Data Carrier Detect (DCD) changed (note: if auto flow control mode is enabled, a change in CTS will not cause an interrupt). Reset by: reading the Modem Status Register.
 *
 * ID 0111 (fifth): Busy Detect Indication. Source: software has tried to write to the Line Control Register while the BUSY bit of the UART Status Register was set. Reset by: reading the UART Status Register.
 *
 * Note: The Busy Detect Indication interrupt has been removed from PASS3 and will never assert. * * Note: The address below is an alias to simplify these CSR descriptions.
It should be known that the * IIR and FCR registers are the same. */ typedef union { uint64_t u64; struct cvmx_mio_uartx_iir_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t fen : 2; /**< FIFO-enabled bits */ uint64_t reserved_4_5 : 2; cvmx_uart_iid_t iid : 4; /**< Interrupt ID */ #else cvmx_uart_iid_t iid : 4; uint64_t reserved_4_5 : 2; uint64_t fen : 2; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_mio_uartx_iir_s cn30xx; struct cvmx_mio_uartx_iir_s cn31xx; struct cvmx_mio_uartx_iir_s cn38xx; struct cvmx_mio_uartx_iir_s cn38xxp2; struct cvmx_mio_uartx_iir_s cn50xx; struct cvmx_mio_uartx_iir_s cn52xx; struct cvmx_mio_uartx_iir_s cn52xxp1; struct cvmx_mio_uartx_iir_s cn56xx; struct cvmx_mio_uartx_iir_s cn56xxp1; struct cvmx_mio_uartx_iir_s cn58xx; struct cvmx_mio_uartx_iir_s cn58xxp1; } cvmx_mio_uartx_iir_t; typedef cvmx_mio_uartx_iir_t cvmx_uart_iir_t; /** * cvmx_mio_uart#_lcr * * MIO_UARTX_LCR = MIO UARTX Line Control Register * * The Line Control Register (LCR) controls the format of the data that is transmitted and received by * the UART. * * LCR bits 0 and 1 are the Character Length Select field. This field is used to select the number of * data bits per character that are transmitted and received. See the following table for encodings: * * CLS * --- * 00 = 5 bits (bits 0-4 sent) * 01 = 6 bits (bits 0-5 sent) * 10 = 7 bits (bits 0-6 sent) * 11 = 8 bits (all bits sent) * * LCR bit 2 controls the number of stop bits transmitted. If bit 2 is a '0', one stop bit is transmitted * in the serial data. If bit 2 is a '1' and the data bits are set to '00', one and a half stop bits are * generated. Otherwise, two stop bits are generated and transmitted in the serial data out. Note that * regardless of the number of stop bits selected, the receiver will only check the first stop bit. * * LCR bit 3 is the Parity Enable bit. This bit is used to enable and disable parity generation and * detection in the transmitted and received serial characters, respectively. * * LCR bit 4 is the Even Parity Select bit. If parity is enabled, bit 4 selects between even and odd * parity. If bit 4 is a '1', an even number of ones is transmitted or checked. If bit 4 is a '0', an odd * number of ones is transmitted or checked. * * LCR bit 6 is the Break Control bit. Setting the Break bit sends a break signal by holding the sout * line low (when not in Loopback mode, as determined by Modem Control Register bit 4). When in Loopback * mode, the break condition is internally looped back to the receiver. * * LCR bit 7 is the Divisor Latch Address bit. Setting this bit enables reading and writing of the * Divisor Latch register (DLL and DLH) to set the baud rate of the UART. This bit must be cleared after * initial baud rate setup in order to access other registers. * * Note: The LCR is writable only when the UART is not busy (when the BUSY bit (bit 0) of the UART * Status Register (USR) is clear). The LCR is always readable. In PASS3, the LCR is always writable * because the BUSY bit is always clear.
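 *
 * As an illustration only (not part of the hardware description), a minimal baud-rate setup sketch
 * using the DLAB sequence above. It assumes the usual SDK CSR accessors cvmx_read_csr()/cvmx_write_csr()
 * and the CVMX_MIO_UARTX_LCR/DLL/DLH address macros defined elsewhere in the SDK; 'divisor' is an
 * illustrative 16-bit value (typically eclk / (16 * baud) for 16550-style UARTs):
 *
 *   cvmx_uart_lcr_t lcr;
 *   lcr.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LCR(0));
 *   lcr.s.dlab = 1;                                  // expose DLL/DLH
 *   cvmx_write_csr(CVMX_MIO_UARTX_LCR(0), lcr.u64);
 *   cvmx_write_csr(CVMX_MIO_UARTX_DLL(0), divisor & 0xff);
 *   cvmx_write_csr(CVMX_MIO_UARTX_DLH(0), (divisor >> 8) & 0xff);
 *   lcr.s.dlab = 0;                                  // restore access to RBR/THR/IER
 *   cvmx_write_csr(CVMX_MIO_UARTX_LCR(0), lcr.u64);
 *   // per the DLL/DLH notes earlier, allow at least 8 eclk cycles before using the UART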
*/ typedef union { uint64_t u64; struct cvmx_mio_uartx_lcr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t dlab : 1; /**< Divisor Latch Address bit */ uint64_t brk : 1; /**< Break Control bit */ uint64_t reserved_5_5 : 1; uint64_t eps : 1; /**< Even Parity Select bit */ uint64_t pen : 1; /**< Parity Enable bit */ uint64_t stop : 1; /**< Stop Control bit */ cvmx_uart_bits_t cls : 2; /**< Character Length Select */ #else cvmx_uart_bits_t cls : 2; uint64_t stop : 1; uint64_t pen : 1; uint64_t eps : 1; uint64_t reserved_5_5 : 1; uint64_t brk : 1; uint64_t dlab : 1; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_mio_uartx_lcr_s cn30xx; struct cvmx_mio_uartx_lcr_s cn31xx; struct cvmx_mio_uartx_lcr_s cn38xx; struct cvmx_mio_uartx_lcr_s cn38xxp2; struct cvmx_mio_uartx_lcr_s cn50xx; struct cvmx_mio_uartx_lcr_s cn52xx; struct cvmx_mio_uartx_lcr_s cn52xxp1; struct cvmx_mio_uartx_lcr_s cn56xx; struct cvmx_mio_uartx_lcr_s cn56xxp1; struct cvmx_mio_uartx_lcr_s cn58xx; struct cvmx_mio_uartx_lcr_s cn58xxp1; } cvmx_mio_uartx_lcr_t; typedef cvmx_mio_uartx_lcr_t cvmx_uart_lcr_t; /** * cvmx_mio_uart#_lsr * * MIO_UARTX_LSR = MIO UARTX Line Status Register * * The Line Status Register (LSR) contains the status of the receiver and transmitter data transfers. This * status can be read by the user at any time. * * LSR bit 0 is the Data Ready (DR) bit. When set, this bit indicates the receiver contains at least one * character in the RBR or the receiver FIFO. This bit is cleared when the RBR is read in the non-FIFO * mode, or when the receiver FIFO is empty, in FIFO mode. * * LSR bit 1 is the Overrun Error (OE) bit. When set, this bit indicates an overrun error has occurred * because a new data character was received before the previous data was read. In the non-FIFO mode, the * OE bit is set when a new character arrives in the receiver before the previous character was read from * the RBR. When this happens, the data in the RBR is overwritten. In the FIFO mode, an overrun error * occurs when the FIFO is full and a new character arrives at the receiver. The data in the FIFO is * retained and the data in the receive shift register is lost. * * LSR bit 2 is the Parity Error (PE) bit. This bit is set whenever there is a parity error in the * receiver if the Parity Enable (PEN) bit in the LCR is set. In the FIFO mode, since the parity error is * associated with a character received, it is revealed when the character with the parity error arrives * at the top of the FIFO. It should be noted that the Parity Error (PE) bit will be set if a break * interrupt has occurred, as indicated by the Break Interrupt (BI) bit. * * LSR bit 3 is the Framing Error (FE) bit. This bit is set whenever there is a framing error in the * receiver. A framing error occurs when the receiver does not detect a valid STOP bit in the received * data. In the FIFO mode, since the framing error is associated with a character received, it is * revealed when the character with the framing error is at the top of the FIFO. When a framing error * occurs, the UART will try to resynchronize. It does this by assuming that the error was due to the start * bit of the next character and then continues receiving the other bits (i.e. data and/or parity and * stop). It should be noted that the Framing Error (FE) bit will be set if a break interrupt has * occurred, as indicated by the Break Interrupt (BI) bit. * * Note: The OE, PE, and FE bits are reset when a read of the LSR is performed. * * LSR bit 4 is the Break Interrupt (BI) bit.
This bit is set whenever the serial input (sin) is held in * a 0 state for longer than the sum of start time + data bits + parity + stop bits. A break condition on * sin causes one and only one character, consisting of all zeros, to be received by the UART. In the * FIFO mode, the character associated with the break condition is carried through the FIFO and is * revealed when the character is at the top of the FIFO. Reading the LSR clears the BI bit. In the non- * FIFO mode, the BI indication occurs immediately and persists until the LSR is read. * * LSR bit 5 is the Transmitter Holding Register Empty (THRE) bit. When Programmable THRE Interrupt mode * is disabled, this bit indicates that the UART can accept a new character for transmission. This bit is * set whenever data is transferred from the THR (or TX FIFO) to the transmitter shift register and no * new data has been written to the THR (or TX FIFO). This also causes a THRE Interrupt to occur, if the * THRE Interrupt is enabled. When FIFOs and Programmable THRE Interrupt mode are enabled, LSR bit 5 * functionality is switched to indicate the transmitter FIFO is full, and no longer controls THRE * Interrupts, which are then controlled by the FCR[5:4] threshold setting. * * LSR bit 6 is the Transmitter Empty (TEMT) bit. In the FIFO mode, this bit is set whenever the * Transmitter Shift Register and the FIFO are both empty. In the non-FIFO mode, this bit is set whenever * the Transmitter Holding Register and the Transmitter Shift Register are both empty. This bit is * typically used to make sure it is safe to change control registers. Changing control registers while * the transmitter is busy can result in corrupt data being transmitted. * * LSR bit 7 is the Error in Receiver FIFO (FERR) bit. This bit is active only when FIFOs are enabled. It * is set when there is at least one parity error, framing error, or break indication in the FIFO. This * bit is cleared when the LSR is read and the character with the error is at the top of the receiver * FIFO and there are no subsequent errors in the FIFO. */ typedef union { uint64_t u64; struct cvmx_mio_uartx_lsr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t ferr : 1; /**< Error in Receiver FIFO bit */ uint64_t temt : 1; /**< Transmitter Empty bit */ uint64_t thre : 1; /**< Transmitter Holding Register Empty bit */ uint64_t bi : 1; /**< Break Interrupt bit */ uint64_t fe : 1; /**< Framing Error bit */ uint64_t pe : 1; /**< Parity Error bit */ uint64_t oe : 1; /**< Overrun Error bit */ uint64_t dr : 1; /**< Data Ready bit */ #else uint64_t dr : 1; uint64_t oe : 1; uint64_t pe : 1; uint64_t fe : 1; uint64_t bi : 1; uint64_t thre : 1; uint64_t temt : 1; uint64_t ferr : 1; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_mio_uartx_lsr_s cn30xx; struct cvmx_mio_uartx_lsr_s cn31xx; struct cvmx_mio_uartx_lsr_s cn38xx; struct cvmx_mio_uartx_lsr_s cn38xxp2; struct cvmx_mio_uartx_lsr_s cn50xx; struct cvmx_mio_uartx_lsr_s cn52xx; struct cvmx_mio_uartx_lsr_s cn52xxp1; struct cvmx_mio_uartx_lsr_s cn56xx; struct cvmx_mio_uartx_lsr_s cn56xxp1; struct cvmx_mio_uartx_lsr_s cn58xx; struct cvmx_mio_uartx_lsr_s cn58xxp1; } cvmx_mio_uartx_lsr_t; typedef cvmx_mio_uartx_lsr_t cvmx_uart_lsr_t; /** * cvmx_mio_uart#_mcr * * MIO_UARTX_MCR = MIO UARTX Modem Control Register * * The lower four bits of the Modem Control Register (MCR) directly manipulate the outputs of the UART. 
* The DTR (bit 0), RTS (bit 1), OUT1 (bit 2), and OUT2 (bit 3) bits are inverted and then drive the * corresponding UART outputs, dtr_n, rts_n, out1_n, and out2_n. In loopback mode, these outputs are * driven inactive high while the values in these locations are internally looped back to the inputs. * * Note: When Auto RTS is enabled, the rts_n output is controlled in the same way, but is also gated * with the receiver FIFO threshold trigger (rts_n is inactive high when above the threshold). The * rts_n output will be de-asserted whenever RTS (bit 1) is set low. * * Note: The UART0 out1_n and out2_n outputs are not present on the pins of the chip, but the UART0 OUT1 * and OUT2 bits still function in Loopback mode. The UART1 dtr_n, out1_n, and out2_n outputs are not * present on the pins of the chip, but the UART1 DTR, OUT1, and OUT2 bits still function in Loopback * mode. * * MCR bit 4 is the Loopback bit. When set, data on the sout line is held high, while serial data output * is looped back to the sin line, internally. In this mode all the interrupts are fully functional. This * feature is used for diagnostic purposes. Also, in loopback mode, the modem control inputs (dsr_n, * cts_n, ri_n, dcd_n) are disconnected and the four modem control outputs (dtr_n, rts_n, out1_n, out2_n) * are looped back to the inputs, internally. * * MCR bit 5 is the Auto Flow Control Enable (AFCE) bit. When FIFOs are enabled and this bit is set, * 16750-compatible Auto RTS and Auto CTS serial data flow control features are enabled. * * Auto RTS becomes active when the following occurs: * 1. MCR bit 1 is set * 2. FIFOs are enabled by setting FIFO Control Register (FCR) bit 0 * 3. MCR bit 5 is set (must be set after FCR bit 0) * * When active, the rts_n output is forced inactive-high when the receiver FIFO level reaches the * threshold set by FCR[7:6]. When rts_n is connected to the cts_n input of another UART device, the * other UART stops sending serial data until the receiver FIFO has available space. * * The selectable receiver FIFO threshold values are: 1, 1/4, 1/2, and 2 less than full. Since one * additional character may be transmitted to the UART after rts_n has become inactive (due to data * already having entered the transmitter block in the other UART), setting the threshold to 2 less * than full allows maximum use of the FIFO with a safety zone of one character. * * Once the receiver FIFO becomes completely empty by reading the Receiver Buffer Register (RBR), rts_n * again becomes active-low, signalling the other UART to continue sending data. It is important to note * that, even if everything else is set to Enabled and the correct MCR bits are set, if the FIFOs are * disabled through FCR[0], Auto Flow Control is also disabled. When Auto RTS is disabled or inactive, * rts_n is controlled solely by MCR[1]. * * Auto CTS becomes active when the following occurs: * 1. FIFOs are enabled by setting FIFO Control Register (FCR) bit 0 * 2. MCR bit 5 is set (must be set after FCR bit 0) * * When active, the UART transmitter is disabled whenever the cts_n input becomes inactive-high. This * prevents overflowing the FIFO of the receiving UART. * * Note that, if the cts_n input is not inactivated before the middle of the last stop bit, another * character is transmitted before the transmitter is disabled. While the transmitter is disabled, the * transmitter FIFO can still be written to, and even overflowed.
Therefore, when using this mode, either * the true FIFO depth (64 characters) must be known to software, or the Programmable THRE Interrupt mode * must be enabled to access the FIFO full status through the Line Status Register. When using the FIFO * full status, software can poll this before each write to the Transmitter FIFO. * * Note: FIFO full status is also available in the UART Status Register (USR), or the actual level of the * FIFO may be read through the Transmit FIFO Level (TFL) register. * * When the cts_n input becomes active-low again, transmission resumes. It is important to note that, * even if everything else is set to Enabled, Auto Flow Control is also disabled if the FIFOs are * disabled through FCR[0]. When Auto CTS is disabled or inactive, the transmitter is unaffected by * cts_n. */ typedef union { uint64_t u64; struct cvmx_mio_uartx_mcr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_6_63 : 58; uint64_t afce : 1; /**< Auto Flow Control Enable bit */ uint64_t loop : 1; /**< Loopback bit */ uint64_t out2 : 1; /**< OUT2 output bit */ uint64_t out1 : 1; /**< OUT1 output bit */ uint64_t rts : 1; /**< Request To Send output bit */ uint64_t dtr : 1; /**< Data Terminal Ready output bit */ #else uint64_t dtr : 1; uint64_t rts : 1; uint64_t out1 : 1; uint64_t out2 : 1; uint64_t loop : 1; uint64_t afce : 1; uint64_t reserved_6_63 : 58; #endif } s; struct cvmx_mio_uartx_mcr_s cn30xx; struct cvmx_mio_uartx_mcr_s cn31xx; struct cvmx_mio_uartx_mcr_s cn38xx; struct cvmx_mio_uartx_mcr_s cn38xxp2; struct cvmx_mio_uartx_mcr_s cn50xx; struct cvmx_mio_uartx_mcr_s cn52xx; struct cvmx_mio_uartx_mcr_s cn52xxp1; struct cvmx_mio_uartx_mcr_s cn56xx; struct cvmx_mio_uartx_mcr_s cn56xxp1; struct cvmx_mio_uartx_mcr_s cn58xx; struct cvmx_mio_uartx_mcr_s cn58xxp1; } cvmx_mio_uartx_mcr_t; typedef cvmx_mio_uartx_mcr_t cvmx_uart_mcr_t; /** * cvmx_mio_uart#_msr * * MIO_UARTX_MSR = MIO UARTX Modem Status Register * * The Modem Status Register (MSR) contains the current status of the modem control input lines and whether * they have changed. * * DCTS (bit 0), DDSR (bit 1), and DDCD (bit 3) bits record whether the modem control lines (cts_n, * dsr_n, and dcd_n) have changed since the last time the user read the MSR. TERI (bit 2) indicates ri_n * has changed from an active-low to an inactive-high state since the last time the MSR was read. In * Loopback mode, DCTS reflects changes on MCR bit 1 (RTS), DDSR reflects changes on MCR bit 0 (DTR), and * DDCD reflects changes on MCR bit 3 (Out2), while TERI reflects when MCR bit 2 (Out1) has changed state * from a high to a low. * * Note: If the DCTS bit is not set and the cts_n signal is asserted (low) and a reset occurs (software * or otherwise), then the DCTS bit will get set when the reset is removed if the cts_n signal remains * asserted. * * The CTS, DSR, RI, and DCD Modem Status bits contain information on the current state of the modem * control lines. CTS (bit 4) is the complement of cts_n, DSR (bit 5) is the complement of dsr_n, RI * (bit 6) is the complement of ri_n, and DCD (bit 7) is the complement of dcd_n. In Loopback mode, CTS * is the same as MCR bit 1 (RTS), DSR is the same as MCR bit 0 (DTR), RI is the same as MCR bit 2 * (Out1), and DCD is the same as MCR bit 3 (Out2). * * Note: The UART0 dsr_n and ri_n inputs are internally tied to power and not present on the pins of the chip. * Thus the UART0 DSR and RI bits will be '0' when not in Loopback mode.
The UART1 dsr_n, ri_n, and dcd_n * inputs are internally tied to power and not present on the pins of the chip. Thus the UART1 DSR, RI, and * DCD bits will be '0' when not in Loopback mode. */ typedef union { uint64_t u64; struct cvmx_mio_uartx_msr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t dcd : 1; /**< Data Carrier Detect input bit */ uint64_t ri : 1; /**< Ring Indicator input bit */ uint64_t dsr : 1; /**< Data Set Ready input bit */ uint64_t cts : 1; /**< Clear To Send input bit */ uint64_t ddcd : 1; /**< Delta Data Carrier Detect bit */ uint64_t teri : 1; /**< Trailing Edge of Ring Indicator bit */ uint64_t ddsr : 1; /**< Delta Data Set Ready bit */ uint64_t dcts : 1; /**< Delta Clear To Send bit */ #else uint64_t dcts : 1; uint64_t ddsr : 1; uint64_t teri : 1; uint64_t ddcd : 1; uint64_t cts : 1; uint64_t dsr : 1; uint64_t ri : 1; uint64_t dcd : 1; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_mio_uartx_msr_s cn30xx; struct cvmx_mio_uartx_msr_s cn31xx; struct cvmx_mio_uartx_msr_s cn38xx; struct cvmx_mio_uartx_msr_s cn38xxp2; struct cvmx_mio_uartx_msr_s cn50xx; struct cvmx_mio_uartx_msr_s cn52xx; struct cvmx_mio_uartx_msr_s cn52xxp1; struct cvmx_mio_uartx_msr_s cn56xx; struct cvmx_mio_uartx_msr_s cn56xxp1; struct cvmx_mio_uartx_msr_s cn58xx; struct cvmx_mio_uartx_msr_s cn58xxp1; } cvmx_mio_uartx_msr_t; typedef cvmx_mio_uartx_msr_t cvmx_uart_msr_t; /** * cvmx_mio_uart#_rbr * * MIO_UARTX_RBR = MIO UARTX Receive Buffer Register * * The Receive Buffer Register (RBR) is a read-only register that contains the data byte received on the * serial input port (sin). The data in this register is valid only if the Data Ready (DR) bit in the * Line Status Register (LSR) is set. When the FIFOs are programmed OFF, the data in the RBR must be * read before the next data arrives; otherwise it is overwritten, resulting in an overrun error. When * the FIFOs are programmed ON, this register accesses the head of the receive FIFO. If the receive FIFO * is full (64 characters) and this register is not read before the next data character arrives, then the * data already in the FIFO is preserved, but any incoming data is lost. An overrun error also occurs. * * Note: The Divisor Latch Address Bit (DLAB) of the Line Control Register (LCR) must be clear to access * this register. * * Note: The address below is an alias to simplify these CSR descriptions. It should be known that the * RBR, THR, and DLL registers are the same. */ typedef union { uint64_t u64; struct cvmx_mio_uartx_rbr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t rbr : 8; /**< Receive Buffer Register */ #else uint64_t rbr : 8; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_mio_uartx_rbr_s cn30xx; struct cvmx_mio_uartx_rbr_s cn31xx; struct cvmx_mio_uartx_rbr_s cn38xx; struct cvmx_mio_uartx_rbr_s cn38xxp2; struct cvmx_mio_uartx_rbr_s cn50xx; struct cvmx_mio_uartx_rbr_s cn52xx; struct cvmx_mio_uartx_rbr_s cn52xxp1; struct cvmx_mio_uartx_rbr_s cn56xx; struct cvmx_mio_uartx_rbr_s cn56xxp1; struct cvmx_mio_uartx_rbr_s cn58xx; struct cvmx_mio_uartx_rbr_s cn58xxp1; } cvmx_mio_uartx_rbr_t; typedef cvmx_mio_uartx_rbr_t cvmx_uart_rbr_t; /** * cvmx_mio_uart#_rfl * * MIO_UARTX_RFL = MIO UARTX Receive FIFO Level Register * * The Receive FIFO Level Register (RFL) indicates the number of data entries in the receive FIFO.
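 *
 * As an illustration only: a sketch that drains the receive FIFO using RFL as the entry count,
 * assuming the usual SDK CSR accessors and the CVMX_MIO_UARTX_RFL/RBR address macros defined
 * elsewhere in the SDK; process_byte() is a hypothetical consumer:
 *
 *   uint64_t n = cvmx_read_csr(CVMX_MIO_UARTX_RFL(0)) & 0x7f;   // entries currently pending
 *   while (n--)
 *       process_byte((uint8_t)cvmx_read_csr(CVMX_MIO_UARTX_RBR(0)));   // one RBR read per entry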
*/ typedef union { uint64_t u64; struct cvmx_mio_uartx_rfl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_7_63 : 57; uint64_t rfl : 7; /**< Receive FIFO Level Register */ #else uint64_t rfl : 7; uint64_t reserved_7_63 : 57; #endif } s; struct cvmx_mio_uartx_rfl_s cn30xx; struct cvmx_mio_uartx_rfl_s cn31xx; struct cvmx_mio_uartx_rfl_s cn38xx; struct cvmx_mio_uartx_rfl_s cn38xxp2; struct cvmx_mio_uartx_rfl_s cn50xx; struct cvmx_mio_uartx_rfl_s cn52xx; struct cvmx_mio_uartx_rfl_s cn52xxp1; struct cvmx_mio_uartx_rfl_s cn56xx; struct cvmx_mio_uartx_rfl_s cn56xxp1; struct cvmx_mio_uartx_rfl_s cn58xx; struct cvmx_mio_uartx_rfl_s cn58xxp1; } cvmx_mio_uartx_rfl_t; typedef cvmx_mio_uartx_rfl_t cvmx_uart_rfl_t; /** * cvmx_mio_uart#_rfw * * MIO_UARTX_RFW = MIO UARTX Receive FIFO Write Register * * The Receive FIFO Write Register (RFW) is only valid when FIFO access mode is enabled (FAR bit 0 is * set). When FIFOs are enabled, this register is used to write data to the receive FIFO. Each * consecutive write pushes the new data to the next write location in the receive FIFO. When FIFOs are * not enabled, this register is used to write data to the RBR. */ typedef union { uint64_t u64; struct cvmx_mio_uartx_rfw_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_10_63 : 54; uint64_t rffe : 1; /**< Receive FIFO Framing Error */ uint64_t rfpe : 1; /**< Receive FIFO Parity Error */ uint64_t rfwd : 8; /**< Receive FIFO Write Data */ #else uint64_t rfwd : 8; uint64_t rfpe : 1; uint64_t rffe : 1; uint64_t reserved_10_63 : 54; #endif } s; struct cvmx_mio_uartx_rfw_s cn30xx; struct cvmx_mio_uartx_rfw_s cn31xx; struct cvmx_mio_uartx_rfw_s cn38xx; struct cvmx_mio_uartx_rfw_s cn38xxp2; struct cvmx_mio_uartx_rfw_s cn50xx; struct cvmx_mio_uartx_rfw_s cn52xx; struct cvmx_mio_uartx_rfw_s cn52xxp1; struct cvmx_mio_uartx_rfw_s cn56xx; struct cvmx_mio_uartx_rfw_s cn56xxp1; struct cvmx_mio_uartx_rfw_s cn58xx; struct cvmx_mio_uartx_rfw_s cn58xxp1; } cvmx_mio_uartx_rfw_t; typedef cvmx_mio_uartx_rfw_t cvmx_uart_rfw_t; /** * cvmx_mio_uart#_sbcr * * MIO_UARTX_SBCR = MIO UARTX Shadow Break Control Register * * The Shadow Break Control Register (SBCR) is a shadow register for the BREAK bit (LCR bit 6) that can * be used to remove the burden of having to perform a read-modify-write on the LCR. */ typedef union { uint64_t u64; struct cvmx_mio_uartx_sbcr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t sbcr : 1; /**< Shadow Break Control */ #else uint64_t sbcr : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_mio_uartx_sbcr_s cn30xx; struct cvmx_mio_uartx_sbcr_s cn31xx; struct cvmx_mio_uartx_sbcr_s cn38xx; struct cvmx_mio_uartx_sbcr_s cn38xxp2; struct cvmx_mio_uartx_sbcr_s cn50xx; struct cvmx_mio_uartx_sbcr_s cn52xx; struct cvmx_mio_uartx_sbcr_s cn52xxp1; struct cvmx_mio_uartx_sbcr_s cn56xx; struct cvmx_mio_uartx_sbcr_s cn56xxp1; struct cvmx_mio_uartx_sbcr_s cn58xx; struct cvmx_mio_uartx_sbcr_s cn58xxp1; } cvmx_mio_uartx_sbcr_t; typedef cvmx_mio_uartx_sbcr_t cvmx_uart_sbcr_t; /** * cvmx_mio_uart#_scr * * MIO_UARTX_SCR = MIO UARTX Scratchpad Register * * The Scratchpad Register (SCR) is an 8-bit read/write register for programmers to use as a temporary * storage space. 
*/ typedef union { uint64_t u64; struct cvmx_mio_uartx_scr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t scr : 8; /**< Scratchpad Register */ #else uint64_t scr : 8; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_mio_uartx_scr_s cn30xx; struct cvmx_mio_uartx_scr_s cn31xx; struct cvmx_mio_uartx_scr_s cn38xx; struct cvmx_mio_uartx_scr_s cn38xxp2; struct cvmx_mio_uartx_scr_s cn50xx; struct cvmx_mio_uartx_scr_s cn52xx; struct cvmx_mio_uartx_scr_s cn52xxp1; struct cvmx_mio_uartx_scr_s cn56xx; struct cvmx_mio_uartx_scr_s cn56xxp1; struct cvmx_mio_uartx_scr_s cn58xx; struct cvmx_mio_uartx_scr_s cn58xxp1; } cvmx_mio_uartx_scr_t; typedef cvmx_mio_uartx_scr_t cvmx_uart_scr_t; /** * cvmx_mio_uart#_sfe * * MIO_UARTX_SFE = MIO UARTX Shadow FIFO Enable Register * * The Shadow FIFO Enable Register (SFE) is a shadow register for the FIFO enable bit (FCR bit 0) that * can be used to remove the burden of having to store the previously written value to the FCR in memory * and having to mask this value so that only the FIFO enable bit gets updated. */ typedef union { uint64_t u64; struct cvmx_mio_uartx_sfe_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t sfe : 1; /**< Shadow FIFO Enable */ #else uint64_t sfe : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_mio_uartx_sfe_s cn30xx; struct cvmx_mio_uartx_sfe_s cn31xx; struct cvmx_mio_uartx_sfe_s cn38xx; struct cvmx_mio_uartx_sfe_s cn38xxp2; struct cvmx_mio_uartx_sfe_s cn50xx; struct cvmx_mio_uartx_sfe_s cn52xx; struct cvmx_mio_uartx_sfe_s cn52xxp1; struct cvmx_mio_uartx_sfe_s cn56xx; struct cvmx_mio_uartx_sfe_s cn56xxp1; struct cvmx_mio_uartx_sfe_s cn58xx; struct cvmx_mio_uartx_sfe_s cn58xxp1; } cvmx_mio_uartx_sfe_t; typedef cvmx_mio_uartx_sfe_t cvmx_uart_sfe_t; /** * cvmx_mio_uart#_srr * * MIO_UARTX_SRR = MIO UARTX Software Reset Register * * The Software Reset Register (SRR) is a write-only register that resets the UART and/or the receive * FIFO and/or the transmit FIFO. * * Bit 0 of the SRR is the UART Soft Reset (USR) bit. Setting this bit resets the UART. * * Bit 1 of the SRR is a shadow copy of the RX FIFO Reset bit (FCR bit 1). This can be used to remove * the burden on software having to store previously written FCR values (which are pretty static) just * to reset the receive FIFO. * * Bit 2 of the SRR is a shadow copy of the TX FIFO Reset bit (FCR bit 2). This can be used to remove * the burden on software having to store previously written FCR values (which are pretty static) just * to reset the transmit FIFO. 
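 *
 * As an illustration only: flushing both FIFOs through SRR without a full UART reset, assuming the
 * usual SDK CSR accessors and the CVMX_MIO_UARTX_SRR address macro defined elsewhere in the SDK:
 *
 *   cvmx_uart_srr_t srr;
 *   srr.u64 = 0;                                   // usr=0: do not reset the UART itself
 *   srr.s.srfr = 1;                                // reset/flush the RX FIFO
 *   srr.s.stfr = 1;                                // reset/flush the TX FIFO
 *   cvmx_write_csr(CVMX_MIO_UARTX_SRR(0), srr.u64);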
*/ typedef union { uint64_t u64; struct cvmx_mio_uartx_srr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_3_63 : 61; uint64_t stfr : 1; /**< Shadow TX FIFO Reset */ uint64_t srfr : 1; /**< Shadow RX FIFO Reset */ uint64_t usr : 1; /**< UART Soft Reset */ #else uint64_t usr : 1; uint64_t srfr : 1; uint64_t stfr : 1; uint64_t reserved_3_63 : 61; #endif } s; struct cvmx_mio_uartx_srr_s cn30xx; struct cvmx_mio_uartx_srr_s cn31xx; struct cvmx_mio_uartx_srr_s cn38xx; struct cvmx_mio_uartx_srr_s cn38xxp2; struct cvmx_mio_uartx_srr_s cn50xx; struct cvmx_mio_uartx_srr_s cn52xx; struct cvmx_mio_uartx_srr_s cn52xxp1; struct cvmx_mio_uartx_srr_s cn56xx; struct cvmx_mio_uartx_srr_s cn56xxp1; struct cvmx_mio_uartx_srr_s cn58xx; struct cvmx_mio_uartx_srr_s cn58xxp1; } cvmx_mio_uartx_srr_t; typedef cvmx_mio_uartx_srr_t cvmx_uart_srr_t; /** * cvmx_mio_uart#_srt * * MIO_UARTX_SRT = MIO UARTX Shadow RX Trigger Register * * The Shadow RX Trigger Register (SRT) is a shadow register for the RX Trigger bits (FCR bits 7:6) that * can be used to remove the burden of having to store the previously written value to the FCR in memory * and having to mask this value so that only the RX Trigger bits get updated. */ typedef union { uint64_t u64; struct cvmx_mio_uartx_srt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t srt : 2; /**< Shadow RX Trigger */ #else uint64_t srt : 2; uint64_t reserved_2_63 : 62; #endif } s; struct cvmx_mio_uartx_srt_s cn30xx; struct cvmx_mio_uartx_srt_s cn31xx; struct cvmx_mio_uartx_srt_s cn38xx; struct cvmx_mio_uartx_srt_s cn38xxp2; struct cvmx_mio_uartx_srt_s cn50xx; struct cvmx_mio_uartx_srt_s cn52xx; struct cvmx_mio_uartx_srt_s cn52xxp1; struct cvmx_mio_uartx_srt_s cn56xx; struct cvmx_mio_uartx_srt_s cn56xxp1; struct cvmx_mio_uartx_srt_s cn58xx; struct cvmx_mio_uartx_srt_s cn58xxp1; } cvmx_mio_uartx_srt_t; typedef cvmx_mio_uartx_srt_t cvmx_uart_srt_t; /** * cvmx_mio_uart#_srts * * MIO_UARTX_SRTS = MIO UARTX Shadow Request To Send Register * * The Shadow Request To Send Register (SRTS) is a shadow register for the RTS bit (MCR bit 1) that can * be used to remove the burden of having to perform a read-modify-write on the MCR. */ typedef union { uint64_t u64; struct cvmx_mio_uartx_srts_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t srts : 1; /**< Shadow Request To Send */ #else uint64_t srts : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_mio_uartx_srts_s cn30xx; struct cvmx_mio_uartx_srts_s cn31xx; struct cvmx_mio_uartx_srts_s cn38xx; struct cvmx_mio_uartx_srts_s cn38xxp2; struct cvmx_mio_uartx_srts_s cn50xx; struct cvmx_mio_uartx_srts_s cn52xx; struct cvmx_mio_uartx_srts_s cn52xxp1; struct cvmx_mio_uartx_srts_s cn56xx; struct cvmx_mio_uartx_srts_s cn56xxp1; struct cvmx_mio_uartx_srts_s cn58xx; struct cvmx_mio_uartx_srts_s cn58xxp1; } cvmx_mio_uartx_srts_t; typedef cvmx_mio_uartx_srts_t cvmx_uart_srts_t; /** * cvmx_mio_uart#_stt * * MIO_UARTX_STT = MIO UARTX Shadow TX Trigger Register * * The Shadow TX Trigger Register (STT) is a shadow register for the TX Trigger bits (FCR bits 5:4) that * can be used to remove the burden of having to store the previously written value to the FCR in memory * and having to mask this value so that only the TX Trigger bits get updated. 
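 *
 * As an illustration only: updating just the TX trigger through the shadow register, avoiding a
 * read-modify-write of the write-only FCR, assuming the usual SDK CSR accessors and the
 * CVMX_MIO_UARTX_STT address macro defined elsewhere in the SDK:
 *
 *   cvmx_uart_stt_t stt;
 *   stt.u64 = 0;
 *   stt.s.stt = 2;                                 // '10' = THRE interrupt when TX FIFO is 1/4 full
 *   cvmx_write_csr(CVMX_MIO_UARTX_STT(0), stt.u64);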
*/ typedef union { uint64_t u64; struct cvmx_mio_uartx_stt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t stt : 2; /**< Shadow TX Trigger */ #else uint64_t stt : 2; uint64_t reserved_2_63 : 62; #endif } s; struct cvmx_mio_uartx_stt_s cn30xx; struct cvmx_mio_uartx_stt_s cn31xx; struct cvmx_mio_uartx_stt_s cn38xx; struct cvmx_mio_uartx_stt_s cn38xxp2; struct cvmx_mio_uartx_stt_s cn50xx; struct cvmx_mio_uartx_stt_s cn52xx; struct cvmx_mio_uartx_stt_s cn52xxp1; struct cvmx_mio_uartx_stt_s cn56xx; struct cvmx_mio_uartx_stt_s cn56xxp1; struct cvmx_mio_uartx_stt_s cn58xx; struct cvmx_mio_uartx_stt_s cn58xxp1; } cvmx_mio_uartx_stt_t; typedef cvmx_mio_uartx_stt_t cvmx_uart_stt_t; /** * cvmx_mio_uart#_tfl * * MIO_UARTX_TFL = MIO UARTX Transmit FIFO Level Register * * The Transmit FIFO Level Register (TFL) indicates the number of data entries in the transmit FIFO. */ typedef union { uint64_t u64; struct cvmx_mio_uartx_tfl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_7_63 : 57; uint64_t tfl : 7; /**< Transmit FIFO Level Register */ #else uint64_t tfl : 7; uint64_t reserved_7_63 : 57; #endif } s; struct cvmx_mio_uartx_tfl_s cn30xx; struct cvmx_mio_uartx_tfl_s cn31xx; struct cvmx_mio_uartx_tfl_s cn38xx; struct cvmx_mio_uartx_tfl_s cn38xxp2; struct cvmx_mio_uartx_tfl_s cn50xx; struct cvmx_mio_uartx_tfl_s cn52xx; struct cvmx_mio_uartx_tfl_s cn52xxp1; struct cvmx_mio_uartx_tfl_s cn56xx; struct cvmx_mio_uartx_tfl_s cn56xxp1; struct cvmx_mio_uartx_tfl_s cn58xx; struct cvmx_mio_uartx_tfl_s cn58xxp1; } cvmx_mio_uartx_tfl_t; typedef cvmx_mio_uartx_tfl_t cvmx_uart_tfl_t; /** * cvmx_mio_uart#_tfr * * MIO_UARTX_TFR = MIO UARTX Transmit FIFO Read Register * * The Transmit FIFO Read Register (TFR) is only valid when FIFO access mode is enabled (FAR bit 0 is * set). When FIFOs are enabled, reading this register gives the data at the top of the transmit FIFO. * Each consecutive read pops the transmit FIFO and gives the next data value that is currently at the * top of the FIFO. When FIFOs are not enabled, reading this register gives the data in the THR. */ typedef union { uint64_t u64; struct cvmx_mio_uartx_tfr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t tfr : 8; /**< Transmit FIFO Read Register */ #else uint64_t tfr : 8; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_mio_uartx_tfr_s cn30xx; struct cvmx_mio_uartx_tfr_s cn31xx; struct cvmx_mio_uartx_tfr_s cn38xx; struct cvmx_mio_uartx_tfr_s cn38xxp2; struct cvmx_mio_uartx_tfr_s cn50xx; struct cvmx_mio_uartx_tfr_s cn52xx; struct cvmx_mio_uartx_tfr_s cn52xxp1; struct cvmx_mio_uartx_tfr_s cn56xx; struct cvmx_mio_uartx_tfr_s cn56xxp1; struct cvmx_mio_uartx_tfr_s cn58xx; struct cvmx_mio_uartx_tfr_s cn58xxp1; } cvmx_mio_uartx_tfr_t; typedef cvmx_mio_uartx_tfr_t cvmx_uart_tfr_t; /** * cvmx_mio_uart#_thr * * MIO_UARTX_THR = MIO UARTX Transmit Holding Register * * Transmit Holding Register (THR) is a write-only register that contains data to be transmitted on the * serial output port (sout). Data can be written to the THR any time that the THR Empty (THRE) bit of * the Line Status Register (LSR) is set. * * If FIFOs are not enabled and THRE is set, writing a single character to the THR clears the THRE. Any * additional writes to the THR before the THRE is set again causes the THR data to be overwritten. * * If FIFOs are enabled and THRE is set (and Programmable THRE mode disabled), 64 characters of data may * be written to the THR before the FIFO is full. 
Any attempt to write data when the FIFO is full results * in the write data being lost. * * Note: The Divisor Latch Address Bit (DLAB) of the Line Control Register (LCR) must be clear to access * this register. * * Note: The address below is an alias to simplify these CSR descriptions. It should be known that the * RBR, THR, and DLL registers are the same. */ typedef union { uint64_t u64; struct cvmx_mio_uartx_thr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t thr : 8; /**< Transmit Holding Register */ #else uint64_t thr : 8; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_mio_uartx_thr_s cn30xx; struct cvmx_mio_uartx_thr_s cn31xx; struct cvmx_mio_uartx_thr_s cn38xx; struct cvmx_mio_uartx_thr_s cn38xxp2; struct cvmx_mio_uartx_thr_s cn50xx; struct cvmx_mio_uartx_thr_s cn52xx; struct cvmx_mio_uartx_thr_s cn52xxp1; struct cvmx_mio_uartx_thr_s cn56xx; struct cvmx_mio_uartx_thr_s cn56xxp1; struct cvmx_mio_uartx_thr_s cn58xx; struct cvmx_mio_uartx_thr_s cn58xxp1; } cvmx_mio_uartx_thr_t; typedef cvmx_mio_uartx_thr_t cvmx_uart_thr_t; /** * cvmx_mio_uart#_usr * * MIO_UARTX_USR = MIO UARTX UART Status Register * * The UART Status Register (USR) contains UART status information. * * USR bit 0 is the BUSY bit. When set, this bit indicates that a serial transfer is in progress; when * clear, it indicates that the UART is idle or inactive. * * Note: In PASS3, the BUSY bit will always be clear. * * USR bits 1-4 indicate the following FIFO status: TX FIFO Not Full (TFNF), TX FIFO Empty (TFE), RX * FIFO Not Empty (RFNE), and RX FIFO Full (RFF). */ typedef union { uint64_t u64; struct cvmx_mio_uartx_usr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t rff : 1; /**< RX FIFO Full */ uint64_t rfne : 1; /**< RX FIFO Not Empty */ uint64_t tfe : 1; /**< TX FIFO Empty */ uint64_t tfnf : 1; /**< TX FIFO Not Full */ uint64_t busy : 1; /**< Busy bit (always 0 in PASS3) */ #else uint64_t busy : 1; uint64_t tfnf : 1; uint64_t tfe : 1; uint64_t rfne : 1; uint64_t rff : 1; uint64_t reserved_5_63 : 59; #endif } s; struct cvmx_mio_uartx_usr_s cn30xx; struct cvmx_mio_uartx_usr_s cn31xx; struct cvmx_mio_uartx_usr_s cn38xx; struct cvmx_mio_uartx_usr_s cn38xxp2; struct cvmx_mio_uartx_usr_s cn50xx; struct cvmx_mio_uartx_usr_s cn52xx; struct cvmx_mio_uartx_usr_s cn52xxp1; struct cvmx_mio_uartx_usr_s cn56xx; struct cvmx_mio_uartx_usr_s cn56xxp1; struct cvmx_mio_uartx_usr_s cn58xx; struct cvmx_mio_uartx_usr_s cn58xxp1; } cvmx_mio_uartx_usr_t; typedef cvmx_mio_uartx_usr_t cvmx_uart_usr_t; /** * cvmx_mio_uart2_dlh */ typedef union { uint64_t u64; struct cvmx_mio_uart2_dlh_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t dlh : 8; /**< Divisor Latch High Register */ #else uint64_t dlh : 8; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_mio_uart2_dlh_s cn52xx; struct cvmx_mio_uart2_dlh_s cn52xxp1; } cvmx_mio_uart2_dlh_t; /** * cvmx_mio_uart2_dll */ typedef union { uint64_t u64; struct cvmx_mio_uart2_dll_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t dll : 8; /**< Divisor Latch Low Register */ #else uint64_t dll : 8; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_mio_uart2_dll_s cn52xx; struct cvmx_mio_uart2_dll_s cn52xxp1; } cvmx_mio_uart2_dll_t; /** * cvmx_mio_uart2_far */ typedef union { uint64_t u64; struct cvmx_mio_uart2_far_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t far : 1; /**< FIFO Access Register */ #else uint64_t far : 1; uint64_t reserved_1_63 : 63; #endif } s; struct
cvmx_mio_uart2_far_s cn52xx; struct cvmx_mio_uart2_far_s cn52xxp1; } cvmx_mio_uart2_far_t; /** * cvmx_mio_uart2_fcr */ typedef union { uint64_t u64; struct cvmx_mio_uart2_fcr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t rxtrig : 2; /**< RX Trigger */ uint64_t txtrig : 2; /**< TX Trigger */ uint64_t reserved_3_3 : 1; uint64_t txfr : 1; /**< TX FIFO reset */ uint64_t rxfr : 1; /**< RX FIFO reset */ uint64_t en : 1; /**< FIFO enable */ #else uint64_t en : 1; uint64_t rxfr : 1; uint64_t txfr : 1; uint64_t reserved_3_3 : 1; uint64_t txtrig : 2; uint64_t rxtrig : 2; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_mio_uart2_fcr_s cn52xx; struct cvmx_mio_uart2_fcr_s cn52xxp1; } cvmx_mio_uart2_fcr_t; /** * cvmx_mio_uart2_htx */ typedef union { uint64_t u64; struct cvmx_mio_uart2_htx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t htx : 1; /**< Halt TX */ #else uint64_t htx : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_mio_uart2_htx_s cn52xx; struct cvmx_mio_uart2_htx_s cn52xxp1; } cvmx_mio_uart2_htx_t; /** * cvmx_mio_uart2_ier */ typedef union { uint64_t u64; struct cvmx_mio_uart2_ier_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t ptime : 1; /**< Programmable THRE Interrupt mode enable */ uint64_t reserved_4_6 : 3; uint64_t edssi : 1; /**< Enable Modem Status Interrupt */ uint64_t elsi : 1; /**< Enable Receiver Line Status Interrupt */ uint64_t etbei : 1; /**< Enable Transmitter Holding Register Empty Interrupt */ uint64_t erbfi : 1; /**< Enable Received Data Available Interrupt */ #else uint64_t erbfi : 1; uint64_t etbei : 1; uint64_t elsi : 1; uint64_t edssi : 1; uint64_t reserved_4_6 : 3; uint64_t ptime : 1; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_mio_uart2_ier_s cn52xx; struct cvmx_mio_uart2_ier_s cn52xxp1; } cvmx_mio_uart2_ier_t; /** * cvmx_mio_uart2_iir */ typedef union { uint64_t u64; struct cvmx_mio_uart2_iir_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t fen : 2; /**< FIFO-enabled bits */ uint64_t reserved_4_5 : 2; uint64_t iid : 4; /**< Interrupt ID */ #else uint64_t iid : 4; uint64_t reserved_4_5 : 2; uint64_t fen : 2; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_mio_uart2_iir_s cn52xx; struct cvmx_mio_uart2_iir_s cn52xxp1; } cvmx_mio_uart2_iir_t; /** * cvmx_mio_uart2_lcr */ typedef union { uint64_t u64; struct cvmx_mio_uart2_lcr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t dlab : 1; /**< Divisor Latch Address bit */ uint64_t brk : 1; /**< Break Control bit */ uint64_t reserved_5_5 : 1; uint64_t eps : 1; /**< Even Parity Select bit */ uint64_t pen : 1; /**< Parity Enable bit */ uint64_t stop : 1; /**< Stop Control bit */ uint64_t cls : 2; /**< Character Length Select */ #else uint64_t cls : 2; uint64_t stop : 1; uint64_t pen : 1; uint64_t eps : 1; uint64_t reserved_5_5 : 1; uint64_t brk : 1; uint64_t dlab : 1; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_mio_uart2_lcr_s cn52xx; struct cvmx_mio_uart2_lcr_s cn52xxp1; } cvmx_mio_uart2_lcr_t; /** * cvmx_mio_uart2_lsr */ typedef union { uint64_t u64; struct cvmx_mio_uart2_lsr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t ferr : 1; /**< Error in Receiver FIFO bit */ uint64_t temt : 1; /**< Transmitter Empty bit */ uint64_t thre : 1; /**< Transmitter Holding Register Empty bit */ uint64_t bi : 1; /**< Break Interrupt bit */ uint64_t fe : 1; /**< Framing Error bit */ uint64_t pe : 1; /**< Parity Error bit */ uint64_t oe : 1; /**< 
Overrun Error bit */ uint64_t dr : 1; /**< Data Ready bit */ #else uint64_t dr : 1; uint64_t oe : 1; uint64_t pe : 1; uint64_t fe : 1; uint64_t bi : 1; uint64_t thre : 1; uint64_t temt : 1; uint64_t ferr : 1; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_mio_uart2_lsr_s cn52xx; struct cvmx_mio_uart2_lsr_s cn52xxp1; } cvmx_mio_uart2_lsr_t; /** * cvmx_mio_uart2_mcr */ typedef union { uint64_t u64; struct cvmx_mio_uart2_mcr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_6_63 : 58; uint64_t afce : 1; /**< Auto Flow Control Enable bit */ uint64_t loop : 1; /**< Loopback bit */ uint64_t out2 : 1; /**< OUT2 output bit */ uint64_t out1 : 1; /**< OUT1 output bit */ uint64_t rts : 1; /**< Request To Send output bit */ uint64_t dtr : 1; /**< Data Terminal Ready output bit */ #else uint64_t dtr : 1; uint64_t rts : 1; uint64_t out1 : 1; uint64_t out2 : 1; uint64_t loop : 1; uint64_t afce : 1; uint64_t reserved_6_63 : 58; #endif } s; struct cvmx_mio_uart2_mcr_s cn52xx; struct cvmx_mio_uart2_mcr_s cn52xxp1; } cvmx_mio_uart2_mcr_t; /** * cvmx_mio_uart2_msr */ typedef union { uint64_t u64; struct cvmx_mio_uart2_msr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t dcd : 1; /**< Data Carrier Detect input bit */ uint64_t ri : 1; /**< Ring Indicator input bit */ uint64_t dsr : 1; /**< Data Set Ready input bit */ uint64_t cts : 1; /**< Clear To Send input bit */ uint64_t ddcd : 1; /**< Delta Data Carrier Detect bit */ uint64_t teri : 1; /**< Trailing Edge of Ring Indicator bit */ uint64_t ddsr : 1; /**< Delta Data Set Ready bit */ uint64_t dcts : 1; /**< Delta Clear To Send bit */ #else uint64_t dcts : 1; uint64_t ddsr : 1; uint64_t teri : 1; uint64_t ddcd : 1; uint64_t cts : 1; uint64_t dsr : 1; uint64_t ri : 1; uint64_t dcd : 1; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_mio_uart2_msr_s cn52xx; struct cvmx_mio_uart2_msr_s cn52xxp1; } cvmx_mio_uart2_msr_t; /** * cvmx_mio_uart2_rbr */ typedef union { uint64_t u64; struct cvmx_mio_uart2_rbr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t rbr : 8; /**< Receive Buffer Register */ #else uint64_t rbr : 8; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_mio_uart2_rbr_s cn52xx; struct cvmx_mio_uart2_rbr_s cn52xxp1; } cvmx_mio_uart2_rbr_t; /** * cvmx_mio_uart2_rfl */ typedef union { uint64_t u64; struct cvmx_mio_uart2_rfl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_7_63 : 57; uint64_t rfl : 7; /**< Receive FIFO Level Register */ #else uint64_t rfl : 7; uint64_t reserved_7_63 : 57; #endif } s; struct cvmx_mio_uart2_rfl_s cn52xx; struct cvmx_mio_uart2_rfl_s cn52xxp1; } cvmx_mio_uart2_rfl_t; /** * cvmx_mio_uart2_rfw */ typedef union { uint64_t u64; struct cvmx_mio_uart2_rfw_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_10_63 : 54; uint64_t rffe : 1; /**< Receive FIFO Framing Error */ uint64_t rfpe : 1; /**< Receive FIFO Parity Error */ uint64_t rfwd : 8; /**< Receive FIFO Write Data */ #else uint64_t rfwd : 8; uint64_t rfpe : 1; uint64_t rffe : 1; uint64_t reserved_10_63 : 54; #endif } s; struct cvmx_mio_uart2_rfw_s cn52xx; struct cvmx_mio_uart2_rfw_s cn52xxp1; } cvmx_mio_uart2_rfw_t; /** * cvmx_mio_uart2_sbcr */ typedef union { uint64_t u64; struct cvmx_mio_uart2_sbcr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t sbcr : 1; /**< Shadow Break Control */ #else uint64_t sbcr : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_mio_uart2_sbcr_s cn52xx; struct cvmx_mio_uart2_sbcr_s cn52xxp1; } cvmx_mio_uart2_sbcr_t; /** * cvmx_mio_uart2_scr 
*/ typedef union { uint64_t u64; struct cvmx_mio_uart2_scr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t scr : 8; /**< Scratchpad Register */ #else uint64_t scr : 8; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_mio_uart2_scr_s cn52xx; struct cvmx_mio_uart2_scr_s cn52xxp1; } cvmx_mio_uart2_scr_t; /** * cvmx_mio_uart2_sfe */ typedef union { uint64_t u64; struct cvmx_mio_uart2_sfe_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t sfe : 1; /**< Shadow FIFO Enable */ #else uint64_t sfe : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_mio_uart2_sfe_s cn52xx; struct cvmx_mio_uart2_sfe_s cn52xxp1; } cvmx_mio_uart2_sfe_t; /** * cvmx_mio_uart2_srr */ typedef union { uint64_t u64; struct cvmx_mio_uart2_srr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_3_63 : 61; uint64_t stfr : 1; /**< Shadow TX FIFO Reset */ uint64_t srfr : 1; /**< Shadow RX FIFO Reset */ uint64_t usr : 1; /**< UART Soft Reset */ #else uint64_t usr : 1; uint64_t srfr : 1; uint64_t stfr : 1; uint64_t reserved_3_63 : 61; #endif } s; struct cvmx_mio_uart2_srr_s cn52xx; struct cvmx_mio_uart2_srr_s cn52xxp1; } cvmx_mio_uart2_srr_t; /** * cvmx_mio_uart2_srt */ typedef union { uint64_t u64; struct cvmx_mio_uart2_srt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t srt : 2; /**< Shadow RX Trigger */ #else uint64_t srt : 2; uint64_t reserved_2_63 : 62; #endif } s; struct cvmx_mio_uart2_srt_s cn52xx; struct cvmx_mio_uart2_srt_s cn52xxp1; } cvmx_mio_uart2_srt_t; /** * cvmx_mio_uart2_srts */ typedef union { uint64_t u64; struct cvmx_mio_uart2_srts_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t srts : 1; /**< Shadow Request To Send */ #else uint64_t srts : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_mio_uart2_srts_s cn52xx; struct cvmx_mio_uart2_srts_s cn52xxp1; } cvmx_mio_uart2_srts_t; /** * cvmx_mio_uart2_stt */ typedef union { uint64_t u64; struct cvmx_mio_uart2_stt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t stt : 2; /**< Shadow TX Trigger */ #else uint64_t stt : 2; uint64_t reserved_2_63 : 62; #endif } s; struct cvmx_mio_uart2_stt_s cn52xx; struct cvmx_mio_uart2_stt_s cn52xxp1; } cvmx_mio_uart2_stt_t; /** * cvmx_mio_uart2_tfl */ typedef union { uint64_t u64; struct cvmx_mio_uart2_tfl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_7_63 : 57; uint64_t tfl : 7; /**< Transmit FIFO Level Register */ #else uint64_t tfl : 7; uint64_t reserved_7_63 : 57; #endif } s; struct cvmx_mio_uart2_tfl_s cn52xx; struct cvmx_mio_uart2_tfl_s cn52xxp1; } cvmx_mio_uart2_tfl_t; /** * cvmx_mio_uart2_tfr */ typedef union { uint64_t u64; struct cvmx_mio_uart2_tfr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t tfr : 8; /**< Transmit FIFO Read Register */ #else uint64_t tfr : 8; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_mio_uart2_tfr_s cn52xx; struct cvmx_mio_uart2_tfr_s cn52xxp1; } cvmx_mio_uart2_tfr_t; /** * cvmx_mio_uart2_thr */ typedef union { uint64_t u64; struct cvmx_mio_uart2_thr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t thr : 8; /**< Transmit Holding Register */ #else uint64_t thr : 8; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_mio_uart2_thr_s cn52xx; struct cvmx_mio_uart2_thr_s cn52xxp1; } cvmx_mio_uart2_thr_t; /** * cvmx_mio_uart2_usr */ typedef union { uint64_t u64; struct cvmx_mio_uart2_usr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t rff : 1; /**< RX FIFO Full */ 
uint64_t rfne : 1; /**< RX FIFO Not Empty */ uint64_t tfe : 1; /**< TX FIFO Empty */ uint64_t tfnf : 1; /**< TX FIFO Not Full */ uint64_t busy : 1; /**< Busy bit (always 0 in PASS3) */ #else uint64_t busy : 1; uint64_t tfnf : 1; uint64_t tfe : 1; uint64_t rfne : 1; uint64_t rff : 1; uint64_t reserved_5_63 : 59; #endif } s; struct cvmx_mio_uart2_usr_s cn52xx; struct cvmx_mio_uart2_usr_s cn52xxp1; } cvmx_mio_uart2_usr_t; /** * cvmx_mix#_bist * * MIX_BIST = MIX BIST Register * * Description: * NOTE: To read the MIX_BIST register, a device would issue an IOBLD64 directed at the MIO. */ typedef union { uint64_t u64; struct cvmx_mixx_bist_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t mrqdat : 1; /**< Bist Results for NBR CSR RdReq RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t ipfdat : 1; /**< Bist Results for MIX Inbound Packet RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t irfdat : 1; /**< Bist Results for MIX I-Ring Entry RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t orfdat : 1; /**< Bist Results for MIX O-Ring Entry RAM - 0: GOOD (or bist in progress/never run) - 1: BAD */ #else uint64_t orfdat : 1; uint64_t irfdat : 1; uint64_t ipfdat : 1; uint64_t mrqdat : 1; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_mixx_bist_s cn52xx; struct cvmx_mixx_bist_s cn52xxp1; struct cvmx_mixx_bist_s cn56xx; struct cvmx_mixx_bist_s cn56xxp1; } cvmx_mixx_bist_t; /** * cvmx_mix#_ctl * * MIX_CTL = MIX Control Register * * Description: * NOTE: To write to the MIX_CTL register, a device would issue an IOBST directed at the MIO. * To read the MIX_CTL register, a device would issue an IOBLD64 directed at the MIO. */ typedef union { uint64_t u64; struct cvmx_mixx_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t crc_strip : 1; /**< HW CRC Strip Enable When enabled, the last 4 bytes (CRC) of the ingress packet are not included in cumulative packet byte length. In other words, the cumulative LEN field for all I-Ring Buffer Entries associated with a given ingress packet will be 4 bytes less (so that the final 4B HW CRC packet data is not processed by software). */ uint64_t busy : 1; /**< MIX Busy Status bit MIX will assert busy status any time there are: 1) L2/DRAM reads in-flight (NCB-arb to read response) 2) L2/DRAM writes in-flight (NCB-arb to write data is sent). 3) L2/DRAM write commits in-flight (NCB-arb to write commit response). NOTE: After MIX_CTL[EN]=0, the MIX will eventually complete any "inflight" transactions, at which point the BUSY bit will de-assert. */ uint64_t en : 1; /**< MIX Enable bit When EN=0, MIX will no longer arbitrate for any new L2/DRAM read/write requests on the NCB Bus. MIX will complete any requests that are currently pended for the NCB Bus. */ uint64_t reset : 1; /**< MIX Soft Reset When SW writes a '1' to MIX_CTL[RESET], the MIX logic will be soft reset. NOTE: The MIX-AGL RSL-CSR accesses are not affected by soft reset (to allow RSL accesses during soft reset). NOTE: The MIX-MIX NCB-direct CSR accesses are not affected by soft reset (to allow RSL accesses during soft reset). NOTE: Writing '1' will create a "64 eclk" soft reset pulse chain used by both MIX/AGL subcomponents to soft reset the MIX/AGL. SW should avoid sending any MIX/AGL CSR R/Ws until after this 64 eclk reset window has expired (otherwise results are unpredictable). NOTE: RESET is intentionally 'read as zero'.
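As a hedged illustration (not part of the register description), the following sketch implements the numbered soft-reset sequence described next, assuming the usual SDK CSR accessors cvmx_read_csr()/cvmx_write_csr() and a CVMX_MIXX_CTL address macro defined elsewhere in the SDK:

  cvmx_mixx_ctl_t ctl;
  ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(0));
  ctl.s.en = 0;                            // 1) block new NCB transactions
  cvmx_write_csr(CVMX_MIXX_CTL(0), ctl.u64);
  do {                                     // 2) wait for in-flight transactions to drain
      ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(0));
  } while (ctl.s.busy);
  ctl.s.reset = 1;                         // 3) pulse soft reset, then read back to
  cvmx_write_csr(CVMX_MIXX_CTL(0), ctl.u64);
  cvmx_read_csr(CVMX_MIXX_CTL(0));         //    wait out the reset pulse chain
                                           // 4) re-initialize MIX/AGL as after a hard reset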
The intended "soft reset" sequence is: 1) Write MIX_CTL[EN]=0 [To prevent any NEW transactions from being started] 2) Wait for MIX_CTL[BUSY]=0 [To indicate that all inflight transactions have completed] 3) Write MIX_CTL[RESET]=1, followed by a MIX_CTL CSR read and wait for the result. This will generate the soft-reset pulse chain that will reset MIX/AGL (except logic to gain access to CSRs). 4) Re-Initialize the MIX/AGL just as would be done for a hard reset. */ uint64_t lendian : 1; /**< Packet Little Endian Mode (0: Big Endian Mode/1: Little Endian Mode) When the mode is set, MIX will byte-swap packet data loads/stores at the MIX/NCB boundary. */ uint64_t nbtarb : 1; /**< MIX CB-Request Arbitration Mode. When set to zero, the arbiter is fixed priority with the following priority scheme: Highest Priority: I-Ring Packet Write Request O-Ring Packet Read Request I-Ring Entry Write Request I-Ring Entry Read Request O-Ring Entry Read Request When set to one, the arbiter is round robin. */ uint64_t mrq_hwm : 2; /**< MIX CB-Request FIFO Programmable High Water Mark. The MRQ contains 16 CB-Requests which are CSR Rd/Wr Requests. If the MRQ backs up with "HWM" entries, then new CB-Requests are 'stalled'. [0]: HWM = 16 [1]: HWM = 15 [2]: HWM = 14 [3]: HWM = 13 NOTE: This must only be written at power-on/boot time. */ #else uint64_t mrq_hwm : 2; uint64_t nbtarb : 1; uint64_t lendian : 1; uint64_t reset : 1; uint64_t en : 1; uint64_t busy : 1; uint64_t crc_strip : 1; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_mixx_ctl_s cn52xx; struct cvmx_mixx_ctl_s cn52xxp1; struct cvmx_mixx_ctl_s cn56xx; struct cvmx_mixx_ctl_s cn56xxp1; } cvmx_mixx_ctl_t; /** * cvmx_mix#_intena * * MIX_INTENA = MIX Local Interrupt Enable Mask Register * * Description: * NOTE: To write to the MIX_INTENA register, a device would issue an IOBST directed at the MIO. * To read the MIX_INTENA register, a device would issue an IOBLD64 directed at the MIO. */ typedef union { uint64_t u64; struct cvmx_mixx_intena_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_7_63 : 57; uint64_t orunena : 1; /**< ORCNT UnderFlow Interrupt Enable If both the global interrupt mask bits (CIU_INTx_EN*[MII]) and this local interrupt mask bit are set, then an interrupt is reported for an ORCNT underflow condition MIX_ISR[ORUN]. */ uint64_t irunena : 1; /**< IRCNT UnderFlow Interrupt Enable If both the global interrupt mask bits (CIU_INTx_EN*[MII]) and this local interrupt mask bit are set, then an interrupt is reported for an IRCNT underflow condition MIX_ISR[IRUN]. */ uint64_t data_drpena : 1; /**< Data was dropped due to RX FIFO full Interrupt enable. If both the global interrupt mask bits (CIU_INTx_EN*[MII]) and the local interrupt mask bit(DATA_DRPENA) are set, then an interrupt is reported for this event. */ uint64_t ithena : 1; /**< Inbound Ring Threshold Exceeded Interrupt Enable If both the global interrupt mask bits (CIU_INTx_EN*[MII]) and this local interrupt mask bit are set, then an interrupt is reported for an Inbound Ring Threshold Exceeded event(IRTHRESH). */ uint64_t othena : 1; /**< Outbound Ring Threshold Exceeded Interrupt Enable If both the global interrupt mask bits (CIU_INTx_EN*[MII]) and this local interrupt mask bit are set, then an interrupt is reported for an Outbound Ring Threshold Exceeded event(ORTHRESH).
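As a usage sketch for this enable register as a whole, not taken from the HW spec itself (it assumes the cvmx_read_csr()/cvmx_write_csr() helpers and the CVMX_MIXX_INTENA(offset) address macro that the SDK provides elsewhere; the CIU_INTx_EN*[MII] global mask must be set separately):

  cvmx_mixx_intena_t intena;
  intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(0));
  intena.s.ithena = 1;
  intena.s.othena = 1;
  cvmx_write_csr(CVMX_MIXX_INTENA(0), intena.u64);

The read-modify-write unmasks only the two ring-threshold events and preserves the other five local enables.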
*/ uint64_t ivfena : 1; /**< Inbound DoorBell(IDBELL) Overflow Interrupt Enable If both the global interrupt mask bits (CIU_INTx_EN*[MII]) and this local interrupt mask bit are set, then an interrupt is reported for an Inbound Doorbell Overflow event(IDBOVF). */ uint64_t ovfena : 1; /**< Outbound DoorBell(ODBELL) Overflow Interrupt Enable If both the global interrupt mask bits (CIU_INTx_EN*[MII]) and this local interrupt mask bit are set, then an interrupt is reported for an Outbound Doorbell Overflow event(ODBOVF). */ #else uint64_t ovfena : 1; uint64_t ivfena : 1; uint64_t othena : 1; uint64_t ithena : 1; uint64_t data_drpena : 1; uint64_t irunena : 1; uint64_t orunena : 1; uint64_t reserved_7_63 : 57; #endif } s; struct cvmx_mixx_intena_s cn52xx; struct cvmx_mixx_intena_s cn52xxp1; struct cvmx_mixx_intena_s cn56xx; struct cvmx_mixx_intena_s cn56xxp1; } cvmx_mixx_intena_t; /** * cvmx_mix#_ircnt * * MIX_IRCNT = MIX I-Ring Pending Packet Counter * * Description: * NOTE: To write to the MIX_IRCNT register, a device would issue an IOBST directed at the MIO. * To read the MIX_IRCNT register, a device would issue an IOBLD64 directed at the MIO. */ typedef union { uint64_t u64; struct cvmx_mixx_ircnt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t ircnt : 20; /**< Pending \# of I-Ring Packets. Whenever HW writes a completion code of Done, Trunc, CRCErr or Err, it increments the IRCNT (to indicate to SW the \# of pending Input packets in system memory). NOTE: The HW guarantees that the completion code write is always visible in system memory BEFORE it increments the IRCNT. Reads of IRCNT return the current inbound packet count. Writes of IRCNT decrement the count by the value written. This register is used to generate interrupts to alert SW of pending inbound MIX packets in system memory. NOTE: In the case of inbound packets that span multiple I-Ring entries, SW must keep track of the \# of I-Ring Entries associated with a given inbound packet to reclaim the proper \# of I-Ring Entries for re-use. */ #else uint64_t ircnt : 20; uint64_t reserved_20_63 : 44; #endif } s; struct cvmx_mixx_ircnt_s cn52xx; struct cvmx_mixx_ircnt_s cn52xxp1; struct cvmx_mixx_ircnt_s cn56xx; struct cvmx_mixx_ircnt_s cn56xxp1; } cvmx_mixx_ircnt_t; /** * cvmx_mix#_irhwm * * MIX_IRHWM = MIX I-Ring High-Water Mark Threshold Register * * Description: * NOTE: To write to the MIX_IRHWM register, a device would issue an IOBST directed at the MIO. * To read the MIX_IRHWM register, a device would issue an IOBLD64 directed at the MIO. */ typedef union { uint64_t u64; struct cvmx_mixx_irhwm_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_40_63 : 24; uint64_t ibplwm : 20; /**< I-Ring BackPressure Low Water Mark Threshold. When the \# of available I-Ring Entries (IDBELL) is less than IBPLWM, the AGL-MAC will: a) In full-duplex mode: send periodic PAUSE packets. b) In half-duplex mode: Force collisions. This programmable mechanism is provided as a means to backpressure input traffic 'early' enough (so that packets are not 'dropped' by OCTEON). */ uint64_t irhwm : 20; /**< I-Ring Entry High Water Mark Threshold. Used to determine when the \# of Inbound packets in system memory(MIX_IRCNT[IRCNT]) exceeds this IRHWM threshold. NOTE: The power-on value of the CIU_INTx_EN*[MII] interrupt enable bits is zero and must be enabled to allow interrupts to be reported.
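As an illustration only (the threshold values are arbitrary; CVMX_MIXX_IRHWM(offset) and cvmx_write_csr() are the SDK helpers assumed above), asking for an IRTHRESH interrupt once more than 32 inbound packets are pending and for backpressure when fewer than 16 I-Ring entries remain:

  cvmx_mixx_irhwm_t irhwm;
  irhwm.u64 = 0;
  irhwm.s.irhwm = 32;
  irhwm.s.ibplwm = 16;
  cvmx_write_csr(CVMX_MIXX_IRHWM(0), irhwm.u64);

MIX_ISR[IRTHRESH] then sets once MIX_IRCNT[IRCNT] exceeds 32, and an interrupt is reported if the local (ITHENA) and global (CIU_INTx_EN*[MII]) masks are set.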
*/ #else uint64_t irhwm : 20; uint64_t ibplwm : 20; uint64_t reserved_40_63 : 24; #endif } s; struct cvmx_mixx_irhwm_s cn52xx; struct cvmx_mixx_irhwm_s cn52xxp1; struct cvmx_mixx_irhwm_s cn56xx; struct cvmx_mixx_irhwm_s cn56xxp1; } cvmx_mixx_irhwm_t; /** * cvmx_mix#_iring1 * * MIX_IRING1 = MIX Inbound Ring Register \#1 * * Description: * NOTE: To write to the MIX_IRING1 register, a device would issue an IOBST directed at the MIO. * To read the MIX_IRING1 register, a device would issue an IOBLD64 directed at the MIO. */ typedef union { uint64_t u64; struct cvmx_mixx_iring1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_60_63 : 4; uint64_t isize : 20; /**< Represents the Inbound Ring Buffer's Size(in 8B words). The ring can be as large as 1M entries. NOTE: This CSR MUST BE setup by SW at power-on (when IDBELL/IRCNT=0). */ uint64_t reserved_36_39 : 4; uint64_t ibase : 33; /**< Represents the 8B-aligned base address of the first Inbound Ring entry in system memory. NOTE: SW MUST ONLY write to this register during power-on/boot code. */ uint64_t reserved_0_2 : 3; #else uint64_t reserved_0_2 : 3; uint64_t ibase : 33; uint64_t reserved_36_39 : 4; uint64_t isize : 20; uint64_t reserved_60_63 : 4; #endif } s; struct cvmx_mixx_iring1_s cn52xx; struct cvmx_mixx_iring1_s cn52xxp1; struct cvmx_mixx_iring1_s cn56xx; struct cvmx_mixx_iring1_s cn56xxp1; } cvmx_mixx_iring1_t; /** * cvmx_mix#_iring2 * * MIX_IRING2 = MIX Inbound Ring Register \#2 * * Description: * NOTE: To write to the MIX_IRING2 register, a device would issue an IOBST directed at the MIO. * To read the MIX_IRING2 register, a device would issue an IOBLD64 directed at the MIO. */ typedef union { uint64_t u64; struct cvmx_mixx_iring2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_52_63 : 12; uint64_t itlptr : 20; /**< The Inbound Ring Tail Pointer selects the I-Ring Entry that the HW will process next. After the HW completes receiving an inbound packet, it increments the I-Ring Tail Pointer. NOTE: The I-Ring Tail Pointer HW increment is always modulo ISIZE. NOTE: This field is 'read-only' to SW. */ uint64_t reserved_20_31 : 12; uint64_t idbell : 20; /**< Represents the cumulative total of pending Inbound Ring Buffer Entries. Each I-Ring Buffer Entry contains 1) an L2/DRAM byte pointer along with 2) a Byte Length. After SW inserts a new entry into the I-Ring Buffer, it "rings the doorbell for the inbound ring". When the MIX HW receives the doorbell ring, it advances the doorbell count for the I-Ring. SW must never cause the doorbell count for the I-Ring to exceed the size of the I-ring(ISIZE). A read of the CSR indicates the current doorbell count. */ #else uint64_t idbell : 20; uint64_t reserved_20_31 : 12; uint64_t itlptr : 20; uint64_t reserved_52_63 : 12; #endif } s; struct cvmx_mixx_iring2_s cn52xx; struct cvmx_mixx_iring2_s cn52xxp1; struct cvmx_mixx_iring2_s cn56xx; struct cvmx_mixx_iring2_s cn56xxp1; } cvmx_mixx_iring2_t; /** * cvmx_mix#_isr * * MIX_ISR = MIX Interrupt/Status Register * * Description: * NOTE: To write to the MIX_ISR register, a device would issue an IOBST directed at the MIO. * To read the MIX_ISR register, a device would issue an IOBLD64 directed at the MIO. */ typedef union { uint64_t u64; struct cvmx_mixx_isr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_7_63 : 57; uint64_t orun : 1; /**< ORCNT UnderFlow Detected If SW writes a larger value than what is currently in the MIX_ORCNT[ORCNT], then HW will report the underflow condition. NOTE: The MIX_ORCNT[ORCNT] will clamp to zero.
NOTE: If an ORUN underflow condition is detected, the integrity of the MIX/AGL HW state has been compromised. To recover, SW must issue a software reset sequence (see: MIX_CTL[RESET]). */ uint64_t irun : 1; /**< IRCNT UnderFlow Detected If SW writes a larger value than what is currently in the MIX_IRCNT[IRCNT], then HW will report the underflow condition. NOTE: The MIX_IRCNT[IRCNT] will clamp to zero. NOTE: If an IRUN underflow condition is detected, the integrity of the MIX/AGL HW state has been compromised. To recover, SW must issue a software reset sequence (see: MIX_CTL[RESET]). */ uint64_t data_drp : 1; /**< Data was dropped due to RX FIFO full If this does occur, the DATA_DRP is set and the CIU_INTx_SUM0,4[MII] bits are set. If both the global interrupt mask bits (CIU_INTx_EN*[MII]) and the local interrupt mask bit(DATA_DRPENA) are set, then an interrupt is reported for this event. */ uint64_t irthresh : 1; /**< Inbound Ring Packet Threshold Exceeded When the pending \# of inbound packets in system memory(IRCNT) has exceeded a programmable threshold (IRHWM), then this bit is set. If this does occur, the IRTHRESH is set and the CIU_INTx_SUM0,4[MII] bits are set if ((MIX_ISR & MIX_INTENA) != 0). If both the global interrupt mask bits (CIU_INTx_EN*[MII]) and the local interrupt mask bit(ITHENA) are set, then an interrupt is reported for this event. */ uint64_t orthresh : 1; /**< Outbound Ring Packet Threshold Exceeded When the pending \# of outbound packets in system memory(ORCNT) has exceeded a programmable threshold (ORHWM), then this bit is set. If this does occur, the ORTHRESH is set and the CIU_INTx_SUM0,4[MII] bits are set if ((MIX_ISR & MIX_INTENA) != 0). If both the global interrupt mask bits (CIU_INTx_EN*[MII]) and the local interrupt mask bit(OTHENA) are set, then an interrupt is reported for this event. */ uint64_t idblovf : 1; /**< Inbound DoorBell(IDBELL) Overflow Detected If SW attempts to write to the MIX_IRING2[IDBELL] with a value greater than the remaining \# of I-Ring Buffer Entries (MIX_REMCNT[IREMCNT]), then the following occurs: 1) The MIX_IRING2[IDBELL] write is IGNORED 2) The IDBLOVF is set and the CIU_INTx_SUM0,4[MII] bits are set if ((MIX_ISR & MIX_INTENA) != 0). If both the global interrupt mask bits (CIU_INTx_EN*[MII]) and the local interrupt mask bit(IVFENA) are set, then an interrupt is reported for this event. SW should keep track of the \# of I-Ring Entries in use (i.e. cumulative \# of IDBELL writes), and ensure that future IDBELL writes don't exceed the size of the I-Ring Buffer (MIX_IRING1[ISIZE]). SW must reclaim I-Ring Entries by keeping track of the \# of I-Ring Entries, and writing to the MIX_IRCNT[IRCNT]. NOTE: The MIX_IRCNT[IRCNT] register represents the total \# of packets(not I-Ring Entries) and SW must further keep track of the \# of I-Ring Entries associated with each packet as they are processed. NOTE: There is no recovery from an IDBLOVF Interrupt. If it occurs, it's an indication that SW has overwritten the I-Ring buffer, and the only recourse is a HW reset. */ uint64_t odblovf : 1; /**< Outbound DoorBell(ODBELL) Overflow Detected If SW attempts to write to the MIX_ORING2[ODBELL] with a value greater than the remaining \# of O-Ring Buffer Entries (MIX_REMCNT[OREMCNT]), then the following occurs: 1) The MIX_ORING2[ODBELL] write is IGNORED 2) The ODBLOVF is set and the CIU_INTx_SUM0,4[MII] bits are set if ((MIX_ISR & MIX_INTENA) != 0).
If both the global interrupt mask bits (CIU_INTx_EN*[MII]) and the local interrupt mask bit(OVFENA) are set, then an interrupt is reported for this event. SW should keep track of the \# of O-Ring Entries in use (i.e. cumulative \# of ODBELL writes), and ensure that future ODBELL writes don't exceed the size of the O-Ring Buffer (MIX_ORING1[OSIZE]). SW must reclaim O-Ring Entries by writing to the MIX_ORCNT[ORCNT]. NOTE: There is no recovery from an ODBLOVF Interrupt. If it occurs, it's an indication that SW has overwritten the O-Ring buffer, and the only recourse is a HW reset. */ #else uint64_t odblovf : 1; uint64_t idblovf : 1; uint64_t orthresh : 1; uint64_t irthresh : 1; uint64_t data_drp : 1; uint64_t irun : 1; uint64_t orun : 1; uint64_t reserved_7_63 : 57; #endif } s; struct cvmx_mixx_isr_s cn52xx; struct cvmx_mixx_isr_s cn52xxp1; struct cvmx_mixx_isr_s cn56xx; struct cvmx_mixx_isr_s cn56xxp1; } cvmx_mixx_isr_t; /** * cvmx_mix#_orcnt * * MIX_ORCNT = MIX O-Ring Packets Sent Counter * * Description: * NOTE: To write to the MIX_ORCNT register, a device would issue an IOBST directed at the MIO. * To read the MIX_ORCNT register, a device would issue an IOBLD64 directed at the MIO. */ typedef union { uint64_t u64; struct cvmx_mixx_orcnt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t orcnt : 20; /**< Pending \# of O-Ring Packets. Whenever HW removes a packet from the O-Ring, it increments the ORCNT (to indicate to SW the \# of Output packets in system memory that can be reclaimed). Reads of ORCNT return the current count. Writes of ORCNT decrement the count by the value written. This register is used to generate interrupts to alert SW of pending outbound MIX packets that have been removed from system memory. (see MIX_ISR[ORTHRESH] description for more details). NOTE: For outbound packets, the \# of O-Ring Packets is equal to the \# of O-Ring Entries. */ #else uint64_t orcnt : 20; uint64_t reserved_20_63 : 44; #endif } s; struct cvmx_mixx_orcnt_s cn52xx; struct cvmx_mixx_orcnt_s cn52xxp1; struct cvmx_mixx_orcnt_s cn56xx; struct cvmx_mixx_orcnt_s cn56xxp1; } cvmx_mixx_orcnt_t; /** * cvmx_mix#_orhwm * * MIX_ORHWM = MIX O-Ring High-Water Mark Threshold Register * * Description: * NOTE: To write to the MIX_ORHWM register, a device would issue an IOBST directed at the MIO. * To read the MIX_ORHWM register, a device would issue an IOBLD64 directed at the MIO. */ typedef union { uint64_t u64; struct cvmx_mixx_orhwm_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t orhwm : 20; /**< O-Ring Entry High Water Mark Threshold. Used to determine when the \# of Outbound packets in system memory that can be reclaimed (MIX_ORCNT[ORCNT]) exceeds this ORHWM threshold. NOTE: The power-on value of the CIU_INTx_EN*[MII] interrupt enable bits is zero and must be enabled to allow interrupts to be reported. */ #else uint64_t orhwm : 20; uint64_t reserved_20_63 : 44; #endif } s; struct cvmx_mixx_orhwm_s cn52xx; struct cvmx_mixx_orhwm_s cn52xxp1; struct cvmx_mixx_orhwm_s cn56xx; struct cvmx_mixx_orhwm_s cn56xxp1; } cvmx_mixx_orhwm_t; /** * cvmx_mix#_oring1 * * MIX_ORING1 = MIX Outbound Ring Register \#1 * * Description: * NOTE: To write to the MIX_ORING1 register, a device would issue an IOBST directed at the MIO. * To read the MIX_ORING1 register, a device would issue an IOBLD64 directed at the MIO.
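As a boot-time sketch only (ring_base is a hypothetical 8B-aligned physical address supplied by the caller; CVMX_MIXX_ORING1(offset) and cvmx_write_csr() are the SDK helpers assumed above):

  cvmx_mixx_oring1_t oring1;
  oring1.u64 = 0;
  oring1.s.obase = ring_base >> 3;
  oring1.s.osize = 128;
  cvmx_write_csr(CVMX_MIXX_ORING1(0), oring1.u64);

Per the field notes below, this write must happen at power-on/boot, while ODBELL/ORCNT are still 0.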
*/ typedef union { uint64_t u64; struct cvmx_mixx_oring1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_60_63 : 4; uint64_t osize : 20; /**< Represents the Outbound Ring Buffer's Size(in 8B words). The ring can be as large as 1M entries. NOTE: This CSR MUST BE setup by SW at power-on (when ODBELL/ORCNT=0). */ uint64_t reserved_36_39 : 4; uint64_t obase : 33; /**< Represents the 8B-aligned base address of the first Outbound Ring(O-Ring) Entry in system memory. NOTE: SW MUST ONLY write to this register during power-on/boot code. */ uint64_t reserved_0_2 : 3; #else uint64_t reserved_0_2 : 3; uint64_t obase : 33; uint64_t reserved_36_39 : 4; uint64_t osize : 20; uint64_t reserved_60_63 : 4; #endif } s; struct cvmx_mixx_oring1_s cn52xx; struct cvmx_mixx_oring1_s cn52xxp1; struct cvmx_mixx_oring1_s cn56xx; struct cvmx_mixx_oring1_s cn56xxp1; } cvmx_mixx_oring1_t; /** * cvmx_mix#_oring2 * * MIX_ORING2 = MIX Outbound Ring Register \#2 * * Description: * NOTE: To write to the MIX_ORING2 register, a device would issue an IOBST directed at the MIO. * To read the MIX_ORING2 register, a device would issue an IOBLD64 directed at the MIO. */ typedef union { uint64_t u64; struct cvmx_mixx_oring2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_52_63 : 12; uint64_t otlptr : 20; /**< The Outbound Ring Tail Pointer selects the O-Ring Entry that the HW will process next. After the HW completes sending an outbound packet, it increments the O-Ring Tail Pointer. NOTE: The O-Ring Tail Pointer HW increment is always modulo MIX_ORING1[OSIZE]. NOTE: This field is 'read-only' to SW. */ uint64_t reserved_20_31 : 12; uint64_t odbell : 20; /**< Represents the cumulative total of pending Outbound Ring(O-Ring) Buffer Entries. Each O-Ring Buffer Entry contains 1) an L2/DRAM byte pointer along with 2) a Byte Length. After SW inserts new entries into the O-Ring Buffer, it "rings the doorbell with the count of the newly inserted entries". When the MIX HW receives the doorbell ring, it increments the current doorbell count by the CSR write value. SW must never cause the doorbell count for the O-Ring to exceed the size of the ring(OSIZE). A read of the CSR indicates the current doorbell count. */ #else uint64_t odbell : 20; uint64_t reserved_20_31 : 12; uint64_t otlptr : 20; uint64_t reserved_52_63 : 12; #endif } s; struct cvmx_mixx_oring2_s cn52xx; struct cvmx_mixx_oring2_s cn52xxp1; struct cvmx_mixx_oring2_s cn56xx; struct cvmx_mixx_oring2_s cn56xxp1; } cvmx_mixx_oring2_t; /** * cvmx_mix#_remcnt * * MIX_REMCNT = MIX Ring Buffer Remainder Counts (useful for HW debug only) * * Description: * NOTE: To read the MIX_REMCNT register, a device would issue an IOBLD64 directed at the MIO. */ typedef union { uint64_t u64; struct cvmx_mixx_remcnt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_52_63 : 12; uint64_t iremcnt : 20; /**< Remaining I-Ring Buffer Count Reflects the \# of unused/remaining I-Ring Entries that HW currently detects in the I-Ring Buffer. HW uses this value to detect I-Ring Doorbell overflows. (see: MIX_ISR[IDBLOVF]) When SW writes the MIX_IRING1[ISIZE], the IREMCNT is loaded with the MIX_IRING1[ISIZE] value. (NOTE: ISIZE should only be written at power-on, when it's known that there are no I-Ring Entries currently in use by HW). When SW writes to the IDBELL register, the IREMCNT is decremented by the CSR write value. When HW issues an IRing Write Request(onto NCB Bus), the IREMCNT is incremented by 1.
*/ uint64_t reserved_20_31 : 12; uint64_t oremcnt : 20; /**< Remaining O-Ring Buffer Count Reflects the \# of unused/remaining O-Ring Entries that HW currently detects in the O-Ring Buffer. HW uses this value to detect O-Ring Doorbell overflows. (see: MIX_ISR[ODBLOVF]) When SW writes the MIX_ORING1[OSIZE], the OREMCNT is loaded with the MIX_ORING1[OSIZE] value. (NOTE: OSIZE should only be written at power-on, when it's known that there are no O-Ring Entries currently in use by HW). When SW writes to the ODBELL register, the OREMCNT is decremented by the CSR write value. When HW issues an ORing Read Request(onto NCB Bus), the OREMCNT is incremented by 1. */ #else uint64_t oremcnt : 20; uint64_t reserved_20_31 : 12; uint64_t iremcnt : 20; uint64_t reserved_52_63 : 12; #endif } s; struct cvmx_mixx_remcnt_s cn52xx; struct cvmx_mixx_remcnt_s cn52xxp1; struct cvmx_mixx_remcnt_s cn56xx; struct cvmx_mixx_remcnt_s cn56xxp1; } cvmx_mixx_remcnt_t; /** * cvmx_mpi_cfg */ typedef union { uint64_t u64; struct cvmx_mpi_cfg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_29_63 : 35; uint64_t clkdiv : 13; /**< Fsclk = Feclk / (2 * CLKDIV) CLKDIV = Feclk / (2 * Fsclk) */ uint64_t reserved_12_15 : 4; uint64_t cslate : 1; /**< If 0, MPI_CS asserts 1/2 SCLK before transaction 1, MPI_CS asserts coincident with transaction NOTE: only used if CSENA == 1 */ uint64_t tritx : 1; /**< If 0, MPI_TX pin is driven when slave is not expected to be driving 1, MPI_TX pin is tristated when not transmitting NOTE: only used when WIREOR==1 */ uint64_t idleclks : 2; /**< Guarantee IDLECLKS idle sclk cycles between commands. */ uint64_t cshi : 1; /**< If 0, CS is low asserted 1, CS is high asserted */ uint64_t csena : 1; /**< If 0, the MPI_CS is a GPIO, not used by MPI_TX 1, CS is driven per MPI_TX instruction */ uint64_t int_ena : 1; /**< If 0, polling is required 1, MPI engine interrupts at end of transaction */ uint64_t lsbfirst : 1; /**< If 0, shift MSB first 1, shift LSB first */ uint64_t wireor : 1; /**< If 0, MPI_TX and MPI_RX are separate wires (SPI) MPI_TX pin is always driven 1, MPI_TX/RX is all from MPI_TX pin (MPI) MPI_TX pin is tristated when not transmitting NOTE: if WIREOR==1, MPI_RX pin is not used by the MPI engine */ uint64_t clk_cont : 1; /**< If 0, clock idles to value given by IDLELO after completion of MPI transaction 1, clock never idles, requires CS deassertion/assertion between commands */ uint64_t idlelo : 1; /**< If 0, MPI_CLK idles high, 1st transition is hi->lo 1, MPI_CLK idles low, 1st transition is lo->hi */ uint64_t enable : 1; /**< If 0, all MPI pins are GPIOs 1, MPI_CLK, MPI_CS, and MPI_TX are driven */ #else uint64_t enable : 1; uint64_t idlelo : 1; uint64_t clk_cont : 1; uint64_t wireor : 1; uint64_t lsbfirst : 1; uint64_t int_ena : 1; uint64_t csena : 1; uint64_t cshi : 1; uint64_t idleclks : 2; uint64_t tritx : 1; uint64_t cslate : 1; uint64_t reserved_12_15 : 4; uint64_t clkdiv : 13; uint64_t reserved_29_63 : 35; #endif } s; struct cvmx_mpi_cfg_s cn30xx; struct cvmx_mpi_cfg_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_29_63 : 35; uint64_t clkdiv : 13; /**< Fsclk = Feclk / (2 * CLKDIV) CLKDIV = Feclk / (2 * Fsclk) */ uint64_t reserved_11_15 : 5; uint64_t tritx : 1; /**< If 0, MPI_TX pin is driven when slave is not expected to be driving 1, MPI_TX pin is tristated when not transmitting NOTE: only used when WIREOR==1 */ uint64_t idleclks : 2; /**< Guarantee IDLECLKS idle sclk cycles between commands.
*/ uint64_t cshi : 1; /**< If 0, CS is low asserted 1, CS is high asserted */ uint64_t csena : 1; /**< If 0, the MPI_CS is a GPIO, not used by MPI_TX 1, CS is driven per MPI_TX instruction */ uint64_t int_ena : 1; /**< If 0, polling is required 1, MPI engine interrupts at end of transaction */ uint64_t lsbfirst : 1; /**< If 0, shift MSB first 1, shift LSB first */ uint64_t wireor : 1; /**< If 0, MPI_TX and MPI_RX are separate wires (SPI) MPI_TX pin is always driven 1, MPI_TX/RX is all from MPI_TX pin (MPI) MPI_TX pin is tristated when not transmitting NOTE: if WIREOR==1, MPI_RX pin is not used by the MPI engine */ uint64_t clk_cont : 1; /**< If 0, clock idles to value given by IDLELO after completion of MPI transaction 1, clock never idles, requires CS deassertion/assertion between commands */ uint64_t idlelo : 1; /**< If 0, MPI_CLK idles high, 1st transition is hi->lo 1, MPI_CLK idles low, 1st transition is lo->hi */ uint64_t enable : 1; /**< If 0, all MPI pins are GPIOs 1, MPI_CLK, MPI_CS, and MPI_TX are driven */ #else uint64_t enable : 1; uint64_t idlelo : 1; uint64_t clk_cont : 1; uint64_t wireor : 1; uint64_t lsbfirst : 1; uint64_t int_ena : 1; uint64_t csena : 1; uint64_t cshi : 1; uint64_t idleclks : 2; uint64_t tritx : 1; uint64_t reserved_11_15 : 5; uint64_t clkdiv : 13; uint64_t reserved_29_63 : 35; #endif } cn31xx; struct cvmx_mpi_cfg_s cn50xx; } cvmx_mpi_cfg_t; /** * cvmx_mpi_dat# */ typedef union { uint64_t u64; struct cvmx_mpi_datx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t data : 8; /**< Data to transmit / data received */ #else uint64_t data : 8; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_mpi_datx_s cn30xx; struct cvmx_mpi_datx_s cn31xx; struct cvmx_mpi_datx_s cn50xx; } cvmx_mpi_datx_t; /** * cvmx_mpi_sts */ typedef union { uint64_t u64; struct cvmx_mpi_sts_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_13_63 : 51; uint64_t rxnum : 5; /**< Number of bytes written for transaction */ uint64_t reserved_1_7 : 7; uint64_t busy : 1; /**< If 0, no MPI transaction in progress 1, MPI engine is processing a transaction */ #else uint64_t busy : 1; uint64_t reserved_1_7 : 7; uint64_t rxnum : 5; uint64_t reserved_13_63 : 51; #endif } s; struct cvmx_mpi_sts_s cn30xx; struct cvmx_mpi_sts_s cn31xx; struct cvmx_mpi_sts_s cn50xx; } cvmx_mpi_sts_t; /** * cvmx_mpi_tx */ typedef union { uint64_t u64; struct cvmx_mpi_tx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_17_63 : 47; uint64_t leavecs : 1; /**< If 0, deassert CS after transaction is done 1, leave CS asserted after transaction is done */ uint64_t reserved_13_15 : 3; uint64_t txnum : 5; /**< Number of bytes to transmit */ uint64_t reserved_5_7 : 3; uint64_t totnum : 5; /**< Number of bytes to shift (transmit + receive) */ #else uint64_t totnum : 5; uint64_t reserved_5_7 : 3; uint64_t txnum : 5; uint64_t reserved_13_15 : 3; uint64_t leavecs : 1; uint64_t reserved_17_63 : 47; #endif } s; struct cvmx_mpi_tx_s cn30xx; struct cvmx_mpi_tx_s cn31xx; struct cvmx_mpi_tx_s cn50xx; } cvmx_mpi_tx_t; /** * cvmx_ndf_bt_pg_info * * Notes: * NDF_BT_PG_INFO provides page size and number of column plus row address cycles information. SW writes to this CSR * during boot from Nand Flash. Additionally, SW writes the multiplier value for timing parameters. This value is * used during boot, in the SET_TM_PARAM command. This information is used only by the boot load state machine and is * otherwise a don't care once boot is disabled. Also, boot DMAs do not use this value.
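* As a worked example of the page-size encoding given below: a 2k page is programmed as SIZE = 2, since 2 ** (2 + 1) times 256 = 2048 bytes; a 512-byte page is SIZE = 0, and a 64k page is SIZE = 7.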
* * Bytes per Nand Flash page = 2 ** (SIZE + 1) times 256 bytes. * 512, 1k, 2k, 4k, 8k, 16k, 32k and 64k are legal bytes per page values * * Legal values for ADR_CYC field are 3 through 8. SW CSR writes with a value less than 3 will write a 3 to this * field, and a SW CSR write with a value greater than 8 will write an 8 to this field. * * Like all NDF_... registers, 64-bit operations must be used to access this register */ typedef union { uint64_t u64; struct cvmx_ndf_bt_pg_info_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_11_63 : 53; uint64_t t_mult : 4; /**< Boot time TIM_MULT[3:0] field of SET_TM_PARAM[63:0] command */ uint64_t adr_cyc : 4; /**< # of column address cycles */ uint64_t size : 3; /**< bytes per page in the nand device */ #else uint64_t size : 3; uint64_t adr_cyc : 4; uint64_t t_mult : 4; uint64_t reserved_11_63 : 53; #endif } s; struct cvmx_ndf_bt_pg_info_s cn52xx; } cvmx_ndf_bt_pg_info_t; /** * cvmx_ndf_cmd * * Notes: * When SW reads this csr, the RD_VAL bit in the NDF_MISC csr is cleared to 0. SW must always write all 8 bytes whenever it writes * this csr. If there are fewer than 8 bytes left in the command sequence that SW wants the NAND flash controller to execute, it * must insert Idle (WAIT) commands to make up 8 bytes. SW also must ensure there is enough vacancy in the command fifo to accept these * 8 bytes, by first reading the FR_BYT field in the NDF_MISC csr. * * Like all NDF_... registers, 64-bit operations must be used to access this register */ typedef union { uint64_t u64; struct cvmx_ndf_cmd_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t nf_cmd : 64; /**< 8 Command Bytes */ #else uint64_t nf_cmd : 64; #endif } s; struct cvmx_ndf_cmd_s cn52xx; } cvmx_ndf_cmd_t; /** * cvmx_ndf_drbell * * Notes: * SW csr writes will increment CNT by the signed 8 bit value being written. SW csr reads return the current CNT value. * HW will also modify the value of the CNT field. Every time HW executes a BUS_ACQ[15:0] command, to arbitrate and win the * flash bus, it decrements the CNT field by 1. If the CNT field is already 0 or negative, the HW command execution unit will * stall when it fetches the new BUS_ACQ[15:0] command from the command fifo. Only when the SW writes to this CSR with a * non-zero data value can the execution unit come out of the stalled condition and resume execution. * * The intended use of this doorbell CSR is to control execution of the Nand Flash commands. The NDF execution unit * has to arbitrate for the flash bus before it can enable a Nand Flash device connected to the Octeon chip, by * asserting the device's chip enable. Therefore SW should first load the command fifo with a full sequence of * commands to perform a Nand Flash device task. This command sequence will start with a bus acquire command and * the last command in the sequence will be a bus release command. The execution unit will start execution of * the sequence only if the [CNT] field is non-zero when it fetches the bus acquire command, which is the first * command in this sequence. SW can also load multiple such sequences, each starting with a chip enable command * and ending with a chip disable command, and then write a non-zero data value to this csr to increment the * CNT field by the number of command sequences loaded into the command fifo. * * Like all NDF_...
registers, 64-bit operations must be used to access this register */ typedef union { uint64_t u64; struct cvmx_ndf_drbell_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t cnt : 8; /**< Doorbell count register, 2's complement 8 bit value */ #else uint64_t cnt : 8; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_ndf_drbell_s cn52xx; } cvmx_ndf_drbell_t; /** * cvmx_ndf_ecc_cnt * * Notes: * XOR_ECC[31:8] = [ecc_gen_byt258, ecc_gen_byt257, ecc_gen_byt256] xor [ecc_258, ecc_257, ecc_256] * ecc_258, ecc_257 and ecc_256 are bytes stored in Nand Flash and read out during boot * ecc_gen_byt258, ecc_gen_byt257, ecc_gen_byt256 are generated from data read out from Nand Flash * * Like all NDF_... registers, 64-bit operations must be used to access this register */ typedef union { uint64_t u64; struct cvmx_ndf_ecc_cnt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t xor_ecc : 24; /**< Result of XOR of ECC read bytes and ECC generated bytes. The value pertains to the last 1-bit ECC error */ uint64_t ecc_err : 8; /**< Count = \# of 1 bit errors fixed during boot This count saturates instead of wrapping around. */ #else uint64_t ecc_err : 8; uint64_t xor_ecc : 24; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_ndf_ecc_cnt_s cn52xx; } cvmx_ndf_ecc_cnt_t; /** * cvmx_ndf_int * * Notes: * FULL status is updated when the command fifo becomes full as a result of SW writing a new command to it. * * EMPTY status is updated when the command fifo becomes empty as a result of the command execution unit fetching the * last instruction out of the command fifo. * * Like all NDF_... registers, 64-bit operations must be used to access this register */ typedef union { uint64_t u64; struct cvmx_ndf_int_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_7_63 : 57; uint64_t ovrf : 1; /**< NDF_CMD write when fifo is full. Generally a fatal error. */ uint64_t ecc_mult : 1; /**< Multi bit ECC error detected during boot */ uint64_t ecc_1bit : 1; /**< Single bit ECC error detected and fixed during boot */ uint64_t sm_bad : 1; /**< One of the state machines in a bad state */ uint64_t wdog : 1; /**< Watch Dog timer expired during command execution */ uint64_t full : 1; /**< Command fifo is full */ uint64_t empty : 1; /**< Command fifo is empty */ #else uint64_t empty : 1; uint64_t full : 1; uint64_t wdog : 1; uint64_t sm_bad : 1; uint64_t ecc_1bit : 1; uint64_t ecc_mult : 1; uint64_t ovrf : 1; uint64_t reserved_7_63 : 57; #endif } s; struct cvmx_ndf_int_s cn52xx; } cvmx_ndf_int_t; /** * cvmx_ndf_int_en * * Notes: * Like all NDF_...
registers, 64-bit operations must be used to access this register * */ typedef union { uint64_t u64; struct cvmx_ndf_int_en_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_7_63 : 57; uint64_t ovrf : 1; /**< Wrote to a full command fifo */ uint64_t ecc_mult : 1; /**< Multi bit ECC error detected during boot */ uint64_t ecc_1bit : 1; /**< Single bit ECC error detected and fixed during boot */ uint64_t sm_bad : 1; /**< One of the state machines in a bad state */ uint64_t wdog : 1; /**< Watch Dog timer expired during command execution */ uint64_t full : 1; /**< Command fifo is full */ uint64_t empty : 1; /**< Command fifo is empty */ #else uint64_t empty : 1; uint64_t full : 1; uint64_t wdog : 1; uint64_t sm_bad : 1; uint64_t ecc_1bit : 1; uint64_t ecc_mult : 1; uint64_t ovrf : 1; uint64_t reserved_7_63 : 57; #endif } s; struct cvmx_ndf_int_en_s cn52xx; } cvmx_ndf_int_en_t; /** * cvmx_ndf_misc * * Notes: * NBR_HWM this field specifies the high water mark for the NCB outbound load/store commands receive fifo. * The fifo size is 16 entries. * * WAIT_CNT this field allows glitch filtering of the WAIT_n input to Octeon, from Flash Memory. The count * represents the number of eclk cycles. * * FR_BYT this field specifies \# of unfilled bytes in the command fifo. Bytes become unfilled as commands * complete execution and exit. (fifo is 256 bytes when BT_DIS=0, and 1536 bytes when BT_DIS=1) * * RD_DONE this W1C bit is set to 1 by HW when it reads the last 8 bytes out of the command fifo, * in response to RD_CMD bit being set to 1 by SW. * * RD_VAL this read only bit is set to 1 by HW when it reads the next 8 bytes from the command fifo in response * to RD_CMD bit being set to 1. A SW read of NDF_CMD csr clears this bit to 0. * * RD_CMD this R/W bit starts read out from the command fifo, 8 bytes at a time. SW should first read the * RD_VAL bit in this csr to see if the next 8 bytes from the command fifo are available in the * NDF_CMD csr. All command fifo reads start and end on an 8 byte boundary. A RD_CMD in the * middle of command execution will cause the execution to freeze until RD_DONE is set to 1. RD_CMD * bit will be cleared on any NDF_CMD csr write by SW. * * BT_DMA this indicates to the NAND flash boot control state machine that boot dma read can begin. * SW should set this bit to 1 after SW has loaded the command fifo. HW sets the bit to 0 * when boot dma command execution is complete. If chip enable 0 is not nand flash, this bit is * permanently 1'b0 with SW writes ignored. Whenever BT_DIS=1, this bit will be 0. * * BT_DIS this R/W bit indicates to the NAND flash boot control state machine that boot operation has ended. * Whenever this bit changes from 0 to 1, the command fifo is emptied as a side effect. This bit must * never be set when booting from nand flash and region zero is enabled. * * EX_DIS When 1, command execution stops after completing execution of all commands currently in the command * fifo. Once command execution has stopped, and then new commands are loaded into the command fifo, execution * will not resume as long as this bit is 1. When this bit is 0, command execution will resume if the command fifo * is not empty. EX_DIS should be set to 1 during boot, i.e. when BT_DIS = 0. * * RST_FF resets the command fifo to make it empty; any command inflight is not aborted before resetting * the fifo. The fifo comes up empty at the end of power on reset. * * Like all NDF_...
registers, 64-bit operations must be used to access this register */ typedef union { uint64_t u64; struct cvmx_ndf_misc_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_27_63 : 37; uint64_t nbr_hwm : 3; /**< Hi Water mark for NBR fifo or load/stores */ uint64_t wait_cnt : 6; /**< WAIT input filter count */ uint64_t fr_byt : 11; /**< Number of unfilled Command fifo bytes */ uint64_t rd_done : 1; /**< This W1C bit is set to 1 by HW when it completes command fifo read out, in response to RD_CMD */ uint64_t rd_val : 1; /**< This RO bit is set to 1 by HW when it reads the next 8 bytes from the Command fifo into the NDF_CMD csr. When SW reads the NDF_CMD csr, HW clears this bit to 0 */ uint64_t rd_cmd : 1; /**< When 1, HW reads out contents of the Command fifo 8 bytes at a time into the NDF_CMD csr */ uint64_t bt_dma : 1; /**< When set to 1, boot time dma is enabled */ uint64_t bt_dis : 1; /**< When boot operation is over, SW must set this to 1; causes boot state machines to sleep */ uint64_t ex_dis : 1; /**< When set to 1, suspends execution of commands at the next command in the fifo. */ uint64_t rst_ff : 1; /**< 1=reset command fifo to make it empty, 0=normal operation */ #else uint64_t rst_ff : 1; uint64_t ex_dis : 1; uint64_t bt_dis : 1; uint64_t bt_dma : 1; uint64_t rd_cmd : 1; uint64_t rd_val : 1; uint64_t rd_done : 1; uint64_t fr_byt : 11; uint64_t wait_cnt : 6; uint64_t nbr_hwm : 3; uint64_t reserved_27_63 : 37; #endif } s; struct cvmx_ndf_misc_s cn52xx; } cvmx_ndf_misc_t; /** * cvmx_ndf_st_reg * * Notes: * This CSR aggregates all state machines used in the nand flash controller for debug. * Like all NDF_... registers, 64-bit operations must be used to access this register */ typedef union { uint64_t u64; struct cvmx_ndf_st_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t exe_idle : 1; /**< Command Execution status 1=IDLE, 0=Busy 1 means execution of command sequence is complete and command fifo is empty */ uint64_t exe_sm : 4; /**< Command Execution State machine states */ uint64_t bt_sm : 4; /**< Boot load and Boot dma State machine states */ uint64_t rd_ff_bad : 1; /**< CMD fifo read back State machine in bad state */ uint64_t rd_ff : 2; /**< CMD fifo read back State machine states */ uint64_t main_bad : 1; /**< Main State machine in bad state */ uint64_t main_sm : 3; /**< Main State machine states */ #else uint64_t main_sm : 3; uint64_t main_bad : 1; uint64_t rd_ff : 2; uint64_t rd_ff_bad : 1; uint64_t bt_sm : 4; uint64_t exe_sm : 4; uint64_t exe_idle : 1; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_ndf_st_reg_s cn52xx; } cvmx_ndf_st_reg_t; /** * cvmx_npei_bar1_index# * * Total Address is 16KB; 0x0000 - 0x3fff, 0x000 - 0x7fe(Reg, every other 8B) * * General 5KB; 0x0000 - 0x13ff, 0x000 - 0x27e(Reg-General) * PktMem 10KB; 0x1400 - 0x3bff, 0x280 - 0x77e(Reg-General-Packet) * Rsvd 1KB; 0x3c00 - 0x3fff, 0x780 - 0x7fe(Reg-NCB Only Mode) * == NPEI_PKT_CNT_INT_ENB[PORT] * == NPEI_PKT_TIME_INT_ENB[PORT] * == NPEI_PKT_CNT_INT[PORT] * == NPEI_PKT_TIME_INT[PORT] * == NPEI_PKT_PCIE_PORT[PP] * == NPEI_PKT_SLIST_ROR[ROR] * == NPEI_PKT_SLIST_ROR[NSR] ?
* == NPEI_PKT_SLIST_ES[ES] * == NPEI_PKTn_SLIST_BAOFF_DBELL[AOFF] * == NPEI_PKTn_SLIST_BAOFF_DBELL[DBELL] * == NPEI_PKTn_CNTS[CNT] * NPEI_CTL_STATUS[OUTn_ENB] == NPEI_PKT_OUT_ENB[ENB] * NPEI_BASE_ADDRESS_OUTPUTn[BADDR] == NPEI_PKTn_SLIST_BADDR[ADDR] * NPEI_DESC_OUTPUTn[SIZE] == NPEI_PKTn_SLIST_FIFO_RSIZE[RSIZE] * NPEI_Pn_DBPAIR_ADDR[NADDR] == NPEI_PKTn_SLIST_BADDR[ADDR] + NPEI_PKTn_SLIST_BAOFF_DBELL[AOFF] * NPEI_PKT_CREDITSn[PTR_CNT] == NPEI_PKTn_SLIST_BAOFF_DBELL[DBELL] * NPEI_P0_PAIR_CNTS[AVAIL] == NPEI_PKTn_SLIST_BAOFF_DBELL[DBELL] * NPEI_P0_PAIR_CNTS[FCNT] == * NPEI_PKTS_SENTn[PKT_CNT] == NPEI_PKTn_CNTS[CNT] * NPEI_OUTPUT_CONTROL[Pn_BMODE] == NPEI_PKT_OUT_BMODE[BMODE] * NPEI_PKT_CREDITSn[PKT_CNT] == NPEI_PKTn_CNTS[CNT] * NPEI_BUFF_SIZE_OUTPUTn[BSIZE] == NPEI_PKT_SLIST_ID_SIZE[BSIZE] * NPEI_BUFF_SIZE_OUTPUTn[ISIZE] == NPEI_PKT_SLIST_ID_SIZE[ISIZE] * NPEI_OUTPUT_CONTROL[On_CSRM] == NPEI_PKT_DPADDR[DPTR] & NPEI_PKT_OUT_USE_IPTR[PORT] * NPEI_OUTPUT_CONTROL[On_ES] == NPEI_PKT_DATA_OUT_ES[ES] * NPEI_OUTPUT_CONTROL[On_NS] == NPEI_PKT_DATA_OUT_NS[NSR] ? * NPEI_OUTPUT_CONTROL[On_RO] == NPEI_PKT_DATA_OUT_ROR[ROR] * NPEI_PKTS_SENT_INT_LEVn[PKT_CNT] == NPEI_PKT_INT_LEVELS[CNT] * NPEI_PKTS_SENT_TIMEn[PKT_TIME] == NPEI_PKT_INT_LEVELS[TIME] * NPEI_OUTPUT_CONTROL[IPTR_On] == NPEI_PKT_IPTR[IPTR] * NPEI_PCIE_PORT_OUTPUT[] == NPEI_PKT_PCIE_PORT[PP] * * NPEI_BAR1_INDEXX = NPEI BAR1 IndexX Register * * Contains address index and control bits for access to memory ranges of BAR-1. Index is built from supplied address [25:22]. * NPEI_BAR1_INDEX0 through NPEI_BAR1_INDEX15 are used for transactions originating with PCIE-PORT0 and NPEI_BAR1_INDEX16 * through NPEI_BAR1_INDEX31 are used for transactions originating with PCIE-PORT1. */ typedef union { uint32_t u32; struct cvmx_npei_bar1_indexx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_18_31 : 14; uint32_t addr_idx : 14; /**< Address bits [35:22] sent to L2C */ uint32_t ca : 1; /**< Set '1' when access is not to be cached in L2. */ uint32_t end_swp : 2; /**< Endian Swap Mode */ uint32_t addr_v : 1; /**< Set '1' when the selected address range is valid. */ #else uint32_t addr_v : 1; uint32_t end_swp : 2; uint32_t ca : 1; uint32_t addr_idx : 14; uint32_t reserved_18_31 : 14; #endif } s; struct cvmx_npei_bar1_indexx_s cn52xx; struct cvmx_npei_bar1_indexx_s cn52xxp1; struct cvmx_npei_bar1_indexx_s cn56xx; struct cvmx_npei_bar1_indexx_s cn56xxp1; } cvmx_npei_bar1_indexx_t; /** * cvmx_npei_bist_status * * NPEI_BIST_STATUS = NPI's BIST Status Register * * Results from BIST runs of NPEI's memories.
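A hedged boot-time check (illustrative only; cvmx_read_csr() and cvmx_dprintf() are SDK helpers, and CVMX_NPEI_BIST_STATUS stands in for whatever NCB-direct address macro a given SDK version defines for this CSR):

  cvmx_npei_bist_status_t bist;
  bist.u64 = cvmx_read_csr(CVMX_NPEI_BIST_STATUS);
  if (bist.u64 != 0)
      cvmx_dprintf("NPEI BIST failure: 0x%016llx\n",
                   (unsigned long long)bist.u64);

A set bit flags the failing memory named in the field descriptions below.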
*/ typedef union { uint64_t u64; struct cvmx_npei_bist_status_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t pkt_rdf : 1; /**< BIST Status for PKT Read FIFO */ uint64_t reserved_60_62 : 3; uint64_t pcr_gim : 1; /**< BIST Status for PKT Gather Instr MEM */ uint64_t pkt_pif : 1; /**< BIST Status for PKT INB FIFO */ uint64_t pcsr_int : 1; /**< BIST Status for PKT pout_int_bstatus */ uint64_t pcsr_im : 1; /**< BIST Status for PKT pcsr_instr_mem_bstatus */ uint64_t pcsr_cnt : 1; /**< BIST Status for PKT pin_cnt_bstatus */ uint64_t pcsr_id : 1; /**< BIST Status for PKT pcsr_in_done_bstatus */ uint64_t pcsr_sl : 1; /**< BIST Status for PKT pcsr_slist_bstatus */ uint64_t reserved_50_52 : 3; uint64_t pkt_ind : 1; /**< BIST Status for PKT Instruction Done MEM */ uint64_t pkt_slm : 1; /**< BIST Status for PKT SList MEM */ uint64_t reserved_36_47 : 12; uint64_t d0_pst : 1; /**< BIST Status for DMA0 Pcie Store */ uint64_t d1_pst : 1; /**< BIST Status for DMA1 Pcie Store */ uint64_t d2_pst : 1; /**< BIST Status for DMA2 Pcie Store */ uint64_t d3_pst : 1; /**< BIST Status for DMA3 Pcie Store */ uint64_t reserved_31_31 : 1; uint64_t n2p0_c : 1; /**< BIST Status for N2P Port0 Cmd */ uint64_t n2p0_o : 1; /**< BIST Status for N2P Port0 Data */ uint64_t n2p1_c : 1; /**< BIST Status for N2P Port1 Cmd */ uint64_t n2p1_o : 1; /**< BIST Status for N2P Port1 Data */ uint64_t cpl_p0 : 1; /**< BIST Status for CPL Port 0 */ uint64_t cpl_p1 : 1; /**< BIST Status for CPL Port 1 */ uint64_t p2n1_po : 1; /**< BIST Status for P2N Port1 P Order */ uint64_t p2n1_no : 1; /**< BIST Status for P2N Port1 N Order */ uint64_t p2n1_co : 1; /**< BIST Status for P2N Port1 C Order */ uint64_t p2n0_po : 1; /**< BIST Status for P2N Port0 P Order */ uint64_t p2n0_no : 1; /**< BIST Status for P2N Port0 N Order */ uint64_t p2n0_co : 1; /**< BIST Status for P2N Port0 C Order */ uint64_t p2n0_c0 : 1; /**< BIST Status for P2N Port0 C0 */ uint64_t p2n0_c1 : 1; /**< BIST Status for P2N Port0 C1 */ uint64_t p2n0_n : 1; /**< BIST Status for P2N Port0 N */ uint64_t p2n0_p0 : 1; /**< BIST Status for P2N Port0 P0 */ uint64_t p2n0_p1 : 1; /**< BIST Status for P2N Port0 P1 */ uint64_t p2n1_c0 : 1; /**< BIST Status for P2N Port1 C0 */ uint64_t p2n1_c1 : 1; /**< BIST Status for P2N Port1 C1 */ uint64_t p2n1_n : 1; /**< BIST Status for P2N Port1 N */ uint64_t p2n1_p0 : 1; /**< BIST Status for P2N Port1 P0 */ uint64_t p2n1_p1 : 1; /**< BIST Status for P2N Port1 P1 */ uint64_t csm0 : 1; /**< BIST Status for CSM0 */ uint64_t csm1 : 1; /**< BIST Status for CSM1 */ uint64_t dif0 : 1; /**< BIST Status for DMA Instr0 */ uint64_t dif1 : 1; /**< BIST Status for DMA Instr1 */ uint64_t dif2 : 1; /**< BIST Status for DMA Instr2 */ uint64_t dif3 : 1; /**< BIST Status for DMA Instr3 */ uint64_t reserved_2_2 : 1; uint64_t msi : 1; /**< BIST Status for MSI Memory Map */ uint64_t ncb_cmd : 1; /**< BIST Status for NCB Outbound Commands */ #else uint64_t ncb_cmd : 1; uint64_t msi : 1; uint64_t reserved_2_2 : 1; uint64_t dif3 : 1; uint64_t dif2 : 1; uint64_t dif1 : 1; uint64_t dif0 : 1; uint64_t csm1 : 1; uint64_t csm0 : 1; uint64_t p2n1_p1 : 1; uint64_t p2n1_p0 : 1; uint64_t p2n1_n : 1; uint64_t p2n1_c1 : 1; uint64_t p2n1_c0 : 1; uint64_t p2n0_p1 : 1; uint64_t p2n0_p0 : 1; uint64_t p2n0_n : 1; uint64_t p2n0_c1 : 1; uint64_t p2n0_c0 : 1; uint64_t p2n0_co : 1; uint64_t p2n0_no : 1; uint64_t p2n0_po : 1; uint64_t p2n1_co : 1; uint64_t p2n1_no : 1; uint64_t p2n1_po : 1; uint64_t cpl_p1 : 1; uint64_t cpl_p0 : 1; uint64_t n2p1_o : 1; uint64_t n2p1_c : 1; uint64_t n2p0_o : 1;
uint64_t n2p0_c : 1; uint64_t reserved_31_31 : 1; uint64_t d3_pst : 1; uint64_t d2_pst : 1; uint64_t d1_pst : 1; uint64_t d0_pst : 1; uint64_t reserved_36_47 : 12; uint64_t pkt_slm : 1; uint64_t pkt_ind : 1; uint64_t reserved_50_52 : 3; uint64_t pcsr_sl : 1; uint64_t pcsr_id : 1; uint64_t pcsr_cnt : 1; uint64_t pcsr_im : 1; uint64_t pcsr_int : 1; uint64_t pkt_pif : 1; uint64_t pcr_gim : 1; uint64_t reserved_60_62 : 3; uint64_t pkt_rdf : 1; #endif } s; struct cvmx_npei_bist_status_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t pkt_rdf : 1; /**< BIST Status for PKT Read FIFO */ uint64_t reserved_60_62 : 3; uint64_t pcr_gim : 1; /**< BIST Status for PKT Gather Instr MEM */ uint64_t pkt_pif : 1; /**< BIST Status for PKT INB FIFO */ uint64_t pcsr_int : 1; /**< BIST Status for PKT OUTB Interrupt MEM */ uint64_t pcsr_im : 1; /**< BIST Status for PKT CSR Instr MEM */ uint64_t pcsr_cnt : 1; /**< BIST Status for PKT INB Count MEM */ uint64_t pcsr_id : 1; /**< BIST Status for PKT INB Instr Done MEM */ uint64_t pcsr_sl : 1; /**< BIST Status for PKT OUTB SLIST MEM */ uint64_t pkt_imem : 1; /**< BIST Status for PKT OUTB IFIFO */ uint64_t pkt_pfm : 1; /**< BIST Status for PKT Front MEM */ uint64_t pkt_pof : 1; /**< BIST Status for PKT OUTB FIFO */ uint64_t reserved_48_49 : 2; uint64_t pkt_pop0 : 1; /**< BIST Status for PKT OUTB Slist0 */ uint64_t pkt_pop1 : 1; /**< BIST Status for PKT OUTB Slist1 */ uint64_t d0_mem : 1; /**< BIST Status for DMA MEM 0 */ uint64_t d1_mem : 1; /**< BIST Status for DMA MEM 1 */ uint64_t d2_mem : 1; /**< BIST Status for DMA MEM 2 */ uint64_t d3_mem : 1; /**< BIST Status for DMA MEM 3 */ uint64_t d4_mem : 1; /**< BIST Status for DMA MEM 4 */ uint64_t ds_mem : 1; /**< BIST Status for DMA Memory */ uint64_t reserved_36_39 : 4; uint64_t d0_pst : 1; /**< BIST Status for DMA0 Pcie Store */ uint64_t d1_pst : 1; /**< BIST Status for DMA1 Pcie Store */ uint64_t d2_pst : 1; /**< BIST Status for DMA2 Pcie Store */ uint64_t d3_pst : 1; /**< BIST Status for DMA3 Pcie Store */ uint64_t d4_pst : 1; /**< BIST Status for DMA4 Pcie Store */ uint64_t n2p0_c : 1; /**< BIST Status for N2P Port0 Cmd */ uint64_t n2p0_o : 1; /**< BIST Status for N2P Port0 Data */ uint64_t n2p1_c : 1; /**< BIST Status for N2P Port1 Cmd */ uint64_t n2p1_o : 1; /**< BIST Status for N2P Port1 Data */ uint64_t cpl_p0 : 1; /**< BIST Status for CPL Port 0 */ uint64_t cpl_p1 : 1; /**< BIST Status for CPL Port 1 */ uint64_t p2n1_po : 1; /**< BIST Status for P2N Port1 P Order */ uint64_t p2n1_no : 1; /**< BIST Status for P2N Port1 N Order */ uint64_t p2n1_co : 1; /**< BIST Status for P2N Port1 C Order */ uint64_t p2n0_po : 1; /**< BIST Status for P2N Port0 P Order */ uint64_t p2n0_no : 1; /**< BIST Status for P2N Port0 N Order */ uint64_t p2n0_co : 1; /**< BIST Status for P2N Port0 C Order */ uint64_t p2n0_c0 : 1; /**< BIST Status for P2N Port0 C0 */ uint64_t p2n0_c1 : 1; /**< BIST Status for P2N Port0 C1 */ uint64_t p2n0_n : 1; /**< BIST Status for P2N Port0 N */ uint64_t p2n0_p0 : 1; /**< BIST Status for P2N Port0 P0 */ uint64_t p2n0_p1 : 1; /**< BIST Status for P2N Port0 P1 */ uint64_t p2n1_c0 : 1; /**< BIST Status for P2N Port1 C0 */ uint64_t p2n1_c1 : 1; /**< BIST Status for P2N Port1 C1 */ uint64_t p2n1_n : 1; /**< BIST Status for P2N Port1 N */ uint64_t p2n1_p0 : 1; /**< BIST Status for P2N Port1 P0 */ uint64_t p2n1_p1 : 1; /**< BIST Status for P2N Port1 P1 */ uint64_t csm0 : 1; /**< BIST Status for CSM0 */ uint64_t csm1 : 1; /**< BIST Status for CSM1 */ uint64_t dif0 : 1; /**< BIST Status for DMA Instr0 */ 
uint64_t dif1 : 1; /**< BIST Status for DMA Instr1 */ uint64_t dif2 : 1; /**< BIST Status for DMA Instr2 */ uint64_t dif3 : 1; /**< BIST Status for DMA Instr3 */ uint64_t dif4 : 1; /**< BIST Status for DMA Instr4 */ uint64_t msi : 1; /**< BIST Status for MSI Memory Map */ uint64_t ncb_cmd : 1; /**< BIST Status for NCB Outbound Commands */ #else uint64_t ncb_cmd : 1; uint64_t msi : 1; uint64_t dif4 : 1; uint64_t dif3 : 1; uint64_t dif2 : 1; uint64_t dif1 : 1; uint64_t dif0 : 1; uint64_t csm1 : 1; uint64_t csm0 : 1; uint64_t p2n1_p1 : 1; uint64_t p2n1_p0 : 1; uint64_t p2n1_n : 1; uint64_t p2n1_c1 : 1; uint64_t p2n1_c0 : 1; uint64_t p2n0_p1 : 1; uint64_t p2n0_p0 : 1; uint64_t p2n0_n : 1; uint64_t p2n0_c1 : 1; uint64_t p2n0_c0 : 1; uint64_t p2n0_co : 1; uint64_t p2n0_no : 1; uint64_t p2n0_po : 1; uint64_t p2n1_co : 1; uint64_t p2n1_no : 1; uint64_t p2n1_po : 1; uint64_t cpl_p1 : 1; uint64_t cpl_p0 : 1; uint64_t n2p1_o : 1; uint64_t n2p1_c : 1; uint64_t n2p0_o : 1; uint64_t n2p0_c : 1; uint64_t d4_pst : 1; uint64_t d3_pst : 1; uint64_t d2_pst : 1; uint64_t d1_pst : 1; uint64_t d0_pst : 1; uint64_t reserved_36_39 : 4; uint64_t ds_mem : 1; uint64_t d4_mem : 1; uint64_t d3_mem : 1; uint64_t d2_mem : 1; uint64_t d1_mem : 1; uint64_t d0_mem : 1; uint64_t pkt_pop1 : 1; uint64_t pkt_pop0 : 1; uint64_t reserved_48_49 : 2; uint64_t pkt_pof : 1; uint64_t pkt_pfm : 1; uint64_t pkt_imem : 1; uint64_t pcsr_sl : 1; uint64_t pcsr_id : 1; uint64_t pcsr_cnt : 1; uint64_t pcsr_im : 1; uint64_t pcsr_int : 1; uint64_t pkt_pif : 1; uint64_t pcr_gim : 1; uint64_t reserved_60_62 : 3; uint64_t pkt_rdf : 1; #endif } cn52xx; struct cvmx_npei_bist_status_cn52xxp1 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_46_63 : 18; uint64_t d0_mem0 : 1; /**< BIST Status for DMA0 Memory */ uint64_t d1_mem1 : 1; /**< BIST Status for DMA1 Memory */ uint64_t d2_mem2 : 1; /**< BIST Status for DMA2 Memory */ uint64_t d3_mem3 : 1; /**< BIST Status for DMA3 Memory */ uint64_t dr0_mem : 1; /**< BIST Status for DMA0 Store */ uint64_t d0_mem : 1; /**< BIST Status for DMA0 Memory */ uint64_t d1_mem : 1; /**< BIST Status for DMA1 Memory */ uint64_t d2_mem : 1; /**< BIST Status for DMA2 Memory */ uint64_t d3_mem : 1; /**< BIST Status for DMA3 Memory */ uint64_t dr1_mem : 1; /**< BIST Status for DMA1 Store */ uint64_t d0_pst : 1; /**< BIST Status for DMA0 Pcie Store */ uint64_t d1_pst : 1; /**< BIST Status for DMA1 Pcie Store */ uint64_t d2_pst : 1; /**< BIST Status for DMA2 Pcie Store */ uint64_t d3_pst : 1; /**< BIST Status for DMA3 Pcie Store */ uint64_t dr2_mem : 1; /**< BIST Status for DMA2 Store */ uint64_t n2p0_c : 1; /**< BIST Status for N2P Port0 Cmd */ uint64_t n2p0_o : 1; /**< BIST Status for N2P Port0 Data */ uint64_t n2p1_c : 1; /**< BIST Status for N2P Port1 Cmd */ uint64_t n2p1_o : 1; /**< BIST Status for N2P Port1 Data */ uint64_t cpl_p0 : 1; /**< BIST Status for CPL Port 0 */ uint64_t cpl_p1 : 1; /**< BIST Status for CPL Port 1 */ uint64_t p2n1_po : 1; /**< BIST Status for P2N Port1 P Order */ uint64_t p2n1_no : 1; /**< BIST Status for P2N Port1 N Order */ uint64_t p2n1_co : 1; /**< BIST Status for P2N Port1 C Order */ uint64_t p2n0_po : 1; /**< BIST Status for P2N Port0 P Order */ uint64_t p2n0_no : 1; /**< BIST Status for P2N Port0 N Order */ uint64_t p2n0_co : 1; /**< BIST Status for P2N Port0 C Order */ uint64_t p2n0_c0 : 1; /**< BIST Status for P2N Port0 C0 */ uint64_t p2n0_c1 : 1; /**< BIST Status for P2N Port0 C1 */ uint64_t p2n0_n : 1; /**< BIST Status for P2N Port0 N */ uint64_t p2n0_p0 : 1; /**< BIST Status for
P2N Port0 P0 */ uint64_t p2n0_p1 : 1; /**< BIST Status for P2N Port0 P1 */ uint64_t p2n1_c0 : 1; /**< BIST Status for P2N Port1 C0 */ uint64_t p2n1_c1 : 1; /**< BIST Status for P2N Port1 C1 */ uint64_t p2n1_n : 1; /**< BIST Status for P2N Port1 N */ uint64_t p2n1_p0 : 1; /**< BIST Status for P2N Port1 P0 */ uint64_t p2n1_p1 : 1; /**< BIST Status for P2N Port1 P1 */ uint64_t csm0 : 1; /**< BIST Status for CSM0 */ uint64_t csm1 : 1; /**< BIST Status for CSM1 */ uint64_t dif0 : 1; /**< BIST Status for DMA Instr0 */ uint64_t dif1 : 1; /**< BIST Status for DMA Instr1 */ uint64_t dif2 : 1; /**< BIST Status for DMA Instr2 */ uint64_t dif3 : 1; /**< BIST Status for DMA Instr3 */ uint64_t dr3_mem : 1; /**< BIST Status for DMA3 Store */ uint64_t msi : 1; /**< BIST Status for MSI Memory Map */ uint64_t ncb_cmd : 1; /**< BIST Status for NCB Outbound Commands */ #else uint64_t ncb_cmd : 1; uint64_t msi : 1; uint64_t dr3_mem : 1; uint64_t dif3 : 1; uint64_t dif2 : 1; uint64_t dif1 : 1; uint64_t dif0 : 1; uint64_t csm1 : 1; uint64_t csm0 : 1; uint64_t p2n1_p1 : 1; uint64_t p2n1_p0 : 1; uint64_t p2n1_n : 1; uint64_t p2n1_c1 : 1; uint64_t p2n1_c0 : 1; uint64_t p2n0_p1 : 1; uint64_t p2n0_p0 : 1; uint64_t p2n0_n : 1; uint64_t p2n0_c1 : 1; uint64_t p2n0_c0 : 1; uint64_t p2n0_co : 1; uint64_t p2n0_no : 1; uint64_t p2n0_po : 1; uint64_t p2n1_co : 1; uint64_t p2n1_no : 1; uint64_t p2n1_po : 1; uint64_t cpl_p1 : 1; uint64_t cpl_p0 : 1; uint64_t n2p1_o : 1; uint64_t n2p1_c : 1; uint64_t n2p0_o : 1; uint64_t n2p0_c : 1; uint64_t dr2_mem : 1; uint64_t d3_pst : 1; uint64_t d2_pst : 1; uint64_t d1_pst : 1; uint64_t d0_pst : 1; uint64_t dr1_mem : 1; uint64_t d3_mem : 1; uint64_t d2_mem : 1; uint64_t d1_mem : 1; uint64_t d0_mem : 1; uint64_t dr0_mem : 1; uint64_t d3_mem3 : 1; uint64_t d2_mem2 : 1; uint64_t d1_mem1 : 1; uint64_t d0_mem0 : 1; uint64_t reserved_46_63 : 18; #endif } cn52xxp1; struct cvmx_npei_bist_status_cn52xx cn56xx; struct cvmx_npei_bist_status_cn56xxp1 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_58_63 : 6; uint64_t pcsr_int : 1; /**< BIST Status for PKT pout_int_bstatus */ uint64_t pcsr_im : 1; /**< BIST Status for PKT pcsr_instr_mem_bstatus */ uint64_t pcsr_cnt : 1; /**< BIST Status for PKT pin_cnt_bstatus */ uint64_t pcsr_id : 1; /**< BIST Status for PKT pcsr_in_done_bstatus */ uint64_t pcsr_sl : 1; /**< BIST Status for PKT pcsr_slist_bstatus */ uint64_t pkt_pout : 1; /**< BIST Status for PKT OUT Count MEM */ uint64_t pkt_imem : 1; /**< BIST Status for PKT Instruction MEM */ uint64_t pkt_cntm : 1; /**< BIST Status for PKT Count MEM */ uint64_t pkt_ind : 1; /**< BIST Status for PKT Instruction Done MEM */ uint64_t pkt_slm : 1; /**< BIST Status for PKT SList MEM */ uint64_t pkt_odf : 1; /**< BIST Status for PKT Output Data FIFO */ uint64_t pkt_oif : 1; /**< BIST Status for PKT Output INFO FIFO */ uint64_t pkt_out : 1; /**< BIST Status for PKT Output FIFO */ uint64_t pkt_i0 : 1; /**< BIST Status for PKT Instr0 */ uint64_t pkt_i1 : 1; /**< BIST Status for PKT Instr1 */ uint64_t pkt_s0 : 1; /**< BIST Status for PKT Slist0 */ uint64_t pkt_s1 : 1; /**< BIST Status for PKT Slist1 */ uint64_t d0_mem : 1; /**< BIST Status for DMA0 Memory */ uint64_t d1_mem : 1; /**< BIST Status for DMA1 Memory */ uint64_t d2_mem : 1; /**< BIST Status for DMA2 Memory */ uint64_t d3_mem : 1; /**< BIST Status for DMA3 Memory */ uint64_t d4_mem : 1; /**< BIST Status for DMA4 Memory */ uint64_t d0_pst : 1; /**< BIST Status for DMA0 Pcie Store */ uint64_t d1_pst : 1; /**< BIST Status for DMA1 Pcie Store */ uint64_t d2_pst :
1; /**< BIST Status for DMA2 Pcie Store */ uint64_t d3_pst : 1; /**< BIST Status for DMA3 Pcie Store */ uint64_t d4_pst : 1; /**< BIST Status for DMA4 Pcie Store */ uint64_t n2p0_c : 1; /**< BIST Status for N2P Port0 Cmd */ uint64_t n2p0_o : 1; /**< BIST Status for N2P Port0 Data */ uint64_t n2p1_c : 1; /**< BIST Status for N2P Port1 Cmd */ uint64_t n2p1_o : 1; /**< BIST Status for N2P Port1 Data */ uint64_t cpl_p0 : 1; /**< BIST Status for CPL Port 0 */ uint64_t cpl_p1 : 1; /**< BIST Status for CPL Port 1 */ uint64_t p2n1_po : 1; /**< BIST Status for P2N Port1 P Order */ uint64_t p2n1_no : 1; /**< BIST Status for P2N Port1 N Order */ uint64_t p2n1_co : 1; /**< BIST Status for P2N Port1 C Order */ uint64_t p2n0_po : 1; /**< BIST Status for P2N Port0 P Order */ uint64_t p2n0_no : 1; /**< BIST Status for P2N Port0 N Order */ uint64_t p2n0_co : 1; /**< BIST Status for P2N Port0 C Order */ uint64_t p2n0_c0 : 1; /**< BIST Status for P2N Port0 C0 */ uint64_t p2n0_c1 : 1; /**< BIST Status for P2N Port0 C1 */ uint64_t p2n0_n : 1; /**< BIST Status for P2N Port0 N */ uint64_t p2n0_p0 : 1; /**< BIST Status for P2N Port0 P0 */ uint64_t p2n0_p1 : 1; /**< BIST Status for P2N Port0 P1 */ uint64_t p2n1_c0 : 1; /**< BIST Status for P2N Port1 C0 */ uint64_t p2n1_c1 : 1; /**< BIST Status for P2N Port1 C1 */ uint64_t p2n1_n : 1; /**< BIST Status for P2N Port1 N */ uint64_t p2n1_p0 : 1; /**< BIST Status for P2N Port1 P0 */ uint64_t p2n1_p1 : 1; /**< BIST Status for P2N Port1 P1 */ uint64_t csm0 : 1; /**< BIST Status for CSM0 */ uint64_t csm1 : 1; /**< BIST Status for CSM1 */ uint64_t dif0 : 1; /**< BIST Status for DMA Instr0 */ uint64_t dif1 : 1; /**< BIST Status for DMA Instr1 */ uint64_t dif2 : 1; /**< BIST Status for DMA Instr2 */ uint64_t dif3 : 1; /**< BIST Status for DMA Instr3 */ uint64_t dif4 : 1; /**< BIST Status for DMA Instr4 */ uint64_t msi : 1; /**< BIST Status for MSI Memory Map */ uint64_t ncb_cmd : 1; /**< BIST Status for NCB Outbound Commands */ #else uint64_t ncb_cmd : 1; uint64_t msi : 1; uint64_t dif4 : 1; uint64_t dif3 : 1; uint64_t dif2 : 1; uint64_t dif1 : 1; uint64_t dif0 : 1; uint64_t csm1 : 1; uint64_t csm0 : 1; uint64_t p2n1_p1 : 1; uint64_t p2n1_p0 : 1; uint64_t p2n1_n : 1; uint64_t p2n1_c1 : 1; uint64_t p2n1_c0 : 1; uint64_t p2n0_p1 : 1; uint64_t p2n0_p0 : 1; uint64_t p2n0_n : 1; uint64_t p2n0_c1 : 1; uint64_t p2n0_c0 : 1; uint64_t p2n0_co : 1; uint64_t p2n0_no : 1; uint64_t p2n0_po : 1; uint64_t p2n1_co : 1; uint64_t p2n1_no : 1; uint64_t p2n1_po : 1; uint64_t cpl_p1 : 1; uint64_t cpl_p0 : 1; uint64_t n2p1_o : 1; uint64_t n2p1_c : 1; uint64_t n2p0_o : 1; uint64_t n2p0_c : 1; uint64_t d4_pst : 1; uint64_t d3_pst : 1; uint64_t d2_pst : 1; uint64_t d1_pst : 1; uint64_t d0_pst : 1; uint64_t d4_mem : 1; uint64_t d3_mem : 1; uint64_t d2_mem : 1; uint64_t d1_mem : 1; uint64_t d0_mem : 1; uint64_t pkt_s1 : 1; uint64_t pkt_s0 : 1; uint64_t pkt_i1 : 1; uint64_t pkt_i0 : 1; uint64_t pkt_out : 1; uint64_t pkt_oif : 1; uint64_t pkt_odf : 1; uint64_t pkt_slm : 1; uint64_t pkt_ind : 1; uint64_t pkt_cntm : 1; uint64_t pkt_imem : 1; uint64_t pkt_pout : 1; uint64_t pcsr_sl : 1; uint64_t pcsr_id : 1; uint64_t pcsr_cnt : 1; uint64_t pcsr_im : 1; uint64_t pcsr_int : 1; uint64_t reserved_58_63 : 6; #endif } cn56xxp1; } cvmx_npei_bist_status_t; /** * cvmx_npei_bist_status2 * * NPEI_BIST_STATUS2 = NPI's BIST Status Register2 * * Results from BIST runs of NPEI's memories.
/**
 * cvmx_npei_bist_status2
 *
 * NPEI_BIST_STATUS2 = NPI's BIST Status Register2
 *
 * Results from BIST runs of NPEI's memories.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_npei_bist_status2_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_14_63 : 50;
        uint64_t prd_tag : 1; /**< BIST Status for DMA PCIE RD Tag MEM */
        uint64_t prd_st0 : 1; /**< BIST Status for DMA PCIE RD state MEM 0 */
        uint64_t prd_st1 : 1; /**< BIST Status for DMA PCIE RD state MEM 1 */
        uint64_t prd_err : 1; /**< BIST Status for DMA PCIE RD ERR state MEM */
        uint64_t nrd_st : 1; /**< BIST Status for DMA L2C RD state MEM */
        uint64_t nwe_st : 1; /**< BIST Status for DMA L2C WR state MEM */
        uint64_t nwe_wr0 : 1; /**< BIST Status for DMA L2C WR MEM 0 */
        uint64_t nwe_wr1 : 1; /**< BIST Status for DMA L2C WR MEM 1 */
        uint64_t pkt_rd : 1; /**< BIST Status for Inbound PKT MEM */
        uint64_t psc_p0 : 1; /**< BIST Status for PSC TLP 0 MEM */
        uint64_t psc_p1 : 1; /**< BIST Status for PSC TLP 1 MEM */
        uint64_t pkt_gd : 1; /**< BIST Status for PKT OUTB Gather Data FIFO */
        uint64_t pkt_gl : 1; /**< BIST Status for PKT_OUTB Gather List FIFO */
        uint64_t pkt_blk : 1; /**< BIST Status for PKT OUTB Blocked FIFO */
#else
        uint64_t pkt_blk : 1;
        uint64_t pkt_gl : 1;
        uint64_t pkt_gd : 1;
        uint64_t psc_p1 : 1;
        uint64_t psc_p0 : 1;
        uint64_t pkt_rd : 1;
        uint64_t nwe_wr1 : 1;
        uint64_t nwe_wr0 : 1;
        uint64_t nwe_st : 1;
        uint64_t nrd_st : 1;
        uint64_t prd_err : 1;
        uint64_t prd_st1 : 1;
        uint64_t prd_st0 : 1;
        uint64_t prd_tag : 1;
        uint64_t reserved_14_63 : 50;
#endif
    } s;
    struct cvmx_npei_bist_status2_s cn52xx;
    struct cvmx_npei_bist_status2_s cn56xx;
} cvmx_npei_bist_status2_t;

/**
 * cvmx_npei_ctl_port0
 *
 * NPEI_CTL_PORT0 = NPEI's Control Port 0
 *
 * Contains control for access for Port0
 */
typedef union
{
    uint64_t u64;
    struct cvmx_npei_ctl_port0_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_21_63 : 43;
        uint64_t waitl_com : 1; /**< When set '1' causes the NPI to wait for a commit from the L2C before sending additional completions to the L2C from the PCIe. Set this for more conservative behavior. Clear this for more aggressive, higher-performance behavior. */
        uint64_t intd : 1; /**< When '0' Intd wire asserted. Before mapping. */
        uint64_t intc : 1; /**< When '0' Intc wire asserted. Before mapping. */
        uint64_t intb : 1; /**< When '0' Intb wire asserted. Before mapping. */
        uint64_t inta : 1; /**< When '0' Inta wire asserted. Before mapping. */
        uint64_t intd_map : 2; /**< Maps INTD to INTA(00), INTB(01), INTC(10) or INTD(11). */
        uint64_t intc_map : 2; /**< Maps INTC to INTA(00), INTB(01), INTC(10) or INTD(11). */
        uint64_t intb_map : 2; /**< Maps INTB to INTA(00), INTB(01), INTC(10) or INTD(11). */
        uint64_t inta_map : 2; /**< Maps INTA to INTA(00), INTB(01), INTC(10) or INTD(11). */
        uint64_t ctlp_ro : 1; /**< Relaxed ordering enable for Completion TLPS. */
        uint64_t reserved_6_6 : 1;
        uint64_t ptlp_ro : 1; /**< Relaxed ordering enable for Posted TLPS. */
        uint64_t bar2_enb : 1; /**< When set '1' BAR2 is enabled and will respond; when clear '0' BAR2 access will cause UR responses. */
        uint64_t bar2_esx : 2; /**< Value will be XORed with pci-address[37:36] to determine the endian swap mode. */
        uint64_t bar2_cax : 1; /**< Value will be XORed with pcie-address[38] to determine the L2 cache attribute. Not cached in L2 if XOR result is 1 */
        uint64_t wait_com : 1; /**< When set '1' causes the NPI to wait for a commit from the L2C before sending additional stores to the L2C from the PCIe. Most applications will not notice a difference, so should not set this bit. Setting the bit is more conservative on ordering, lower performance. */
#else
        uint64_t wait_com : 1;
        uint64_t bar2_cax : 1;
        uint64_t bar2_esx : 2;
        uint64_t bar2_enb : 1;
        uint64_t ptlp_ro : 1;
        uint64_t reserved_6_6 : 1;
        uint64_t ctlp_ro : 1;
        uint64_t inta_map : 2;
        uint64_t intb_map : 2;
        uint64_t intc_map : 2;
        uint64_t intd_map : 2;
        uint64_t inta : 1;
        uint64_t intb : 1;
        uint64_t intc : 1;
        uint64_t intd : 1;
        uint64_t waitl_com : 1;
        uint64_t reserved_21_63 : 43;
#endif
    } s;
    struct cvmx_npei_ctl_port0_s cn52xx;
    struct cvmx_npei_ctl_port0_s cn52xxp1;
    struct cvmx_npei_ctl_port0_s cn56xx;
    struct cvmx_npei_ctl_port0_s cn56xxp1;
} cvmx_npei_ctl_port0_t;

/**
 * cvmx_npei_ctl_port1
 *
 * NPEI_CTL_PORT1 = NPEI's Control Port1
 *
 * Contains control for access for Port1
 */
typedef union
{
    uint64_t u64;
    struct cvmx_npei_ctl_port1_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_21_63 : 43;
        uint64_t waitl_com : 1; /**< When set '1' causes the NPI to wait for a commit from the L2C before sending additional completions to the L2C from the PCIe. Set this for more conservative behavior. Clear this for more aggressive, higher-performance behavior. */
        uint64_t intd : 1; /**< When '0' Intd wire asserted. Before mapping. */
        uint64_t intc : 1; /**< When '0' Intc wire asserted. Before mapping. */
        uint64_t intb : 1; /**< When '0' Intb wire asserted. Before mapping. */
        uint64_t inta : 1; /**< When '0' Inta wire asserted. Before mapping. */
        uint64_t intd_map : 2; /**< Maps INTD to INTA(00), INTB(01), INTC(10) or INTD(11). */
        uint64_t intc_map : 2; /**< Maps INTC to INTA(00), INTB(01), INTC(10) or INTD(11). */
        uint64_t intb_map : 2; /**< Maps INTB to INTA(00), INTB(01), INTC(10) or INTD(11). */
        uint64_t inta_map : 2; /**< Maps INTA to INTA(00), INTB(01), INTC(10) or INTD(11). */
        uint64_t ctlp_ro : 1; /**< Relaxed ordering enable for Completion TLPS. */
        uint64_t reserved_6_6 : 1;
        uint64_t ptlp_ro : 1; /**< Relaxed ordering enable for Posted TLPS. */
        uint64_t bar2_enb : 1; /**< When set '1' BAR2 is enabled and will respond; when clear '0' BAR2 access will cause UR responses. */
        uint64_t bar2_esx : 2; /**< Value will be XORed with pci-address[37:36] to determine the endian swap mode. */
        uint64_t bar2_cax : 1; /**< Value will be XORed with pcie-address[38] to determine the L2 cache attribute. Not cached in L2 if XOR result is 1 */
        uint64_t wait_com : 1; /**< When set '1' causes the NPI to wait for a commit from the L2C before sending additional stores to the L2C from the PCIe. Most applications will not notice a difference, so should not set this bit. Setting the bit is more conservative on ordering, lower performance. */
#else
        uint64_t wait_com : 1;
        uint64_t bar2_cax : 1;
        uint64_t bar2_esx : 2;
        uint64_t bar2_enb : 1;
        uint64_t ptlp_ro : 1;
        uint64_t reserved_6_6 : 1;
        uint64_t ctlp_ro : 1;
        uint64_t inta_map : 2;
        uint64_t intb_map : 2;
        uint64_t intc_map : 2;
        uint64_t intd_map : 2;
        uint64_t inta : 1;
        uint64_t intb : 1;
        uint64_t intc : 1;
        uint64_t intd : 1;
        uint64_t waitl_com : 1;
        uint64_t reserved_21_63 : 43;
#endif
    } s;
    struct cvmx_npei_ctl_port1_s cn52xx;
    struct cvmx_npei_ctl_port1_s cn52xxp1;
    struct cvmx_npei_ctl_port1_s cn56xx;
    struct cvmx_npei_ctl_port1_s cn56xxp1;
} cvmx_npei_ctl_port1_t;
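/*
 * Editor's note: a sketch of remapping the INTA..INTD wires on port 0 through
 * the union above; a read-modify-write keeps the other fields intact. It
 * assumes the cvmx_read_csr()/cvmx_write_csr() helpers and a
 * CVMX_NPEI_CTL_PORT0 address macro from the rest of the SDK:
 *
 *   cvmx_npei_ctl_port0_t ctl;
 *   ctl.u64 = cvmx_read_csr(CVMX_NPEI_CTL_PORT0);
 *   ctl.s.intb_map = 0;   // route INTB onto the INTA wire (encoding 00)
 *   cvmx_write_csr(CVMX_NPEI_CTL_PORT0, ctl.u64);
 */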
/**
 * cvmx_npei_ctl_status
 *
 * NPEI_CTL_STATUS = NPEI Control Status Register
 *
 * Contains control and status for NPEI. Writes to this register are not ordered with writes/reads to the
 * PCIe Memory space. To ensure that a write has completed, the user must read the register before making
 * an access (i.e. PCIe memory space) that requires the value of this register to be updated.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_npei_ctl_status_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_44_63 : 20;
        uint64_t p1_ntags : 6; /**< Number of tags available for PCIe Port1. In RC mode 1 tag is needed for each outbound TLP that requires a CPL TLP. In Endpoint mode the number of tags required for a TLP request is 1 per 64-bytes of CPL data + 1. This field should only be written as part of the reset sequence, before issuing any reads, CFGs, or IO transactions from the core(s). */
        uint64_t p0_ntags : 6; /**< Number of tags available for PCIe Port0. In RC mode 1 tag is needed for each outbound TLP that requires a CPL TLP. In Endpoint mode the number of tags required for a TLP request is 1 per 64-bytes of CPL data + 1. This field should only be written as part of the reset sequence, before issuing any reads, CFGs, or IO transactions from the core(s). */
        uint64_t cfg_rtry : 16; /**< The time x 0x10000 in core clocks to wait for a CPL to a CFG RD that does not carry a Retry Status. Until such time that the timeout occurs and Retry Status is received for a CFG RD, the CFG Read will be resent. A value of 0 disables retries and treats a CPL Retry as a CPL UR. */
        uint64_t ring_en : 1; /**< When '0' forces "relative Q position" received from PKO to be zero, and replicates the back-pressure indication for the first ring attached to a PKO port across all the rings attached to a PKO port. When '0', only rings 0-3 can be used. */
        uint64_t lnk_rst : 1; /**< Set when PCIe Core 0 requests a link reset due to link down state. This bit is only reset on raw reset so it can be read for state to determine if a reset occurred. Bit is cleared when a '1' is written to this field. */
        uint64_t arb : 1; /**< PCIe switch arbitration mode. '0' == fixed priority NPEI, PCIe0, then PCIe1. '1' == round robin. */
        uint64_t pkt_bp : 4; /**< Unused */
        uint64_t host_mode : 1; /**< Host mode */
        uint64_t chip_rev : 8; /**< The chip revision. */
#else
        uint64_t chip_rev : 8;
        uint64_t host_mode : 1;
        uint64_t pkt_bp : 4;
        uint64_t arb : 1;
        uint64_t lnk_rst : 1;
        uint64_t ring_en : 1;
        uint64_t cfg_rtry : 16;
        uint64_t p0_ntags : 6;
        uint64_t p1_ntags : 6;
        uint64_t reserved_44_63 : 20;
#endif
    } s;
    struct cvmx_npei_ctl_status_s cn52xx;
    struct cvmx_npei_ctl_status_cn52xxp1
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_44_63 : 20;
        uint64_t p1_ntags : 6; /**< Number of tags available for PCIe Port1. In RC mode 1 tag is needed for each outbound TLP that requires a CPL TLP. In Endpoint mode the number of tags required for a TLP request is 1 per 64-bytes of CPL data + 1. This field should only be written as part of the reset sequence, before issuing any reads, CFGs, or IO transactions from the core(s). */
        uint64_t p0_ntags : 6; /**< Number of tags available for PCIe Port0. In RC mode 1 tag is needed for each outbound TLP that requires a CPL TLP. In Endpoint mode the number of tags required for a TLP request is 1 per 64-bytes of CPL data + 1. This field should only be written as part of the reset sequence, before issuing any reads, CFGs, or IO transactions from the core(s). */
        uint64_t cfg_rtry : 16; /**< The time x 0x10000 in core clocks to wait for a CPL to a CFG RD that does not carry a Retry Status. Until such time that the timeout occurs and Retry Status is received for a CFG RD, the CFG Read will be resent. A value of 0 disables retries and treats a CPL Retry as a CPL UR. */
        uint64_t reserved_15_15 : 1;
        uint64_t lnk_rst : 1; /**< Set when PCIe Core 0 requests a link reset due to link down state. This bit is only reset on raw reset so it can be read for state to determine if a reset occurred. Bit is cleared when a '1' is written to this field. */
        uint64_t arb : 1; /**< PCIe switch arbitration mode. '0' == fixed priority NPEI, PCIe0, then PCIe1. '1' == round robin. */
        uint64_t reserved_9_12 : 4;
        uint64_t host_mode : 1; /**< Host mode */
        uint64_t chip_rev : 8; /**< The chip revision. */
#else
        uint64_t chip_rev : 8;
        uint64_t host_mode : 1;
        uint64_t reserved_9_12 : 4;
        uint64_t arb : 1;
        uint64_t lnk_rst : 1;
        uint64_t reserved_15_15 : 1;
        uint64_t cfg_rtry : 16;
        uint64_t p0_ntags : 6;
        uint64_t p1_ntags : 6;
        uint64_t reserved_44_63 : 20;
#endif
    } cn52xxp1;
    struct cvmx_npei_ctl_status_s cn56xx;
    struct cvmx_npei_ctl_status_cn56xxp1
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_15_63 : 49;
        uint64_t lnk_rst : 1; /**< Set when PCIe Core 0 requests a link reset due to link down state. This bit is only reset on raw reset so it can be read for state to determine if a reset occurred. Bit is cleared when a '1' is written to this field. */
        uint64_t arb : 1; /**< PCIe switch arbitration mode. '0' == fixed priority NPEI, PCIe0, then PCIe1. '1' == round robin. */
        uint64_t pkt_bp : 4; /**< Unused */
        uint64_t host_mode : 1; /**< Host mode */
        uint64_t chip_rev : 8; /**< The chip revision. */
#else
        uint64_t chip_rev : 8;
        uint64_t host_mode : 1;
        uint64_t pkt_bp : 4;
        uint64_t arb : 1;
        uint64_t lnk_rst : 1;
        uint64_t reserved_15_63 : 49;
#endif
    } cn56xxp1;
} cvmx_npei_ctl_status_t;
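/*
 * Editor's note: because writes to NPEI_CTL_STATUS are not ordered with PCIe
 * memory-space traffic, the comment above requires a read-back before any
 * access that depends on the new value. A sketch, assuming the same SDK
 * helpers and a CVMX_NPEI_CTL_STATUS address macro:
 *
 *   cvmx_npei_ctl_status_t ctl;
 *   ctl.u64 = cvmx_read_csr(CVMX_NPEI_CTL_STATUS);
 *   ctl.s.cfg_rtry = 0x20;                        // example retry timeout
 *   cvmx_write_csr(CVMX_NPEI_CTL_STATUS, ctl.u64);
 *   (void)cvmx_read_csr(CVMX_NPEI_CTL_STATUS);    // read back to order the write
 */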
/**
 * cvmx_npei_ctl_status2
 *
 * NPEI_CTL_STATUS2 = NPEI's Control Status2 Register
 *
 * Contains control and status for NPEI.
 * Writes to this register are not ordered with writes/reads to the PCI Memory space.
 * To ensure that a write has completed, the user must read the register before
 * making an access (i.e. PCI memory space) that requires the value of this register to be updated.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_npei_ctl_status2_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_16_63 : 48;
        uint64_t mps : 1; /**< Max Payload Size: 0 = 128B, 1 = 256B. Note: PCIE*_CFG030[MPS] must be set to the same value for proper function. */
        uint64_t mrrs : 3; /**< Max Read Request Size: 0 = 128B, 1 = 256B, 2 = 512B, 3 = 1024B, 4 = 2048B, 5 = 4096B. Note: This field must not exceed the desired max read request size. This means this field should not exceed PCIE*_CFG030[MRRS]. */
        uint64_t c1_w_flt : 1; /**< When '1' enables the window filter for reads and writes using the window registers. PCIE-Port1. Unfiltered writes are: MIO SubId0, MIO SubId7, NPEI SubId0, NPEI SubId7, POW SubId7, IPD SubId7, USBN0 SubId7. Unfiltered reads are: MIO SubId0, MIO SubId7, NPEI SubId0, NPEI SubId7, POW SubId1, POW SubId2, POW SubId3, POW SubId7, IPD SubId7, USBN0 SubId7. */
        uint64_t c0_w_flt : 1; /**< When '1' enables the window filter for reads and writes using the window registers. PCIE-Port0. Unfiltered writes are: MIO SubId0, MIO SubId7, NPEI SubId0, NPEI SubId7, POW SubId7, IPD SubId7, USBN0 SubId7. Unfiltered reads are: MIO SubId0, MIO SubId7, NPEI SubId0, NPEI SubId7, POW SubId1, POW SubId2, POW SubId3, POW SubId7, IPD SubId7, USBN0 SubId7. */
        uint64_t c1_b1_s : 3; /**< Pcie-Port1, Bar1 Size. 1 == 64MB, 2 == 128MB, 3 == 256MB, 4 == 512MB, 5 == 1024MB, 6 == 2048MB, 0 and 7 are reserved. */
        uint64_t c0_b1_s : 3; /**< Pcie-Port0, Bar1 Size. 1 == 64MB, 2 == 128MB, 3 == 256MB, 4 == 512MB, 5 == 1024MB, 6 == 2048MB, 0 and 7 are reserved. */
        uint64_t c1_wi_d : 1; /**< When set '1' disables access to the Window Registers from the PCIe-Port1. */
        uint64_t c1_b0_d : 1; /**< When set '1' disables access from PCIe-Port1 to BAR-0 address offsets: less than 0x270, greater than 0x270 AND less than 0x0520, 0x3BC0, 0x3CD0. */
        uint64_t c0_wi_d : 1; /**< When set '1' disables access to the Window Registers from the PCIe-Port0. */
        uint64_t c0_b0_d : 1; /**< When set '1' disables access from PCIe-Port0 to BAR-0 address offsets: less than 0x270, greater than 0x270 AND less than 0x0520, 0x3BC0, 0x3CD0. */
#else
        uint64_t c0_b0_d : 1;
        uint64_t c0_wi_d : 1;
        uint64_t c1_b0_d : 1;
        uint64_t c1_wi_d : 1;
        uint64_t c0_b1_s : 3;
        uint64_t c1_b1_s : 3;
        uint64_t c0_w_flt : 1;
        uint64_t c1_w_flt : 1;
        uint64_t mrrs : 3;
        uint64_t mps : 1;
        uint64_t reserved_16_63 : 48;
#endif
    } s;
    struct cvmx_npei_ctl_status2_s cn52xx;
    struct cvmx_npei_ctl_status2_s cn52xxp1;
    struct cvmx_npei_ctl_status2_s cn56xx;
    struct cvmx_npei_ctl_status2_s cn56xxp1;
} cvmx_npei_ctl_status2_t;

/**
 * cvmx_npei_data_out_cnt
 *
 * NPEI_DATA_OUT_CNT = NPEI DATA OUT COUNT
 *
 * The EXEC data out FIFO count and the data unload counter.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_npei_data_out_cnt_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_44_63 : 20;
        uint64_t p1_ucnt : 16; /**< PCIE-Port1 FIFO Unload Count. This counter is incremented by '1' every time a word is removed from the Data Out FIFO, whose count is shown in P1_FCNT. */
        uint64_t p1_fcnt : 6; /**< PCIE-Port1 Data Out FIFO Count. Number of address data words to be sent out the PCIe port presently buffered in the FIFO. */
        uint64_t p0_ucnt : 16; /**< PCIE-Port0 FIFO Unload Count. This counter is incremented by '1' every time a word is removed from the Data Out FIFO, whose count is shown in P0_FCNT. */
        uint64_t p0_fcnt : 6; /**< PCIE-Port0 Data Out FIFO Count. Number of address data words to be sent out the PCIe port presently buffered in the FIFO. */
#else
        uint64_t p0_fcnt : 6;
        uint64_t p0_ucnt : 16;
        uint64_t p1_fcnt : 6;
        uint64_t p1_ucnt : 16;
        uint64_t reserved_44_63 : 20;
#endif
    } s;
    struct cvmx_npei_data_out_cnt_s cn52xx;
    struct cvmx_npei_data_out_cnt_s cn52xxp1;
    struct cvmx_npei_data_out_cnt_s cn56xx;
    struct cvmx_npei_data_out_cnt_s cn56xxp1;
} cvmx_npei_data_out_cnt_t;
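/*
 * Editor's note: the MPS/MRRS fields above must agree with the PCIe core's
 * own configuration (PCIE*_CFG030), so a typical bring-up step programs both
 * sides together. A sketch under the same assumed helpers, with a
 * CVMX_NPEI_CTL_STATUS2 address macro:
 *
 *   cvmx_npei_ctl_status2_t cs2;
 *   cs2.u64 = cvmx_read_csr(CVMX_NPEI_CTL_STATUS2);
 *   cs2.s.mps = 0;    // 128B max payload; must match PCIE*_CFG030[MPS]
 *   cs2.s.mrrs = 2;   // 512B max read request; must not exceed PCIE*_CFG030[MRRS]
 *   cvmx_write_csr(CVMX_NPEI_CTL_STATUS2, cs2.u64);
 */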
/**
 * cvmx_npei_dbg_data
 *
 * NPEI_DBG_DATA = NPEI Debug Data Register
 *
 * Value returned on the debug-data lines from the RSLs
 */
typedef union
{
    uint64_t u64;
    struct cvmx_npei_dbg_data_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_28_63 : 36;
        uint64_t qlm0_rev_lanes : 1; /**< Lane reversal for PCIe port 0 */
        uint64_t reserved_25_26 : 2;
        uint64_t qlm1_spd : 2; /**< Sets the QLM1 frequency: 0=1.25 Gbaud, 1=2.5 Gbaud, 2=3.125 Gbaud, 3=3.75 Gbaud */
        uint64_t c_mul : 5; /**< PLL_MUL pins sampled at DCOK assertion. Core frequency = 50MHz*C_MUL */
        uint64_t dsel_ext : 1; /**< Allows changes in the external pins to set the debug select value. */
        uint64_t data : 17; /**< Value on the debug data lines. */
#else
        uint64_t data : 17;
        uint64_t dsel_ext : 1;
        uint64_t c_mul : 5;
        uint64_t qlm1_spd : 2;
        uint64_t reserved_25_26 : 2;
        uint64_t qlm0_rev_lanes : 1;
        uint64_t reserved_28_63 : 36;
#endif
    } s;
    struct cvmx_npei_dbg_data_cn52xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_29_63 : 35;
        uint64_t qlm0_link_width : 1; /**< Link width of PCIe port 0: 0 = PCIe port 0 is 2 lanes, 2-lane PCIe port 1 exists; 1 = PCIe port 0 is 4 lanes, PCIe port 1 does not exist */
        uint64_t qlm0_rev_lanes : 1; /**< Lane reversal for PCIe port 0 */
        uint64_t qlm1_mode : 2; /**< Sets the QLM1 Mode: 0=Reserved, 1=XAUI, 2=SGMII, 3=PICMG */
        uint64_t qlm1_spd : 2; /**< Sets the QLM1 frequency: 0=1.25 Gbaud, 1=2.5 Gbaud, 2=3.125 Gbaud, 3=3.75 Gbaud */
        uint64_t c_mul : 5; /**< PLL_MUL pins sampled at DCOK assertion. Core frequency = 50MHz*C_MUL */
        uint64_t dsel_ext : 1; /**< Allows changes in the external pins to set the debug select value. */
        uint64_t data : 17; /**< Value on the debug data lines. */
#else
        uint64_t data : 17;
        uint64_t dsel_ext : 1;
        uint64_t c_mul : 5;
        uint64_t qlm1_spd : 2;
        uint64_t qlm1_mode : 2;
        uint64_t qlm0_rev_lanes : 1;
        uint64_t qlm0_link_width : 1;
        uint64_t reserved_29_63 : 35;
#endif
    } cn52xx;
    struct cvmx_npei_dbg_data_cn52xx cn52xxp1;
    struct cvmx_npei_dbg_data_cn56xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_29_63 : 35;
        uint64_t qlm2_rev_lanes : 1; /**< Lane reversal for PCIe port 1 */
        uint64_t qlm0_rev_lanes : 1; /**< Lane reversal for PCIe port 0 */
        uint64_t qlm3_spd : 2; /**< Sets the QLM3 frequency: 0=1.25 Gbaud, 1=2.5 Gbaud, 2=3.125 Gbaud, 3=3.75 Gbaud */
        uint64_t qlm1_spd : 2; /**< Sets the QLM1 frequency: 0=1.25 Gbaud, 1=2.5 Gbaud, 2=3.125 Gbaud, 3=3.75 Gbaud */
        uint64_t c_mul : 5; /**< PLL_MUL pins sampled at DCOK assertion. Core frequency = 50MHz*C_MUL */
        uint64_t dsel_ext : 1; /**< Allows changes in the external pins to set the debug select value. */
        uint64_t data : 17; /**< Value on the debug data lines. */
#else
        uint64_t data : 17;
        uint64_t dsel_ext : 1;
        uint64_t c_mul : 5;
        uint64_t qlm1_spd : 2;
        uint64_t qlm3_spd : 2;
        uint64_t qlm0_rev_lanes : 1;
        uint64_t qlm2_rev_lanes : 1;
        uint64_t reserved_29_63 : 35;
#endif
    } cn56xx;
    struct cvmx_npei_dbg_data_cn56xx cn56xxp1;
} cvmx_npei_dbg_data_t;

/**
 * cvmx_npei_dbg_select
 *
 * NPEI_DBG_SELECT = Debug Select Register
 *
 * Contains the debug select value last written to the RSLs.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_npei_dbg_select_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_16_63 : 48;
        uint64_t dbg_sel : 16; /**< When this register is written its value is sent to all RSLs. */
#else
        uint64_t dbg_sel : 16;
        uint64_t reserved_16_63 : 48;
#endif
    } s;
    struct cvmx_npei_dbg_select_s cn52xx;
    struct cvmx_npei_dbg_select_s cn52xxp1;
    struct cvmx_npei_dbg_select_s cn56xx;
    struct cvmx_npei_dbg_select_s cn56xxp1;
} cvmx_npei_dbg_select_t;
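/*
 * Editor's note: NPEI_DBG_SELECT and NPEI_DBG_DATA work as a pair: write the
 * select code, then read the resulting debug-data lines. A sketch with a
 * hypothetical select value of 0x1 and the same assumed SDK helpers:
 *
 *   cvmx_npei_dbg_data_t dbg;
 *   cvmx_write_csr(CVMX_NPEI_DBG_SELECT, 0x1);   // broadcast the select to the RSLs
 *   dbg.u64 = cvmx_read_csr(CVMX_NPEI_DBG_DATA);
 *   uint64_t lines = dbg.s.data;                 // 17-bit debug-data value
 */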
/**
 * cvmx_npei_dma#_counts
 *
 * NPEI_DMA[0..4]_COUNTS = DMA Instruction Counts
 *
 * Values for determining the number of instructions for DMA[0..4] in the NPEI.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_npei_dmax_counts_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_39_63 : 25;
        uint64_t fcnt : 7; /**< Number of words in the Instruction FIFO. */
        uint64_t dbell : 32; /**< Number of available words of Instructions to read. */
#else
        uint64_t dbell : 32;
        uint64_t fcnt : 7;
        uint64_t reserved_39_63 : 25;
#endif
    } s;
    struct cvmx_npei_dmax_counts_s cn52xx;
    struct cvmx_npei_dmax_counts_s cn52xxp1;
    struct cvmx_npei_dmax_counts_s cn56xx;
    struct cvmx_npei_dmax_counts_s cn56xxp1;
} cvmx_npei_dmax_counts_t;

/**
 * cvmx_npei_dma#_dbell
 *
 * NPEI_DMA_DBELL[0..4] = DMA Door Bell
 *
 * The door bell register for DMA[0..4] queue.
 */
typedef union
{
    uint32_t u32;
    struct cvmx_npei_dmax_dbell_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint32_t reserved_16_31 : 16;
        uint32_t dbell : 16; /**< The value written to this register is added to the number of 8-byte words to be read and processed for the low-priority DMA queue. */
#else
        uint32_t dbell : 16;
        uint32_t reserved_16_31 : 16;
#endif
    } s;
    struct cvmx_npei_dmax_dbell_s cn52xx;
    struct cvmx_npei_dmax_dbell_s cn52xxp1;
    struct cvmx_npei_dmax_dbell_s cn56xx;
    struct cvmx_npei_dmax_dbell_s cn56xxp1;
} cvmx_npei_dmax_dbell_t;

/**
 * cvmx_npei_dma#_ibuff_saddr
 *
 * NPEI_DMA[0..4]_IBUFF_SADDR = DMA Instruction Buffer Starting Address
 *
 * The address to start reading Instructions from for DMA[0..4].
 */
typedef union
{
    uint64_t u64;
    struct cvmx_npei_dmax_ibuff_saddr_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_37_63 : 27;
        uint64_t idle : 1; /**< DMA Engine IDLE state */
        uint64_t saddr : 29; /**< The 128-byte aligned starting address to read the first instruction. SADDR is address bits 35:7 of the first instruction's address. */
        uint64_t reserved_0_6 : 7;
#else
        uint64_t reserved_0_6 : 7;
        uint64_t saddr : 29;
        uint64_t idle : 1;
        uint64_t reserved_37_63 : 27;
#endif
    } s;
    struct cvmx_npei_dmax_ibuff_saddr_s cn52xx;
    struct cvmx_npei_dmax_ibuff_saddr_cn52xxp1
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_36_63 : 28;
        uint64_t saddr : 29; /**< The 128-byte aligned starting address to read the first instruction. SADDR is address bits 35:7 of the first instruction's address. */
        uint64_t reserved_0_6 : 7;
#else
        uint64_t reserved_0_6 : 7;
        uint64_t saddr : 29;
        uint64_t reserved_36_63 : 28;
#endif
    } cn52xxp1;
    struct cvmx_npei_dmax_ibuff_saddr_s cn56xx;
    struct cvmx_npei_dmax_ibuff_saddr_cn52xxp1 cn56xxp1;
} cvmx_npei_dmax_ibuff_saddr_t;

/**
 * cvmx_npei_dma#_naddr
 *
 * NPEI_DMA[0..4]_NADDR = DMA Next Ichunk Address
 *
 * Address NPEI will read the next Ichunk data from. This is valid when state is 0.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_npei_dmax_naddr_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_36_63 : 28;
        uint64_t addr : 36; /**< The next L2C address to read DMA# instructions from. */
#else
        uint64_t addr : 36;
        uint64_t reserved_36_63 : 28;
#endif
    } s;
    struct cvmx_npei_dmax_naddr_s cn52xx;
    struct cvmx_npei_dmax_naddr_s cn52xxp1;
    struct cvmx_npei_dmax_naddr_s cn56xx;
    struct cvmx_npei_dmax_naddr_s cn56xxp1;
} cvmx_npei_dmax_naddr_t;
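/*
 * Editor's note: the doorbell is a 32-bit register, so it is written through
 * the u32 member rather than u64. A sketch of submitting n_words new 8-byte
 * instruction words to DMA queue 0, assuming the usual write helper and a
 * CVMX_NPEI_DMAX_DBELL(0)-style address macro (names assumed):
 *
 *   cvmx_npei_dmax_dbell_t db;
 *   db.u32 = 0;
 *   db.s.dbell = n_words;   // added to the queue's pending-word count
 *   cvmx_write_csr(CVMX_NPEI_DMAX_DBELL(0), db.u32);
 */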
/**
 * cvmx_npei_dma0_int_level
 *
 * NPEI_DMA0_INT_LEVEL = NPEI DMA0 Interrupt Level
 *
 * Thresholds for DMA count and timer interrupts for DMA0.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_npei_dma0_int_level_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t time : 32; /**< Whenever the DMA_CNT0 timer exceeds this value, NPEI_INT_SUM[DTIME0] is set. The DMA_CNT0 timer increments every core clock whenever NPEI_DMA_CNTS[DMA0]!=0, and is cleared when NPEI_INT_SUM[DTIME0] is written with one. */
        uint64_t cnt : 32; /**< Whenever NPEI_DMA_CNTS[DMA0] exceeds this value, NPEI_INT_SUM[DCNT0] is set. */
#else
        uint64_t cnt : 32;
        uint64_t time : 32;
#endif
    } s;
    struct cvmx_npei_dma0_int_level_s cn52xx;
    struct cvmx_npei_dma0_int_level_s cn52xxp1;
    struct cvmx_npei_dma0_int_level_s cn56xx;
    struct cvmx_npei_dma0_int_level_s cn56xxp1;
} cvmx_npei_dma0_int_level_t;

/**
 * cvmx_npei_dma1_int_level
 *
 * NPEI_DMA1_INT_LEVEL = NPEI DMA1 Interrupt Level
 *
 * Thresholds for DMA count and timer interrupts for DMA1.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_npei_dma1_int_level_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t time : 32; /**< Whenever the DMA_CNT1 timer exceeds this value, NPEI_INT_SUM[DTIME1] is set. The DMA_CNT1 timer increments every core clock whenever NPEI_DMA_CNTS[DMA1]!=0, and is cleared when NPEI_INT_SUM[DTIME1] is written with one. */
        uint64_t cnt : 32; /**< Whenever NPEI_DMA_CNTS[DMA1] exceeds this value, NPEI_INT_SUM[DCNT1] is set. */
#else
        uint64_t cnt : 32;
        uint64_t time : 32;
#endif
    } s;
    struct cvmx_npei_dma1_int_level_s cn52xx;
    struct cvmx_npei_dma1_int_level_s cn52xxp1;
    struct cvmx_npei_dma1_int_level_s cn56xx;
    struct cvmx_npei_dma1_int_level_s cn56xxp1;
} cvmx_npei_dma1_int_level_t;

/**
 * cvmx_npei_dma_cnts
 *
 * NPEI_DMA_CNTS = NPEI DMA Count
 *
 * The DMA Count values for DMA0 and DMA1.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_npei_dma_cnts_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t dma1 : 32; /**< The DMA counter 1. Writing this field will cause the written value to be subtracted from DMA1. SW should use a 4-byte write to access this field so as not to change the value of other fields in this register. HW will optionally increment this field after it completes an OUTBOUND or EXTERNAL-ONLY DMA instruction. These increments may cause interrupts. Refer to NPEI_DMA1_INT_LEVEL and NPEI_INT_SUM[DCNT1,DTIME1]. */
        uint64_t dma0 : 32; /**< The DMA counter 0. Writing this field will cause the written value to be subtracted from DMA0. SW should use a 4-byte write to access this field so as not to change the value of other fields in this register. HW will optionally increment this field after it completes an OUTBOUND or EXTERNAL-ONLY DMA instruction. These increments may cause interrupts. Refer to NPEI_DMA0_INT_LEVEL and NPEI_INT_SUM[DCNT0,DTIME0]. */
#else
        uint64_t dma0 : 32;
        uint64_t dma1 : 32;
#endif
    } s;
    struct cvmx_npei_dma_cnts_s cn52xx;
    struct cvmx_npei_dma_cnts_s cn52xxp1;
    struct cvmx_npei_dma_cnts_s cn56xx;
    struct cvmx_npei_dma_cnts_s cn56xxp1;
} cvmx_npei_dma_cnts_t;
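/*
 * Editor's note: NPEI_DMA_CNTS counters are decremented by writing the value
 * to subtract, and the comments above call for 4-byte writes so the other
 * 32-bit counter is untouched. A sketch of acknowledging DCNT0 after it
 * crosses NPEI_DMA0_INT_LEVEL[CNT], under the same helper assumptions (shown
 * as a full 64-bit write with DMA1's subtrahend left at zero for brevity; a
 * 32-bit store to the DMA0 half would follow the comment's advice exactly):
 *
 *   cvmx_npei_dma_cnts_t cnts;
 *   cnts.u64 = cvmx_read_csr(CVMX_NPEI_DMA_CNTS);
 *   uint32_t done = cnts.s.dma0;   // work completed so far
 *   cnts.u64 = 0;
 *   cnts.s.dma0 = done;            // subtract what we observed; DMA1 -= 0
 *   cvmx_write_csr(CVMX_NPEI_DMA_CNTS, cnts.u64);
 */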
/**
 * cvmx_npei_dma_control
 *
 * NPEI_DMA_CONTROL = DMA Control Register
 *
 * Controls operation of the DMA IN/OUT.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_npei_dma_control_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_40_63 : 24;
        uint64_t p_32b_m : 1; /**< DMA PCIE 32-bit word read disable bit. When 0, the feature is enabled. */
        uint64_t dma4_enb : 1; /**< DMA# enable. Enables the operation of the DMA engine. After being enabled a DMA engine should not be disabled while processing instructions. */
        uint64_t dma3_enb : 1; /**< DMA# enable. Enables the operation of the DMA engine. After being enabled a DMA engine should not be disabled while processing instructions. */
        uint64_t dma2_enb : 1; /**< DMA# enable. Enables the operation of the DMA engine. After being enabled a DMA engine should not be disabled while processing instructions. */
        uint64_t dma1_enb : 1; /**< DMA# enable. Enables the operation of the DMA engine. After being enabled a DMA engine should not be disabled while processing instructions. */
        uint64_t dma0_enb : 1; /**< DMA# enable. Enables the operation of the DMA engine. After being enabled a DMA engine should not be disabled while processing instructions. */
        uint64_t b0_lend : 1; /**< When set '1' and the NPEI is in the mode to write 0 to L2C memory when a DMA is done, the address to be written to will be treated as a Little Endian address. */
        uint64_t dwb_denb : 1; /**< When set '1' the NPEI will send a value in the DWB field for a free page operation for the memory that contained the data. */
        uint64_t dwb_ichk : 9; /**< When Instruction Chunks for DMA operations are freed this value is used for the DWB field of the operation. */
        uint64_t fpa_que : 3; /**< The FPA queue that the instruction-chunk page will be returned to when used. */
        uint64_t o_add1 : 1; /**< When set '1', 1 will be added to the DMA counters; if '0', the number of bytes in the DMA transfer will be added to the count register. */
        uint64_t o_ro : 1; /**< Relaxed Ordering Mode for DMA. */
        uint64_t o_ns : 1; /**< Nosnoop For DMA. */
        uint64_t o_es : 2; /**< Endian Swap Mode for DMA. */
        uint64_t o_mode : 1; /**< Select PCI_POINTER MODE to be used. '1' use pointer values for address and register values for RO, ES, and NS; '0' use register values for address and pointer values for RO, ES, and NS. */
        uint64_t csize : 14; /**< The size in words of the DMA Instruction Chunk. This value should only be written once. After writing this value a new value will not be recognized until the end of the DMA I-Chunk is reached. */
#else
        uint64_t csize : 14;
        uint64_t o_mode : 1;
        uint64_t o_es : 2;
        uint64_t o_ns : 1;
        uint64_t o_ro : 1;
        uint64_t o_add1 : 1;
        uint64_t fpa_que : 3;
        uint64_t dwb_ichk : 9;
        uint64_t dwb_denb : 1;
        uint64_t b0_lend : 1;
        uint64_t dma0_enb : 1;
        uint64_t dma1_enb : 1;
        uint64_t dma2_enb : 1;
        uint64_t dma3_enb : 1;
        uint64_t dma4_enb : 1;
        uint64_t p_32b_m : 1;
        uint64_t reserved_40_63 : 24;
#endif
    } s;
    struct cvmx_npei_dma_control_s cn52xx;
    struct cvmx_npei_dma_control_cn52xxp1
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_38_63 : 26;
        uint64_t dma3_enb : 1; /**< DMA# enable. Enables the operation of the DMA engine. After being enabled a DMA engine should not be disabled while processing instructions. */
        uint64_t dma2_enb : 1; /**< DMA# enable. Enables the operation of the DMA engine. After being enabled a DMA engine should not be disabled while processing instructions. */
        uint64_t dma1_enb : 1; /**< DMA# enable. Enables the operation of the DMA engine. After being enabled a DMA engine should not be disabled while processing instructions. */
        uint64_t dma0_enb : 1; /**< DMA# enable. Enables the operation of the DMA engine. After being enabled a DMA engine should not be disabled while processing instructions. */
        uint64_t b0_lend : 1; /**< When set '1' and the NPEI is in the mode to write 0 to L2C memory when a DMA is done, the address to be written to will be treated as a Little Endian address. */
        uint64_t dwb_denb : 1; /**< When set '1' the NPEI will send a value in the DWB field for a free page operation for the memory that contained the data. */
        uint64_t dwb_ichk : 9; /**< When Instruction Chunks for DMA operations are freed this value is used for the DWB field of the operation. */
        uint64_t fpa_que : 3; /**< The FPA queue that the instruction-chunk page will be returned to when used. */
        uint64_t o_add1 : 1; /**< When set '1', 1 will be added to the DMA counters; if '0', the number of bytes in the DMA transfer will be added to the count register. */
        uint64_t o_ro : 1; /**< Relaxed Ordering Mode for DMA. */
        uint64_t o_ns : 1; /**< Nosnoop For DMA. */
        uint64_t o_es : 2; /**< Endian Swap Mode for DMA. */
        uint64_t o_mode : 1; /**< Select PCI_POINTER MODE to be used. '1' use pointer values for address and register values for RO, ES, and NS; '0' use register values for address and pointer values for RO, ES, and NS. */
        uint64_t csize : 14; /**< The size in words of the DMA Instruction Chunk. This value should only be written once. After writing this value a new value will not be recognized until the end of the DMA I-Chunk is reached. */
#else
        uint64_t csize : 14;
        uint64_t o_mode : 1;
        uint64_t o_es : 2;
        uint64_t o_ns : 1;
        uint64_t o_ro : 1;
        uint64_t o_add1 : 1;
        uint64_t fpa_que : 3;
        uint64_t dwb_ichk : 9;
        uint64_t dwb_denb : 1;
        uint64_t b0_lend : 1;
        uint64_t dma0_enb : 1;
        uint64_t dma1_enb : 1;
        uint64_t dma2_enb : 1;
        uint64_t dma3_enb : 1;
        uint64_t reserved_38_63 : 26;
#endif
    } cn52xxp1;
    struct cvmx_npei_dma_control_s cn56xx;
    struct cvmx_npei_dma_control_cn56xxp1
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_39_63 : 25;
        uint64_t dma4_enb : 1; /**< DMA# enable. Enables the operation of the DMA engine. After being enabled a DMA engine should not be disabled while processing instructions. */
        uint64_t dma3_enb : 1; /**< DMA# enable. Enables the operation of the DMA engine. After being enabled a DMA engine should not be disabled while processing instructions. */
        uint64_t dma2_enb : 1; /**< DMA# enable. Enables the operation of the DMA engine. After being enabled a DMA engine should not be disabled while processing instructions. */
        uint64_t dma1_enb : 1; /**< DMA# enable. Enables the operation of the DMA engine. After being enabled a DMA engine should not be disabled while processing instructions. */
        uint64_t dma0_enb : 1; /**< DMA# enable. Enables the operation of the DMA engine. After being enabled a DMA engine should not be disabled while processing instructions. */
        uint64_t b0_lend : 1; /**< When set '1' and the NPEI is in the mode to write 0 to L2C memory when a DMA is done, the address to be written to will be treated as a Little Endian address. */
        uint64_t dwb_denb : 1; /**< When set '1' the NPEI will send a value in the DWB field for a free page operation for the memory that contained the data. */
        uint64_t dwb_ichk : 9; /**< When Instruction Chunks for DMA operations are freed this value is used for the DWB field of the operation. */
        uint64_t fpa_que : 3; /**< The FPA queue that the instruction-chunk page will be returned to when used. */
        uint64_t o_add1 : 1; /**< When set '1', 1 will be added to the DMA counters; if '0', the number of bytes in the DMA transfer will be added to the count register. */
        uint64_t o_ro : 1; /**< Relaxed Ordering Mode for DMA. */
        uint64_t o_ns : 1; /**< Nosnoop For DMA. */
        uint64_t o_es : 2; /**< Endian Swap Mode for DMA. */
        uint64_t o_mode : 1; /**< Select PCI_POINTER MODE to be used. '1' use pointer values for address and register values for RO, ES, and NS; '0' use register values for address and pointer values for RO, ES, and NS. */
        uint64_t csize : 14; /**< The size in words of the DMA Instruction Chunk. This value should only be written once. After writing this value a new value will not be recognized until the end of the DMA I-Chunk is reached. */
#else
        uint64_t csize : 14;
        uint64_t o_mode : 1;
        uint64_t o_es : 2;
        uint64_t o_ns : 1;
        uint64_t o_ro : 1;
        uint64_t o_add1 : 1;
        uint64_t fpa_que : 3;
        uint64_t dwb_ichk : 9;
        uint64_t dwb_denb : 1;
        uint64_t b0_lend : 1;
        uint64_t dma0_enb : 1;
        uint64_t dma1_enb : 1;
        uint64_t dma2_enb : 1;
        uint64_t dma3_enb : 1;
        uint64_t dma4_enb : 1;
        uint64_t reserved_39_63 : 25;
#endif
    } cn56xxp1;
} cvmx_npei_dma_control_t;
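/*
 * Editor's note: per the field descriptions above, CSIZE should be programmed
 * once before the engines are enabled, and an engine must not be disabled
 * while it is processing instructions. A bring-up sketch under the same
 * helper assumptions, with a CVMX_NPEI_DMA_CONTROL address macro:
 *
 *   cvmx_npei_dma_control_t ctl;
 *   ctl.u64 = cvmx_read_csr(CVMX_NPEI_DMA_CONTROL);
 *   ctl.s.csize = 32;      // instruction-chunk size in words (example value)
 *   ctl.s.dma0_enb = 1;    // start DMA engine 0
 *   cvmx_write_csr(CVMX_NPEI_DMA_CONTROL, ctl.u64);
 */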
/**
 * cvmx_npei_dma_pcie_req_num
 *
 * NPEI_DMA_PCIE_REQ_NUM = NPEI DMA PCIE Outstanding Read Request Number
 *
 * Outstanding PCIE read request number for DMAs and Packet; the maximum number is 16.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_npei_dma_pcie_req_num_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t dma_arb : 1; /**< DMA_PKT Read Request Arbitration. 1: DMA0-4 and PKT are round robin, i.e. DMA0-DMA1-DMA2-DMA3-DMA4-PKT... 0: DMA0-4 are round robin, PKT gets selected half the time, i.e. DMA0-PKT-DMA1-PKT-DMA2-PKT-DMA3-PKT-DMA4-PKT... */
        uint64_t reserved_53_62 : 10;
        uint64_t pkt_cnt : 5; /**< PKT outstanding PCIE Read Request Number for each PCIe port. When PKT_CNT=x, for each PCIe port, the number of outstanding PCIe memory space reads by the PCIe packet input/output will not exceed x. Valid number is between 1 and 16. */
        uint64_t reserved_45_47 : 3;
        uint64_t dma4_cnt : 5; /**< DMA4 outstanding PCIE Read Request Number. When DMA4_CNT=x, the number of outstanding PCIe memory space reads by the PCIe DMA engine 4 will not exceed x. Valid number is between 1 and 16. */
        uint64_t reserved_37_39 : 3;
        uint64_t dma3_cnt : 5; /**< DMA3 outstanding PCIE Read Request Number. When DMA3_CNT=x, the number of outstanding PCIe memory space reads by the PCIe DMA engine 3 will not exceed x. Valid number is between 1 and 16. */
        uint64_t reserved_29_31 : 3;
        uint64_t dma2_cnt : 5; /**< DMA2 outstanding PCIE Read Request Number. When DMA2_CNT=x, the number of outstanding PCIe memory space reads by the PCIe DMA engine 2 will not exceed x. Valid number is between 1 and 16. */
        uint64_t reserved_21_23 : 3;
        uint64_t dma1_cnt : 5; /**< DMA1 outstanding PCIE Read Request Number. When DMA1_CNT=x, the number of outstanding PCIe memory space reads by the PCIe DMA engine 1 will not exceed x. Valid number is between 1 and 16. */
        uint64_t reserved_13_15 : 3;
        uint64_t dma0_cnt : 5; /**< DMA0 outstanding PCIE Read Request Number. When DMA0_CNT=x, the number of outstanding PCIe memory space reads by the PCIe DMA engine 0 will not exceed x. Valid number is between 1 and 16. */
        uint64_t reserved_5_7 : 3;
        uint64_t dma_cnt : 5; /**< Total outstanding PCIE Read Request Number for each PCIe port. When DMA_CNT=x, for each PCIe port, the total number of outstanding PCIe memory space reads by the PCIe DMA engines and packet input/output will not exceed x. Valid number is between 1 and 16. */
#else
        uint64_t dma_cnt : 5;
        uint64_t reserved_5_7 : 3;
        uint64_t dma0_cnt : 5;
        uint64_t reserved_13_15 : 3;
        uint64_t dma1_cnt : 5;
        uint64_t reserved_21_23 : 3;
        uint64_t dma2_cnt : 5;
        uint64_t reserved_29_31 : 3;
        uint64_t dma3_cnt : 5;
        uint64_t reserved_37_39 : 3;
        uint64_t dma4_cnt : 5;
        uint64_t reserved_45_47 : 3;
        uint64_t pkt_cnt : 5;
        uint64_t reserved_53_62 : 10;
        uint64_t dma_arb : 1;
#endif
    } s;
    struct cvmx_npei_dma_pcie_req_num_s cn52xx;
    struct cvmx_npei_dma_pcie_req_num_s cn56xx;
} cvmx_npei_dma_pcie_req_num_t;

/**
 * cvmx_npei_dma_state1
 *
 * NPEI_DMA_STATE1 = NPI's DMA State 1
 *
 * Results from DMA state register 1
 */
typedef union
{
    uint64_t u64;
    struct cvmx_npei_dma_state1_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_40_63 : 24;
        uint64_t d4_dwe : 8; /**< DMA4 PCIe Write State */
        uint64_t d3_dwe : 8; /**< DMA3 PCIe Write State */
        uint64_t d2_dwe : 8; /**< DMA2 PCIe Write State */
        uint64_t d1_dwe : 8; /**< DMA1 PCIe Write State */
        uint64_t d0_dwe : 8; /**< DMA0 PCIe Write State */
#else
        uint64_t d0_dwe : 8;
        uint64_t d1_dwe : 8;
        uint64_t d2_dwe : 8;
        uint64_t d3_dwe : 8;
        uint64_t d4_dwe : 8;
        uint64_t reserved_40_63 : 24;
#endif
    } s;
    struct cvmx_npei_dma_state1_s cn52xx;
} cvmx_npei_dma_state1_t;

/**
 * cvmx_npei_dma_state1_p1
 *
 * NPEI_DMA_STATE1_P1 = NPEI DMA Request and Instruction State
 *
 * DMA engine Debug information.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_npei_dma_state1_p1_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_60_63 : 4;
        uint64_t d0_difst : 7; /**< DMA engine 0 dif instruction read state */
        uint64_t d1_difst : 7; /**< DMA engine 1 dif instruction read state */
        uint64_t d2_difst : 7; /**< DMA engine 2 dif instruction read state */
        uint64_t d3_difst : 7; /**< DMA engine 3 dif instruction read state */
        uint64_t d4_difst : 7; /**< DMA engine 4 dif instruction read state */
        uint64_t d0_reqst : 5; /**< DMA engine 0 request data state */
        uint64_t d1_reqst : 5; /**< DMA engine 1 request data state */
        uint64_t d2_reqst : 5; /**< DMA engine 2 request data state */
        uint64_t d3_reqst : 5; /**< DMA engine 3 request data state */
        uint64_t d4_reqst : 5; /**< DMA engine 4 request data state */
#else
        uint64_t d4_reqst : 5;
        uint64_t d3_reqst : 5;
        uint64_t d2_reqst : 5;
        uint64_t d1_reqst : 5;
        uint64_t d0_reqst : 5;
        uint64_t d4_difst : 7;
        uint64_t d3_difst : 7;
        uint64_t d2_difst : 7;
        uint64_t d1_difst : 7;
        uint64_t d0_difst : 7;
        uint64_t reserved_60_63 : 4;
#endif
    } s;
    struct cvmx_npei_dma_state1_p1_cn52xxp1
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_60_63 : 4;
        uint64_t d0_difst : 7; /**< DMA engine 0 dif instruction read state */
        uint64_t d1_difst : 7; /**< DMA engine 1 dif instruction read state */
        uint64_t d2_difst : 7; /**< DMA engine 2 dif instruction read state */
        uint64_t d3_difst : 7; /**< DMA engine 3 dif instruction read state */
        uint64_t reserved_25_31 : 7;
        uint64_t d0_reqst : 5; /**< DMA engine 0 request data state */
        uint64_t d1_reqst : 5; /**< DMA engine 1 request data state */
        uint64_t d2_reqst : 5; /**< DMA engine 2 request data state */
        uint64_t d3_reqst : 5; /**< DMA engine 3 request data state */
        uint64_t reserved_0_4 : 5;
#else
        uint64_t reserved_0_4 : 5;
        uint64_t d3_reqst : 5;
        uint64_t d2_reqst : 5;
        uint64_t d1_reqst : 5;
        uint64_t d0_reqst : 5;
        uint64_t reserved_25_31 : 7;
        uint64_t d3_difst : 7;
        uint64_t d2_difst : 7;
        uint64_t d1_difst : 7;
        uint64_t d0_difst : 7;
        uint64_t reserved_60_63 : 4;
#endif
    } cn52xxp1;
    struct cvmx_npei_dma_state1_p1_s cn56xxp1;
} cvmx_npei_dma_state1_p1_t;
/**
 * cvmx_npei_dma_state2
 *
 * NPEI_DMA_STATE2 = NPI's DMA State 2
 *
 * Results from DMA state register 2
 */
typedef union
{
    uint64_t u64;
    struct cvmx_npei_dma_state2_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_28_63 : 36;
        uint64_t ndwe : 4; /**< DMA L2C Write State */
        uint64_t reserved_21_23 : 3;
        uint64_t ndre : 5; /**< DMA L2C Read State */
        uint64_t reserved_10_15 : 6;
        uint64_t prd : 10; /**< DMA PCIe Read State */
#else
        uint64_t prd : 10;
        uint64_t reserved_10_15 : 6;
        uint64_t ndre : 5;
        uint64_t reserved_21_23 : 3;
        uint64_t ndwe : 4;
        uint64_t reserved_28_63 : 36;
#endif
    } s;
    struct cvmx_npei_dma_state2_s cn52xx;
} cvmx_npei_dma_state2_t;

/**
 * cvmx_npei_dma_state2_p1
 *
 * NPEI_DMA_STATE2_P1 = NPEI DMA Instruction Fetch State
 *
 * DMA engine Debug information.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_npei_dma_state2_p1_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_45_63 : 19;
        uint64_t d0_dffst : 9; /**< DMA engine 0 dif instruction fetch state */
        uint64_t d1_dffst : 9; /**< DMA engine 1 dif instruction fetch state */
        uint64_t d2_dffst : 9; /**< DMA engine 2 dif instruction fetch state */
        uint64_t d3_dffst : 9; /**< DMA engine 3 dif instruction fetch state */
        uint64_t d4_dffst : 9; /**< DMA engine 4 dif instruction fetch state */
#else
        uint64_t d4_dffst : 9;
        uint64_t d3_dffst : 9;
        uint64_t d2_dffst : 9;
        uint64_t d1_dffst : 9;
        uint64_t d0_dffst : 9;
        uint64_t reserved_45_63 : 19;
#endif
    } s;
    struct cvmx_npei_dma_state2_p1_cn52xxp1
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_45_63 : 19;
        uint64_t d0_dffst : 9; /**< DMA engine 0 dif instruction fetch state */
        uint64_t d1_dffst : 9; /**< DMA engine 1 dif instruction fetch state */
        uint64_t d2_dffst : 9; /**< DMA engine 2 dif instruction fetch state */
        uint64_t d3_dffst : 9; /**< DMA engine 3 dif instruction fetch state */
        uint64_t reserved_0_8 : 9;
#else
        uint64_t reserved_0_8 : 9;
        uint64_t d3_dffst : 9;
        uint64_t d2_dffst : 9;
        uint64_t d1_dffst : 9;
        uint64_t d0_dffst : 9;
        uint64_t reserved_45_63 : 19;
#endif
    } cn52xxp1;
    struct cvmx_npei_dma_state2_p1_s cn56xxp1;
} cvmx_npei_dma_state2_p1_t;

/**
 * cvmx_npei_dma_state3_p1
 *
 * NPEI_DMA_STATE3_P1 = NPEI DMA DRE State
 *
 * DMA engine Debug information.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_npei_dma_state3_p1_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_60_63 : 4;
        uint64_t d0_drest : 15; /**< DMA engine 0 dre state */
        uint64_t d1_drest : 15; /**< DMA engine 1 dre state */
        uint64_t d2_drest : 15; /**< DMA engine 2 dre state */
        uint64_t d3_drest : 15; /**< DMA engine 3 dre state */
#else
        uint64_t d3_drest : 15;
        uint64_t d2_drest : 15;
        uint64_t d1_drest : 15;
        uint64_t d0_drest : 15;
        uint64_t reserved_60_63 : 4;
#endif
    } s;
    struct cvmx_npei_dma_state3_p1_s cn52xxp1;
    struct cvmx_npei_dma_state3_p1_s cn56xxp1;
} cvmx_npei_dma_state3_p1_t;

/**
 * cvmx_npei_dma_state4_p1
 *
 * NPEI_DMA_STATE4_P1 = NPEI DMA DWE State
 *
 * DMA engine Debug information.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_npei_dma_state4_p1_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_52_63 : 12;
        uint64_t d0_dwest : 13; /**< DMA engine 0 dwe state */
        uint64_t d1_dwest : 13; /**< DMA engine 1 dwe state */
        uint64_t d2_dwest : 13; /**< DMA engine 2 dwe state */
        uint64_t d3_dwest : 13; /**< DMA engine 3 dwe state */
#else
        uint64_t d3_dwest : 13;
        uint64_t d2_dwest : 13;
        uint64_t d1_dwest : 13;
        uint64_t d0_dwest : 13;
        uint64_t reserved_52_63 : 12;
#endif
    } s;
    struct cvmx_npei_dma_state4_p1_s cn52xxp1;
    struct cvmx_npei_dma_state4_p1_s cn56xxp1;
} cvmx_npei_dma_state4_p1_t;

/**
 * cvmx_npei_dma_state5_p1
 *
 * NPEI_DMA_STATE5_P1 = NPEI DMA DWE and DRE State
 *
 * DMA engine Debug information.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_npei_dma_state5_p1_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_28_63 : 36;
        uint64_t d4_drest : 15; /**< DMA engine 4 dre state */
        uint64_t d4_dwest : 13; /**< DMA engine 4 dwe state */
#else
        uint64_t d4_dwest : 13;
        uint64_t d4_drest : 15;
        uint64_t reserved_28_63 : 36;
#endif
    } s;
    struct cvmx_npei_dma_state5_p1_s cn56xxp1;
} cvmx_npei_dma_state5_p1_t;

/**
 * cvmx_npei_int_a_enb
 *
 * NPEI_INTERRUPT_A_ENB = NPI's Interrupt A Enable Register
 *
 * Used to allow the generation of interrupts (MSI/INTA) to the PCIe Cores.
 * Used to enable the various interrupting conditions of NPEI.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_npei_int_a_enb_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_10_63 : 54;
        uint64_t pout_err : 1; /**< Enables NPEI_INT_A_SUM[9] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t pin_bp : 1; /**< Enables NPEI_INT_A_SUM[8] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t p1_rdlk : 1; /**< Enables NPEI_INT_A_SUM[7] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t p0_rdlk : 1; /**< Enables NPEI_INT_A_SUM[6] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t pgl_err : 1; /**< Enables NPEI_INT_A_SUM[5] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t pdi_err : 1; /**< Enables NPEI_INT_A_SUM[4] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t pop_err : 1; /**< Enables NPEI_INT_A_SUM[3] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t pins_err : 1; /**< Enables NPEI_INT_A_SUM[2] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t dma1_cpl : 1; /**< Enables NPEI_INT_A_SUM[1] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t dma0_cpl : 1; /**< Enables NPEI_INT_A_SUM[0] to generate an interrupt to the PCIE core for MSI/inta. */
#else
        uint64_t dma0_cpl : 1;
        uint64_t dma1_cpl : 1;
        uint64_t pins_err : 1;
        uint64_t pop_err : 1;
        uint64_t pdi_err : 1;
        uint64_t pgl_err : 1;
        uint64_t p0_rdlk : 1;
        uint64_t p1_rdlk : 1;
        uint64_t pin_bp : 1;
        uint64_t pout_err : 1;
        uint64_t reserved_10_63 : 54;
#endif
    } s;
    struct cvmx_npei_int_a_enb_s cn52xx;
    struct cvmx_npei_int_a_enb_cn52xxp1
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_2_63 : 62;
        uint64_t dma1_cpl : 1; /**< Enables NPEI_INT_A_SUM[1] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t dma0_cpl : 1; /**< Enables NPEI_INT_A_SUM[0] to generate an interrupt to the PCIE core for MSI/inta. */
#else
        uint64_t dma0_cpl : 1;
        uint64_t dma1_cpl : 1;
        uint64_t reserved_2_63 : 62;
#endif
    } cn52xxp1;
    struct cvmx_npei_int_a_enb_s cn56xx;
} cvmx_npei_int_a_enb_t;

/**
 * cvmx_npei_int_a_enb2
 *
 * NPEI_INTERRUPT_A_ENB2 = NPEI's Interrupt A Enable2 Register
 *
 * Used to enable the various interrupting conditions of NPEI
 */
typedef union
{
    uint64_t u64;
    struct cvmx_npei_int_a_enb2_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_10_63 : 54;
        uint64_t pout_err : 1; /**< Enables NPEI_INT_A_SUM[9] to generate an interrupt on the RSL. */
        uint64_t pin_bp : 1; /**< Enables NPEI_INT_A_SUM[8] to generate an interrupt on the RSL. */
        uint64_t p1_rdlk : 1; /**< Enables NPEI_INT_A_SUM[7] to generate an interrupt on the RSL. */
        uint64_t p0_rdlk : 1; /**< Enables NPEI_INT_A_SUM[6] to generate an interrupt on the RSL. */
        uint64_t pgl_err : 1; /**< Enables NPEI_INT_A_SUM[5] to generate an interrupt on the RSL. */
        uint64_t pdi_err : 1; /**< Enables NPEI_INT_A_SUM[4] to generate an interrupt on the RSL. */
        uint64_t pop_err : 1; /**< Enables NPEI_INT_A_SUM[3] to generate an interrupt on the RSL. */
        uint64_t pins_err : 1; /**< Enables NPEI_INT_A_SUM[2] to generate an interrupt on the RSL. */
        uint64_t dma1_cpl : 1; /**< Enables NPEI_INT_A_SUM[1] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t dma0_cpl : 1; /**< Enables NPEI_INT_A_SUM[0] to generate an interrupt to the PCIE core for MSI/inta. */
#else
        uint64_t dma0_cpl : 1;
        uint64_t dma1_cpl : 1;
        uint64_t pins_err : 1;
        uint64_t pop_err : 1;
        uint64_t pdi_err : 1;
        uint64_t pgl_err : 1;
        uint64_t p0_rdlk : 1;
        uint64_t p1_rdlk : 1;
        uint64_t pin_bp : 1;
        uint64_t pout_err : 1;
        uint64_t reserved_10_63 : 54;
#endif
    } s;
    struct cvmx_npei_int_a_enb2_s cn52xx;
    struct cvmx_npei_int_a_enb2_cn52xxp1
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_2_63 : 62;
        uint64_t dma1_cpl : 1; /**< Enables NPEI_INT_A_SUM[1] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t dma0_cpl : 1; /**< Enables NPEI_INT_A_SUM[0] to generate an interrupt to the PCIE core for MSI/inta. */
#else
        uint64_t dma0_cpl : 1;
        uint64_t dma1_cpl : 1;
        uint64_t reserved_2_63 : 62;
#endif
    } cn52xxp1;
    struct cvmx_npei_int_a_enb2_s cn56xx;
} cvmx_npei_int_a_enb2_t;
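/*
 * Editor's note: the NPEI_INT_A_SUM bits defined below are write-one-to-clear,
 * so the usual handler pattern is to read the summary, act on it, and write
 * the same value back. A sketch, assuming a CVMX_NPEI_INT_A_SUM address macro
 * and the cvmx_read_csr()/cvmx_write_csr() helpers from the rest of the SDK:
 *
 *   cvmx_npei_int_a_sum_t sum;
 *   sum.u64 = cvmx_read_csr(CVMX_NPEI_INT_A_SUM);
 *   if (sum.s.dma0_cpl) {
 *       // handle the UR/CA completion from PCIe port 0 here ...
 *   }
 *   cvmx_write_csr(CVMX_NPEI_INT_A_SUM, sum.u64);  // W1C: clear what we saw
 */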
/**
 * cvmx_npei_int_a_sum
 *
 * NPEI_INTERRUPT_A_SUM = NPI Interrupt A Summary Register
 *
 * Set when an interrupt condition occurs; write '1' to clear. When an interrupt bit in this register is set and
 * the corresponding bit in the NPEI_INT_A_ENB register is set, then NPEI_INT_SUM[61] will be set.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_npei_int_a_sum_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_10_63 : 54;
        uint64_t pout_err : 1; /**< Set when PKO sends packet data with the error bit set. */
        uint64_t pin_bp : 1; /**< Packet input count has exceeded the WMARK. See NPEI_PKT_IN_BP */
        uint64_t p1_rdlk : 1; /**< PCIe port 1 received a read lock. */
        uint64_t p0_rdlk : 1; /**< PCIe port 0 received a read lock. */
        uint64_t pgl_err : 1; /**< When a read error occurs on a packet gather list read this bit is set. */
        uint64_t pdi_err : 1; /**< When a read error occurs on a packet data read this bit is set. */
        uint64_t pop_err : 1; /**< When a read error occurs on a packet scatter pointer pair this bit is set. */
        uint64_t pins_err : 1; /**< When a read error occurs on a packet instruction this bit is set. */
        uint64_t dma1_cpl : 1; /**< Set each time any PCIe DMA engine receives a UR/CA response from PCIe Port 1 */
        uint64_t dma0_cpl : 1; /**< Set each time any PCIe DMA engine receives a UR/CA response from PCIe Port 0 */
#else
        uint64_t dma0_cpl : 1;
        uint64_t dma1_cpl : 1;
        uint64_t pins_err : 1;
        uint64_t pop_err : 1;
        uint64_t pdi_err : 1;
        uint64_t pgl_err : 1;
        uint64_t p0_rdlk : 1;
        uint64_t p1_rdlk : 1;
        uint64_t pin_bp : 1;
        uint64_t pout_err : 1;
        uint64_t reserved_10_63 : 54;
#endif
    } s;
    struct cvmx_npei_int_a_sum_s cn52xx;
    struct cvmx_npei_int_a_sum_cn52xxp1
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_2_63 : 62;
        uint64_t dma1_cpl : 1; /**< Set each time any PCIe DMA engine receives a UR/CA response from PCIe Port 1 */
        uint64_t dma0_cpl : 1; /**< Set each time any PCIe DMA engine receives a UR/CA response from PCIe Port 0 */
#else
        uint64_t dma0_cpl : 1;
        uint64_t dma1_cpl : 1;
        uint64_t reserved_2_63 : 62;
#endif
    } cn52xxp1;
    struct cvmx_npei_int_a_sum_s cn56xx;
} cvmx_npei_int_a_sum_t;

/**
 * cvmx_npei_int_enb
 *
 * NPEI_INTERRUPT_ENB = NPI's Interrupt Enable Register
 *
 * Used to allow the generation of interrupts (MSI/INTA) to the PCIe Cores.
 * Used to enable the various interrupting conditions of NPI.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_npei_int_enb_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t mio_inta : 1; /**< Enables NPEI_INT_SUM[63] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t reserved_62_62 : 1;
        uint64_t int_a : 1; /**< Enables NPEI_INT_SUM[61] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c1_ldwn : 1; /**< Enables NPEI_INT_SUM[60] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c0_ldwn : 1; /**< Enables NPEI_INT_SUM[59] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c1_exc : 1; /**< Enables NPEI_INT_SUM[58] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c0_exc : 1; /**< Enables NPEI_INT_SUM[57] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c1_up_wf : 1; /**< Enables NPEI_INT_SUM[56] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c0_up_wf : 1; /**< Enables NPEI_INT_SUM[55] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c1_un_wf : 1; /**< Enables NPEI_INT_SUM[54] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c0_un_wf : 1; /**< Enables NPEI_INT_SUM[53] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c1_un_bx : 1; /**< Enables NPEI_INT_SUM[52] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c1_un_wi : 1; /**< Enables NPEI_INT_SUM[51] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c1_un_b2 : 1; /**< Enables NPEI_INT_SUM[50] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c1_un_b1 : 1; /**< Enables NPEI_INT_SUM[49] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c1_un_b0 : 1; /**< Enables NPEI_INT_SUM[48] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c1_up_bx : 1; /**< Enables NPEI_INT_SUM[47] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c1_up_wi : 1; /**< Enables NPEI_INT_SUM[46] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c1_up_b2 : 1; /**< Enables NPEI_INT_SUM[45] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c1_up_b1 : 1; /**< Enables NPEI_INT_SUM[44] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c1_up_b0 : 1; /**< Enables NPEI_INT_SUM[43] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c0_un_bx : 1; /**< Enables NPEI_INT_SUM[42] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c0_un_wi : 1; /**< Enables NPEI_INT_SUM[41] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c0_un_b2 : 1; /**< Enables NPEI_INT_SUM[40] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c0_un_b1 : 1; /**< Enables NPEI_INT_SUM[39] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c0_un_b0 : 1; /**< Enables NPEI_INT_SUM[38] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c0_up_bx : 1; /**< Enables NPEI_INT_SUM[37] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c0_up_wi : 1; /**< Enables NPEI_INT_SUM[36] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c0_up_b2 : 1; /**< Enables NPEI_INT_SUM[35] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c0_up_b1 : 1; /**< Enables NPEI_INT_SUM[34] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c0_up_b0 : 1; /**< Enables NPEI_INT_SUM[33] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c1_hpint : 1; /**< Enables NPEI_INT_SUM[32] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c1_pmei : 1; /**< Enables NPEI_INT_SUM[31] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c1_wake : 1; /**< Enables NPEI_INT_SUM[30] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t crs1_dr : 1; /**< Enables NPEI_INT_SUM[29] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c1_se : 1; /**< Enables NPEI_INT_SUM[28] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t crs1_er : 1; /**< Enables NPEI_INT_SUM[27] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c1_aeri : 1; /**< Enables NPEI_INT_SUM[26] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c0_hpint : 1; /**< Enables NPEI_INT_SUM[25] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c0_pmei : 1; /**< Enables NPEI_INT_SUM[24] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c0_wake : 1; /**< Enables NPEI_INT_SUM[23] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t crs0_dr : 1; /**< Enables NPEI_INT_SUM[22] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c0_se : 1; /**< Enables NPEI_INT_SUM[21] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t crs0_er : 1; /**< Enables NPEI_INT_SUM[20] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c0_aeri : 1; /**< Enables NPEI_INT_SUM[19] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t ptime : 1; /**< Enables NPEI_INT_SUM[18] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t pcnt : 1; /**< Enables NPEI_INT_SUM[17] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t pidbof : 1; /**< Enables NPEI_INT_SUM[16] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t psldbof : 1; /**< Enables NPEI_INT_SUM[15] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t dtime1 : 1; /**< Enables NPEI_INT_SUM[14] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t dtime0 : 1; /**< Enables NPEI_INT_SUM[13] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t dcnt1 : 1; /**< Enables NPEI_INT_SUM[12] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t dcnt0 : 1; /**< Enables NPEI_INT_SUM[11] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t dma1fi : 1; /**< Enables NPEI_INT_SUM[10] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t dma0fi : 1; /**< Enables NPEI_INT_SUM[9] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t dma4dbo : 1; /**< Enables NPEI_INT_SUM[8] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t dma3dbo : 1; /**< Enables NPEI_INT_SUM[7] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t dma2dbo : 1; /**< Enables NPEI_INT_SUM[6] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t dma1dbo : 1; /**< Enables NPEI_INT_SUM[5] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t dma0dbo : 1; /**< Enables NPEI_INT_SUM[4] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t iob2big : 1; /**< Enables NPEI_INT_SUM[3] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t bar0_to : 1; /**< Enables NPEI_INT_SUM[2] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t rml_wto : 1; /**< Enables NPEI_INT_SUM[1] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t rml_rto : 1; /**< Enables NPEI_INT_SUM[0] to generate an interrupt to the PCIE core for MSI/inta. */
#else
        uint64_t rml_rto : 1;
        uint64_t rml_wto : 1;
        uint64_t bar0_to : 1;
        uint64_t iob2big : 1;
        uint64_t dma0dbo : 1;
        uint64_t dma1dbo : 1;
        uint64_t dma2dbo : 1;
        uint64_t dma3dbo : 1;
        uint64_t dma4dbo : 1;
        uint64_t dma0fi : 1;
        uint64_t dma1fi : 1;
        uint64_t dcnt0 : 1;
        uint64_t dcnt1 : 1;
        uint64_t dtime0 : 1;
        uint64_t dtime1 : 1;
        uint64_t psldbof : 1;
        uint64_t pidbof : 1;
        uint64_t pcnt : 1;
        uint64_t ptime : 1;
        uint64_t c0_aeri : 1;
        uint64_t crs0_er : 1;
        uint64_t c0_se : 1;
        uint64_t crs0_dr : 1;
        uint64_t c0_wake : 1;
        uint64_t c0_pmei : 1;
        uint64_t c0_hpint : 1;
        uint64_t c1_aeri : 1;
        uint64_t crs1_er : 1;
        uint64_t c1_se : 1;
        uint64_t crs1_dr : 1;
        uint64_t c1_wake : 1;
        uint64_t c1_pmei : 1;
        uint64_t c1_hpint : 1;
        uint64_t c0_up_b0 : 1;
        uint64_t c0_up_b1 : 1;
        uint64_t c0_up_b2 : 1;
        uint64_t c0_up_wi : 1;
        uint64_t c0_up_bx : 1;
        uint64_t c0_un_b0 : 1;
        uint64_t c0_un_b1 : 1;
        uint64_t c0_un_b2 : 1;
        uint64_t c0_un_wi : 1;
        uint64_t c0_un_bx : 1;
        uint64_t c1_up_b0 : 1;
        uint64_t c1_up_b1 : 1;
        uint64_t c1_up_b2 : 1;
        uint64_t c1_up_wi : 1;
        uint64_t c1_up_bx : 1;
        uint64_t c1_un_b0 : 1;
        uint64_t c1_un_b1 : 1;
        uint64_t c1_un_b2 : 1;
        uint64_t c1_un_wi : 1;
        uint64_t c1_un_bx : 1;
        uint64_t c0_un_wf : 1;
        uint64_t c1_un_wf : 1;
        uint64_t c0_up_wf : 1;
        uint64_t c1_up_wf : 1;
        uint64_t c0_exc : 1;
        uint64_t c1_exc : 1;
        uint64_t c0_ldwn : 1;
        uint64_t c1_ldwn : 1;
        uint64_t int_a : 1;
        uint64_t reserved_62_62 : 1;
        uint64_t mio_inta : 1;
#endif
    } s;
    struct cvmx_npei_int_enb_s cn52xx;
    struct cvmx_npei_int_enb_cn52xxp1
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t mio_inta : 1; /**< Enables NPEI_INT_SUM[63] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t reserved_62_62 : 1;
        uint64_t int_a : 1; /**< Enables NPEI_INT_SUM[61] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c1_ldwn : 1; /**< Enables NPEI_INT_SUM[60] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c0_ldwn : 1; /**< Enables NPEI_INT_SUM[59] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c1_exc : 1; /**< Enables NPEI_INT_SUM[58] to generate an interrupt to the PCIE core for MSI/inta. */
        uint64_t c0_exc : 1; /**< Enables NPEI_INT_SUM[57] to generate an interrupt to the PCIE core for MSI/inta. */
*/ uint64_t c1_up_wf : 1; /**< Enables NPEI_INT_SUM[56] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_up_wf : 1; /**< Enables NPEI_INT_SUM[55] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_un_wf : 1; /**< Enables NPEI_INT_SUM[54] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_un_wf : 1; /**< Enables NPEI_INT_SUM[53] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_un_bx : 1; /**< Enables NPEI_INT_SUM[52] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_un_wi : 1; /**< Enables NPEI_INT_SUM[51] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_un_b2 : 1; /**< Enables NPEI_INT_SUM[50] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_un_b1 : 1; /**< Enables NPEI_INT_SUM[49] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_un_b0 : 1; /**< Enables NPEI_INT_SUM[48] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_up_bx : 1; /**< Enables NPEI_INT_SUM[47] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_up_wi : 1; /**< Enables NPEI_INT_SUM[46] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_up_b2 : 1; /**< Enables NPEI_INT_SUM[45] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_up_b1 : 1; /**< Enables NPEI_INT_SUM[44] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_up_b0 : 1; /**< Enables NPEI_INT_SUM[43] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_un_bx : 1; /**< Enables NPEI_INT_SUM[42] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_un_wi : 1; /**< Enables NPEI_INT_SUM[41] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_un_b2 : 1; /**< Enables NPEI_INT_SUM[40] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_un_b1 : 1; /**< Enables NPEI_INT_SUM[39] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_un_b0 : 1; /**< Enables NPEI_INT_SUM[38] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_up_bx : 1; /**< Enables NPEI_INT_SUM[37] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_up_wi : 1; /**< Enables NPEI_INT_SUM[36] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_up_b2 : 1; /**< Enables NPEI_INT_SUM[35] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_up_b1 : 1; /**< Enables NPEI_INT_SUM[34] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_up_b0 : 1; /**< Enables NPEI_INT_SUM[33] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_hpint : 1; /**< Enables NPEI_INT_SUM[32] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_pmei : 1; /**< Enables NPEI_INT_SUM[31] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_wake : 1; /**< Enables NPEI_INT_SUM[30] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t crs1_dr : 1; /**< Enables NPEI_INT_SUM[29] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_se : 1; /**< Enables NPEI_INT_SUM[28] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t crs1_er : 1; /**< Enables NPEI_INT_SUM[27] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_aeri : 1; /**< Enables NPEI_INT_SUM[26] to generate an interrupt to the PCIE core for MSI/inta. 
*/ uint64_t c0_hpint : 1; /**< Enables NPEI_INT_SUM[25] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_pmei : 1; /**< Enables NPEI_INT_SUM[24] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_wake : 1; /**< Enables NPEI_INT_SUM[23] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t crs0_dr : 1; /**< Enables NPEI_INT_SUM[22] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_se : 1; /**< Enables NPEI_INT_SUM[21] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t crs0_er : 1; /**< Enables NPEI_INT_SUM[20] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_aeri : 1; /**< Enables NPEI_INT_SUM[19] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t ptime : 1; /**< Enables NPEI_INT_SUM[18] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t pcnt : 1; /**< Enables NPEI_INT_SUM[17] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t pidbof : 1; /**< Enables NPEI_INT_SUM[16] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t psldbof : 1; /**< Enables NPEI_INT_SUM[15] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t dtime1 : 1; /**< Enables NPEI_INT_SUM[14] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t dtime0 : 1; /**< Enables NPEI_INT_SUM[13] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t dcnt1 : 1; /**< Enables NPEI_INT_SUM[12] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t dcnt0 : 1; /**< Enables NPEI_INT_SUM[11] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t dma1fi : 1; /**< Enables NPEI_INT_SUM[10] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t dma0fi : 1; /**< Enables NPEI_INT_SUM[9] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t reserved_8_8 : 1; uint64_t dma3dbo : 1; /**< Enables NPEI_INT_SUM[7] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t dma2dbo : 1; /**< Enables NPEI_INT_SUM[6] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t dma1dbo : 1; /**< Enables NPEI_INT_SUM[5] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t dma0dbo : 1; /**< Enables NPEI_INT_SUM[4] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t iob2big : 1; /**< Enables NPEI_INT_SUM[3] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t bar0_to : 1; /**< Enables NPEI_INT_SUM[2] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t rml_wto : 1; /**< Enables NPEI_INT_SUM[1] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t rml_rto : 1; /**< Enables NPEI_INT_SUM[0] to generate an interrupt to the PCIE core for MSI/inta. 
*/ #else uint64_t rml_rto : 1; uint64_t rml_wto : 1; uint64_t bar0_to : 1; uint64_t iob2big : 1; uint64_t dma0dbo : 1; uint64_t dma1dbo : 1; uint64_t dma2dbo : 1; uint64_t dma3dbo : 1; uint64_t reserved_8_8 : 1; uint64_t dma0fi : 1; uint64_t dma1fi : 1; uint64_t dcnt0 : 1; uint64_t dcnt1 : 1; uint64_t dtime0 : 1; uint64_t dtime1 : 1; uint64_t psldbof : 1; uint64_t pidbof : 1; uint64_t pcnt : 1; uint64_t ptime : 1; uint64_t c0_aeri : 1; uint64_t crs0_er : 1; uint64_t c0_se : 1; uint64_t crs0_dr : 1; uint64_t c0_wake : 1; uint64_t c0_pmei : 1; uint64_t c0_hpint : 1; uint64_t c1_aeri : 1; uint64_t crs1_er : 1; uint64_t c1_se : 1; uint64_t crs1_dr : 1; uint64_t c1_wake : 1; uint64_t c1_pmei : 1; uint64_t c1_hpint : 1; uint64_t c0_up_b0 : 1; uint64_t c0_up_b1 : 1; uint64_t c0_up_b2 : 1; uint64_t c0_up_wi : 1; uint64_t c0_up_bx : 1; uint64_t c0_un_b0 : 1; uint64_t c0_un_b1 : 1; uint64_t c0_un_b2 : 1; uint64_t c0_un_wi : 1; uint64_t c0_un_bx : 1; uint64_t c1_up_b0 : 1; uint64_t c1_up_b1 : 1; uint64_t c1_up_b2 : 1; uint64_t c1_up_wi : 1; uint64_t c1_up_bx : 1; uint64_t c1_un_b0 : 1; uint64_t c1_un_b1 : 1; uint64_t c1_un_b2 : 1; uint64_t c1_un_wi : 1; uint64_t c1_un_bx : 1; uint64_t c0_un_wf : 1; uint64_t c1_un_wf : 1; uint64_t c0_up_wf : 1; uint64_t c1_up_wf : 1; uint64_t c0_exc : 1; uint64_t c1_exc : 1; uint64_t c0_ldwn : 1; uint64_t c1_ldwn : 1; uint64_t int_a : 1; uint64_t reserved_62_62 : 1; uint64_t mio_inta : 1; #endif } cn52xxp1; struct cvmx_npei_int_enb_s cn56xx; struct cvmx_npei_int_enb_cn56xxp1 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t mio_inta : 1; /**< Enables NPEI_INT_SUM[63] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t reserved_61_62 : 2; uint64_t c1_ldwn : 1; /**< Enables NPEI_INT_SUM[60] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_ldwn : 1; /**< Enables NPEI_INT_SUM[59] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_exc : 1; /**< Enables NPEI_INT_SUM[58] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_exc : 1; /**< Enables NPEI_INT_SUM[57] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_up_wf : 1; /**< Enables NPEI_INT_SUM[56] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_up_wf : 1; /**< Enables NPEI_INT_SUM[55] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_un_wf : 1; /**< Enables NPEI_INT_SUM[54] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_un_wf : 1; /**< Enables NPEI_INT_SUM[53] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_un_bx : 1; /**< Enables NPEI_INT_SUM[52] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_un_wi : 1; /**< Enables NPEI_INT_SUM[51] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_un_b2 : 1; /**< Enables NPEI_INT_SUM[50] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_un_b1 : 1; /**< Enables NPEI_INT_SUM[49] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_un_b0 : 1; /**< Enables NPEI_INT_SUM[48] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_up_bx : 1; /**< Enables NPEI_INT_SUM[47] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_up_wi : 1; /**< Enables NPEI_INT_SUM[46] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_up_b2 : 1; /**< Enables NPEI_INT_SUM[45] to generate an interrupt to the PCIE core for MSI/inta. 
*/ uint64_t c1_up_b1 : 1; /**< Enables NPEI_INT_SUM[44] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_up_b0 : 1; /**< Enables NPEI_INT_SUM[43] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_un_bx : 1; /**< Enables NPEI_INT_SUM[42] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_un_wi : 1; /**< Enables NPEI_INT_SUM[41] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_un_b2 : 1; /**< Enables NPEI_INT_SUM[40] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_un_b1 : 1; /**< Enables NPEI_INT_SUM[39] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_un_b0 : 1; /**< Enables NPEI_INT_SUM[38] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_up_bx : 1; /**< Enables NPEI_INT_SUM[37] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_up_wi : 1; /**< Enables NPEI_INT_SUM[36] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_up_b2 : 1; /**< Enables NPEI_INT_SUM[35] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_up_b1 : 1; /**< Enables NPEI_INT_SUM[34] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_up_b0 : 1; /**< Enables NPEI_INT_SUM[33] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_hpint : 1; /**< Enables NPEI_INT_SUM[32] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_pmei : 1; /**< Enables NPEI_INT_SUM[31] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c1_wake : 1; /**< Enables NPEI_INT_SUM[30] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t reserved_29_29 : 1; uint64_t c1_se : 1; /**< Enables NPEI_INT_SUM[28] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t reserved_27_27 : 1; uint64_t c1_aeri : 1; /**< Enables NPEI_INT_SUM[26] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_hpint : 1; /**< Enables NPEI_INT_SUM[25] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_pmei : 1; /**< Enables NPEI_INT_SUM[24] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t c0_wake : 1; /**< Enables NPEI_INT_SUM[23] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t reserved_22_22 : 1; uint64_t c0_se : 1; /**< Enables NPEI_INT_SUM[21] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t reserved_20_20 : 1; uint64_t c0_aeri : 1; /**< Enables NPEI_INT_SUM[19] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t ptime : 1; /**< Enables NPEI_INT_SUM[18] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t pcnt : 1; /**< Enables NPEI_INT_SUM[17] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t pidbof : 1; /**< Enables NPEI_INT_SUM[16] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t psldbof : 1; /**< Enables NPEI_INT_SUM[15] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t dtime1 : 1; /**< Enables NPEI_INT_SUM[14] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t dtime0 : 1; /**< Enables NPEI_INT_SUM[13] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t dcnt1 : 1; /**< Enables NPEI_INT_SUM[12] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t dcnt0 : 1; /**< Enables NPEI_INT_SUM[11] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t dma1fi : 1; /**< Enables NPEI_INT_SUM[10] to generate an interrupt to the PCIE core for MSI/inta. 
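*/

/* Editor's note, not generated text: the per-model members (cn52xx,
   cn52xxp1, cn56xx, cn56xxp1) exist because chip passes implement
   different subsets of these bits (CN52XXP1, for example, lacks DMA4DBO).
   A model-aware sketch, assuming the OCTEON_IS_MODEL() helper from the
   executive headers:

       cvmx_npei_int_enb_t enb;
       enb.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_INT_ENB);
       if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
           enb.cn56xxp1.dma0dbo = 1;   // pass-specific view
       else
           enb.s.dma0dbo = 1;          // common view
       cvmx_write_csr(CVMX_PEXP_NPEI_INT_ENB, enb.u64);

   End of sketch.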
*/ uint64_t dma0fi : 1; /**< Enables NPEI_INT_SUM[9] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t dma4dbo : 1; /**< Enables NPEI_INT_SUM[8] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t dma3dbo : 1; /**< Enables NPEI_INT_SUM[7] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t dma2dbo : 1; /**< Enables NPEI_INT_SUM[6] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t dma1dbo : 1; /**< Enables NPEI_INT_SUM[5] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t dma0dbo : 1; /**< Enables NPEI_INT_SUM[4] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t iob2big : 1; /**< Enables NPEI_INT_SUM[3] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t bar0_to : 1; /**< Enables NPEI_INT_SUM[2] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t rml_wto : 1; /**< Enables NPEI_INT_SUM[1] to generate an interrupt to the PCIE core for MSI/inta. */ uint64_t rml_rto : 1; /**< Enables NPEI_INT_SUM[0] to generate an interrupt to the PCIE core for MSI/inta. */ #else uint64_t rml_rto : 1; uint64_t rml_wto : 1; uint64_t bar0_to : 1; uint64_t iob2big : 1; uint64_t dma0dbo : 1; uint64_t dma1dbo : 1; uint64_t dma2dbo : 1; uint64_t dma3dbo : 1; uint64_t dma4dbo : 1; uint64_t dma0fi : 1; uint64_t dma1fi : 1; uint64_t dcnt0 : 1; uint64_t dcnt1 : 1; uint64_t dtime0 : 1; uint64_t dtime1 : 1; uint64_t psldbof : 1; uint64_t pidbof : 1; uint64_t pcnt : 1; uint64_t ptime : 1; uint64_t c0_aeri : 1; uint64_t reserved_20_20 : 1; uint64_t c0_se : 1; uint64_t reserved_22_22 : 1; uint64_t c0_wake : 1; uint64_t c0_pmei : 1; uint64_t c0_hpint : 1; uint64_t c1_aeri : 1; uint64_t reserved_27_27 : 1; uint64_t c1_se : 1; uint64_t reserved_29_29 : 1; uint64_t c1_wake : 1; uint64_t c1_pmei : 1; uint64_t c1_hpint : 1; uint64_t c0_up_b0 : 1; uint64_t c0_up_b1 : 1; uint64_t c0_up_b2 : 1; uint64_t c0_up_wi : 1; uint64_t c0_up_bx : 1; uint64_t c0_un_b0 : 1; uint64_t c0_un_b1 : 1; uint64_t c0_un_b2 : 1; uint64_t c0_un_wi : 1; uint64_t c0_un_bx : 1; uint64_t c1_up_b0 : 1; uint64_t c1_up_b1 : 1; uint64_t c1_up_b2 : 1; uint64_t c1_up_wi : 1; uint64_t c1_up_bx : 1; uint64_t c1_un_b0 : 1; uint64_t c1_un_b1 : 1; uint64_t c1_un_b2 : 1; uint64_t c1_un_wi : 1; uint64_t c1_un_bx : 1; uint64_t c0_un_wf : 1; uint64_t c1_un_wf : 1; uint64_t c0_up_wf : 1; uint64_t c1_up_wf : 1; uint64_t c0_exc : 1; uint64_t c1_exc : 1; uint64_t c0_ldwn : 1; uint64_t c1_ldwn : 1; uint64_t reserved_61_62 : 2; uint64_t mio_inta : 1; #endif } cn56xxp1; } cvmx_npei_int_enb_t; /** * cvmx_npei_int_enb2 * * NPEI_INTERRUPT_ENB2 = NPI's Interrupt Enable2 Register * * Used to enable the various interrupting conditions of NPI */ typedef union { uint64_t u64; struct cvmx_npei_int_enb2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_62_63 : 2; uint64_t int_a : 1; /**< Enables NPEI_INT_SUM2[61] to generate an interrupt on the RSL. */ uint64_t c1_ldwn : 1; /**< Enables NPEI_INT_SUM[60] to generate an interrupt on the RSL. */ uint64_t c0_ldwn : 1; /**< Enables NPEI_INT_SUM[59] to generate an interrupt on the RSL. */ uint64_t c1_exc : 1; /**< Enables NPEI_INT_SUM[58] to generate an interrupt on the RSL. */ uint64_t c0_exc : 1; /**< Enables NPEI_INT_SUM[57] to generate an interrupt on the RSL. */ uint64_t c1_up_wf : 1; /**< Enables NPEI_INT_SUM[56] to generate an interrupt on the RSL. */ uint64_t c0_up_wf : 1; /**< Enables NPEI_INT_SUM[55] to generate an interrupt on the RSL. 
*/ uint64_t c1_un_wf : 1; /**< Enables NPEI_INT_SUM[54] to generate an interrupt on the RSL. */ uint64_t c0_un_wf : 1; /**< Enables NPEI_INT_SUM[53] to generate an interrupt on the RSL. */ uint64_t c1_un_bx : 1; /**< Enables NPEI_INT_SUM[52] to generate an interrupt on the RSL. */ uint64_t c1_un_wi : 1; /**< Enables NPEI_INT_SUM[51] to generate an interrupt on the RSL. */ uint64_t c1_un_b2 : 1; /**< Enables NPEI_INT_SUM[50] to generate an interrupt on the RSL. */ uint64_t c1_un_b1 : 1; /**< Enables NPEI_INT_SUM[49] to generate an interrupt on the RSL. */ uint64_t c1_un_b0 : 1; /**< Enables NPEI_INT_SUM[48] to generate an interrupt on the RSL. */ uint64_t c1_up_bx : 1; /**< Enables NPEI_INT_SUM[47] to generate an interrupt on the RSL. */ uint64_t c1_up_wi : 1; /**< Enables NPEI_INT_SUM[46] to generate an interrupt on the RSL. */ uint64_t c1_up_b2 : 1; /**< Enables NPEI_INT_SUM[45] to generate an interrupt on the RSL. */ uint64_t c1_up_b1 : 1; /**< Enables NPEI_INT_SUM[44] to generate an interrupt on the RSL. */ uint64_t c1_up_b0 : 1; /**< Enables NPEI_INT_SUM[43] to generate an interrupt on the RSL. */ uint64_t c0_un_bx : 1; /**< Enables NPEI_INT_SUM[42] to generate an interrupt on the RSL. */ uint64_t c0_un_wi : 1; /**< Enables NPEI_INT_SUM[41] to generate an interrupt on the RSL. */ uint64_t c0_un_b2 : 1; /**< Enables NPEI_INT_SUM[40] to generate an interrupt on the RSL. */ uint64_t c0_un_b1 : 1; /**< Enables NPEI_INT_SUM[39] to generate an interrupt on the RSL. */ uint64_t c0_un_b0 : 1; /**< Enables NPEI_INT_SUM[38] to generate an interrupt on the RSL. */ uint64_t c0_up_bx : 1; /**< Enables NPEI_INT_SUM[37] to generate an interrupt on the RSL. */ uint64_t c0_up_wi : 1; /**< Enables NPEI_INT_SUM[36] to generate an interrupt on the RSL. */ uint64_t c0_up_b2 : 1; /**< Enables NPEI_INT_SUM[35] to generate an interrupt on the RSL. */ uint64_t c0_up_b1 : 1; /**< Enables NPEI_INT_SUM[34] to generate an interrupt on the RSL. */ uint64_t c0_up_b0 : 1; /**< Enables NPEI_INT_SUM[33] to generate an interrupt on the RSL. */ uint64_t c1_hpint : 1; /**< Enables NPEI_INT_SUM[32] to generate an interrupt on the RSL. */ uint64_t c1_pmei : 1; /**< Enables NPEI_INT_SUM[31] to generate an interrupt on the RSL. */ uint64_t c1_wake : 1; /**< Enables NPEI_INT_SUM[30] to generate an interrupt on the RSL. */ uint64_t crs1_dr : 1; /**< Enables NPEI_INT_SUM2[29] to generate an interrupt on the RSL. */ uint64_t c1_se : 1; /**< Enables NPEI_INT_SUM[28] to generate an interrupt on the RSL. */ uint64_t crs1_er : 1; /**< Enables NPEI_INT_SUM2[27] to generate an interrupt on the RSL. */ uint64_t c1_aeri : 1; /**< Enables NPEI_INT_SUM[26] to generate an interrupt on the RSL. */ uint64_t c0_hpint : 1; /**< Enables NPEI_INT_SUM[25] to generate an interrupt on the RSL. */ uint64_t c0_pmei : 1; /**< Enables NPEI_INT_SUM[24] to generate an interrupt on the RSL. */ uint64_t c0_wake : 1; /**< Enables NPEI_INT_SUM[23] to generate an interrupt on the RSL. */ uint64_t crs0_dr : 1; /**< Enables NPEI_INT_SUM2[22] to generate an interrupt on the RSL. */ uint64_t c0_se : 1; /**< Enables NPEI_INT_SUM[21] to generate an interrupt on the RSL. */ uint64_t crs0_er : 1; /**< Enables NPEI_INT_SUM2[20] to generate an interrupt on the RSL. */ uint64_t c0_aeri : 1; /**< Enables NPEI_INT_SUM[19] to generate an interrupt on the RSL. */ uint64_t ptime : 1; /**< Enables NPEI_INT_SUM[18] to generate an interrupt on the RSL. */ uint64_t pcnt : 1; /**< Enables NPEI_INT_SUM[17] to generate an interrupt on the RSL. 
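*/

/* Editor's usage sketch, not part of the generated definitions: unlike
   NPEI_INT_ENB, which steers SUM bits to MSI/INTA, NPEI_INT_ENB2 steers
   them onto the RSL interrupt path, so the two enables can split the same
   conditions between a PCIe host and local cores. The address macro
   CVMX_PEXP_NPEI_INT_ENB2 is an assumption:

       cvmx_npei_int_enb2_t enb2;
       enb2.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_INT_ENB2);
       enb2.s.rml_rto = 1;   // route NPEI_INT_SUM[0] to the RSL
       cvmx_write_csr(CVMX_PEXP_NPEI_INT_ENB2, enb2.u64);

   End of sketch.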
*/ uint64_t pidbof : 1; /**< Enables NPEI_INT_SUM[16] to generate an interrupt on the RSL. */ uint64_t psldbof : 1; /**< Enables NPEI_INT_SUM[15] to generate an interrupt on the RSL. */ uint64_t dtime1 : 1; /**< Enables NPEI_INT_SUM[14] to generate an interrupt on the RSL. */ uint64_t dtime0 : 1; /**< Enables NPEI_INT_SUM[13] to generate an interrupt on the RSL. */ uint64_t dcnt1 : 1; /**< Enables NPEI_INT_SUM[12] to generate an interrupt on the RSL. */ uint64_t dcnt0 : 1; /**< Enables NPEI_INT_SUM[11] to generate an interrupt on the RSL. */ uint64_t dma1fi : 1; /**< Enables NPEI_INT_SUM[10] to generate an interrupt on the RSL. */ uint64_t dma0fi : 1; /**< Enables NPEI_INT_SUM[9] to generate an interrupt on the RSL. */ uint64_t dma4dbo : 1; /**< Enables NPEI_INT_SUM[8] to generate an interrupt on the RSL. */ uint64_t dma3dbo : 1; /**< Enables NPEI_INT_SUM[7] to generate an interrupt on the RSL. */ uint64_t dma2dbo : 1; /**< Enables NPEI_INT_SUM[6] to generate an interrupt on the RSL. */ uint64_t dma1dbo : 1; /**< Enables NPEI_INT_SUM[5] to generate an interrupt on the RSL. */ uint64_t dma0dbo : 1; /**< Enables NPEI_INT_SUM[4] to generate an interrupt on the RSL. */ uint64_t iob2big : 1; /**< Enables NPEI_INT_SUM[3] to generate an interrupt on the RSL. */ uint64_t bar0_to : 1; /**< Enables NPEI_INT_SUM[2] to generate an interrupt on the RSL. */ uint64_t rml_wto : 1; /**< Enables NPEI_INT_SUM[1] to generate an interrupt on the RSL. */ uint64_t rml_rto : 1; /**< Enables NPEI_INT_SUM[0] to generate an interrupt on the RSL. */ #else uint64_t rml_rto : 1; uint64_t rml_wto : 1; uint64_t bar0_to : 1; uint64_t iob2big : 1; uint64_t dma0dbo : 1; uint64_t dma1dbo : 1; uint64_t dma2dbo : 1; uint64_t dma3dbo : 1; uint64_t dma4dbo : 1; uint64_t dma0fi : 1; uint64_t dma1fi : 1; uint64_t dcnt0 : 1; uint64_t dcnt1 : 1; uint64_t dtime0 : 1; uint64_t dtime1 : 1; uint64_t psldbof : 1; uint64_t pidbof : 1; uint64_t pcnt : 1; uint64_t ptime : 1; uint64_t c0_aeri : 1; uint64_t crs0_er : 1; uint64_t c0_se : 1; uint64_t crs0_dr : 1; uint64_t c0_wake : 1; uint64_t c0_pmei : 1; uint64_t c0_hpint : 1; uint64_t c1_aeri : 1; uint64_t crs1_er : 1; uint64_t c1_se : 1; uint64_t crs1_dr : 1; uint64_t c1_wake : 1; uint64_t c1_pmei : 1; uint64_t c1_hpint : 1; uint64_t c0_up_b0 : 1; uint64_t c0_up_b1 : 1; uint64_t c0_up_b2 : 1; uint64_t c0_up_wi : 1; uint64_t c0_up_bx : 1; uint64_t c0_un_b0 : 1; uint64_t c0_un_b1 : 1; uint64_t c0_un_b2 : 1; uint64_t c0_un_wi : 1; uint64_t c0_un_bx : 1; uint64_t c1_up_b0 : 1; uint64_t c1_up_b1 : 1; uint64_t c1_up_b2 : 1; uint64_t c1_up_wi : 1; uint64_t c1_up_bx : 1; uint64_t c1_un_b0 : 1; uint64_t c1_un_b1 : 1; uint64_t c1_un_b2 : 1; uint64_t c1_un_wi : 1; uint64_t c1_un_bx : 1; uint64_t c0_un_wf : 1; uint64_t c1_un_wf : 1; uint64_t c0_up_wf : 1; uint64_t c1_up_wf : 1; uint64_t c0_exc : 1; uint64_t c1_exc : 1; uint64_t c0_ldwn : 1; uint64_t c1_ldwn : 1; uint64_t int_a : 1; uint64_t reserved_62_63 : 2; #endif } s; struct cvmx_npei_int_enb2_s cn52xx; struct cvmx_npei_int_enb2_cn52xxp1 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_62_63 : 2; uint64_t int_a : 1; /**< Enables NPEI_INT_SUM2[61] to generate an interrupt on the RSL. */ uint64_t c1_ldwn : 1; /**< Enables NPEI_INT_SUM2[60] to generate an interrupt on the RSL. */ uint64_t c0_ldwn : 1; /**< Enables NPEI_INT_SUM2[59] to generate an interrupt on the RSL. */ uint64_t c1_exc : 1; /**< Enables NPEI_INT_SUM2[58] to generate an interrupt on the RSL. */ uint64_t c0_exc : 1; /**< Enables NPEI_INT_SUM2[57] to generate an interrupt on the RSL.
*/ uint64_t c1_up_wf : 1; /**< Enables NPEI_INT_SUM2[56] to generate an interrupt on the RSL. */ uint64_t c0_up_wf : 1; /**< Enables NPEI_INT_SUM2[55] to generate an interrupt on the RSL. */ uint64_t c1_un_wf : 1; /**< Enables NPEI_INT_SUM2[54] to generate an interrupt on the RSL. */ uint64_t c0_un_wf : 1; /**< Enables NPEI_INT_SUM2[53] to generate an interrupt on the RSL. */ uint64_t c1_un_bx : 1; /**< Enables NPEI_INT_SUM2[52] to generate an interrupt on the RSL. */ uint64_t c1_un_wi : 1; /**< Enables NPEI_INT_SUM2[51] to generate an interrupt on the RSL. */ uint64_t c1_un_b2 : 1; /**< Enables NPEI_INT_SUM2[50] to generate an interrupt on the RSL. */ uint64_t c1_un_b1 : 1; /**< Enables NPEI_INT_SUM2[49] to generate an interrupt on the RSL. */ uint64_t c1_un_b0 : 1; /**< Enables NPEI_INT_SUM2[48] to generate an interrupt on the RSL. */ uint64_t c1_up_bx : 1; /**< Enables NPEI_INT_SUM2[47] to generate an interrupt on the RSL. */ uint64_t c1_up_wi : 1; /**< Enables NPEI_INT_SUM2[46] to generate an interrupt on the RSL. */ uint64_t c1_up_b2 : 1; /**< Enables NPEI_INT_SUM2[45] to generate an interrupt on the RSL. */ uint64_t c1_up_b1 : 1; /**< Enables NPEI_INT_SUM2[44] to generate an interrupt on the RSL. */ uint64_t c1_up_b0 : 1; /**< Enables NPEI_INT_SUM2[43] to generate an interrupt on the RSL. */ uint64_t c0_un_bx : 1; /**< Enables NPEI_INT_SUM2[42] to generate an interrupt on the RSL. */ uint64_t c0_un_wi : 1; /**< Enables NPEI_INT_SUM2[41] to generate an interrupt on the RSL. */ uint64_t c0_un_b2 : 1; /**< Enables NPEI_INT_SUM2[40] to generate an interrupt on the RSL. */ uint64_t c0_un_b1 : 1; /**< Enables NPEI_INT_SUM2[39] to generate an interrupt on the RSL. */ uint64_t c0_un_b0 : 1; /**< Enables NPEI_INT_SUM2[38] to generate an interrupt on the RSL. */ uint64_t c0_up_bx : 1; /**< Enables NPEI_INT_SUM2[37] to generate an interrupt on the RSL. */ uint64_t c0_up_wi : 1; /**< Enables NPEI_INT_SUM2[36] to generate an interrupt on the RSL. */ uint64_t c0_up_b2 : 1; /**< Enables NPEI_INT_SUM2[35] to generate an interrupt on the RSL. */ uint64_t c0_up_b1 : 1; /**< Enables NPEI_INT_SUM2[34] to generate an interrupt on the RSL. */ uint64_t c0_up_b0 : 1; /**< Enables NPEI_INT_SUM2[33] to generate an interrupt on the RSL. */ uint64_t c1_hpint : 1; /**< Enables NPEI_INT_SUM2[32] to generate an interrupt on the RSL. */ uint64_t c1_pmei : 1; /**< Enables NPEI_INT_SUM2[31] to generate an interrupt on the RSL. */ uint64_t c1_wake : 1; /**< Enables NPEI_INT_SUM2[30] to generate an interrupt on the RSL. */ uint64_t crs1_dr : 1; /**< Enables NPEI_INT_SUM2[29] to generate an interrupt on the RSL. */ uint64_t c1_se : 1; /**< Enables NPEI_INT_SUM2[28] to generate an interrupt on the RSL. */ uint64_t crs1_er : 1; /**< Enables NPEI_INT_SUM2[27] to generate an interrupt on the RSL. */ uint64_t c1_aeri : 1; /**< Enables NPEI_INT_SUM2[26] to generate an interrupt on the RSL. */ uint64_t c0_hpint : 1; /**< Enables NPEI_INT_SUM2[25] to generate an interrupt on the RSL. */ uint64_t c0_pmei : 1; /**< Enables NPEI_INT_SUM2[24] to generate an interrupt on the RSL. */ uint64_t c0_wake : 1; /**< Enables NPEI_INT_SUM2[23] to generate an interrupt on the RSL. */ uint64_t crs0_dr : 1; /**< Enables NPEI_INT_SUM2[22] to generate an interrupt on the RSL. */ uint64_t c0_se : 1; /**< Enables NPEI_INT_SUM2[21] to generate an interrupt on the RSL. */ uint64_t crs0_er : 1; /**< Enables NPEI_INT_SUM2[20] to generate an interrupt on the RSL. 
*/ uint64_t c0_aeri : 1; /**< Enables NPEI_INT_SUM2[19] to generate an interrupt on the RSL. */ uint64_t ptime : 1; /**< Enables NPEI_INT_SUM2[18] to generate an interrupt on the RSL. */ uint64_t pcnt : 1; /**< Enables NPEI_INT_SUM2[17] to generate an interrupt on the RSL. */ uint64_t pidbof : 1; /**< Enables NPEI_INT_SUM2[16] to generate an interrupt on the RSL. */ uint64_t psldbof : 1; /**< Enables NPEI_INT_SUM2[15] to generate an interrupt on the RSL. */ uint64_t dtime1 : 1; /**< Enables NPEI_INT_SUM2[14] to generate an interrupt on the RSL. */ uint64_t dtime0 : 1; /**< Enables NPEI_INT_SUM2[13] to generate an interrupt on the RSL. */ uint64_t dcnt1 : 1; /**< Enables NPEI_INT_SUM2[12] to generate an interrupt on the RSL. */ uint64_t dcnt0 : 1; /**< Enables NPEI_INT_SUM2[11] to generate an interrupt on the RSL. */ uint64_t dma1fi : 1; /**< Enables NPEI_INT_SUM2[10] to generate an interrupt on the RSL. */ uint64_t dma0fi : 1; /**< Enables NPEI_INT_SUM2[9] to generate an interrupt on the RSL. */ uint64_t reserved_8_8 : 1; uint64_t dma3dbo : 1; /**< Enables NPEI_INT_SUM2[7] to generate an interrupt on the RSL. */ uint64_t dma2dbo : 1; /**< Enables NPEI_INT_SUM2[6] to generate an interrupt on the RSL. */ uint64_t dma1dbo : 1; /**< Enables NPEI_INT_SUM2[5] to generate an interrupt on the RSL. */ uint64_t dma0dbo : 1; /**< Enables NPEI_INT_SUM2[4] to generate an interrupt on the RSL. */ uint64_t iob2big : 1; /**< Enables NPEI_INT_SUM2[3] to generate an interrupt on the RSL. */ uint64_t bar0_to : 1; /**< Enables NPEI_INT_SUM2[2] to generate an interrupt on the RSL. */ uint64_t rml_wto : 1; /**< Enables NPEI_INT_SUM2[1] to generate an interrupt on the RSL. */ uint64_t rml_rto : 1; /**< Enables NPEI_INT_SUM2[0] to generate an interrupt on the RSL. */ #else uint64_t rml_rto : 1; uint64_t rml_wto : 1; uint64_t bar0_to : 1; uint64_t iob2big : 1; uint64_t dma0dbo : 1; uint64_t dma1dbo : 1; uint64_t dma2dbo : 1; uint64_t dma3dbo : 1; uint64_t reserved_8_8 : 1; uint64_t dma0fi : 1; uint64_t dma1fi : 1; uint64_t dcnt0 : 1; uint64_t dcnt1 : 1; uint64_t dtime0 : 1; uint64_t dtime1 : 1; uint64_t psldbof : 1; uint64_t pidbof : 1; uint64_t pcnt : 1; uint64_t ptime : 1; uint64_t c0_aeri : 1; uint64_t crs0_er : 1; uint64_t c0_se : 1; uint64_t crs0_dr : 1; uint64_t c0_wake : 1; uint64_t c0_pmei : 1; uint64_t c0_hpint : 1; uint64_t c1_aeri : 1; uint64_t crs1_er : 1; uint64_t c1_se : 1; uint64_t crs1_dr : 1; uint64_t c1_wake : 1; uint64_t c1_pmei : 1; uint64_t c1_hpint : 1; uint64_t c0_up_b0 : 1; uint64_t c0_up_b1 : 1; uint64_t c0_up_b2 : 1; uint64_t c0_up_wi : 1; uint64_t c0_up_bx : 1; uint64_t c0_un_b0 : 1; uint64_t c0_un_b1 : 1; uint64_t c0_un_b2 : 1; uint64_t c0_un_wi : 1; uint64_t c0_un_bx : 1; uint64_t c1_up_b0 : 1; uint64_t c1_up_b1 : 1; uint64_t c1_up_b2 : 1; uint64_t c1_up_wi : 1; uint64_t c1_up_bx : 1; uint64_t c1_un_b0 : 1; uint64_t c1_un_b1 : 1; uint64_t c1_un_b2 : 1; uint64_t c1_un_wi : 1; uint64_t c1_un_bx : 1; uint64_t c0_un_wf : 1; uint64_t c1_un_wf : 1; uint64_t c0_up_wf : 1; uint64_t c1_up_wf : 1; uint64_t c0_exc : 1; uint64_t c1_exc : 1; uint64_t c0_ldwn : 1; uint64_t c1_ldwn : 1; uint64_t int_a : 1; uint64_t reserved_62_63 : 2; #endif } cn52xxp1; struct cvmx_npei_int_enb2_s cn56xx; struct cvmx_npei_int_enb2_cn56xxp1 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_61_63 : 3; uint64_t c1_ldwn : 1; /**< Enables NPEI_INT_SUM[60] to generate an interrupt on the RSL. */ uint64_t c0_ldwn : 1; /**< Enables NPEI_INT_SUM[59] to generate an interrupt on the RSL. 
*/ uint64_t c1_exc : 1; /**< Enables NPEI_INT_SUM[58] to generate an interrupt on the RSL. */ uint64_t c0_exc : 1; /**< Enables NPEI_INT_SUM[57] to generate an interrupt on the RSL. */ uint64_t c1_up_wf : 1; /**< Enables NPEI_INT_SUM[56] to generate an interrupt on the RSL. */ uint64_t c0_up_wf : 1; /**< Enables NPEI_INT_SUM[55] to generate an interrupt on the RSL. */ uint64_t c1_un_wf : 1; /**< Enables NPEI_INT_SUM[54] to generate an interrupt on the RSL. */ uint64_t c0_un_wf : 1; /**< Enables NPEI_INT_SUM[53] to generate an interrupt on the RSL. */ uint64_t c1_un_bx : 1; /**< Enables NPEI_INT_SUM[52] to generate an interrupt on the RSL. */ uint64_t c1_un_wi : 1; /**< Enables NPEI_INT_SUM[51] to generate an interrupt on the RSL. */ uint64_t c1_un_b2 : 1; /**< Enables NPEI_INT_SUM[50] to generate an interrupt on the RSL. */ uint64_t c1_un_b1 : 1; /**< Enables NPEI_INT_SUM[49] to generate an interrupt on the RSL. */ uint64_t c1_un_b0 : 1; /**< Enables NPEI_INT_SUM[48] to generate an interrupt on the RSL. */ uint64_t c1_up_bx : 1; /**< Enables NPEI_INT_SUM[47] to generate an interrupt on the RSL. */ uint64_t c1_up_wi : 1; /**< Enables NPEI_INT_SUM[46] to generate an interrupt on the RSL. */ uint64_t c1_up_b2 : 1; /**< Enables NPEI_INT_SUM[45] to generate an interrupt on the RSL. */ uint64_t c1_up_b1 : 1; /**< Enables NPEI_INT_SUM[44] to generate an interrupt on the RSL. */ uint64_t c1_up_b0 : 1; /**< Enables NPEI_INT_SUM[43] to generate an interrupt on the RSL. */ uint64_t c0_un_bx : 1; /**< Enables NPEI_INT_SUM[42] to generate an interrupt on the RSL. */ uint64_t c0_un_wi : 1; /**< Enables NPEI_INT_SUM[41] to generate an interrupt on the RSL. */ uint64_t c0_un_b2 : 1; /**< Enables NPEI_INT_SUM[40] to generate an interrupt on the RSL. */ uint64_t c0_un_b1 : 1; /**< Enables NPEI_INT_SUM[39] to generate an interrupt on the RSL. */ uint64_t c0_un_b0 : 1; /**< Enables NPEI_INT_SUM[38] to generate an interrupt on the RSL. */ uint64_t c0_up_bx : 1; /**< Enables NPEI_INT_SUM[37] to generate an interrupt on the RSL. */ uint64_t c0_up_wi : 1; /**< Enables NPEI_INT_SUM[36] to generate an interrupt on the RSL. */ uint64_t c0_up_b2 : 1; /**< Enables NPEI_INT_SUM[35] to generate an interrupt on the RSL. */ uint64_t c0_up_b1 : 1; /**< Enables NPEI_INT_SUM[34] to generate an interrupt on the RSL. */ uint64_t c0_up_b0 : 1; /**< Enables NPEI_INT_SUM[33] to generate an interrupt on the RSL. */ uint64_t c1_hpint : 1; /**< Enables NPEI_INT_SUM[32] to generate an interrupt on the RSL. */ uint64_t c1_pmei : 1; /**< Enables NPEI_INT_SUM[31] to generate an interrupt on the RSL. */ uint64_t c1_wake : 1; /**< Enables NPEI_INT_SUM[30] to generate an interrupt on the RSL. */ uint64_t reserved_29_29 : 1; uint64_t c1_se : 1; /**< Enables NPEI_INT_SUM[28] to generate an interrupt on the RSL. */ uint64_t reserved_27_27 : 1; uint64_t c1_aeri : 1; /**< Enables NPEI_INT_SUM[26] to generate an interrupt on the RSL. */ uint64_t c0_hpint : 1; /**< Enables NPEI_INT_SUM[25] to generate an interrupt on the RSL. */ uint64_t c0_pmei : 1; /**< Enables NPEI_INT_SUM[24] to generate an interrupt on the RSL. */ uint64_t c0_wake : 1; /**< Enables NPEI_INT_SUM[23] to generate an interrupt on the RSL. */ uint64_t reserved_22_22 : 1; uint64_t c0_se : 1; /**< Enables NPEI_INT_SUM[21] to generate an interrupt on the RSL. */ uint64_t reserved_20_20 : 1; uint64_t c0_aeri : 1; /**< Enables NPEI_INT_SUM[19] to generate an interrupt on the RSL. */ uint64_t ptime : 1; /**< Enables NPEI_INT_SUM[18] to generate an interrupt on the RSL. 
*/ uint64_t pcnt : 1; /**< Enables NPEI_INT_SUM[17] to generate an interrupt on the RSL. */ uint64_t pidbof : 1; /**< Enables NPEI_INT_SUM[16] to generate an interrupt on the RSL. */ uint64_t psldbof : 1; /**< Enables NPEI_INT_SUM[15] to generate an interrupt on the RSL. */ uint64_t dtime1 : 1; /**< Enables NPEI_INT_SUM[14] to generate an interrupt on the RSL. */ uint64_t dtime0 : 1; /**< Enables NPEI_INT_SUM[13] to generate an interrupt on the RSL. */ uint64_t dcnt1 : 1; /**< Enables NPEI_INT_SUM[12] to generate an interrupt on the RSL. */ uint64_t dcnt0 : 1; /**< Enables NPEI_INT_SUM[11] to generate an interrupt on the RSL. */ uint64_t dma1fi : 1; /**< Enables NPEI_INT_SUM[10] to generate an interrupt on the RSL. */ uint64_t dma0fi : 1; /**< Enables NPEI_INT_SUM[9] to generate an interrupt on the RSL. */ uint64_t dma4dbo : 1; /**< Enables NPEI_INT_SUM[8] to generate an interrupt on the RSL. */ uint64_t dma3dbo : 1; /**< Enables NPEI_INT_SUM[7] to generate an interrupt on the RSL. */ uint64_t dma2dbo : 1; /**< Enables NPEI_INT_SUM[6] to generate an interrupt on the RSL. */ uint64_t dma1dbo : 1; /**< Enables NPEI_INT_SUM[5] to generate an interrupt on the RSL. */ uint64_t dma0dbo : 1; /**< Enables NPEI_INT_SUM[4] to generate an interrupt on the RSL. */ uint64_t iob2big : 1; /**< Enables NPEI_INT_SUM[3] to generate an interrupt on the RSL. */ uint64_t bar0_to : 1; /**< Enables NPEI_INT_SUM[2] to generate an interrupt on the RSL. */ uint64_t rml_wto : 1; /**< Enables NPEI_INT_SUM[1] to generate an interrupt on the RSL. */ uint64_t rml_rto : 1; /**< Enables NPEI_INT_SUM[0] to generate an interrupt on the RSL. */ #else uint64_t rml_rto : 1; uint64_t rml_wto : 1; uint64_t bar0_to : 1; uint64_t iob2big : 1; uint64_t dma0dbo : 1; uint64_t dma1dbo : 1; uint64_t dma2dbo : 1; uint64_t dma3dbo : 1; uint64_t dma4dbo : 1; uint64_t dma0fi : 1; uint64_t dma1fi : 1; uint64_t dcnt0 : 1; uint64_t dcnt1 : 1; uint64_t dtime0 : 1; uint64_t dtime1 : 1; uint64_t psldbof : 1; uint64_t pidbof : 1; uint64_t pcnt : 1; uint64_t ptime : 1; uint64_t c0_aeri : 1; uint64_t reserved_20_20 : 1; uint64_t c0_se : 1; uint64_t reserved_22_22 : 1; uint64_t c0_wake : 1; uint64_t c0_pmei : 1; uint64_t c0_hpint : 1; uint64_t c1_aeri : 1; uint64_t reserved_27_27 : 1; uint64_t c1_se : 1; uint64_t reserved_29_29 : 1; uint64_t c1_wake : 1; uint64_t c1_pmei : 1; uint64_t c1_hpint : 1; uint64_t c0_up_b0 : 1; uint64_t c0_up_b1 : 1; uint64_t c0_up_b2 : 1; uint64_t c0_up_wi : 1; uint64_t c0_up_bx : 1; uint64_t c0_un_b0 : 1; uint64_t c0_un_b1 : 1; uint64_t c0_un_b2 : 1; uint64_t c0_un_wi : 1; uint64_t c0_un_bx : 1; uint64_t c1_up_b0 : 1; uint64_t c1_up_b1 : 1; uint64_t c1_up_b2 : 1; uint64_t c1_up_wi : 1; uint64_t c1_up_bx : 1; uint64_t c1_un_b0 : 1; uint64_t c1_un_b1 : 1; uint64_t c1_un_b2 : 1; uint64_t c1_un_wi : 1; uint64_t c1_un_bx : 1; uint64_t c0_un_wf : 1; uint64_t c1_un_wf : 1; uint64_t c0_up_wf : 1; uint64_t c1_up_wf : 1; uint64_t c0_exc : 1; uint64_t c1_exc : 1; uint64_t c0_ldwn : 1; uint64_t c1_ldwn : 1; uint64_t reserved_61_63 : 3; #endif } cn56xxp1; } cvmx_npei_int_enb2_t; /** * cvmx_npei_int_info * * NPEI_INT_INFO = NPI Interrupt Information * * Contains information about some of the interrupt conditions that can occur in the NPEI_INTERRUPT_SUM register. */ typedef union { uint64_t u64; struct cvmx_npei_int_info_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t pidbof : 6; /**< Field set when the NPEI_INTERRUPT_SUM[PIDBOF] bit is set.
This field when set will not change again until NPEI_INTERRUPT_SUM[PIDBOF] is cleared. */ uint64_t psldbof : 6; /**< Field set when the NPEI_INTERRUPT_SUM[PSLDBOF] bit is set. This field when set will not change again until NPEI_INTERRUPT_SUM[PSLDBOF] is cleared. */ #else uint64_t psldbof : 6; uint64_t pidbof : 6; uint64_t reserved_12_63 : 52; #endif } s; struct cvmx_npei_int_info_s cn52xx; struct cvmx_npei_int_info_s cn56xx; struct cvmx_npei_int_info_s cn56xxp1; } cvmx_npei_int_info_t; /** * cvmx_npei_int_sum * * NPEI_INTERRUPT_SUM = NPI Interrupt Summary Register * * Set when an interrupt condition occurs, write '1' to clear. * * HACK: These used to exist, how are TO handled? * <3> PO0_2SML R/W1C 0x0 0 The packet being sent out on Port0 is smaller $R NS * than the NPI_BUFF_SIZE_OUTPUT0[ISIZE] field. * <7> I0_RTOUT R/W1C 0x0 0 Port-0 had a read timeout while attempting to $R NS * read instructions. * <15> P0_RTOUT R/W1C 0x0 0 Port-0 had a read timeout while attempting to $R NS * read packet data. * <23> G0_RTOUT R/W1C 0x0 0 Port-0 had a read timeout while attempting to $R NS * read a gather list. * <31> P0_PTOUT R/W1C 0x0 0 Port-0 output had a read timeout on a DATA/INFO $R NS * pair. */ typedef union { uint64_t u64; struct cvmx_npei_int_sum_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t mio_inta : 1; /**< Interrupt from MIO. */ uint64_t reserved_62_62 : 1; uint64_t int_a : 1; /**< Set when a bit in the NPEI_INT_A_SUM register and the corresponding bit in the NPEI_INT_A_ENB register is set. */ uint64_t c1_ldwn : 1; /**< Reset request due to link1 down status. */ uint64_t c0_ldwn : 1; /**< Reset request due to link0 down status. */ uint64_t c1_exc : 1; /**< Set when the PESC1_DBG_INFO register has a bit set and its corresponding PESC1_DBG_INFO_EN bit is set. */ uint64_t c0_exc : 1; /**< Set when the PESC0_DBG_INFO register has a bit set and its corresponding PESC0_DBG_INFO_EN bit is set. */ uint64_t c1_up_wf : 1; /**< Received Unsupported P-TLP for filtered window register. Core1. */ uint64_t c0_up_wf : 1; /**< Received Unsupported P-TLP for filtered window register. Core0. */ uint64_t c1_un_wf : 1; /**< Received Unsupported N-TLP for filtered window register. Core1. */ uint64_t c0_un_wf : 1; /**< Received Unsupported N-TLP for filtered window register. Core0. */ uint64_t c1_un_bx : 1; /**< Received Unsupported N-TLP for unknown Bar. Core 1. */ uint64_t c1_un_wi : 1; /**< Received Unsupported N-TLP for Window Register. Core 1. */ uint64_t c1_un_b2 : 1; /**< Received Unsupported N-TLP for Bar2. Core 1. */ uint64_t c1_un_b1 : 1; /**< Received Unsupported N-TLP for Bar1. Core 1. */ uint64_t c1_un_b0 : 1; /**< Received Unsupported N-TLP for Bar0. Core 1. */ uint64_t c1_up_bx : 1; /**< Received Unsupported P-TLP for unknown Bar. Core 1. */ uint64_t c1_up_wi : 1; /**< Received Unsupported P-TLP for Window Register. Core 1. */ uint64_t c1_up_b2 : 1; /**< Received Unsupported P-TLP for Bar2. Core 1. */ uint64_t c1_up_b1 : 1; /**< Received Unsupported P-TLP for Bar1. Core 1. */ uint64_t c1_up_b0 : 1; /**< Received Unsupported P-TLP for Bar0. Core 1. */ uint64_t c0_un_bx : 1; /**< Received Unsupported N-TLP for unknown Bar. Core 0. */ uint64_t c0_un_wi : 1; /**< Received Unsupported N-TLP for Window Register. Core 0. */ uint64_t c0_un_b2 : 1; /**< Received Unsupported N-TLP for Bar2. Core 0. */ uint64_t c0_un_b1 : 1; /**< Received Unsupported N-TLP for Bar1. Core 0. */ uint64_t c0_un_b0 : 1; /**< Received Unsupported N-TLP for Bar0. Core 0.
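*/

/* Editor's usage sketch, not generated text: NPEI_INT_SUM bits are
   write-1-to-clear, and NPEI_INT_INFO[PIDBOF]/[PSLDBOF] latch the
   offending ring until the matching SUM bit is cleared, so a handler
   should read INT_INFO before acknowledging. The address macros
   CVMX_PEXP_NPEI_INT_SUM and CVMX_PEXP_NPEI_INT_INFO are assumptions:

       cvmx_npei_int_sum_t sum;
       sum.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_INT_SUM);
       if (sum.s.pidbof) {
           cvmx_npei_int_info_t info;
           info.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_INT_INFO);
           // info.s.pidbof identifies the overflowed doorbell
           cvmx_write_csr(CVMX_PEXP_NPEI_INT_SUM, 1ull << 16); // W1C PIDBOF
       }

   End of sketch.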
*/ uint64_t c0_up_bx : 1; /**< Received Unsupported P-TLP for unknown Bar. Core 0. */ uint64_t c0_up_wi : 1; /**< Received Unsupported P-TLP for Window Register. Core 0. */ uint64_t c0_up_b2 : 1; /**< Received Unsupported P-TLP for Bar2. Core 0. */ uint64_t c0_up_b1 : 1; /**< Received Unsupported P-TLP for Bar1. Core 0. */ uint64_t c0_up_b0 : 1; /**< Received Unsupported P-TLP for Bar0. Core 0. */ uint64_t c1_hpint : 1; /**< Hot-Plug Interrupt. Pcie Core 1 (hp_int). This interrupt will only be generated when PCIERC1_CFG034[DLLS_C] is set. Hot plug is not supported. */ uint64_t c1_pmei : 1; /**< PME Interrupt. Pcie Core 1. (cfg_pme_int) */ uint64_t c1_wake : 1; /**< Wake up from Power Management Unit. Pcie Core 1. (wake_n) Octeon will never generate this interrupt. */ uint64_t crs1_dr : 1; /**< Had a CRS when Retries were disabled. */ uint64_t c1_se : 1; /**< System Error, RC Mode Only. Pcie Core 1. (cfg_sys_err_rc) */ uint64_t crs1_er : 1; /**< Had a CRS Timeout when Retries were enabled. */ uint64_t c1_aeri : 1; /**< Advanced Error Reporting Interrupt, RC Mode Only. Pcie Core 1. */ uint64_t c0_hpint : 1; /**< Hot-Plug Interrupt. Pcie Core 0 (hp_int). This interrupt will only be generated when PCIERC0_CFG034[DLLS_C] is set. Hot plug is not supported. */ uint64_t c0_pmei : 1; /**< PME Interrupt. Pcie Core 0. (cfg_pme_int) */ uint64_t c0_wake : 1; /**< Wake up from Power Management Unit. Pcie Core 0. (wake_n) Octeon will never generate this interrupt. */ uint64_t crs0_dr : 1; /**< Had a CRS when Retries were disabled. */ uint64_t c0_se : 1; /**< System Error, RC Mode Only. Pcie Core 0. (cfg_sys_err_rc) */ uint64_t crs0_er : 1; /**< Had a CRS Timeout when Retries were enabled. */ uint64_t c0_aeri : 1; /**< Advanced Error Reporting Interrupt, RC Mode Only. Pcie Core 0 (cfg_aer_rc_err_int). */ uint64_t ptime : 1; /**< Packet Timer has an interrupt. The rings responsible can be found in NPEI_PKT_TIME_INT. */ uint64_t pcnt : 1; /**< Packet Counter has an interrupt. The rings responsible can be found in NPEI_PKT_CNT_INT. */ uint64_t pidbof : 1; /**< Packet Instruction Doorbell count overflowed. The doorbell responsible can be found in NPEI_INT_INFO[PIDBOF] */ uint64_t psldbof : 1; /**< Packet Scatterlist Doorbell count overflowed. The doorbell responsible can be found in NPEI_INT_INFO[PSLDBOF] */ uint64_t dtime1 : 1; /**< Whenever NPEI_DMA_CNTS[DMA1] is not 0, the DMA_CNT1 timer increments every core clock. When DMA_CNT1 timer exceeds NPEI_DMA1_INT_LEVEL[TIME], this bit is set. Writing a '1' to this bit also clears the DMA_CNT1 timer. */ uint64_t dtime0 : 1; /**< Whenever NPEI_DMA_CNTS[DMA0] is not 0, the DMA_CNT0 timer increments every core clock. When DMA_CNT0 timer exceeds NPEI_DMA0_INT_LEVEL[TIME], this bit is set. Writing a '1' to this bit also clears the DMA_CNT0 timer. */ uint64_t dcnt1 : 1; /**< This bit indicates that NPEI_DMA_CNTS[DMA1] was/is greater than NPEI_DMA1_INT_LEVEL[CNT]. */ uint64_t dcnt0 : 1; /**< This bit indicates that NPEI_DMA_CNTS[DMA0] was/is greater than NPEI_DMA0_INT_LEVEL[CNT]. */ uint64_t dma1fi : 1; /**< DMA1 set Forced Interrupt. */ uint64_t dma0fi : 1; /**< DMA0 set Forced Interrupt. */ uint64_t dma4dbo : 1; /**< DMA4 doorbell overflow. Bit[32] of the doorbell count was set. */ uint64_t dma3dbo : 1; /**< DMA3 doorbell overflow. Bit[32] of the doorbell count was set. */ uint64_t dma2dbo : 1; /**< DMA2 doorbell overflow. Bit[32] of the doorbell count was set. */ uint64_t dma1dbo : 1; /**< DMA1 doorbell overflow. Bit[32] of the doorbell count was set.
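*/

/* Editor's sketch of the DCNT0/DTIME0 thresholds described above, not
   generated text. The NPEI_DMA0_INT_LEVEL register and its CNT/TIME
   fields are named in the comments here, but the typedef
   (cvmx_npei_dma0_int_level_t), its field names, and the macro
   CVMX_PEXP_NPEI_DMA0_INT_LEVEL are assumptions about the companion
   definitions:

       cvmx_npei_dma0_int_level_t lvl;
       lvl.u64 = 0;
       lvl.s.cnt = 32;        // DCNT0 fires once 32 DMA counts pile up
       lvl.s.time = 1000000;  // DTIME0 fires after ~1M core clocks
       cvmx_write_csr(CVMX_PEXP_NPEI_DMA0_INT_LEVEL, lvl.u64);
       // acknowledging DTIME0 (SUM[13]) also restarts the DMA_CNT0 timer
       cvmx_write_csr(CVMX_PEXP_NPEI_INT_SUM, 1ull << 13);

   End of sketch.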
*/ uint64_t dma0dbo : 1; /**< DMA0 doorbell overflow. Bit[32] of the doorbell count was set. */ uint64_t iob2big : 1; /**< A requested IOBDMA is too large. */ uint64_t bar0_to : 1; /**< BAR0 R/W to an NCB device did not receive read-data/commit in 0xffff core clocks. */ uint64_t rml_wto : 1; /**< RML write did not get commit in 0xffff core clocks. */ uint64_t rml_rto : 1; /**< RML read did not return data in 0xffff core clocks. */ #else uint64_t rml_rto : 1; uint64_t rml_wto : 1; uint64_t bar0_to : 1; uint64_t iob2big : 1; uint64_t dma0dbo : 1; uint64_t dma1dbo : 1; uint64_t dma2dbo : 1; uint64_t dma3dbo : 1; uint64_t dma4dbo : 1; uint64_t dma0fi : 1; uint64_t dma1fi : 1; uint64_t dcnt0 : 1; uint64_t dcnt1 : 1; uint64_t dtime0 : 1; uint64_t dtime1 : 1; uint64_t psldbof : 1; uint64_t pidbof : 1; uint64_t pcnt : 1; uint64_t ptime : 1; uint64_t c0_aeri : 1; uint64_t crs0_er : 1; uint64_t c0_se : 1; uint64_t crs0_dr : 1; uint64_t c0_wake : 1; uint64_t c0_pmei : 1; uint64_t c0_hpint : 1; uint64_t c1_aeri : 1; uint64_t crs1_er : 1; uint64_t c1_se : 1; uint64_t crs1_dr : 1; uint64_t c1_wake : 1; uint64_t c1_pmei : 1; uint64_t c1_hpint : 1; uint64_t c0_up_b0 : 1; uint64_t c0_up_b1 : 1; uint64_t c0_up_b2 : 1; uint64_t c0_up_wi : 1; uint64_t c0_up_bx : 1; uint64_t c0_un_b0 : 1; uint64_t c0_un_b1 : 1; uint64_t c0_un_b2 : 1; uint64_t c0_un_wi : 1; uint64_t c0_un_bx : 1; uint64_t c1_up_b0 : 1; uint64_t c1_up_b1 : 1; uint64_t c1_up_b2 : 1; uint64_t c1_up_wi : 1; uint64_t c1_up_bx : 1; uint64_t c1_un_b0 : 1; uint64_t c1_un_b1 : 1; uint64_t c1_un_b2 : 1; uint64_t c1_un_wi : 1; uint64_t c1_un_bx : 1; uint64_t c0_un_wf : 1; uint64_t c1_un_wf : 1; uint64_t c0_up_wf : 1; uint64_t c1_up_wf : 1; uint64_t c0_exc : 1; uint64_t c1_exc : 1; uint64_t c0_ldwn : 1; uint64_t c1_ldwn : 1; uint64_t int_a : 1; uint64_t reserved_62_62 : 1; uint64_t mio_inta : 1; #endif } s; struct cvmx_npei_int_sum_s cn52xx; struct cvmx_npei_int_sum_cn52xxp1 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t mio_inta : 1; /**< Interrupt from MIO. */ uint64_t reserved_62_62 : 1; uint64_t int_a : 1; /**< Set when a bit in the NPEI_INT_A_SUM register and the corresponding bit in the NPEI_INT_A_ENB register is set. */ uint64_t c1_ldwn : 1; /**< Reset request due to link1 down status. */ uint64_t c0_ldwn : 1; /**< Reset request due to link0 down status. */ uint64_t c1_exc : 1; /**< Set when the PESC1_DBG_INFO register has a bit set and its corresponding PESC1_DBG_INFO_EN bit is set. */ uint64_t c0_exc : 1; /**< Set when the PESC0_DBG_INFO register has a bit set and its corresponding PESC0_DBG_INFO_EN bit is set. */ uint64_t c1_up_wf : 1; /**< Received Unsupported P-TLP for filtered window register. Core1. */ uint64_t c0_up_wf : 1; /**< Received Unsupported P-TLP for filtered window register. Core0. */ uint64_t c1_un_wf : 1; /**< Received Unsupported N-TLP for filtered window register. Core1. */ uint64_t c0_un_wf : 1; /**< Received Unsupported N-TLP for filtered window register. Core0. */ uint64_t c1_un_bx : 1; /**< Received Unsupported N-TLP for unknown Bar. Core 1. */ uint64_t c1_un_wi : 1; /**< Received Unsupported N-TLP for Window Register. Core 1. */ uint64_t c1_un_b2 : 1; /**< Received Unsupported N-TLP for Bar2. Core 1. */ uint64_t c1_un_b1 : 1; /**< Received Unsupported N-TLP for Bar1. Core 1. */ uint64_t c1_un_b0 : 1; /**< Received Unsupported N-TLP for Bar0. Core 1. */ uint64_t c1_up_bx : 1; /**< Received Unsupported P-TLP for unknown Bar. Core 1. */ uint64_t c1_up_wi : 1; /**< Received Unsupported P-TLP for Window Register. Core 1.
*/ uint64_t c1_up_b2 : 1; /**< Received Unsupported P-TLP for Bar2. Core 1. */ uint64_t c1_up_b1 : 1; /**< Received Unsupported P-TLP for Bar1. Core 1. */ uint64_t c1_up_b0 : 1; /**< Received Unsupported P-TLP for Bar0. Core 1. */ uint64_t c0_un_bx : 1; /**< Received Unsupported N-TLP for unknown Bar. Core 0. */ uint64_t c0_un_wi : 1; /**< Received Unsupported N-TLP for Window Register. Core 0. */ uint64_t c0_un_b2 : 1; /**< Received Unsupported N-TLP for Bar2. Core 0. */ uint64_t c0_un_b1 : 1; /**< Received Unsupported N-TLP for Bar1. Core 0. */ uint64_t c0_un_b0 : 1; /**< Received Unsupported N-TLP for Bar0. Core 0. */ uint64_t c0_up_bx : 1; /**< Received Unsupported P-TLP for unknown Bar. Core 0. */ uint64_t c0_up_wi : 1; /**< Received Unsupported P-TLP for Window Register. Core 0. */ uint64_t c0_up_b2 : 1; /**< Received Unsupported P-TLP for Bar2. Core 0. */ uint64_t c0_up_b1 : 1; /**< Received Unsupported P-TLP for Bar1. Core 0. */ uint64_t c0_up_b0 : 1; /**< Received Unsupported P-TLP for Bar0. Core 0. */ uint64_t c1_hpint : 1; /**< Hot-Plug Interrupt. Pcie Core 1 (hp_int). This interrupt will only be generated when PCIERC1_CFG034[DLLS_C] is set. Hot plug is not supported. */ uint64_t c1_pmei : 1; /**< PME Interrupt. Pcie Core 1. (cfg_pme_int) */ uint64_t c1_wake : 1; /**< Wake up from Power Management Unit. Pcie Core 1. (wake_n) Octeon will never generate this interrupt. */ uint64_t crs1_dr : 1; /**< Had a CRS when Retries were disabled. */ uint64_t c1_se : 1; /**< System Error, RC Mode Only. Pcie Core 1. (cfg_sys_err_rc) */ uint64_t crs1_er : 1; /**< Had a CRS Timeout when Retries were enabled. */ uint64_t c1_aeri : 1; /**< Advanced Error Reporting Interrupt, RC Mode Only. Pcie Core 1. */ uint64_t c0_hpint : 1; /**< Hot-Plug Interrupt. Pcie Core 0 (hp_int). This interrupt will only be generated when PCIERC0_CFG034[DLLS_C] is set. Hot plug is not supported. */ uint64_t c0_pmei : 1; /**< PME Interrupt. Pcie Core 0. (cfg_pme_int) */ uint64_t c0_wake : 1; /**< Wake up from Power Management Unit. Pcie Core 0. (wake_n) Octeon will never generate this interrupt. */ uint64_t crs0_dr : 1; /**< Had a CRS when Retries were disabled. */ uint64_t c0_se : 1; /**< System Error, RC Mode Only. Pcie Core 0. (cfg_sys_err_rc) */ uint64_t crs0_er : 1; /**< Had a CRS Timeout when Retries were enabled. */ uint64_t c0_aeri : 1; /**< Advanced Error Reporting Interrupt, RC Mode Only. Pcie Core 0 (cfg_aer_rc_err_int). */ uint64_t reserved_15_18 : 4; uint64_t dtime1 : 1; /**< Whenever NPEI_DMA_CNTS[DMA1] is not 0, the DMA_CNT1 timer increments every core clock. When DMA_CNT1 timer exceeds NPEI_DMA1_INT_LEVEL[TIME], this bit is set. Writing a '1' to this bit also clears the DMA_CNT1 timer. */ uint64_t dtime0 : 1; /**< Whenever NPEI_DMA_CNTS[DMA0] is not 0, the DMA_CNT0 timer increments every core clock. When DMA_CNT0 timer exceeds NPEI_DMA0_INT_LEVEL[TIME], this bit is set. Writing a '1' to this bit also clears the DMA_CNT0 timer. */ uint64_t dcnt1 : 1; /**< This bit indicates that NPEI_DMA_CNTS[DMA1] was/is greater than NPEI_DMA1_INT_LEVEL[CNT]. */ uint64_t dcnt0 : 1; /**< This bit indicates that NPEI_DMA_CNTS[DMA0] was/is greater than NPEI_DMA0_INT_LEVEL[CNT]. */ uint64_t dma1fi : 1; /**< DMA1 set Forced Interrupt. */ uint64_t dma0fi : 1; /**< DMA0 set Forced Interrupt. */ uint64_t reserved_8_8 : 1; uint64_t dma3dbo : 1; /**< DMA3 doorbell count overflow. Bit[32] of the doorbell count was set. */ uint64_t dma2dbo : 1; /**< DMA2 doorbell count overflow. Bit[32] of the doorbell count was set.
*/ uint64_t dma1dbo : 1; /**< DMA1 doorbell count overflow. Bit[32] of the doorbell count was set. */ uint64_t dma0dbo : 1; /**< DMA0 doorbell count overflow. Bit[32] of the doorbell count was set. */ uint64_t iob2big : 1; /**< A requested IOBDMA is too large. */ uint64_t bar0_to : 1; /**< BAR0 R/W to an NCB device did not receive read-data/commit in 0xffff core clocks. */ uint64_t rml_wto : 1; /**< RML write did not get commit in 0xffff core clocks. */ uint64_t rml_rto : 1; /**< RML read did not return data in 0xffff core clocks. */ #else uint64_t rml_rto : 1; uint64_t rml_wto : 1; uint64_t bar0_to : 1; uint64_t iob2big : 1; uint64_t dma0dbo : 1; uint64_t dma1dbo : 1; uint64_t dma2dbo : 1; uint64_t dma3dbo : 1; uint64_t reserved_8_8 : 1; uint64_t dma0fi : 1; uint64_t dma1fi : 1; uint64_t dcnt0 : 1; uint64_t dcnt1 : 1; uint64_t dtime0 : 1; uint64_t dtime1 : 1; uint64_t reserved_15_18 : 4; uint64_t c0_aeri : 1; uint64_t crs0_er : 1; uint64_t c0_se : 1; uint64_t crs0_dr : 1; uint64_t c0_wake : 1; uint64_t c0_pmei : 1; uint64_t c0_hpint : 1; uint64_t c1_aeri : 1; uint64_t crs1_er : 1; uint64_t c1_se : 1; uint64_t crs1_dr : 1; uint64_t c1_wake : 1; uint64_t c1_pmei : 1; uint64_t c1_hpint : 1; uint64_t c0_up_b0 : 1; uint64_t c0_up_b1 : 1; uint64_t c0_up_b2 : 1; uint64_t c0_up_wi : 1; uint64_t c0_up_bx : 1; uint64_t c0_un_b0 : 1; uint64_t c0_un_b1 : 1; uint64_t c0_un_b2 : 1; uint64_t c0_un_wi : 1; uint64_t c0_un_bx : 1; uint64_t c1_up_b0 : 1; uint64_t c1_up_b1 : 1; uint64_t c1_up_b2 : 1; uint64_t c1_up_wi : 1; uint64_t c1_up_bx : 1; uint64_t c1_un_b0 : 1; uint64_t c1_un_b1 : 1; uint64_t c1_un_b2 : 1; uint64_t c1_un_wi : 1; uint64_t c1_un_bx : 1; uint64_t c0_un_wf : 1; uint64_t c1_un_wf : 1; uint64_t c0_up_wf : 1; uint64_t c1_up_wf : 1; uint64_t c0_exc : 1; uint64_t c1_exc : 1; uint64_t c0_ldwn : 1; uint64_t c1_ldwn : 1; uint64_t int_a : 1; uint64_t reserved_62_62 : 1; uint64_t mio_inta : 1; #endif } cn52xxp1; struct cvmx_npei_int_sum_s cn56xx; struct cvmx_npei_int_sum_cn56xxp1 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t mio_inta : 1; /**< Interrupt from MIO. */ uint64_t reserved_61_62 : 2; uint64_t c1_ldwn : 1; /**< Reset request due to link1 down status. */ uint64_t c0_ldwn : 1; /**< Reset request due to link0 down status. */ uint64_t c1_exc : 1; /**< Set when the PESC1_DBG_INFO register has a bit set and its corresponding PESC1_DBG_INFO_EN bit is set. */ uint64_t c0_exc : 1; /**< Set when the PESC0_DBG_INFO register has a bit set and its corresponding PESC0_DBG_INFO_EN bit is set. */ uint64_t c1_up_wf : 1; /**< Received Unsupported P-TLP for filtered window register. Core1. */ uint64_t c0_up_wf : 1; /**< Received Unsupported P-TLP for filtered window register. Core0. */ uint64_t c1_un_wf : 1; /**< Received Unsupported N-TLP for filtered window register. Core1. */ uint64_t c0_un_wf : 1; /**< Received Unsupported N-TLP for filtered window register. Core0. */ uint64_t c1_un_bx : 1; /**< Received Unsupported N-TLP for unknown Bar. Core 1. */ uint64_t c1_un_wi : 1; /**< Received Unsupported N-TLP for Window Register. Core 1. */ uint64_t c1_un_b2 : 1; /**< Received Unsupported N-TLP for Bar2. Core 1. */ uint64_t c1_un_b1 : 1; /**< Received Unsupported N-TLP for Bar1. Core 1. */ uint64_t c1_un_b0 : 1; /**< Received Unsupported N-TLP for Bar0. Core 1. */ uint64_t c1_up_bx : 1; /**< Received Unsupported P-TLP for unknown Bar. Core 1. */ uint64_t c1_up_wi : 1; /**< Received Unsupported P-TLP for Window Register. Core 1. */ uint64_t c1_up_b2 : 1; /**< Received Unsupported P-TLP for Bar2. Core 1.
*/ uint64_t c1_up_b1 : 1; /**< Received Unsupported P-TLP for Bar1. Core 1. */ uint64_t c1_up_b0 : 1; /**< Received Unsupported P-TLP for Bar0. Core 1. */ uint64_t c0_un_bx : 1; /**< Received Unsupported N-TLP for unknown Bar. Core 0. */ uint64_t c0_un_wi : 1; /**< Received Unsupported N-TLP for Window Register. Core 0. */ uint64_t c0_un_b2 : 1; /**< Received Unsupported N-TLP for Bar2. Core 0. */ uint64_t c0_un_b1 : 1; /**< Received Unsupported N-TLP for Bar1. Core 0. */ uint64_t c0_un_b0 : 1; /**< Received Unsupported N-TLP for Bar0. Core 0. */ uint64_t c0_up_bx : 1; /**< Received Unsupported P-TLP for unknown Bar. Core 0. */ uint64_t c0_up_wi : 1; /**< Received Unsupported P-TLP for Window Register. Core 0. */ uint64_t c0_up_b2 : 1; /**< Received Unsupported P-TLP for Bar2. Core 0. */ uint64_t c0_up_b1 : 1; /**< Received Unsupported P-TLP for Bar1. Core 0. */ uint64_t c0_up_b0 : 1; /**< Received Unsupported P-TLP for Bar0. Core 0. */ uint64_t c1_hpint : 1; /**< Hot-Plug Interrupt. Pcie Core 1 (hp_int). This interrupt will only be generated when PCIERC1_CFG034[DLLS_C] is set. Hot plug is not supported. */ uint64_t c1_pmei : 1; /**< PME Interrupt. Pcie Core 1. (cfg_pme_int) */ uint64_t c1_wake : 1; /**< Wake up from Power Management Unit. Pcie Core 1. (wake_n) Octeon will never generate this interrupt. */ uint64_t reserved_29_29 : 1; uint64_t c1_se : 1; /**< System Error, RC Mode Only. Pcie Core 1. (cfg_sys_err_rc) */ uint64_t reserved_27_27 : 1; uint64_t c1_aeri : 1; /**< Advanced Error Reporting Interrupt, RC Mode Only. Pcie Core 1. */ uint64_t c0_hpint : 1; /**< Hot-Plug Interrupt. Pcie Core 0 (hp_int). This interrupt will only be generated when PCIERC0_CFG034[DLLS_C] is set. Hot plug is not supported. */ uint64_t c0_pmei : 1; /**< PME Interrupt. Pcie Core 0. (cfg_pme_int) */ uint64_t c0_wake : 1; /**< Wake up from Power Management Unit. Pcie Core 0. (wake_n) Octeon will never generate this interrupt. */ uint64_t reserved_22_22 : 1; uint64_t c0_se : 1; /**< System Error, RC Mode Only. Pcie Core 0. (cfg_sys_err_rc) */ uint64_t reserved_20_20 : 1; uint64_t c0_aeri : 1; /**< Advanced Error Reporting Interrupt, RC Mode Only. Pcie Core 0 (cfg_aer_rc_err_int). */ uint64_t reserved_15_18 : 4; uint64_t dtime1 : 1; /**< Whenever NPEI_DMA_CNTS[DMA1] is not 0, the DMA_CNT1 timer increments every core clock. When DMA_CNT1 timer exceeds NPEI_DMA1_INT_LEVEL[TIME], this bit is set. Writing a '1' to this bit also clears the DMA_CNT1 timer. */ uint64_t dtime0 : 1; /**< Whenever NPEI_DMA_CNTS[DMA0] is not 0, the DMA_CNT0 timer increments every core clock. When DMA_CNT0 timer exceeds NPEI_DMA0_INT_LEVEL[TIME], this bit is set. Writing a '1' to this bit also clears the DMA_CNT0 timer. */ uint64_t dcnt1 : 1; /**< This bit indicates that NPEI_DMA_CNTS[DMA1] was/is greater than NPEI_DMA1_INT_LEVEL[CNT]. */ uint64_t dcnt0 : 1; /**< This bit indicates that NPEI_DMA_CNTS[DMA0] was/is greater than NPEI_DMA0_INT_LEVEL[CNT]. */ uint64_t dma1fi : 1; /**< DMA1 set Forced Interrupt. */ uint64_t dma0fi : 1; /**< DMA0 set Forced Interrupt. */ uint64_t dma4dbo : 1; /**< DMA4 doorbell overflow. Bit[32] of the doorbell count was set. */ uint64_t dma3dbo : 1; /**< DMA3 doorbell overflow. Bit[32] of the doorbell count was set. */ uint64_t dma2dbo : 1; /**< DMA2 doorbell overflow. Bit[32] of the doorbell count was set. */ uint64_t dma1dbo : 1; /**< DMA1 doorbell overflow. Bit[32] of the doorbell count was set. */ uint64_t dma0dbo : 1; /**< DMA0 doorbell overflow. Bit[32] of the doorbell count was set.
*/ uint64_t iob2big : 1; /**< A requested IOBDMA is to large. */ uint64_t bar0_to : 1; /**< BAR0 R/W to a NCB device did not receive read-data/commit in 0xffff core clocks. */ uint64_t rml_wto : 1; /**< RML write did not get commit in 0xffff core clocks. */ uint64_t rml_rto : 1; /**< RML read did not return data in 0xffff core clocks. */ #else uint64_t rml_rto : 1; uint64_t rml_wto : 1; uint64_t bar0_to : 1; uint64_t iob2big : 1; uint64_t dma0dbo : 1; uint64_t dma1dbo : 1; uint64_t dma2dbo : 1; uint64_t dma3dbo : 1; uint64_t dma4dbo : 1; uint64_t dma0fi : 1; uint64_t dma1fi : 1; uint64_t dcnt0 : 1; uint64_t dcnt1 : 1; uint64_t dtime0 : 1; uint64_t dtime1 : 1; uint64_t reserved_15_18 : 4; uint64_t c0_aeri : 1; uint64_t reserved_20_20 : 1; uint64_t c0_se : 1; uint64_t reserved_22_22 : 1; uint64_t c0_wake : 1; uint64_t c0_pmei : 1; uint64_t c0_hpint : 1; uint64_t c1_aeri : 1; uint64_t reserved_27_27 : 1; uint64_t c1_se : 1; uint64_t reserved_29_29 : 1; uint64_t c1_wake : 1; uint64_t c1_pmei : 1; uint64_t c1_hpint : 1; uint64_t c0_up_b0 : 1; uint64_t c0_up_b1 : 1; uint64_t c0_up_b2 : 1; uint64_t c0_up_wi : 1; uint64_t c0_up_bx : 1; uint64_t c0_un_b0 : 1; uint64_t c0_un_b1 : 1; uint64_t c0_un_b2 : 1; uint64_t c0_un_wi : 1; uint64_t c0_un_bx : 1; uint64_t c1_up_b0 : 1; uint64_t c1_up_b1 : 1; uint64_t c1_up_b2 : 1; uint64_t c1_up_wi : 1; uint64_t c1_up_bx : 1; uint64_t c1_un_b0 : 1; uint64_t c1_un_b1 : 1; uint64_t c1_un_b2 : 1; uint64_t c1_un_wi : 1; uint64_t c1_un_bx : 1; uint64_t c0_un_wf : 1; uint64_t c1_un_wf : 1; uint64_t c0_up_wf : 1; uint64_t c1_up_wf : 1; uint64_t c0_exc : 1; uint64_t c1_exc : 1; uint64_t c0_ldwn : 1; uint64_t c1_ldwn : 1; uint64_t reserved_61_62 : 2; uint64_t mio_inta : 1; #endif } cn56xxp1; } cvmx_npei_int_sum_t; /** * cvmx_npei_int_sum2 * * NPEI_INTERRUPT_SUM2 = NPI Interrupt Summary2 Register * * This is a read only copy of the NPEI_INTERRUPT_SUM register with bit variances. */ typedef union { uint64_t u64; struct cvmx_npei_int_sum2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t mio_inta : 1; /**< Equal to the cooresponding bit if the NPEI_INT_SUM register. */ uint64_t reserved_62_62 : 1; uint64_t int_a : 1; /**< Set when a bit in the NPEI_INT_A_SUM register and the cooresponding bit in the NPEI_INT_A_ENB2 register is set. */ uint64_t c1_ldwn : 1; /**< Equal to the cooresponding bit if the NPEI_INT_SUM register. */ uint64_t c0_ldwn : 1; /**< Equal to the cooresponding bit if the NPEI_INT_SUM register. */ uint64_t c1_exc : 1; /**< Equal to the cooresponding bit if the NPEI_INT_SUM register. */ uint64_t c0_exc : 1; /**< Equal to the cooresponding bit if the NPEI_INT_SUM register. */ uint64_t c1_up_wf : 1; /**< Equal to the cooresponding bit if the NPEI_INT_SUM register. */ uint64_t c0_up_wf : 1; /**< Equal to the cooresponding bit if the NPEI_INT_SUM register. */ uint64_t c1_un_wf : 1; /**< Equal to the cooresponding bit if the NPEI_INT_SUM register. */ uint64_t c0_un_wf : 1; /**< Equal to the cooresponding bit if the NPEI_INT_SUM register. */ uint64_t c1_un_bx : 1; /**< Equal to the cooresponding bit if the NPEI_INT_SUM register. */ uint64_t c1_un_wi : 1; /**< Equal to the cooresponding bit if the NPEI_INT_SUM register. */ uint64_t c1_un_b2 : 1; /**< Equal to the cooresponding bit if the NPEI_INT_SUM register. */ uint64_t c1_un_b1 : 1; /**< Equal to the cooresponding bit if the NPEI_INT_SUM register. */ uint64_t c1_un_b0 : 1; /**< Equal to the cooresponding bit if the NPEI_INT_SUM register. 
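/* Usage sketch (editor's illustration, not part of the auto-generated
 * register definitions): a raw 64-bit CSR value is decoded by loading it
 * into the union's u64 member and testing the named bit-fields. The raw
 * value itself would come from the SDK's usual CSR accessors, which are
 * defined elsewhere; only the typedef above is assumed here. */
static inline int cvmx_npei_int_sum_has_rml_timeout(uint64_t raw)
{
    cvmx_npei_int_sum_t int_sum;
    int_sum.u64 = raw;   /* overlay the raw CSR value on the bit-fields */
    /* RML read/write timeouts occupy the same low-order bits on all passes */
    return int_sum.s.rml_rto || int_sum.s.rml_wto;
}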
/** * cvmx_npei_int_sum2 * * NPEI_INTERRUPT_SUM2 = NPI Interrupt Summary2 Register * * This is a read only copy of the NPEI_INTERRUPT_SUM register with bit variances. */ typedef union { uint64_t u64; struct cvmx_npei_int_sum2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t mio_inta : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t reserved_62_62 : 1; uint64_t int_a : 1; /**< Set when a bit in the NPEI_INT_A_SUM register and the corresponding bit in the NPEI_INT_A_ENB2 register is set. */ uint64_t c1_ldwn : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c0_ldwn : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c1_exc : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c0_exc : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c1_up_wf : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c0_up_wf : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c1_un_wf : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c0_un_wf : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c1_un_bx : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c1_un_wi : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c1_un_b2 : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c1_un_b1 : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c1_un_b0 : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c1_up_bx : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c1_up_wi : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c1_up_b2 : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c1_up_b1 : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c1_up_b0 : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c0_un_bx : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c0_un_wi : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c0_un_b2 : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c0_un_b1 : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c0_un_b0 : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c0_up_bx : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c0_up_wi : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c0_up_b2 : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c0_up_b1 : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c0_up_b0 : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c1_hpint : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c1_pmei : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c1_wake : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t crs1_dr : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c1_se : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t crs1_er : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c1_aeri : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c0_hpint : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c0_pmei : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c0_wake : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t crs0_dr : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c0_se : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t crs0_er : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t c0_aeri : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t reserved_15_18 : 4; uint64_t dtime1 : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t dtime0 : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t dcnt1 : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t dcnt0 : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t dma1fi : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t dma0fi : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t reserved_8_8 : 1; uint64_t dma3dbo : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t dma2dbo : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t dma1dbo : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t dma0dbo : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t iob2big : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t bar0_to : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t rml_wto : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ uint64_t rml_rto : 1; /**< Equal to the corresponding bit of the NPEI_INT_SUM register. */ #else uint64_t rml_rto : 1; uint64_t rml_wto : 1; uint64_t bar0_to : 1; uint64_t iob2big : 1; uint64_t dma0dbo : 1; uint64_t dma1dbo : 1; uint64_t dma2dbo : 1; uint64_t dma3dbo : 1; uint64_t reserved_8_8 : 1; uint64_t dma0fi : 1; uint64_t dma1fi : 1; uint64_t dcnt0 : 1; uint64_t dcnt1 : 1; uint64_t dtime0 : 1; uint64_t dtime1 : 1; uint64_t reserved_15_18 : 4; uint64_t c0_aeri : 1; uint64_t crs0_er : 1; uint64_t c0_se : 1; uint64_t crs0_dr : 1; uint64_t c0_wake : 1; uint64_t c0_pmei : 1; uint64_t c0_hpint : 1; uint64_t c1_aeri : 1; uint64_t crs1_er : 1; uint64_t c1_se : 1; uint64_t crs1_dr : 1; uint64_t c1_wake : 1; uint64_t c1_pmei : 1; uint64_t c1_hpint : 1; uint64_t c0_up_b0 : 1; uint64_t c0_up_b1 : 1; uint64_t c0_up_b2 : 1; uint64_t c0_up_wi : 1; uint64_t c0_up_bx : 1; uint64_t c0_un_b0 : 1; uint64_t c0_un_b1 : 1; uint64_t c0_un_b2 : 1; uint64_t c0_un_wi : 1; uint64_t c0_un_bx : 1; uint64_t c1_up_b0 : 1; uint64_t c1_up_b1 : 1; uint64_t c1_up_b2 : 1; uint64_t c1_up_wi : 1; uint64_t c1_up_bx : 1; uint64_t c1_un_b0 : 1; uint64_t c1_un_b1 : 1; uint64_t c1_un_b2 : 1; uint64_t c1_un_wi : 1; uint64_t c1_un_bx : 1; uint64_t c0_un_wf : 1; uint64_t c1_un_wf : 1; uint64_t c0_up_wf : 1; uint64_t c1_up_wf : 1; uint64_t c0_exc : 1; uint64_t c1_exc : 1; uint64_t c0_ldwn : 1; uint64_t c1_ldwn : 1; uint64_t int_a : 1; uint64_t reserved_62_62 : 1; uint64_t mio_inta : 1; #endif } s; struct cvmx_npei_int_sum2_s cn52xx; struct cvmx_npei_int_sum2_s cn52xxp1; struct cvmx_npei_int_sum2_s cn56xx; } cvmx_npei_int_sum2_t;
/** * cvmx_npei_last_win_rdata0 * * NPEI_LAST_WIN_RDATA0 = NPEI Last Window Read Data Port0 * * The data from the last initiated window read. */ typedef union { uint64_t u64; struct cvmx_npei_last_win_rdata0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t data : 64; /**< Last window read data. */ #else uint64_t data : 64; #endif } s; struct cvmx_npei_last_win_rdata0_s cn52xx; struct cvmx_npei_last_win_rdata0_s cn52xxp1; struct cvmx_npei_last_win_rdata0_s cn56xx; struct cvmx_npei_last_win_rdata0_s cn56xxp1; } cvmx_npei_last_win_rdata0_t;
/** * cvmx_npei_last_win_rdata1 * * NPEI_LAST_WIN_RDATA1 = NPEI Last Window Read Data Port1 * * The data from the last initiated window read. */ typedef union { uint64_t u64; struct cvmx_npei_last_win_rdata1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t data : 64; /**< Last window read data. */ #else uint64_t data : 64; #endif } s; struct cvmx_npei_last_win_rdata1_s cn52xx; struct cvmx_npei_last_win_rdata1_s cn52xxp1; struct cvmx_npei_last_win_rdata1_s cn56xx; struct cvmx_npei_last_win_rdata1_s cn56xxp1; } cvmx_npei_last_win_rdata1_t;
/** * cvmx_npei_mem_access_ctl * * NPEI_MEM_ACCESS_CTL = NPEI's Memory Access Control * * Contains control for access to the PCIe address space. */ typedef union { uint64_t u64; struct cvmx_npei_mem_access_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_14_63 : 50; uint64_t max_word : 4; /**< The maximum number of words to merge into a single write operation from the PPs to the PCIe. Legal values are 1 to 16, where a '0' is treated as 16. */ uint64_t timer : 10; /**< When the NPEI starts a PP to PCIe write it waits no longer than the value of TIMER in eclks to merge additional writes from the PPs into 1 large write. The values for this field are 1 to 1024, where a value of '0' is treated as 1024. */ #else uint64_t timer : 10; uint64_t max_word : 4; uint64_t reserved_14_63 : 50; #endif } s; struct cvmx_npei_mem_access_ctl_s cn52xx; struct cvmx_npei_mem_access_ctl_s cn52xxp1; struct cvmx_npei_mem_access_ctl_s cn56xx; struct cvmx_npei_mem_access_ctl_s cn56xxp1; } cvmx_npei_mem_access_ctl_t;
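/* Usage sketch (editor's illustration, not part of the auto-generated
 * definitions): composing a NPEI_MEM_ACCESS_CTL value with the union.
 * Per the field notes above, a MAX_WORD of 0 is treated as 16 words and
 * a TIMER of 0 is treated as 1024 eclks. */
static inline uint64_t cvmx_npei_mem_access_ctl_value(unsigned max_word, unsigned timer)
{
    cvmx_npei_mem_access_ctl_t ctl;
    ctl.u64 = 0;
    ctl.s.max_word = max_word & 0xf;  /* words merged per write; 0 => 16 */
    ctl.s.timer = timer & 0x3ff;      /* merge wait in eclks; 0 => 1024 */
    return ctl.u64;
}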
/** * cvmx_npei_mem_access_subid# * * NPEI_MEM_ACCESS_SUBIDX = NPEI Memory Access SubidX Register * * Contains address index and control bits for access to memory from Core PPs. */ typedef union { uint64_t u64; struct cvmx_npei_mem_access_subidx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_42_63 : 22; uint64_t zero : 1; /**< Causes all byte reads to be zero length reads. Returns to the EXEC a zero for all read data. */ uint64_t port : 2; /**< Port the request is sent to. */ uint64_t nmerge : 1; /**< No merging is allowed in this window. */ uint64_t esr : 2; /**< Endian-swap for Reads. */ uint64_t esw : 2; /**< Endian-swap for Writes. */ uint64_t nsr : 1; /**< No Snoop for Reads. */ uint64_t nsw : 1; /**< No Snoop for Writes. */ uint64_t ror : 1; /**< Relaxed Ordering for Reads. */ uint64_t row : 1; /**< Relaxed Ordering for Writes. */ uint64_t ba : 30; /**< PCIe Address Bits <63:34>. */ #else uint64_t ba : 30; uint64_t row : 1; uint64_t ror : 1; uint64_t nsw : 1; uint64_t nsr : 1; uint64_t esw : 2; uint64_t esr : 2; uint64_t nmerge : 1; uint64_t port : 2; uint64_t zero : 1; uint64_t reserved_42_63 : 22; #endif } s; struct cvmx_npei_mem_access_subidx_s cn52xx; struct cvmx_npei_mem_access_subidx_s cn52xxp1; struct cvmx_npei_mem_access_subidx_s cn56xx; struct cvmx_npei_mem_access_subidx_s cn56xxp1; } cvmx_npei_mem_access_subidx_t;
/** * cvmx_npei_msi_enb0 * * NPEI_MSI_ENB0 = NPEI MSI Enable0 * * Used to enable the interrupt generation for the bits in the NPEI_MSI_RCV0. */ typedef union { uint64_t u64; struct cvmx_npei_msi_enb0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t enb : 64; /**< Enables bit [63:0] of NPEI_MSI_RCV0. */ #else uint64_t enb : 64; #endif } s; struct cvmx_npei_msi_enb0_s cn52xx; struct cvmx_npei_msi_enb0_s cn52xxp1; struct cvmx_npei_msi_enb0_s cn56xx; struct cvmx_npei_msi_enb0_s cn56xxp1; } cvmx_npei_msi_enb0_t;
/** * cvmx_npei_msi_enb1 * * NPEI_MSI_ENB1 = NPEI MSI Enable1 * * Used to enable the interrupt generation for the bits in the NPEI_MSI_RCV1. */ typedef union { uint64_t u64; struct cvmx_npei_msi_enb1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t enb : 64; /**< Enables bit [63:0] of NPEI_MSI_RCV1. */ #else uint64_t enb : 64; #endif } s; struct cvmx_npei_msi_enb1_s cn52xx; struct cvmx_npei_msi_enb1_s cn52xxp1; struct cvmx_npei_msi_enb1_s cn56xx; struct cvmx_npei_msi_enb1_s cn56xxp1; } cvmx_npei_msi_enb1_t;
/** * cvmx_npei_msi_enb2 * * NPEI_MSI_ENB2 = NPEI MSI Enable2 * * Used to enable the interrupt generation for the bits in the NPEI_MSI_RCV2. */ typedef union { uint64_t u64; struct cvmx_npei_msi_enb2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t enb : 64; /**< Enables bit [63:0] of NPEI_MSI_RCV2. */ #else uint64_t enb : 64; #endif } s; struct cvmx_npei_msi_enb2_s cn52xx; struct cvmx_npei_msi_enb2_s cn52xxp1; struct cvmx_npei_msi_enb2_s cn56xx; struct cvmx_npei_msi_enb2_s cn56xxp1; } cvmx_npei_msi_enb2_t;
/** * cvmx_npei_msi_enb3 * * NPEI_MSI_ENB3 = NPEI MSI Enable3 * * Used to enable the interrupt generation for the bits in the NPEI_MSI_RCV3. */ typedef union { uint64_t u64; struct cvmx_npei_msi_enb3_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t enb : 64; /**< Enables bit [63:0] of NPEI_MSI_RCV3. */ #else uint64_t enb : 64; #endif } s; struct cvmx_npei_msi_enb3_s cn52xx; struct cvmx_npei_msi_enb3_s cn52xxp1; struct cvmx_npei_msi_enb3_s cn56xx; struct cvmx_npei_msi_enb3_s cn56xxp1; } cvmx_npei_msi_enb3_t;
/** * cvmx_npei_msi_rcv0 * * NPEI_MSI_RCV0 = NPEI MSI Receive0 * * Contains bits [63:0] of the 256 bits of MSI interrupts. */ typedef union { uint64_t u64; struct cvmx_npei_msi_rcv0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t intr : 64; /**< Bits 63-0 of the 256 bits of MSI interrupt. */ #else uint64_t intr : 64; #endif } s; struct cvmx_npei_msi_rcv0_s cn52xx; struct cvmx_npei_msi_rcv0_s cn52xxp1; struct cvmx_npei_msi_rcv0_s cn56xx; struct cvmx_npei_msi_rcv0_s cn56xxp1; } cvmx_npei_msi_rcv0_t;
/** * cvmx_npei_msi_rcv1 * * NPEI_MSI_RCV1 = NPEI MSI Receive1 * * Contains bits [127:64] of the 256 bits of MSI interrupts. */ typedef union { uint64_t u64; struct cvmx_npei_msi_rcv1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t intr : 64; /**< Bits 127-64 of the 256 bits of MSI interrupt. */ #else uint64_t intr : 64; #endif } s; struct cvmx_npei_msi_rcv1_s cn52xx; struct cvmx_npei_msi_rcv1_s cn52xxp1; struct cvmx_npei_msi_rcv1_s cn56xx; struct cvmx_npei_msi_rcv1_s cn56xxp1; } cvmx_npei_msi_rcv1_t;
/** * cvmx_npei_msi_rcv2 * * NPEI_MSI_RCV2 = NPEI MSI Receive2 * * Contains bits [191:128] of the 256 bits of MSI interrupts. */ typedef union { uint64_t u64; struct cvmx_npei_msi_rcv2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t intr : 64; /**< Bits 191-128 of the 256 bits of MSI interrupt. */ #else uint64_t intr : 64; #endif } s; struct cvmx_npei_msi_rcv2_s cn52xx; struct cvmx_npei_msi_rcv2_s cn52xxp1; struct cvmx_npei_msi_rcv2_s cn56xx; struct cvmx_npei_msi_rcv2_s cn56xxp1; } cvmx_npei_msi_rcv2_t;
/** * cvmx_npei_msi_rcv3 * * NPEI_MSI_RCV3 = NPEI MSI Receive3 * * Contains bits [255:192] of the 256 bits of MSI interrupts. */ typedef union { uint64_t u64; struct cvmx_npei_msi_rcv3_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t intr : 64; /**< Bits 255-192 of the 256 bits of MSI interrupt. */ #else uint64_t intr : 64; #endif } s; struct cvmx_npei_msi_rcv3_s cn52xx; struct cvmx_npei_msi_rcv3_s cn52xxp1; struct cvmx_npei_msi_rcv3_s cn56xx; struct cvmx_npei_msi_rcv3_s cn56xxp1; } cvmx_npei_msi_rcv3_t;
/** * cvmx_npei_msi_rd_map * * NPEI_MSI_RD_MAP = NPEI MSI Read MAP * * Used to read the mapping function of the NPEI_PCIE_MSI_RCV to NPEI_MSI_RCV registers. */ typedef union { uint64_t u64; struct cvmx_npei_msi_rd_map_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t rd_int : 8; /**< The value of the map at the location PREVIOUSLY written to the MSI_INT field of this register. */ uint64_t msi_int : 8; /**< Selects the value that would be received when the NPEI_PCIE_MSI_RCV register is written. */ #else uint64_t msi_int : 8; uint64_t rd_int : 8; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_npei_msi_rd_map_s cn52xx; struct cvmx_npei_msi_rd_map_s cn52xxp1; struct cvmx_npei_msi_rd_map_s cn56xx; struct cvmx_npei_msi_rd_map_s cn56xxp1; } cvmx_npei_msi_rd_map_t;
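/* Usage sketch (editor's illustration; the register fields are from this
 * file, but the write-then-read probe sequence is an assumption about how
 * MSI_INT/RD_INT are meant to be used): to inspect one map entry, write a
 * value with MSI_INT set to the entry of interest, then re-read the CSR
 * and examine RD_INT, which reports the mapping previously selected. */
static inline uint64_t cvmx_npei_msi_rd_map_select(unsigned msi_int)
{
    cvmx_npei_msi_rd_map_t map;
    map.u64 = 0;
    map.s.msi_int = msi_int & 0xff;   /* entry whose mapping to read back */
    return map.u64;                   /* write this, then re-read RD_INT */
}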
/** * cvmx_npei_msi_w1c_enb0 * * NPEI_MSI_W1C_ENB0 = NPEI MSI Write 1 To Clear Enable0 * * Used to clear bits in NPEI_MSI_ENB0. This is a PASS2 register. */ typedef union { uint64_t u64; struct cvmx_npei_msi_w1c_enb0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t clr : 64; /**< A write of '1' to a vector will clear the corresponding bit in NPEI_MSI_ENB0. A read to this address will return 0. */ #else uint64_t clr : 64; #endif } s; struct cvmx_npei_msi_w1c_enb0_s cn52xx; struct cvmx_npei_msi_w1c_enb0_s cn56xx; } cvmx_npei_msi_w1c_enb0_t;
/** * cvmx_npei_msi_w1c_enb1 * * NPEI_MSI_W1C_ENB1 = NPEI MSI Write 1 To Clear Enable1 * * Used to clear bits in NPEI_MSI_ENB1. This is a PASS2 register. */ typedef union { uint64_t u64; struct cvmx_npei_msi_w1c_enb1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t clr : 64; /**< A write of '1' to a vector will clear the corresponding bit in NPEI_MSI_ENB1. A read to this address will return 0. */ #else uint64_t clr : 64; #endif } s; struct cvmx_npei_msi_w1c_enb1_s cn52xx; struct cvmx_npei_msi_w1c_enb1_s cn56xx; } cvmx_npei_msi_w1c_enb1_t;
/** * cvmx_npei_msi_w1c_enb2 * * NPEI_MSI_W1C_ENB2 = NPEI MSI Write 1 To Clear Enable2 * * Used to clear bits in NPEI_MSI_ENB2. This is a PASS2 register. */ typedef union { uint64_t u64; struct cvmx_npei_msi_w1c_enb2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t clr : 64; /**< A write of '1' to a vector will clear the corresponding bit in NPEI_MSI_ENB2. A read to this address will return 0. */ #else uint64_t clr : 64; #endif } s; struct cvmx_npei_msi_w1c_enb2_s cn52xx; struct cvmx_npei_msi_w1c_enb2_s cn56xx; } cvmx_npei_msi_w1c_enb2_t;
/** * cvmx_npei_msi_w1c_enb3 * * NPEI_MSI_W1C_ENB3 = NPEI MSI Write 1 To Clear Enable3 * * Used to clear bits in NPEI_MSI_ENB3. This is a PASS2 register. */ typedef union { uint64_t u64; struct cvmx_npei_msi_w1c_enb3_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t clr : 64; /**< A write of '1' to a vector will clear the corresponding bit in NPEI_MSI_ENB3. A read to this address will return 0. */ #else uint64_t clr : 64; #endif } s; struct cvmx_npei_msi_w1c_enb3_s cn52xx; struct cvmx_npei_msi_w1c_enb3_s cn56xx; } cvmx_npei_msi_w1c_enb3_t;
/** * cvmx_npei_msi_w1s_enb0 * * NPEI_MSI_W1S_ENB0 = NPEI MSI Write 1 To Set Enable0 * * Used to set bits in NPEI_MSI_ENB0. This is a PASS2 register. */ typedef union { uint64_t u64; struct cvmx_npei_msi_w1s_enb0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t set : 64; /**< A write of '1' to a vector will set the corresponding bit in NPEI_MSI_ENB0. A read to this address will return 0. */ #else uint64_t set : 64; #endif } s; struct cvmx_npei_msi_w1s_enb0_s cn52xx; struct cvmx_npei_msi_w1s_enb0_s cn56xx; } cvmx_npei_msi_w1s_enb0_t;
/** * cvmx_npei_msi_w1s_enb1 * * NPEI_MSI_W1S_ENB1 = NPEI MSI Write 1 To Set Enable1 * * Used to set bits in NPEI_MSI_ENB1. This is a PASS2 register. */ typedef union { uint64_t u64; struct cvmx_npei_msi_w1s_enb1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t set : 64; /**< A write of '1' to a vector will set the corresponding bit in NPEI_MSI_ENB1. A read to this address will return 0. */ #else uint64_t set : 64; #endif } s; struct cvmx_npei_msi_w1s_enb1_s cn52xx; struct cvmx_npei_msi_w1s_enb1_s cn56xx; } cvmx_npei_msi_w1s_enb1_t;
/** * cvmx_npei_msi_w1s_enb2 * * NPEI_MSI_W1S_ENB2 = NPEI MSI Write 1 To Set Enable2 * * Used to set bits in NPEI_MSI_ENB2. This is a PASS2 register. */ typedef union { uint64_t u64; struct cvmx_npei_msi_w1s_enb2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t set : 64; /**< A write of '1' to a vector will set the corresponding bit in NPEI_MSI_ENB2. A read to this address will return 0. */ #else uint64_t set : 64; #endif } s; struct cvmx_npei_msi_w1s_enb2_s cn52xx; struct cvmx_npei_msi_w1s_enb2_s cn56xx; } cvmx_npei_msi_w1s_enb2_t;
/** * cvmx_npei_msi_w1s_enb3 * * NPEI_MSI_W1S_ENB3 = NPEI MSI Write 1 To Set Enable3 * * Used to set bits in NPEI_MSI_ENB3. This is a PASS2 register. */ typedef union { uint64_t u64; struct cvmx_npei_msi_w1s_enb3_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t set : 64; /**< A write of '1' to a vector will set the corresponding bit in NPEI_MSI_ENB3. A read to this address will return 0. */ #else uint64_t set : 64; #endif } s; struct cvmx_npei_msi_w1s_enb3_s cn52xx; struct cvmx_npei_msi_w1s_enb3_s cn56xx; } cvmx_npei_msi_w1s_enb3_t;
/** * cvmx_npei_msi_wr_map * * NPEI_MSI_WR_MAP = NPEI MSI Write MAP * * Used to write the mapping function of the NPEI_PCIE_MSI_RCV to NPEI_MSI_RCV registers. */ typedef union { uint64_t u64; struct cvmx_npei_msi_wr_map_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t ciu_int : 8; /**< Selects which bit in the NPEI_MSI_RCV# (0-255) will be set when the value specified in the MSI_INT of this register is received during a write to the NPEI_PCIE_MSI_RCV register. */ uint64_t msi_int : 8; /**< Selects the value that would be received when the NPEI_PCIE_MSI_RCV register is written. */ #else uint64_t msi_int : 8; uint64_t ciu_int : 8; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_npei_msi_wr_map_s cn52xx; struct cvmx_npei_msi_wr_map_s cn52xxp1; struct cvmx_npei_msi_wr_map_s cn56xx; struct cvmx_npei_msi_wr_map_s cn56xxp1; } cvmx_npei_msi_wr_map_t;
/** * cvmx_npei_pcie_credit_cnt * * NPEI_PCIE_CREDIT_CNT = NPEI PCIE Credit Count * * Contains the number of credits for the pcie port FIFOs used by the NPEI. This value needs to be set BEFORE PCIe traffic * flow from NPEI to PCIE Ports starts. A write to this register will cause the credit counts in the NPEI for the two * PCIE ports to be reset to the value in this register. */ typedef union { uint64_t u64; struct cvmx_npei_pcie_credit_cnt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_48_63 : 16; uint64_t p1_ccnt : 8; /**< Port1 C-TLP FIFO Credits. Legal values are 0x25 to 0x80. */ uint64_t p1_ncnt : 8; /**< Port1 N-TLP FIFO Credits. Legal values are 0x5 to 0x10. */ uint64_t p1_pcnt : 8; /**< Port1 P-TLP FIFO Credits. Legal values are 0x25 to 0x80. */ uint64_t p0_ccnt : 8; /**< Port0 C-TLP FIFO Credits. Legal values are 0x25 to 0x80. */ uint64_t p0_ncnt : 8; /**< Port0 N-TLP FIFO Credits. Legal values are 0x5 to 0x10. */ uint64_t p0_pcnt : 8; /**< Port0 P-TLP FIFO Credits. Legal values are 0x25 to 0x80. */ #else uint64_t p0_pcnt : 8; uint64_t p0_ncnt : 8; uint64_t p0_ccnt : 8; uint64_t p1_pcnt : 8; uint64_t p1_ncnt : 8; uint64_t p1_ccnt : 8; uint64_t reserved_48_63 : 16; #endif } s; struct cvmx_npei_pcie_credit_cnt_s cn52xx; struct cvmx_npei_pcie_credit_cnt_s cn56xx; } cvmx_npei_pcie_credit_cnt_t;
/** * cvmx_npei_pcie_msi_rcv * * NPEI_PCIE_MSI_RCV = NPEI PCIe MSI Receive * * Register where MSI writes are directed from the PCIe. */ typedef union { uint64_t u64; struct cvmx_npei_pcie_msi_rcv_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t intr : 8; /**< A write to this register will result in a bit in one of the NPEI_MSI_RCV# registers being set. Which bit is set depends on the map value previously written using the NPEI_MSI_WR_MAP register or, if not previously written, the reset value of the MAP. */ #else uint64_t intr : 8; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_npei_pcie_msi_rcv_s cn52xx; struct cvmx_npei_pcie_msi_rcv_s cn52xxp1; struct cvmx_npei_pcie_msi_rcv_s cn56xx; struct cvmx_npei_pcie_msi_rcv_s cn56xxp1; } cvmx_npei_pcie_msi_rcv_t;
/** * cvmx_npei_pcie_msi_rcv_b1 * * NPEI_PCIE_MSI_RCV_B1 = NPEI PCIe MSI Receive Byte 1 * * Register where MSI writes are directed from the PCIe. */ typedef union { uint64_t u64; struct cvmx_npei_pcie_msi_rcv_b1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t intr : 8; /**< A write to this register will result in a bit in one of the NPEI_MSI_RCV# registers being set. Which bit is set depends on the map value previously written using the NPEI_MSI_WR_MAP register or, if not previously written, the reset value of the MAP. */ uint64_t reserved_0_7 : 8; #else uint64_t reserved_0_7 : 8; uint64_t intr : 8; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_npei_pcie_msi_rcv_b1_s cn52xx; struct cvmx_npei_pcie_msi_rcv_b1_s cn52xxp1; struct cvmx_npei_pcie_msi_rcv_b1_s cn56xx; struct cvmx_npei_pcie_msi_rcv_b1_s cn56xxp1; } cvmx_npei_pcie_msi_rcv_b1_t;
/** * cvmx_npei_pcie_msi_rcv_b2 * * NPEI_PCIE_MSI_RCV_B2 = NPEI PCIe MSI Receive Byte 2 * * Register where MSI writes are directed from the PCIe. */ typedef union { uint64_t u64; struct cvmx_npei_pcie_msi_rcv_b2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_24_63 : 40; uint64_t intr : 8; /**< A write to this register will result in a bit in one of the NPEI_MSI_RCV# registers being set. Which bit is set depends on the map value previously written using the NPEI_MSI_WR_MAP register or, if not previously written, the reset value of the MAP. */ uint64_t reserved_0_15 : 16; #else uint64_t reserved_0_15 : 16; uint64_t intr : 8; uint64_t reserved_24_63 : 40; #endif } s; struct cvmx_npei_pcie_msi_rcv_b2_s cn52xx; struct cvmx_npei_pcie_msi_rcv_b2_s cn52xxp1; struct cvmx_npei_pcie_msi_rcv_b2_s cn56xx; struct cvmx_npei_pcie_msi_rcv_b2_s cn56xxp1; } cvmx_npei_pcie_msi_rcv_b2_t;
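/* Usage sketch (editor's illustration, not part of the auto-generated
 * definitions): packing one NPEI_MSI_WR_MAP entry. Writing this value
 * maps incoming MSI value MSI_INT to bit CIU_INT of the NPEI_MSI_RCV#
 * registers, per the field descriptions above. */
static inline uint64_t cvmx_npei_msi_wr_map_entry(unsigned msi_int, unsigned ciu_int)
{
    cvmx_npei_msi_wr_map_t map;
    map.u64 = 0;
    map.s.msi_int = msi_int & 0xff;   /* value seen on NPEI_PCIE_MSI_RCV */
    map.s.ciu_int = ciu_int & 0xff;   /* NPEI_MSI_RCV# bit (0-255) to set */
    return map.u64;
}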
/** * cvmx_npei_pcie_msi_rcv_b3 * * NPEI_PCIE_MSI_RCV_B3 = NPEI PCIe MSI Receive Byte 3 * * Register where MSI writes are directed from the PCIe. */ typedef union { uint64_t u64; struct cvmx_npei_pcie_msi_rcv_b3_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t intr : 8; /**< A write to this register will result in a bit in one of the NPEI_MSI_RCV# registers being set. Which bit is set depends on the map value previously written using the NPEI_MSI_WR_MAP register or, if not previously written, the reset value of the MAP. */ uint64_t reserved_0_23 : 24; #else uint64_t reserved_0_23 : 24; uint64_t intr : 8; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_npei_pcie_msi_rcv_b3_s cn52xx; struct cvmx_npei_pcie_msi_rcv_b3_s cn52xxp1; struct cvmx_npei_pcie_msi_rcv_b3_s cn56xx; struct cvmx_npei_pcie_msi_rcv_b3_s cn56xxp1; } cvmx_npei_pcie_msi_rcv_b3_t;
/** * cvmx_npei_pkt#_cnts * * NPEI_PKT[0..31]_CNTS = NPEI Packet ring# Counts * * The counters for output rings. */ typedef union { uint64_t u64; struct cvmx_npei_pktx_cnts_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_54_63 : 10; uint64_t timer : 22; /**< Timer incremented every 1024 core clocks when NPEI_PKTS#_CNTS[CNT] is non zero. Field cleared when NPEI_PKTS#_CNTS[CNT] goes to 0. Field is also cleared when NPEI_PKT_TIME_INT is cleared. The first increment of this count can occur between 0 to 1023 core clocks. */ uint64_t cnt : 32; /**< Ring counter. This field is incremented as packets are sent out and decremented in response to writes to this field. When NPEI_PKT_OUT_BMODE is '0' a value of 1 is added to the register for each packet, when '1' and the info-pointer is NOT used the length of the packet plus 8 is added, when '1' and info-pointer mode IS used the packet length is added to this field. */ #else uint64_t cnt : 32; uint64_t timer : 22; uint64_t reserved_54_63 : 10; #endif } s; struct cvmx_npei_pktx_cnts_s cn52xx; struct cvmx_npei_pktx_cnts_s cn56xx; } cvmx_npei_pktx_cnts_t;
/** * cvmx_npei_pkt#_in_bp * * NPEI_PKT[0..31]_IN_BP = NPEI Packet ring# Input Backpressure * * The counters and thresholds for input packets to apply backpressure to processing of the packets. */ typedef union { uint64_t u64; struct cvmx_npei_pktx_in_bp_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t wmark : 32; /**< When CNT is greater than this threshold no more packets will be processed for this ring. When writing this field of the NPEI_PKT#_IN_BP register, use a 4-byte write so as to not write any other field of this register. */ uint64_t cnt : 32; /**< Ring counter. This field is incremented by one whenever OCTEON receives, buffers, and creates a work queue entry for a packet that arrives by the corresponding input ring. A write to this field will be subtracted from the field value. When writing this field of the NPEI_PKT#_IN_BP register, use a 4-byte write so as to not write any other field of this register. */ #else uint64_t cnt : 32; uint64_t wmark : 32; #endif } s; struct cvmx_npei_pktx_in_bp_s cn52xx; struct cvmx_npei_pktx_in_bp_s cn56xx; } cvmx_npei_pktx_in_bp_t;
/** * cvmx_npei_pkt#_instr_baddr * * NPEI_PKT[0..31]_INSTR_BADDR = NPEI Packet ring# Instruction Base Address * * Start of Instruction for input packets. */ typedef union { uint64_t u64; struct cvmx_npei_pktx_instr_baddr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t addr : 61; /**< Base address for Instructions. */ uint64_t reserved_0_2 : 3; #else uint64_t reserved_0_2 : 3; uint64_t addr : 61; #endif } s; struct cvmx_npei_pktx_instr_baddr_s cn52xx; struct cvmx_npei_pktx_instr_baddr_s cn56xx; } cvmx_npei_pktx_instr_baddr_t;
/** * cvmx_npei_pkt#_instr_baoff_dbell * * NPEI_PKT[0..31]_INSTR_BAOFF_DBELL = NPEI Packet ring# Instruction Base Address Offset and Doorbell * * The doorbell and base address offset for next read. */ typedef union { uint64_t u64; struct cvmx_npei_pktx_instr_baoff_dbell_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t aoff : 32; /**< The offset from the NPEI_PKT[0..31]_INSTR_BADDR where the next instruction will be read. */ uint64_t dbell : 32; /**< Instruction doorbell count. Writes to this field will increment the value here. Reads will return present value. A write of 0xffffffff will set the DBELL and AOFF fields to '0'. */ #else uint64_t dbell : 32; uint64_t aoff : 32; #endif } s; struct cvmx_npei_pktx_instr_baoff_dbell_s cn52xx; struct cvmx_npei_pktx_instr_baoff_dbell_s cn56xx; } cvmx_npei_pktx_instr_baoff_dbell_t;
/** * cvmx_npei_pkt#_instr_fifo_rsize * * NPEI_PKT[0..31]_INSTR_FIFO_RSIZE = NPEI Packet ring# Instruction FIFO and Ring Size. * * Fifo field and ring size for Instructions. */ typedef union { uint64_t u64; struct cvmx_npei_pktx_instr_fifo_rsize_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t max : 9; /**< Max Fifo Size. */ uint64_t rrp : 9; /**< Fifo read pointer. */ uint64_t wrp : 9; /**< Fifo write pointer. */ uint64_t fcnt : 5; /**< Fifo count. */ uint64_t rsize : 32; /**< Instruction ring size. */ #else uint64_t rsize : 32; uint64_t fcnt : 5; uint64_t wrp : 9; uint64_t rrp : 9; uint64_t max : 9; #endif } s; struct cvmx_npei_pktx_instr_fifo_rsize_s cn52xx; struct cvmx_npei_pktx_instr_fifo_rsize_s cn56xx; } cvmx_npei_pktx_instr_fifo_rsize_t;
/** * cvmx_npei_pkt#_instr_header * * NPEI_PKT[0..31]_INSTR_HEADER = NPEI Packet ring# Instruction Header. * * Values used to build the input packet header. */ typedef union { uint64_t u64; struct cvmx_npei_pktx_instr_header_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_44_63 : 20; uint64_t pbp : 1; /**< Enable Packet-by-packet mode. */ uint64_t reserved_38_42 : 5; uint64_t rparmode : 2; /**< Parse Mode. Used when packet is raw and PBP==0. */ uint64_t reserved_35_35 : 1; uint64_t rskp_len : 7; /**< Skip Length. Used when packet is raw and PBP==0. */ uint64_t reserved_22_27 : 6; uint64_t use_ihdr : 1; /**< When set '1' the instruction header will be sent as part of the packet data, regardless of the value of bit [63] of the instruction header. USE_IHDR must be set whenever PBP is set. */ uint64_t reserved_16_20 : 5; uint64_t par_mode : 2; /**< Parse Mode. Used when USE_IHDR is set and packet is not raw and PBP is not set. */ uint64_t reserved_13_13 : 1; uint64_t skp_len : 7; /**< Skip Length. Used when USE_IHDR is set and packet is not raw and PBP is not set. */ uint64_t reserved_0_5 : 6; #else uint64_t reserved_0_5 : 6; uint64_t skp_len : 7; uint64_t reserved_13_13 : 1; uint64_t par_mode : 2; uint64_t reserved_16_20 : 5; uint64_t use_ihdr : 1; uint64_t reserved_22_27 : 6; uint64_t rskp_len : 7; uint64_t reserved_35_35 : 1; uint64_t rparmode : 2; uint64_t reserved_38_42 : 5; uint64_t pbp : 1; uint64_t reserved_44_63 : 20; #endif } s; struct cvmx_npei_pktx_instr_header_s cn52xx; struct cvmx_npei_pktx_instr_header_s cn56xx; } cvmx_npei_pktx_instr_header_t;
/** * cvmx_npei_pkt#_slist_baddr * * NPEI_PKT[0..31]_SLIST_BADDR = NPEI Packet ring# Scatter List Base Address * * Start of Scatter List for output packet pointers - MUST be 16 byte aligned */ typedef union { uint64_t u64; struct cvmx_npei_pktx_slist_baddr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t addr : 60; /**< Base address for scatter list pointers. */ uint64_t reserved_0_3 : 4; #else uint64_t reserved_0_3 : 4; uint64_t addr : 60; #endif } s; struct cvmx_npei_pktx_slist_baddr_s cn52xx; struct cvmx_npei_pktx_slist_baddr_s cn56xx; } cvmx_npei_pktx_slist_baddr_t;
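/* Usage sketch (editor's illustration, not part of the auto-generated
 * definitions): ringing the per-ring instruction doorbell. Writes add to
 * the DBELL count; per the description above, a write of 0xffffffff
 * clears both DBELL and AOFF. */
static inline uint64_t cvmx_npei_pktx_instr_dbell_ring(uint32_t count)
{
    cvmx_npei_pktx_instr_baoff_dbell_t db;
    db.u64 = 0;
    db.s.dbell = count;   /* number of new instructions made available */
    return db.u64;        /* write 0xffffffffull instead to reset the ring */
}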
/** * cvmx_npei_pkt#_slist_baoff_dbell * * NPEI_PKT[0..31]_SLIST_BAOFF_DBELL = NPEI Packet ring# Scatter List Base Address Offset and Doorbell * * The doorbell and base address offset for next read. */ typedef union { uint64_t u64; struct cvmx_npei_pktx_slist_baoff_dbell_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t aoff : 32; /**< The offset from the NPEI_PKT[0..31]_SLIST_BADDR where the next SList pointer will be read. A write of 0xFFFFFFFF to the DBELL field will clear DBELL and AOFF */ uint64_t dbell : 32; /**< Scatter list doorbell count. Writes to this field will increment the value here. Reads will return present value. The value of this field is decremented as read operations are ISSUED for scatter pointers. A write of 0xFFFFFFFF will clear DBELL and AOFF */ #else uint64_t dbell : 32; uint64_t aoff : 32; #endif } s; struct cvmx_npei_pktx_slist_baoff_dbell_s cn52xx; struct cvmx_npei_pktx_slist_baoff_dbell_s cn56xx; } cvmx_npei_pktx_slist_baoff_dbell_t;
/** * cvmx_npei_pkt#_slist_fifo_rsize * * NPEI_PKT[0..31]_SLIST_FIFO_RSIZE = NPEI Packet ring# Scatter List FIFO and Ring Size. * * The number of scatter pointer pairs in the scatter list. */ typedef union { uint64_t u64; struct cvmx_npei_pktx_slist_fifo_rsize_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t rsize : 32; /**< The number of scatter pointer pairs contained in the scatter list ring. */ #else uint64_t rsize : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_npei_pktx_slist_fifo_rsize_s cn52xx; struct cvmx_npei_pktx_slist_fifo_rsize_s cn56xx; } cvmx_npei_pktx_slist_fifo_rsize_t;
/** * cvmx_npei_pkt_cnt_int * * NPEI_PKT_CNT_INT = NPI Packet Counter Interrupt * * The packet rings that are interrupting because of Packet Counters. */ typedef union { uint64_t u64; struct cvmx_npei_pkt_cnt_int_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t port : 32; /**< Bit vector corresponding to ring number is set when NPEI_PKT#_CNTS[CNT] is greater than NPEI_PKT_INT_LEVELS[CNT]. */ #else uint64_t port : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_npei_pkt_cnt_int_s cn52xx; struct cvmx_npei_pkt_cnt_int_s cn56xx; } cvmx_npei_pkt_cnt_int_t;
/** * cvmx_npei_pkt_cnt_int_enb * * NPEI_PKT_CNT_INT_ENB = NPI Packet Counter Interrupt Enable * * Enable for the packet rings that are interrupting because of Packet Counters. */ typedef union { uint64_t u64; struct cvmx_npei_pkt_cnt_int_enb_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t port : 32; /**< Bit vector corresponding to ring number when set allows NPEI_PKT_CNT_INT to generate an interrupt. */ #else uint64_t port : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_npei_pkt_cnt_int_enb_s cn52xx; struct cvmx_npei_pkt_cnt_int_enb_s cn56xx; } cvmx_npei_pkt_cnt_int_enb_t;
/** * cvmx_npei_pkt_data_out_es * * NPEI_PKT_DATA_OUT_ES = NPEI's Packet Data Out Endian Swap * * The Endian Swap for writing Data Out. */ typedef union { uint64_t u64; struct cvmx_npei_pkt_data_out_es_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t es : 64; /**< The endian swap mode for Packet rings 0 through 31. Two bits are used per ring (i.e. ring 0 [1:0], ring 1 [3:2], ....). */ #else uint64_t es : 64; #endif } s; struct cvmx_npei_pkt_data_out_es_s cn52xx; struct cvmx_npei_pkt_data_out_es_s cn56xx; } cvmx_npei_pkt_data_out_es_t;
/** * cvmx_npei_pkt_data_out_ns * * NPEI_PKT_DATA_OUT_NS = NPEI's Packet Data Out No Snoop * * The NS field for the TLP when writing packet data. */ typedef union { uint64_t u64; struct cvmx_npei_pkt_data_out_ns_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t nsr : 32; /**< When asserted '1' the vector bit corresponding to the Packet-ring will enable NS in TLP header. */ #else uint64_t nsr : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_npei_pkt_data_out_ns_s cn52xx; struct cvmx_npei_pkt_data_out_ns_s cn56xx; } cvmx_npei_pkt_data_out_ns_t;
/** * cvmx_npei_pkt_data_out_ror * * NPEI_PKT_DATA_OUT_ROR = NPEI's Packet Data Out Relaxed Ordering * * The ROR field for the TLP when writing Packet Data. */ typedef union { uint64_t u64; struct cvmx_npei_pkt_data_out_ror_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t ror : 32; /**< When asserted '1' the vector bit corresponding to the Packet-ring will enable ROR in TLP header. */ #else uint64_t ror : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_npei_pkt_data_out_ror_s cn52xx; struct cvmx_npei_pkt_data_out_ror_s cn56xx; } cvmx_npei_pkt_data_out_ror_t;
/** * cvmx_npei_pkt_dpaddr * * NPEI_PKT_DPADDR = NPEI's Packet Data Pointer Addr * * Used to determine address and attributes for packet data writes. */ typedef union { uint64_t u64; struct cvmx_npei_pkt_dpaddr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t dptr : 32; /**< When asserted '1' the vector bit corresponding to the Packet-ring will use: the address[63:60] to write packet data comes from the DPTR[63:60] in the scatter-list pair and the RO, NS, ES values come from the O0_ES, O0_NS, O0_RO. When '0' the RO == DPTR[60], NS == DPTR[61], ES == DPTR[63:62], the address the packet will be written to is ADDR[63:60] == O0_ES[1:0], O0_NS, O0_RO. */ #else uint64_t dptr : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_npei_pkt_dpaddr_s cn52xx; struct cvmx_npei_pkt_dpaddr_s cn56xx; } cvmx_npei_pkt_dpaddr_t;
/** * cvmx_npei_pkt_in_bp * * NPEI_PKT_IN_BP = NPEI Packet Input Backpressure * * Which input rings have backpressure applied. */ typedef union { uint64_t u64; struct cvmx_npei_pkt_in_bp_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t bp : 32; /**< A packet input ring that has its count greater than its WMARK will have backpressure applied. Each of the 32 bits correspond to an input ring. When '1' that ring has backpressure applied and will fetch no more instructions, but will process any previously fetched instructions. */ #else uint64_t bp : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_npei_pkt_in_bp_s cn52xx; struct cvmx_npei_pkt_in_bp_s cn56xx; } cvmx_npei_pkt_in_bp_t;
/** * cvmx_npei_pkt_in_done#_cnts * * NPEI_PKT_IN_DONE[0..31]_CNTS = NPEI Instruction Done ring# Counts * * Counters for instructions completed on Input rings. */ typedef union { uint64_t u64; struct cvmx_npei_pkt_in_donex_cnts_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t cnt : 32; /**< This field is incremented by '1' when an instruction is completed. This field is incremented as the last of the data is read from the PCIe. */ #else uint64_t cnt : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_npei_pkt_in_donex_cnts_s cn52xx; struct cvmx_npei_pkt_in_donex_cnts_s cn56xx; } cvmx_npei_pkt_in_donex_cnts_t;
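/* Usage sketch (editor's illustration, not part of the auto-generated
 * definitions): several registers above (NPEI_PKT_DATA_OUT_ES, and later
 * NPEI_PKT_SLIST_ES and the PCIe port assignments) keep one 2-bit value
 * per ring, ring N in bits [2N+1:2N]. This helper updates one ring's
 * field inside such a 64-bit vector. */
static inline uint64_t cvmx_npei_pkt_ring_field2_set(uint64_t vec, unsigned ring, unsigned val)
{
    unsigned shift = 2u * (ring & 31);      /* ring N lives at bits [2N+1:2N] */
    vec &= ~(3ull << shift);                /* clear the old 2-bit value */
    vec |= ((uint64_t)(val & 3)) << shift;  /* install the new one */
    return vec;
}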
/** * cvmx_npei_pkt_in_instr_counts * * NPEI_PKT_IN_INSTR_COUNTS = NPEI Packet Input Instruction Counts * * Keeps track of the number of instructions read into the FIFO and Packets sent to IPD. */ typedef union { uint64_t u64; struct cvmx_npei_pkt_in_instr_counts_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t wr_cnt : 32; /**< Shows the number of packets sent to the IPD. */ uint64_t rd_cnt : 32; /**< Shows the number of instructions that have had reads issued for them. */ #else uint64_t rd_cnt : 32; uint64_t wr_cnt : 32; #endif } s; struct cvmx_npei_pkt_in_instr_counts_s cn52xx; struct cvmx_npei_pkt_in_instr_counts_s cn56xx; } cvmx_npei_pkt_in_instr_counts_t;
/** * cvmx_npei_pkt_in_pcie_port * * NPEI_PKT_IN_PCIE_PORT = NPEI's Packet In To PCIe Port Assignment * * Assigns Packet Input rings to PCIe ports. */ typedef union { uint64_t u64; struct cvmx_npei_pkt_in_pcie_port_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t pp : 64; /**< The PCIe port that the Packet ring number is assigned. Two bits are used per ring (i.e. ring 0 [1:0], ring 1 [3:2], ....). A value of '0' means that the packet ring is assigned to PCIe Port 0, a '1' PCIe Port 1; '2' and '3' are reserved. */ #else uint64_t pp : 64; #endif } s; struct cvmx_npei_pkt_in_pcie_port_s cn52xx; struct cvmx_npei_pkt_in_pcie_port_s cn56xx; } cvmx_npei_pkt_in_pcie_port_t;
/** * cvmx_npei_pkt_input_control * * NPEI_PKT_INPUT_CONTROL = NPEI's Packet Input Control * * Control for reads for gather list and instructions. */ typedef union { uint64_t u64; struct cvmx_npei_pkt_input_control_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_23_63 : 41; uint64_t pkt_rr : 1; /**< When set '1' the input packet selection will be made with a Round Robin arbitration. When '0' the input packet ring is fixed in priority, where the lower ring number has higher priority. */ uint64_t pbp_dhi : 13; /**< Field used in calculating a DPTR when [PBP] is set. */ uint64_t d_nsr : 1; /**< Enables '1' NoSnoop for reading of gather data. */ uint64_t d_esr : 2; /**< The Endian-Swap-Mode for reading of gather data. */ uint64_t d_ror : 1; /**< Enables '1' Relaxed Ordering for reading of gather data. */ uint64_t use_csr : 1; /**< When set '1' the csr value will be used for ROR, ESR, and NSR. When clear '0' the value in DPTR will be used. In turn the bits not used for ROR, ESR, and NSR, will be used for bits [63:60] of the address used to fetch packet data. */ uint64_t nsr : 1; /**< Enables '1' NoSnoop for reading of gather list and gather instruction. */ uint64_t esr : 2; /**< The Endian-Swap-Mode for reading of gather list and gather instruction. */ uint64_t ror : 1; /**< Enables '1' Relaxed Ordering for reading of gather list and gather instruction. */ #else uint64_t ror : 1; uint64_t esr : 2; uint64_t nsr : 1; uint64_t use_csr : 1; uint64_t d_ror : 1; uint64_t d_esr : 2; uint64_t d_nsr : 1; uint64_t pbp_dhi : 13; uint64_t pkt_rr : 1; uint64_t reserved_23_63 : 41; #endif } s; struct cvmx_npei_pkt_input_control_s cn52xx; struct cvmx_npei_pkt_input_control_s cn56xx; } cvmx_npei_pkt_input_control_t;
/** * cvmx_npei_pkt_instr_enb * * NPEI_PKT_INSTR_ENB = NPEI's Packet Instruction Enable * * Enables the instruction fetch for a Packet-ring. */ typedef union { uint64_t u64; struct cvmx_npei_pkt_instr_enb_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t enb : 32; /**< When asserted '1' the vector bit corresponding to the Packet-ring is enabled. */ #else uint64_t enb : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_npei_pkt_instr_enb_s cn52xx; struct cvmx_npei_pkt_instr_enb_s cn56xx; } cvmx_npei_pkt_instr_enb_t;
/** * cvmx_npei_pkt_instr_rd_size * * NPEI_PKT_INSTR_RD_SIZE = NPEI Instruction Read Size * * The number of instruction allowed to be read at one time. */ typedef union { uint64_t u64; struct cvmx_npei_pkt_instr_rd_size_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t rdsize : 64; /**< Number of instructions to be read in one PCIe read request for the 4 PKOport - 8 rings. Every two bits (i.e. 1:0, 3:2, 5:4..) are assigned to the port/ring combinations. - 15:0 PKOPort0,Ring 7..0 31:16 PKOPort1,Ring 7..0 - 47:32 PKOPort2,Ring 7..0 63:48 PKOPort3,Ring 7..0 Two-bit values are: 0 - 1 Instruction 1 - 2 Instructions 2 - 3 Instructions 3 - 4 Instructions */ #else uint64_t rdsize : 64; #endif } s; struct cvmx_npei_pkt_instr_rd_size_s cn52xx; struct cvmx_npei_pkt_instr_rd_size_s cn56xx; } cvmx_npei_pkt_instr_rd_size_t;
/** * cvmx_npei_pkt_instr_size * * NPEI_PKT_INSTR_SIZE = NPEI's Packet Instruction Size * * Determines if instructions are 64 or 32 byte in size for a Packet-ring. */ typedef union { uint64_t u64; struct cvmx_npei_pkt_instr_size_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t is_64b : 32; /**< When asserted '1' the vector bit corresponding to the Packet-ring is a 64-byte instruction. */ #else uint64_t is_64b : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_npei_pkt_instr_size_s cn52xx; struct cvmx_npei_pkt_instr_size_s cn56xx; } cvmx_npei_pkt_instr_size_t;
/** * cvmx_npei_pkt_int_levels * * 0x90F0 reserved NPEI_PKT_PCIE_PORT2 * * * NPEI_PKT_INT_LEVELS = NPEI's Packet Interrupt Levels * * Output packet interrupt levels. */ typedef union { uint64_t u64; struct cvmx_npei_pkt_int_levels_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_54_63 : 10; uint64_t time : 22; /**< When NPEI_PKT#_CNTS[TIME] is equal to this value an interrupt is generated. */ uint64_t cnt : 32; /**< When NPEI_PKT#_CNTS[CNT] becomes greater than this value an interrupt is generated. */ #else uint64_t cnt : 32; uint64_t time : 22; uint64_t reserved_54_63 : 10; #endif } s; struct cvmx_npei_pkt_int_levels_s cn52xx; struct cvmx_npei_pkt_int_levels_s cn56xx; } cvmx_npei_pkt_int_levels_t;
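/* Usage sketch (editor's illustration, not part of the auto-generated
 * definitions): packing NPEI_PKT_INT_LEVELS. Per the descriptions above,
 * an interrupt is raised when a ring's CNT exceeds this CNT or its TIME
 * equals this TIME. */
static inline uint64_t cvmx_npei_pkt_int_levels_value(uint32_t cnt, uint32_t time)
{
    cvmx_npei_pkt_int_levels_t lvl;
    lvl.u64 = 0;
    lvl.s.cnt = cnt;                /* count threshold (32 bits) */
    lvl.s.time = time & 0x3fffff;   /* timer threshold (22 bits) */
    return lvl.u64;
}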
/** * cvmx_npei_pkt_iptr * * NPEI_PKT_IPTR = NPEI's Packet Info Pointer * * Controls using the Info-Pointer to store length and data. */ typedef union { uint64_t u64; struct cvmx_npei_pkt_iptr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t iptr : 32; /**< When asserted '1' the vector bit corresponding to the Packet-ring will use the Info-Pointer to store length and data. */ #else uint64_t iptr : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_npei_pkt_iptr_s cn52xx; struct cvmx_npei_pkt_iptr_s cn56xx; } cvmx_npei_pkt_iptr_t;
/** * cvmx_npei_pkt_out_bmode * * NPEI_PKT_OUT_BMODE = NPEI's Packet Out Byte Mode * * Control the updating of the NPEI_PKT#_CNT register. */ typedef union { uint64_t u64; struct cvmx_npei_pkt_out_bmode_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t bmode : 32; /**< When asserted '1' the vector bit corresponding to the Packet-ring will have its NPEI_PKT#_CNT register updated with the number of bytes in the packet sent, when '0' the register will have a value of '1' added. */ #else uint64_t bmode : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_npei_pkt_out_bmode_s cn52xx; struct cvmx_npei_pkt_out_bmode_s cn56xx; } cvmx_npei_pkt_out_bmode_t;
/** * cvmx_npei_pkt_out_enb * * NPEI_PKT_OUT_ENB = NPEI's Packet Output Enable * * Enables the output packet engines. */ typedef union { uint64_t u64; struct cvmx_npei_pkt_out_enb_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t enb : 32; /**< When asserted '1' the vector bit corresponding to the Packet-ring is enabled. If an error occurs on reading pointers for an output ring, the ring will be disabled by clearing the bit associated with the ring to '0'. */ #else uint64_t enb : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_npei_pkt_out_enb_s cn52xx; struct cvmx_npei_pkt_out_enb_s cn56xx; } cvmx_npei_pkt_out_enb_t;
/** * cvmx_npei_pkt_output_wmark * * NPEI_PKT_OUTPUT_WMARK = NPEI's Packet Output Water Mark * * When the NPEI_PKT#_SLIST_BAOFF_DBELL[DBELL] value is less than this value, backpressure for the ring will be applied. */ typedef union { uint64_t u64; struct cvmx_npei_pkt_output_wmark_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t wmark : 32; /**< When the DBELL count drops below this value, backpressure for the ring will be applied to the PKO. */ #else uint64_t wmark : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_npei_pkt_output_wmark_s cn52xx; struct cvmx_npei_pkt_output_wmark_s cn56xx; } cvmx_npei_pkt_output_wmark_t;
/** * cvmx_npei_pkt_pcie_port * * NPEI_PKT_PCIE_PORT = NPEI's Packet To PCIe Port Assignment * * Assigns Packet Ports to PCIe ports. */ typedef union { uint64_t u64; struct cvmx_npei_pkt_pcie_port_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t pp : 64; /**< The PCIe port that the Packet ring number is assigned. Two bits are used per ring (i.e. ring 0 [1:0], ring 1 [3:2], ....). A value of '0' means that the packet ring is assigned to PCIe Port 0, a '1' PCIe Port 1; '2' and '3' are reserved. */ #else uint64_t pp : 64; #endif } s; struct cvmx_npei_pkt_pcie_port_s cn52xx; struct cvmx_npei_pkt_pcie_port_s cn56xx; } cvmx_npei_pkt_pcie_port_t;
/** * cvmx_npei_pkt_port_in_rst * * NPEI_PKT_PORT_IN_RST = NPEI Packet Port In Reset * * Vector bits related to ring-port for ones that are reset. */ typedef union { uint64_t u64; struct cvmx_npei_pkt_port_in_rst_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t in_rst : 32; /**< When asserted '1' the vector bit corresponding to the inbound Packet-ring is in reset. */ uint64_t out_rst : 32; /**< When asserted '1' the vector bit corresponding to the outbound Packet-ring is in reset. */ #else uint64_t out_rst : 32; uint64_t in_rst : 32; #endif } s; struct cvmx_npei_pkt_port_in_rst_s cn52xx; struct cvmx_npei_pkt_port_in_rst_s cn56xx; } cvmx_npei_pkt_port_in_rst_t;
/** * cvmx_npei_pkt_slist_es * * NPEI_PKT_SLIST_ES = NPEI's Packet Scatter List Endian Swap * * The Endian Swap for Scatter List Read. */ typedef union { uint64_t u64; struct cvmx_npei_pkt_slist_es_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t es : 64; /**< The endian swap mode for Packet rings 0 through 31. Two bits are used per ring (i.e. ring 0 [1:0], ring 1 [3:2], ....). */ #else uint64_t es : 64; #endif } s; struct cvmx_npei_pkt_slist_es_s cn52xx; struct cvmx_npei_pkt_slist_es_s cn56xx; } cvmx_npei_pkt_slist_es_t;
/** * cvmx_npei_pkt_slist_id_size * * NPEI_PKT_SLIST_ID_SIZE = NPEI Packet Scatter List Info and Data Size * * The Size of the information and data fields pointed to by Scatter List pointers. */ typedef union { uint64_t u64; struct cvmx_npei_pkt_slist_id_size_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_23_63 : 41; uint64_t isize : 7; /**< Information size. Legal sizes are 0 to 120. */ uint64_t bsize : 16; /**< Data size. */ #else uint64_t bsize : 16; uint64_t isize : 7; uint64_t reserved_23_63 : 41; #endif } s; struct cvmx_npei_pkt_slist_id_size_s cn52xx; struct cvmx_npei_pkt_slist_id_size_s cn56xx; } cvmx_npei_pkt_slist_id_size_t;
/** * cvmx_npei_pkt_slist_ns * * NPEI_PKT_SLIST_NS = NPEI's Packet Scatter List No Snoop * * The NS field for the TLP when fetching Scatter List. */ typedef union { uint64_t u64; struct cvmx_npei_pkt_slist_ns_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t nsr : 32; /**< When asserted '1' the vector bit corresponding to the Packet-ring will enable NS in TLP header. */ #else uint64_t nsr : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_npei_pkt_slist_ns_s cn52xx; struct cvmx_npei_pkt_slist_ns_s cn56xx; } cvmx_npei_pkt_slist_ns_t;
/** * cvmx_npei_pkt_slist_ror * * NPEI_PKT_SLIST_ROR = NPEI's Packet Scatter List Relaxed Ordering * * The ROR field for the TLP when fetching Scatter List. */ typedef union { uint64_t u64; struct cvmx_npei_pkt_slist_ror_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t ror : 32; /**< When asserted '1' the vector bit corresponding to the Packet-ring will enable ROR in TLP header. */ #else uint64_t ror : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_npei_pkt_slist_ror_s cn52xx; struct cvmx_npei_pkt_slist_ror_s cn56xx; } cvmx_npei_pkt_slist_ror_t;
/** * cvmx_npei_pkt_time_int * * NPEI_PKT_TIME_INT = NPEI Packet Timer Interrupt * * The packet rings that are interrupting because of Packet Timers. */ typedef union { uint64_t u64; struct cvmx_npei_pkt_time_int_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t port : 32; /**< Bit vector corresponding to ring number is set when NPEI_PKT#_CNTS[TIMER] is greater than NPEI_PKT_INT_LEVELS[TIME]. */ #else uint64_t port : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_npei_pkt_time_int_s cn52xx; struct cvmx_npei_pkt_time_int_s cn56xx; } cvmx_npei_pkt_time_int_t;
/** * cvmx_npei_pkt_time_int_enb * * NPEI_PKT_TIME_INT_ENB = NPEI Packet Timer Interrupt Enable * * Enable for the packet rings that are interrupting because of Packet Timers. */ typedef union { uint64_t u64; struct cvmx_npei_pkt_time_int_enb_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t port : 32; /**< Bit vector corresponding to ring number when set allows NPEI_PKT_TIME_INT to generate an interrupt. */ #else uint64_t port : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_npei_pkt_time_int_enb_s cn52xx; struct cvmx_npei_pkt_time_int_enb_s cn56xx; } cvmx_npei_pkt_time_int_enb_t;
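/* Usage sketch (editor's illustration, not part of the auto-generated
 * definitions): NPEI_PKT_TIME_INT and NPEI_PKT_CNT_INT are per-ring bit
 * vectors, so a service loop can scan for the first ring that needs
 * attention. 'raw' is the value previously read from the CSR. */
static inline int cvmx_npei_pkt_time_int_first_ring(uint64_t raw)
{
    cvmx_npei_pkt_time_int_t ti;
    int ring;
    ti.u64 = raw;
    for (ring = 0; ring < 32; ring++)
        if (ti.s.port & (1u << ring))
            return ring;              /* lowest ring with TIMER past level */
    return -1;                        /* no rings pending */
}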
*/ typedef union { uint64_t u64; struct cvmx_npei_rsl_int_blocks_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_31_63 : 33; uint64_t iob : 1; /**< IOB_INT_SUM */ uint64_t lmc1 : 1; /**< LMC1_MEM_CFG0 */ uint64_t agl : 1; /**< AGL_GMX_RX0_INT_REG & AGL_GMX_TX_INT_REG */ uint64_t reserved_24_27 : 4; uint64_t asxpcs1 : 1; /**< PCS1_INT*_REG */ uint64_t asxpcs0 : 1; /**< PCS0_INT*_REG */ uint64_t reserved_21_21 : 1; uint64_t pip : 1; /**< PIP_INT_REG. */ uint64_t spx1 : 1; /**< Always reads as zero */ uint64_t spx0 : 1; /**< Always reads as zero */ uint64_t lmc0 : 1; /**< LMC0_MEM_CFG0 */ uint64_t l2c : 1; /**< L2C_INT_STAT */ uint64_t usb1 : 1; /**< Always reads as zero */ uint64_t rad : 1; /**< RAD_REG_ERROR */ uint64_t usb : 1; /**< USBN0_INT_SUM */ uint64_t pow : 1; /**< POW_ECC_ERR */ uint64_t tim : 1; /**< TIM_REG_ERROR */ uint64_t pko : 1; /**< PKO_REG_ERROR */ uint64_t ipd : 1; /**< IPD_INT_SUM */ uint64_t reserved_8_8 : 1; uint64_t zip : 1; /**< ZIP_ERROR */ uint64_t dfa : 1; /**< Always reads as zero */ uint64_t fpa : 1; /**< FPA_INT_SUM */ uint64_t key : 1; /**< KEY_INT_SUM */ uint64_t npei : 1; /**< NPEI_INT_SUM */ uint64_t gmx1 : 1; /**< GMX1_RX*_INT_REG & GMX1_TX_INT_REG */ uint64_t gmx0 : 1; /**< GMX0_RX*_INT_REG & GMX0_TX_INT_REG */ uint64_t mio : 1; /**< MIO_BOOT_ERR */ #else uint64_t mio : 1; uint64_t gmx0 : 1; uint64_t gmx1 : 1; uint64_t npei : 1; uint64_t key : 1; uint64_t fpa : 1; uint64_t dfa : 1; uint64_t zip : 1; uint64_t reserved_8_8 : 1; uint64_t ipd : 1; uint64_t pko : 1; uint64_t tim : 1; uint64_t pow : 1; uint64_t usb : 1; uint64_t rad : 1; uint64_t usb1 : 1; uint64_t l2c : 1; uint64_t lmc0 : 1; uint64_t spx0 : 1; uint64_t spx1 : 1; uint64_t pip : 1; uint64_t reserved_21_21 : 1; uint64_t asxpcs0 : 1; uint64_t asxpcs1 : 1; uint64_t reserved_24_27 : 4; uint64_t agl : 1; uint64_t lmc1 : 1; uint64_t iob : 1; uint64_t reserved_31_63 : 33; #endif } s; struct cvmx_npei_rsl_int_blocks_s cn52xx; struct cvmx_npei_rsl_int_blocks_s cn52xxp1; struct cvmx_npei_rsl_int_blocks_s cn56xx; struct cvmx_npei_rsl_int_blocks_s cn56xxp1; } cvmx_npei_rsl_int_blocks_t; /** * cvmx_npei_scratch_1 * * NPEI_SCRATCH_1 = NPEI's Scratch 1 * * A general purpose 64 bit register for SW use. */ typedef union { uint64_t u64; struct cvmx_npei_scratch_1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t data : 64; /**< The value in this register is totaly SW dependent. */ #else uint64_t data : 64; #endif } s; struct cvmx_npei_scratch_1_s cn52xx; struct cvmx_npei_scratch_1_s cn52xxp1; struct cvmx_npei_scratch_1_s cn56xx; struct cvmx_npei_scratch_1_s cn56xxp1; } cvmx_npei_scratch_1_t; /** * cvmx_npei_state1 * * NPEI_STATE1 = NPEI State 1 * * State machines in NPEI. For debug. */ typedef union { uint64_t u64; struct cvmx_npei_state1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t cpl1 : 12; /**< CPL1 State */ uint64_t cpl0 : 12; /**< CPL0 State */ uint64_t arb : 1; /**< ARB State */ uint64_t csr : 39; /**< CSR State */ #else uint64_t csr : 39; uint64_t arb : 1; uint64_t cpl0 : 12; uint64_t cpl1 : 12; #endif } s; struct cvmx_npei_state1_s cn52xx; struct cvmx_npei_state1_s cn52xxp1; struct cvmx_npei_state1_s cn56xx; struct cvmx_npei_state1_s cn56xxp1; } cvmx_npei_state1_t; /** * cvmx_npei_state2 * * NPEI_STATE2 = NPEI State 2 * * State machines in NPEI. For debug. 
*/ typedef union { uint64_t u64; struct cvmx_npei_state2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_48_63 : 16; uint64_t npei : 1; /**< NPEI State */ uint64_t rac : 1; /**< RAC State */ uint64_t csm1 : 15; /**< CSM1 State */ uint64_t csm0 : 15; /**< CSM0 State */ uint64_t nnp0 : 8; /**< NNP0 State */ uint64_t nnd : 8; /**< NND State */ #else uint64_t nnd : 8; uint64_t nnp0 : 8; uint64_t csm0 : 15; uint64_t csm1 : 15; uint64_t rac : 1; uint64_t npei : 1; uint64_t reserved_48_63 : 16; #endif } s; struct cvmx_npei_state2_s cn52xx; struct cvmx_npei_state2_s cn52xxp1; struct cvmx_npei_state2_s cn56xx; struct cvmx_npei_state2_s cn56xxp1; } cvmx_npei_state2_t; /** * cvmx_npei_state3 * * NPEI_STATE3 = NPEI State 3 * * State machines in NPEI. For debug. */ typedef union { uint64_t u64; struct cvmx_npei_state3_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_56_63 : 8; uint64_t psm1 : 15; /**< PSM1 State */ uint64_t psm0 : 15; /**< PSM0 State */ uint64_t nsm1 : 13; /**< NSM1 State */ uint64_t nsm0 : 13; /**< NSM0 State */ #else uint64_t nsm0 : 13; uint64_t nsm1 : 13; uint64_t psm0 : 15; uint64_t psm1 : 15; uint64_t reserved_56_63 : 8; #endif } s; struct cvmx_npei_state3_s cn52xx; struct cvmx_npei_state3_s cn52xxp1; struct cvmx_npei_state3_s cn56xx; struct cvmx_npei_state3_s cn56xxp1; } cvmx_npei_state3_t; /** * cvmx_npei_win_rd_addr * * NPEI_WIN_RD_ADDR = NPEI Window Read Address Register * * The address to be read when the NPEI_WIN_RD_DATA register is read. */ typedef union { uint64_t u64; struct cvmx_npei_win_rd_addr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_51_63 : 13; uint64_t ld_cmd : 2; /**< The load command sent with the read. 0x0 == Load 8-bytes, 0x1 == Load 4-bytes, 0x2 == Load 2-bytes, 0x3 == Load 1-byte. */ uint64_t iobit : 1; /**< A 1 or 0 can be written here but this will always read as '0'. */ uint64_t rd_addr : 48; /**< The address to be read from. Whenever the LSB of this register is written, the Read Operation will take place. [47:40] = NCB_ID [39:0] = Address When [47:43] == NPI & [42:0] == 0 bits [39:0] are: [39:32] == x, Not Used [31:27] == RSL_ID [12:0] == RSL Register Offset */ #else uint64_t rd_addr : 48; uint64_t iobit : 1; uint64_t ld_cmd : 2; uint64_t reserved_51_63 : 13; #endif } s; struct cvmx_npei_win_rd_addr_s cn52xx; struct cvmx_npei_win_rd_addr_s cn52xxp1; struct cvmx_npei_win_rd_addr_s cn56xx; struct cvmx_npei_win_rd_addr_s cn56xxp1; } cvmx_npei_win_rd_addr_t; /** * cvmx_npei_win_rd_data * * NPEI_WIN_RD_DATA = NPEI Window Read Data Register * * Reading this register causes a window read operation to take place. The address read is that contained in the NPEI_WIN_RD_ADDR * register. */ typedef union { uint64_t u64; struct cvmx_npei_win_rd_data_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t rd_data : 64; /**< The read data. */ #else uint64_t rd_data : 64; #endif } s; struct cvmx_npei_win_rd_data_s cn52xx; struct cvmx_npei_win_rd_data_s cn52xxp1; struct cvmx_npei_win_rd_data_s cn56xx; struct cvmx_npei_win_rd_data_s cn56xxp1; } cvmx_npei_win_rd_data_t; /** * cvmx_npei_win_wr_addr * * NPEI_WIN_WR_ADDR = NPEI Window Write Address Register * * Contains the address to be written to when a write operation is started by writing the * NPEI_WIN_WR_DATA register (see below). */ typedef union { uint64_t u64; struct cvmx_npei_win_wr_addr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_49_63 : 15; uint64_t iobit : 1; /**< A 1 or 0 can be written here but this will always read as '0'. */
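/* Illustrative window-write sequence (not part of the generated definitions).
 * It assumes the cvmx_write_csr() accessor from the executive headers; the
 * CVMX_NPEI_WIN_WR_* constants stand in for whatever address names the
 * companion CSR-address header defines, and phys_addr/value are placeholders:
 *
 *   cvmx_npei_win_wr_addr_t wa;
 *   wa.u64 = 0;
 *   wa.s.wr_addr = phys_addr >> 2;                  // target, bits [47:2]
 *   cvmx_write_csr(CVMX_NPEI_WIN_WR_ADDR, wa.u64);  // latch the address
 *   cvmx_write_csr(CVMX_NPEI_WIN_WR_MASK, 0x0);     // '0' bits: bytes written
 *   cvmx_write_csr(CVMX_NPEI_WIN_WR_DATA, value);   // LSB write triggers it
 */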
uint64_t wr_addr : 46; /**< The address that will be written to when the NPEI_WIN_WR_DATA register is written. [47:40] = NCB_ID [39:3] = Address When [47:43] == NPI & [42:0] == 0 bits [39:0] are: [39:32] == x, Not Used [31:27] == RSL_ID [12:2] == RSL Register Offset [1:0] == x, Not Used */ uint64_t reserved_0_1 : 2; #else uint64_t reserved_0_1 : 2; uint64_t wr_addr : 46; uint64_t iobit : 1; uint64_t reserved_49_63 : 15; #endif } s; struct cvmx_npei_win_wr_addr_s cn52xx; struct cvmx_npei_win_wr_addr_s cn52xxp1; struct cvmx_npei_win_wr_addr_s cn56xx; struct cvmx_npei_win_wr_addr_s cn56xxp1; } cvmx_npei_win_wr_addr_t; /** * cvmx_npei_win_wr_data * * NPEI_WIN_WR_DATA = NPEI Window Write Data Register * * Contains the data to write to the address located in the NPEI_WIN_WR_ADDR Register. * Writing the least-significant-byte of this register will cause a write operation to take place. */ typedef union { uint64_t u64; struct cvmx_npei_win_wr_data_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t wr_data : 64; /**< The data to be written. Whenever the LSB of this register is written, the Window Write will take place. */ #else uint64_t wr_data : 64; #endif } s; struct cvmx_npei_win_wr_data_s cn52xx; struct cvmx_npei_win_wr_data_s cn52xxp1; struct cvmx_npei_win_wr_data_s cn56xx; struct cvmx_npei_win_wr_data_s cn56xxp1; } cvmx_npei_win_wr_data_t; /** * cvmx_npei_win_wr_mask * * NPEI_WIN_WR_MASK = NPEI Window Write Mask Register * * Contains the mask for the data in the NPEI_WIN_WR_DATA Register. */ typedef union { uint64_t u64; struct cvmx_npei_win_wr_mask_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t wr_mask : 8; /**< The write mask. When a bit is '0' the corresponding byte will be written. */ #else uint64_t wr_mask : 8; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_npei_win_wr_mask_s cn52xx; struct cvmx_npei_win_wr_mask_s cn52xxp1; struct cvmx_npei_win_wr_mask_s cn56xx; struct cvmx_npei_win_wr_mask_s cn56xxp1; } cvmx_npei_win_wr_mask_t; /** * cvmx_npei_window_ctl * * NPEI_WINDOW_CTL = NPEI's Window Control * * The name of this register is misleading. The timeout value is used for BAR0 access from PCIE0 and PCIE1. * Any access to the registers on the RML will time out after 0xFFFF clock cycles. At the time of a timeout the next * RML access will start, an interrupt will be set, and in the case of reads no data will be returned. * * The value of this register should be set to a minimum of 0x200000 to ensure that a timeout to an RML register * occurs on the RML 0xFFFF timer before the timeout for a BAR0 access from the PCIE#. */ typedef union { uint64_t u64; struct cvmx_npei_window_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t time : 32; /**< Time in core clocks to wait for a BAR0 access to complete on the NCB before timing out. A value of 0 will cause no timeouts. A minimum value of 0x200000 should be used when this register is not set to 0x0. */ #else uint64_t time : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_npei_window_ctl_s cn52xx; struct cvmx_npei_window_ctl_s cn52xxp1; struct cvmx_npei_window_ctl_s cn56xx; struct cvmx_npei_window_ctl_s cn56xxp1; } cvmx_npei_window_ctl_t; /** * cvmx_npi_base_addr_input# * * NPI_BASE_ADDR_INPUT0 = NPI's Base Address Input 0 Register * * The address to start reading Instructions from for Input-0. */ typedef union { uint64_t u64; struct cvmx_npi_base_addr_inputx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t baddr : 61; /**< The address to read Instructions from for input 0.
This address is 8-byte aligned, for this reason address bits [2:0] will always be zero. */ uint64_t reserved_0_2 : 3; #else uint64_t reserved_0_2 : 3; uint64_t baddr : 61; #endif } s; struct cvmx_npi_base_addr_inputx_s cn30xx; struct cvmx_npi_base_addr_inputx_s cn31xx; struct cvmx_npi_base_addr_inputx_s cn38xx; struct cvmx_npi_base_addr_inputx_s cn38xxp2; struct cvmx_npi_base_addr_inputx_s cn50xx; struct cvmx_npi_base_addr_inputx_s cn58xx; struct cvmx_npi_base_addr_inputx_s cn58xxp1; } cvmx_npi_base_addr_inputx_t; /** * cvmx_npi_base_addr_output# * * NPI_BASE_ADDR_OUTPUT0 = NPI's Base Address Output 0 Register * * The address to start reading Instructions from for Output-0. */ typedef union { uint64_t u64; struct cvmx_npi_base_addr_outputx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t baddr : 61; /**< The address to read Instruction from for output 0. This address is 8-byte aligned, for this reason address bits [2:0] will always be zero. */ uint64_t reserved_0_2 : 3; #else uint64_t reserved_0_2 : 3; uint64_t baddr : 61; #endif } s; struct cvmx_npi_base_addr_outputx_s cn30xx; struct cvmx_npi_base_addr_outputx_s cn31xx; struct cvmx_npi_base_addr_outputx_s cn38xx; struct cvmx_npi_base_addr_outputx_s cn38xxp2; struct cvmx_npi_base_addr_outputx_s cn50xx; struct cvmx_npi_base_addr_outputx_s cn58xx; struct cvmx_npi_base_addr_outputx_s cn58xxp1; } cvmx_npi_base_addr_outputx_t; /** * cvmx_npi_bist_status * * NPI_BIST_STATUS = NPI's BIST Status Register * * Results from BIST runs of NPI's memories. */ typedef union { uint64_t u64; struct cvmx_npi_bist_status_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t csr_bs : 1; /**< BIST Status for the csr_fifo */ uint64_t dif_bs : 1; /**< BIST Status for the dif_fifo */ uint64_t rdp_bs : 1; /**< BIST Status for the rdp_fifo */ uint64_t pcnc_bs : 1; /**< BIST Status for the pcn_cnt_fifo */ uint64_t pcn_bs : 1; /**< BIST Status for the pcn_fifo */ uint64_t rdn_bs : 1; /**< BIST Status for the rdn_fifo */ uint64_t pcac_bs : 1; /**< BIST Status for the pca_cmd_fifo */ uint64_t pcad_bs : 1; /**< BIST Status for the pca_data_fifo */ uint64_t rdnl_bs : 1; /**< BIST Status for the rdn_length_fifo */ uint64_t pgf_bs : 1; /**< BIST Status for the pgf_fifo */ uint64_t pig_bs : 1; /**< BIST Status for the pig_fifo */ uint64_t pof0_bs : 1; /**< BIST Status for the pof0_fifo */ uint64_t pof1_bs : 1; /**< BIST Status for the pof1_fifo */ uint64_t pof2_bs : 1; /**< BIST Status for the pof2_fifo */ uint64_t pof3_bs : 1; /**< BIST Status for the pof3_fifo */ uint64_t pos_bs : 1; /**< BIST Status for the pos_fifo */ uint64_t nus_bs : 1; /**< BIST Status for the nus_fifo */ uint64_t dob_bs : 1; /**< BIST Status for the dob_fifo */ uint64_t pdf_bs : 1; /**< BIST Status for the pdf_fifo */ uint64_t dpi_bs : 1; /**< BIST Status for the dpi_fifo */ #else uint64_t dpi_bs : 1; uint64_t pdf_bs : 1; uint64_t dob_bs : 1; uint64_t nus_bs : 1; uint64_t pos_bs : 1; uint64_t pof3_bs : 1; uint64_t pof2_bs : 1; uint64_t pof1_bs : 1; uint64_t pof0_bs : 1; uint64_t pig_bs : 1; uint64_t pgf_bs : 1; uint64_t rdnl_bs : 1; uint64_t pcad_bs : 1; uint64_t pcac_bs : 1; uint64_t rdn_bs : 1; uint64_t pcn_bs : 1; uint64_t pcnc_bs : 1; uint64_t rdp_bs : 1; uint64_t dif_bs : 1; uint64_t csr_bs : 1; uint64_t reserved_20_63 : 44; #endif } s; struct cvmx_npi_bist_status_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t csr_bs : 1; /**< BIST Status for the csr_fifo */ uint64_t dif_bs : 1; /**< BIST Status for the dif_fifo */ uint64_t rdp_bs : 1; /**< 
BIST Status for the rdp_fifo */ uint64_t pcnc_bs : 1; /**< BIST Status for the pcn_cnt_fifo */ uint64_t pcn_bs : 1; /**< BIST Status for the pcn_fifo */ uint64_t rdn_bs : 1; /**< BIST Status for the rdn_fifo */ uint64_t pcac_bs : 1; /**< BIST Status for the pca_cmd_fifo */ uint64_t pcad_bs : 1; /**< BIST Status for the pca_data_fifo */ uint64_t rdnl_bs : 1; /**< BIST Status for the rdn_length_fifo */ uint64_t pgf_bs : 1; /**< BIST Status for the pgf_fifo */ uint64_t pig_bs : 1; /**< BIST Status for the pig_fifo */ uint64_t pof0_bs : 1; /**< BIST Status for the pof0_fifo */ uint64_t reserved_5_7 : 3; uint64_t pos_bs : 1; /**< BIST Status for the pos_fifo */ uint64_t nus_bs : 1; /**< BIST Status for the nus_fifo */ uint64_t dob_bs : 1; /**< BIST Status for the dob_fifo */ uint64_t pdf_bs : 1; /**< BIST Status for the pdf_fifo */ uint64_t dpi_bs : 1; /**< BIST Status for the dpi_fifo */ #else uint64_t dpi_bs : 1; uint64_t pdf_bs : 1; uint64_t dob_bs : 1; uint64_t nus_bs : 1; uint64_t pos_bs : 1; uint64_t reserved_5_7 : 3; uint64_t pof0_bs : 1; uint64_t pig_bs : 1; uint64_t pgf_bs : 1; uint64_t rdnl_bs : 1; uint64_t pcad_bs : 1; uint64_t pcac_bs : 1; uint64_t rdn_bs : 1; uint64_t pcn_bs : 1; uint64_t pcnc_bs : 1; uint64_t rdp_bs : 1; uint64_t dif_bs : 1; uint64_t csr_bs : 1; uint64_t reserved_20_63 : 44; #endif } cn30xx; struct cvmx_npi_bist_status_s cn31xx; struct cvmx_npi_bist_status_s cn38xx; struct cvmx_npi_bist_status_s cn38xxp2; struct cvmx_npi_bist_status_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t csr_bs : 1; /**< BIST Status for the csr_fifo */ uint64_t dif_bs : 1; /**< BIST Status for the dif_fifo */ uint64_t rdp_bs : 1; /**< BIST Status for the rdp_fifo */ uint64_t pcnc_bs : 1; /**< BIST Status for the pcn_cnt_fifo */ uint64_t pcn_bs : 1; /**< BIST Status for the pcn_fifo */ uint64_t rdn_bs : 1; /**< BIST Status for the rdn_fifo */ uint64_t pcac_bs : 1; /**< BIST Status for the pca_cmd_fifo */ uint64_t pcad_bs : 1; /**< BIST Status for the pca_data_fifo */ uint64_t rdnl_bs : 1; /**< BIST Status for the rdn_length_fifo */ uint64_t pgf_bs : 1; /**< BIST Status for the pgf_fifo */ uint64_t pig_bs : 1; /**< BIST Status for the pig_fifo */ uint64_t pof0_bs : 1; /**< BIST Status for the pof0_fifo */ uint64_t pof1_bs : 1; /**< BIST Status for the pof1_fifo */ uint64_t reserved_5_6 : 2; uint64_t pos_bs : 1; /**< BIST Status for the pos_fifo */ uint64_t nus_bs : 1; /**< BIST Status for the nus_fifo */ uint64_t dob_bs : 1; /**< BIST Status for the dob_fifo */ uint64_t pdf_bs : 1; /**< BIST Status for the pdf_fifo */ uint64_t dpi_bs : 1; /**< BIST Status for the dpi_fifo */ #else uint64_t dpi_bs : 1; uint64_t pdf_bs : 1; uint64_t dob_bs : 1; uint64_t nus_bs : 1; uint64_t pos_bs : 1; uint64_t reserved_5_6 : 2; uint64_t pof1_bs : 1; uint64_t pof0_bs : 1; uint64_t pig_bs : 1; uint64_t pgf_bs : 1; uint64_t rdnl_bs : 1; uint64_t pcad_bs : 1; uint64_t pcac_bs : 1; uint64_t rdn_bs : 1; uint64_t pcn_bs : 1; uint64_t pcnc_bs : 1; uint64_t rdp_bs : 1; uint64_t dif_bs : 1; uint64_t csr_bs : 1; uint64_t reserved_20_63 : 44; #endif } cn50xx; struct cvmx_npi_bist_status_s cn58xx; struct cvmx_npi_bist_status_s cn58xxp1; } cvmx_npi_bist_status_t; /** * cvmx_npi_buff_size_output# * * NPI_BUFF_SIZE_OUTPUT0 = NPI's D/I Buffer Sizes For Output 0 * * The size in bytes of the Data Buffer and Information Buffer for output 0.
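 *
 * Illustrative configuration sketch (not part of the generated definitions).
 * Assumes the cvmx_write_csr() accessor; CVMX_NPI_BUFF_SIZE_OUTPUTX(port) is
 * a stand-in for whatever address macro the companion CSR-address header
 * defines, and the sizes are arbitrary examples:
 *
 *   cvmx_npi_buff_size_outputx_t buf;
 *   buf.u64 = 0;
 *   buf.s.bsize = 1024;  // bytes per output data buffer
 *   buf.s.isize = 64;    // info bytes moved ahead of the packet (0-120)
 *   cvmx_write_csr(CVMX_NPI_BUFF_SIZE_OUTPUTX(0), buf.u64);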
*/ typedef union { uint64_t u64; struct cvmx_npi_buff_size_outputx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_23_63 : 41; uint64_t isize : 7; /**< The number of bytes to move to the Info-Pointer from the front of the packet. Legal values are 0-120. */ uint64_t bsize : 16; /**< The size in bytes of the area pointed to by buffer pointer for output packet data. */ #else uint64_t bsize : 16; uint64_t isize : 7; uint64_t reserved_23_63 : 41; #endif } s; struct cvmx_npi_buff_size_outputx_s cn30xx; struct cvmx_npi_buff_size_outputx_s cn31xx; struct cvmx_npi_buff_size_outputx_s cn38xx; struct cvmx_npi_buff_size_outputx_s cn38xxp2; struct cvmx_npi_buff_size_outputx_s cn50xx; struct cvmx_npi_buff_size_outputx_s cn58xx; struct cvmx_npi_buff_size_outputx_s cn58xxp1; } cvmx_npi_buff_size_outputx_t; /** * cvmx_npi_comp_ctl * * NPI_COMP_CTL = PCI Compensation Control * * PCI Compensation Control */ typedef union { uint64_t u64; struct cvmx_npi_comp_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_10_63 : 54; uint64_t pctl : 5; /**< Bypass value for PCTL */ uint64_t nctl : 5; /**< Bypass value for NCTL */ #else uint64_t nctl : 5; uint64_t pctl : 5; uint64_t reserved_10_63 : 54; #endif } s; struct cvmx_npi_comp_ctl_s cn50xx; struct cvmx_npi_comp_ctl_s cn58xx; struct cvmx_npi_comp_ctl_s cn58xxp1; } cvmx_npi_comp_ctl_t; /** * cvmx_npi_ctl_status * * NPI_CTL_STATUS = NPI's Control Status Register * * Contains control and status for NPI. * Writes to this register are not ordered with writes/reads to the PCI Memory space. * To ensure that a write has completed the user must read the register before * making an access (i.e. PCI memory space) that requires the value of this register to be updated. */ typedef union { uint64_t u64; struct cvmx_npi_ctl_status_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_63_63 : 1; uint64_t chip_rev : 8; /**< The revision of the N3. */ uint64_t dis_pniw : 1; /**< When asserted '1' accesses from the PNI Window Registers are disabled. */ uint64_t out3_enb : 1; /**< When asserted '1' the output3 engine is enabled. After enabling the values of the associated Address and Size Register should not be changed. */ uint64_t out2_enb : 1; /**< When asserted '1' the output2 engine is enabled. After enabling the values of the associated Address and Size Register should not be changed. */ uint64_t out1_enb : 1; /**< When asserted '1' the output1 engine is enabled. After enabling the values of the associated Address and Size Register should not be changed. */ uint64_t out0_enb : 1; /**< When asserted '1' the output0 engine is enabled. After enabling the values of the associated Address and Size Register should not be changed. */ uint64_t ins3_enb : 1; /**< When asserted '1' the gather3 engine is enabled. After enabling the values of the associated Address and Size Register should not be changed. */ uint64_t ins2_enb : 1; /**< When asserted '1' the gather2 engine is enabled. After enabling the values of the associated Address and Size Register should not be changed. */ uint64_t ins1_enb : 1; /**< When asserted '1' the gather1 engine is enabled. After enabling the values of the associated Address and Size Register should not be changed. */ uint64_t ins0_enb : 1; /**< When asserted '1' the gather0 engine is enabled. After enabling the values of the associated Address and Size Register should not be changed. */ uint64_t ins3_64b : 1; /**< When asserted '1' the instructions read by the gather3 engine are 64-Byte instructions, when de-asserted '0' instructions are 32-byte.
*/ uint64_t ins2_64b : 1; /**< When asserted '1' the instructions read by the gather2 engine are 64-Byte instructions, when de-asserted '0' instructions are 32-byte. */ uint64_t ins1_64b : 1; /**< When asserted '1' the instructions read by the gather1 engine are 64-Byte instructions, when de-asserted '0' instructions are 32-byte. */ uint64_t ins0_64b : 1; /**< When asserted '1' the instructions read by the gather0 engine are 64-Byte instructions, when de-asserted '0' instructions are 32-byte. */ uint64_t pci_wdis : 1; /**< When set '1' disables access to registers in PNI address range 0x1000 - 0x17FF from the PCI. */ uint64_t wait_com : 1; /**< When set '1' causes the NPI to wait for a commit from the L2C before sending additional access to the L2C from the PCI. */ uint64_t reserved_37_39 : 3; uint64_t max_word : 5; /**< The maximum number of words to merge into a single write operation from the PPs to the PCI. Legal values are 1 to 32, where a '0' is treated as 32. */ uint64_t reserved_10_31 : 22; uint64_t timer : 10; /**< When the NPI starts a PP to PCI write it will wait no longer than the value of TIMER in eclks to merge additional writes from the PPs into 1 large write. The value for this field is 1 to 1024, where a value of '0' is treated as 1024. */ #else uint64_t timer : 10; uint64_t reserved_10_31 : 22; uint64_t max_word : 5; uint64_t reserved_37_39 : 3; uint64_t wait_com : 1; uint64_t pci_wdis : 1; uint64_t ins0_64b : 1; uint64_t ins1_64b : 1; uint64_t ins2_64b : 1; uint64_t ins3_64b : 1; uint64_t ins0_enb : 1; uint64_t ins1_enb : 1; uint64_t ins2_enb : 1; uint64_t ins3_enb : 1; uint64_t out0_enb : 1; uint64_t out1_enb : 1; uint64_t out2_enb : 1; uint64_t out3_enb : 1; uint64_t dis_pniw : 1; uint64_t chip_rev : 8; uint64_t reserved_63_63 : 1; #endif } s; struct cvmx_npi_ctl_status_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_63_63 : 1; uint64_t chip_rev : 8; /**< The revision of the N3. */ uint64_t dis_pniw : 1; /**< When asserted '1' accesses from the PNI Window Registers are disabled. */ uint64_t reserved_51_53 : 3; uint64_t out0_enb : 1; /**< When asserted '1' the output0 engine is enabled. After enabling the values of the associated Address and Size Register should not be changed. */ uint64_t reserved_47_49 : 3; uint64_t ins0_enb : 1; /**< When asserted '1' the gather0 engine is enabled. After enabling the values of the associated Address and Size Register should not be changed. */ uint64_t reserved_43_45 : 3; uint64_t ins0_64b : 1; /**< When asserted '1' the instructions read by the gather0 engine are 64-Byte instructions, when de-asserted '0' instructions are 32-byte. */ uint64_t pci_wdis : 1; /**< When set '1' disables access to registers in PNI address range 0x1000 - 0x17FF from the PCI. */ uint64_t wait_com : 1; /**< When set '1' causes the NPI to wait for a commit from the L2C before sending additional access to the L2C from the PCI. */ uint64_t reserved_37_39 : 3; uint64_t max_word : 5; /**< The maximum number of words to merge into a single write operation from the PPs to the PCI. Legal values are 1 to 32, where a '0' is treated as 32. */ uint64_t reserved_10_31 : 22; uint64_t timer : 10; /**< When the NPI starts a PP to PCI write it will wait no longer than the value of TIMER in eclks to merge additional writes from the PPs into 1 large write. The value for this field is 1 to 1024, where a value of '0' is treated as 1024.
*/ #else uint64_t timer : 10; uint64_t reserved_10_31 : 22; uint64_t max_word : 5; uint64_t reserved_37_39 : 3; uint64_t wait_com : 1; uint64_t pci_wdis : 1; uint64_t ins0_64b : 1; uint64_t reserved_43_45 : 3; uint64_t ins0_enb : 1; uint64_t reserved_47_49 : 3; uint64_t out0_enb : 1; uint64_t reserved_51_53 : 3; uint64_t dis_pniw : 1; uint64_t chip_rev : 8; uint64_t reserved_63_63 : 1; #endif } cn30xx; struct cvmx_npi_ctl_status_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_63_63 : 1; uint64_t chip_rev : 8; /**< The revision of the N3. 0 => pass1.x, 1 => 2.0 */ uint64_t dis_pniw : 1; /**< When asserted '1' accesses from the PNI Window Registers are disabled. */ uint64_t reserved_52_53 : 2; uint64_t out1_enb : 1; /**< When asserted '1' the output1 engine is enabled. After enabling the values of the associated Address and Size Register should not be changed. */ uint64_t out0_enb : 1; /**< When asserted '1' the output0 engine is enabled. After enabling the values of the associated Address and Size Register should not be changed. */ uint64_t reserved_48_49 : 2; uint64_t ins1_enb : 1; /**< When asserted '1' the gather1 engine is enabled. After enabling the values of the associated Address and Size Register should not be changed. */ uint64_t ins0_enb : 1; /**< When asserted '1' the gather0 engine is enabled. After enabling the values of the associated Address and Size Register should not be changed. */ uint64_t reserved_44_45 : 2; uint64_t ins1_64b : 1; /**< When asserted '1' the instructions read by the gather1 engine are 64-Byte instructions, when de-asserted '0' instructions are 32-byte. */ uint64_t ins0_64b : 1; /**< When asserted '1' the instructions read by the gather0 engine are 64-Byte instructions, when de-asserted '0' instructions are 32-byte. */ uint64_t pci_wdis : 1; /**< When set '1' disables access to registers in PNI address range 0x1000 - 0x17FF from the PCI. */ uint64_t wait_com : 1; /**< When set '1' causes the NPI to wait for a commit from the L2C before sending additional access to the L2C from the PCI. */ uint64_t reserved_37_39 : 3; uint64_t max_word : 5; /**< The maximum number of words to merge into a single write operation from the PPs to the PCI. Legal values are 1 to 32, where a '0' is treated as 32. */ uint64_t reserved_10_31 : 22; uint64_t timer : 10; /**< When the NPI starts a PP to PCI write it will wait no longer than the value of TIMER in eclks to merge additional writes from the PPs into 1 large write. The value for this field is 1 to 1024, where a value of '0' is treated as 1024. */ #else uint64_t timer : 10; uint64_t reserved_10_31 : 22; uint64_t max_word : 5; uint64_t reserved_37_39 : 3; uint64_t wait_com : 1; uint64_t pci_wdis : 1; uint64_t ins0_64b : 1; uint64_t ins1_64b : 1; uint64_t reserved_44_45 : 2; uint64_t ins0_enb : 1; uint64_t ins1_enb : 1; uint64_t reserved_48_49 : 2; uint64_t out0_enb : 1; uint64_t out1_enb : 1; uint64_t reserved_52_53 : 2; uint64_t dis_pniw : 1; uint64_t chip_rev : 8; uint64_t reserved_63_63 : 1; #endif } cn31xx; struct cvmx_npi_ctl_status_s cn38xx; struct cvmx_npi_ctl_status_s cn38xxp2; struct cvmx_npi_ctl_status_cn31xx cn50xx; struct cvmx_npi_ctl_status_s cn58xx; struct cvmx_npi_ctl_status_s cn58xxp1; } cvmx_npi_ctl_status_t; /** * cvmx_npi_dbg_select * * NPI_DBG_SELECT = Debug Select Register * * Contains the debug select value last written to the RSLs.
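 *
 * Illustrative sketch (not part of the generated definitions). Assumes the
 * cvmx_write_csr() accessor; CVMX_NPI_DBG_SELECT stands in for the address
 * constant in the companion CSR-address header, and the select value is an
 * arbitrary example:
 *
 *   cvmx_npi_dbg_select_t dbg;
 *   dbg.u64 = 0;
 *   dbg.s.dbg_sel = 0x1f;                       // value broadcast to all RSLs
 *   cvmx_write_csr(CVMX_NPI_DBG_SELECT, dbg.u64);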
*/ typedef union { uint64_t u64; struct cvmx_npi_dbg_select_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t dbg_sel : 16; /**< When this register is written its value is sent to all RSLs. */ #else uint64_t dbg_sel : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_npi_dbg_select_s cn30xx; struct cvmx_npi_dbg_select_s cn31xx; struct cvmx_npi_dbg_select_s cn38xx; struct cvmx_npi_dbg_select_s cn38xxp2; struct cvmx_npi_dbg_select_s cn50xx; struct cvmx_npi_dbg_select_s cn58xx; struct cvmx_npi_dbg_select_s cn58xxp1; } cvmx_npi_dbg_select_t; /** * cvmx_npi_dma_control * * NPI_DMA_CONTROL = DMA Control Register * * Controls operation of the DMA IN/OUT of the NPI. */ typedef union { uint64_t u64; struct cvmx_npi_dma_control_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t b0_lend : 1; /**< When set '1' and the NPI is in the mode to write 0 to L2C memory when a DMA is done, the address to be written to will be treated as a Little Endian address. This field is new to PASS-2. */ uint64_t dwb_denb : 1; /**< When set '1' the NPI will send a value in the DWB field for a free page operation for the memory that contained the data in N3. */ uint64_t dwb_ichk : 9; /**< When Instruction Chunks for DMA operations are freed this value is used for the DWB field of the operation. */ uint64_t fpa_que : 3; /**< The FPA queue that the instruction-chunk page will be returned to when used. */ uint64_t o_add1 : 1; /**< When set '1', 1 will be added to the DMA counters; if '0', the number of bytes in the DMA transfer will be added to the count register. */ uint64_t o_ro : 1; /**< Relaxed Ordering Mode for DMA. */ uint64_t o_ns : 1; /**< NoSnoop for DMA. */ uint64_t o_es : 2; /**< Endian Swap Mode for DMA. */ uint64_t o_mode : 1; /**< Select the PCI_POINTER MODE to be used: '1' uses pointer values for the address and register values for RO, ES, and NS; '0' uses register values for the address and pointer values for RO, ES, and NS. */ uint64_t hp_enb : 1; /**< Enables the High Priority DMA. While this bit is disabled '0' the value in the NPI_HIGHP_IBUFF_SADDR is re-loaded to the starting address of the High Priority DMA engine. The CSIZE field will be reloaded for the High Priority DMA Engine. */ uint64_t lp_enb : 1; /**< Enables the Low Priority DMA. While this bit is disabled '0' the value in the NPI_LOWP_IBUFF_SADDR is re-loaded to the starting address of the Low Priority DMA engine. PASS-2: When this bit is '0' the value in the CSIZE field will be reloaded, for the Low Priority DMA Engine. */ uint64_t csize : 14; /**< The size in words of the DMA Instruction Chunk. This value should only be written once. After writing this value a new value will not be recognized until the end of the DMA I-Chunk is reached.
*/ #else uint64_t csize : 14; uint64_t lp_enb : 1; uint64_t hp_enb : 1; uint64_t o_mode : 1; uint64_t o_es : 2; uint64_t o_ns : 1; uint64_t o_ro : 1; uint64_t o_add1 : 1; uint64_t fpa_que : 3; uint64_t dwb_ichk : 9; uint64_t dwb_denb : 1; uint64_t b0_lend : 1; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_npi_dma_control_s cn30xx; struct cvmx_npi_dma_control_s cn31xx; struct cvmx_npi_dma_control_s cn38xx; struct cvmx_npi_dma_control_s cn38xxp2; struct cvmx_npi_dma_control_s cn50xx; struct cvmx_npi_dma_control_s cn58xx; struct cvmx_npi_dma_control_s cn58xxp1; } cvmx_npi_dma_control_t; /** * cvmx_npi_dma_highp_counts * * NPI_DMA_HIGHP_COUNTS = NPI's High Priority DMA Counts * * Values for determining the number of instructions for High Priority DMA in the NPI. */ typedef union { uint64_t u64; struct cvmx_npi_dma_highp_counts_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_39_63 : 25; uint64_t fcnt : 7; /**< Number of words in the Instruction FIFO. */ uint64_t dbell : 32; /**< Number of available words of Instructions to read. */ #else uint64_t dbell : 32; uint64_t fcnt : 7; uint64_t reserved_39_63 : 25; #endif } s; struct cvmx_npi_dma_highp_counts_s cn30xx; struct cvmx_npi_dma_highp_counts_s cn31xx; struct cvmx_npi_dma_highp_counts_s cn38xx; struct cvmx_npi_dma_highp_counts_s cn38xxp2; struct cvmx_npi_dma_highp_counts_s cn50xx; struct cvmx_npi_dma_highp_counts_s cn58xx; struct cvmx_npi_dma_highp_counts_s cn58xxp1; } cvmx_npi_dma_highp_counts_t; /** * cvmx_npi_dma_highp_naddr * * NPI_DMA_HIGHP_NADDR = NPI's High Priority DMA Next Ichunk Address * * The place NPI will read the next Ichunk data from. This is valid when state is 0. */ typedef union { uint64_t u64; struct cvmx_npi_dma_highp_naddr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_40_63 : 24; uint64_t state : 4; /**< The DMA instruction engine state vector. Typical value is 0 (IDLE). */ uint64_t addr : 36; /**< The next L2C address to read DMA instructions from for the High Priority DMA engine. */ #else uint64_t addr : 36; uint64_t state : 4; uint64_t reserved_40_63 : 24; #endif } s; struct cvmx_npi_dma_highp_naddr_s cn30xx; struct cvmx_npi_dma_highp_naddr_s cn31xx; struct cvmx_npi_dma_highp_naddr_s cn38xx; struct cvmx_npi_dma_highp_naddr_s cn38xxp2; struct cvmx_npi_dma_highp_naddr_s cn50xx; struct cvmx_npi_dma_highp_naddr_s cn58xx; struct cvmx_npi_dma_highp_naddr_s cn58xxp1; } cvmx_npi_dma_highp_naddr_t; /** * cvmx_npi_dma_lowp_counts * * NPI_DMA_LOWP_COUNTS = NPI's Low Priority DMA Counts * * Values for determining the number of instructions for Low Priority DMA in the NPI. */ typedef union { uint64_t u64; struct cvmx_npi_dma_lowp_counts_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_39_63 : 25; uint64_t fcnt : 7; /**< Number of words in the Instruction FIFO. */ uint64_t dbell : 32; /**< Number of available words of Instructions to read. */ #else uint64_t dbell : 32; uint64_t fcnt : 7; uint64_t reserved_39_63 : 25; #endif } s; struct cvmx_npi_dma_lowp_counts_s cn30xx; struct cvmx_npi_dma_lowp_counts_s cn31xx; struct cvmx_npi_dma_lowp_counts_s cn38xx; struct cvmx_npi_dma_lowp_counts_s cn38xxp2; struct cvmx_npi_dma_lowp_counts_s cn50xx; struct cvmx_npi_dma_lowp_counts_s cn58xx; struct cvmx_npi_dma_lowp_counts_s cn58xxp1; } cvmx_npi_dma_lowp_counts_t; /** * cvmx_npi_dma_lowp_naddr * * NPI_DMA_LOWP_NADDR = NPI's Low Priority DMA Next Ichunk Address * * The place NPI will read the next Ichunk data from. This is valid when state is 0.
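 *
 * Illustrative poll sketch (not part of the generated definitions). Assumes
 * the cvmx_read_csr() accessor; CVMX_NPI_DMA_LOWP_NADDR stands in for the
 * address constant in the companion CSR-address header:
 *
 *   cvmx_npi_dma_lowp_naddr_t na;
 *   na.u64 = cvmx_read_csr(CVMX_NPI_DMA_LOWP_NADDR);
 *   if (na.s.state == 0)                  // engine idle, ADDR is meaningful
 *       printf("next Ichunk at 0x%llx\n", (unsigned long long)na.s.addr);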
*/ typedef union { uint64_t u64; struct cvmx_npi_dma_lowp_naddr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_40_63 : 24; uint64_t state : 4; /**< The DMA instruction engine state vector. Typical value is 0 (IDLE). */ uint64_t addr : 36; /**< The next L2C address to read DMA instructions from for the Low Priority DMA engine. */ #else uint64_t addr : 36; uint64_t state : 4; uint64_t reserved_40_63 : 24; #endif } s; struct cvmx_npi_dma_lowp_naddr_s cn30xx; struct cvmx_npi_dma_lowp_naddr_s cn31xx; struct cvmx_npi_dma_lowp_naddr_s cn38xx; struct cvmx_npi_dma_lowp_naddr_s cn38xxp2; struct cvmx_npi_dma_lowp_naddr_s cn50xx; struct cvmx_npi_dma_lowp_naddr_s cn58xx; struct cvmx_npi_dma_lowp_naddr_s cn58xxp1; } cvmx_npi_dma_lowp_naddr_t; /** * cvmx_npi_highp_dbell * * NPI_HIGHP_DBELL = High Priority Door Bell * * The door bell register for the high priority DMA queue. */ typedef union { uint64_t u64; struct cvmx_npi_highp_dbell_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t dbell : 16; /**< The value written to this register is added to the number of 8-byte words to be read and processed for the high priority DMA queue. */ #else uint64_t dbell : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_npi_highp_dbell_s cn30xx; struct cvmx_npi_highp_dbell_s cn31xx; struct cvmx_npi_highp_dbell_s cn38xx; struct cvmx_npi_highp_dbell_s cn38xxp2; struct cvmx_npi_highp_dbell_s cn50xx; struct cvmx_npi_highp_dbell_s cn58xx; struct cvmx_npi_highp_dbell_s cn58xxp1; } cvmx_npi_highp_dbell_t; /** * cvmx_npi_highp_ibuff_saddr * * NPI_HIGHP_IBUFF_SADDR = DMA High Priority Instruction Buffer Starting Address * * The address to start reading Instructions from for HIGHP. */ typedef union { uint64_t u64; struct cvmx_npi_highp_ibuff_saddr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t saddr : 36; /**< The starting address to read the first instruction. */ #else uint64_t saddr : 36; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_npi_highp_ibuff_saddr_s cn30xx; struct cvmx_npi_highp_ibuff_saddr_s cn31xx; struct cvmx_npi_highp_ibuff_saddr_s cn38xx; struct cvmx_npi_highp_ibuff_saddr_s cn38xxp2; struct cvmx_npi_highp_ibuff_saddr_s cn50xx; struct cvmx_npi_highp_ibuff_saddr_s cn58xx; struct cvmx_npi_highp_ibuff_saddr_s cn58xxp1; } cvmx_npi_highp_ibuff_saddr_t; /** * cvmx_npi_input_control * * NPI_INPUT_CONTROL = NPI's Input Control Register * * Control for reads for gather list and instructions. */ typedef union { uint64_t u64; struct cvmx_npi_input_control_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_23_63 : 41; uint64_t pkt_rr : 1; /**< When set '1' the input packet selection will be made with a Round Robin arbitration. When '0' the input packet port is fixed in priority, where the lower port number has higher priority. PASS3 Field */ uint64_t pbp_dhi : 13; /**< Field used in calculating a DPTR when [PBP] is set. */ uint64_t d_nsr : 1; /**< Enables '1' NoSnoop for reading of gather data. */ uint64_t d_esr : 2; /**< The Endian-Swap-Mode for reading of gather data. */ uint64_t d_ror : 1; /**< Enables '1' Relaxed Ordering for reading of gather data. */ uint64_t use_csr : 1; /**< When set '1' the csr value will be used for ROR, ESR, and NSR. When clear '0' the value in DPTR will be used. In turn the bits not used for ROR, ESR, and NSR will be used for bits [63:60] of the address used to fetch packet data. */ uint64_t nsr : 1; /**< Enables '1' NoSnoop for reading of gather list and gather instruction.
*/ uint64_t esr : 2; /**< The Endian-Swap-Mode for reading of gather list and gather instruction. */ uint64_t ror : 1; /**< Enables '1' Relaxed Ordering for reading of gather list and gather instruction. */ #else uint64_t ror : 1; uint64_t esr : 2; uint64_t nsr : 1; uint64_t use_csr : 1; uint64_t d_ror : 1; uint64_t d_esr : 2; uint64_t d_nsr : 1; uint64_t pbp_dhi : 13; uint64_t pkt_rr : 1; uint64_t reserved_23_63 : 41; #endif } s; struct cvmx_npi_input_control_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_22_63 : 42; uint64_t pbp_dhi : 13; /**< Field used in calculating a DPTR when [PBP] is set. */ uint64_t d_nsr : 1; /**< Enables '1' NoSnoop for reading of gather data. */ uint64_t d_esr : 2; /**< The Endian-Swap-Mode for reading of gather data. */ uint64_t d_ror : 1; /**< Enables '1' Relaxed Ordering for reading of gather data. */ uint64_t use_csr : 1; /**< When set '1' the csr value will be used for ROR, ESR, and NSR. When clear '0' the value in DPTR will be used. In turn the bits not used for ROR, ESR, and NSR will be used for bits [63:60] of the address used to fetch packet data. */ uint64_t nsr : 1; /**< Enables '1' NoSnoop for reading of gather list and gather instruction. */ uint64_t esr : 2; /**< The Endian-Swap-Mode for reading of gather list and gather instruction. */ uint64_t ror : 1; /**< Enables '1' Relaxed Ordering for reading of gather list and gather instruction. */ #else uint64_t ror : 1; uint64_t esr : 2; uint64_t nsr : 1; uint64_t use_csr : 1; uint64_t d_ror : 1; uint64_t d_esr : 2; uint64_t d_nsr : 1; uint64_t pbp_dhi : 13; uint64_t reserved_22_63 : 42; #endif } cn30xx; struct cvmx_npi_input_control_cn30xx cn31xx; struct cvmx_npi_input_control_s cn38xx; struct cvmx_npi_input_control_cn30xx cn38xxp2; struct cvmx_npi_input_control_s cn50xx; struct cvmx_npi_input_control_s cn58xx; struct cvmx_npi_input_control_s cn58xxp1; } cvmx_npi_input_control_t; /** * cvmx_npi_int_enb * * NPI_INTERRUPT_ENB = NPI's Interrupt Enable Register * * Used to enable the various interrupting conditions of NPI */ typedef union { uint64_t u64; struct cvmx_npi_int_enb_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_62_63 : 2; uint64_t q1_a_f : 1; /**< Enables NPI_INT_SUM[Q1_A_F] to generate an interrupt. */ uint64_t q1_s_e : 1; /**< Enables NPI_INT_SUM[Q1_S_E] to generate an interrupt. */ uint64_t pdf_p_f : 1; /**< Enables NPI_INT_SUM[PDF_P_F] to generate an interrupt. */ uint64_t pdf_p_e : 1; /**< Enables NPI_INT_SUM[PDF_P_E] to generate an interrupt. */ uint64_t pcf_p_f : 1; /**< Enables NPI_INT_SUM[PCF_P_F] to generate an interrupt. */ uint64_t pcf_p_e : 1; /**< Enables NPI_INT_SUM[PCF_P_E] to generate an interrupt. */ uint64_t rdx_s_e : 1; /**< Enables NPI_INT_SUM[RDX_S_E] to generate an interrupt. */ uint64_t rwx_s_e : 1; /**< Enables NPI_INT_SUM[RWX_S_E] to generate an interrupt. */ uint64_t pnc_a_f : 1; /**< Enables NPI_INT_SUM[PNC_A_F] to generate an interrupt. */ uint64_t pnc_s_e : 1; /**< Enables NPI_INT_SUM[PNC_S_E] to generate an interrupt. */ uint64_t com_a_f : 1; /**< Enables NPI_INT_SUM[COM_A_F] to generate an interrupt. */ uint64_t com_s_e : 1; /**< Enables NPI_INT_SUM[COM_S_E] to generate an interrupt. */ uint64_t q3_a_f : 1; /**< Enables NPI_INT_SUM[Q3_A_F] to generate an interrupt. */ uint64_t q3_s_e : 1; /**< Enables NPI_INT_SUM[Q3_S_E] to generate an interrupt. */ uint64_t q2_a_f : 1; /**< Enables NPI_INT_SUM[Q2_A_F] to generate an interrupt. */ uint64_t q2_s_e : 1; /**< Enables NPI_INT_SUM[Q2_S_E] to generate an interrupt.
*/ uint64_t pcr_a_f : 1; /**< Enables NPI_INT_SUM[PCR_A_F] to generate an interrupt. */ uint64_t pcr_s_e : 1; /**< Enables NPI_INT_SUM[PCR_S_E] to generate an interrupt. */ uint64_t fcr_a_f : 1; /**< Enables NPI_INT_SUM[FCR_A_F] to generate an interrupt. */ uint64_t fcr_s_e : 1; /**< Enables NPI_INT_SUM[FCR_S_E] to generate an interrupt. */ uint64_t iobdma : 1; /**< Enables NPI_INT_SUM[IOBDMA] to generate an interrupt. */ uint64_t p_dperr : 1; /**< Enables NPI_INT_SUM[P_DPERR] to generate an interrupt. */ uint64_t win_rto : 1; /**< Enables NPI_INT_SUM[WIN_RTO] to generate an interrupt. */ uint64_t i3_pperr : 1; /**< Enables NPI_INT_SUM[I3_PPERR] to generate an interrupt. */ uint64_t i2_pperr : 1; /**< Enables NPI_INT_SUM[I2_PPERR] to generate an interrupt. */ uint64_t i1_pperr : 1; /**< Enables NPI_INT_SUM[I1_PPERR] to generate an interrupt. */ uint64_t i0_pperr : 1; /**< Enables NPI_INT_SUM[I0_PPERR] to generate an interrupt. */ uint64_t p3_ptout : 1; /**< Enables NPI_INT_SUM[P3_PTOUT] to generate an interrupt. */ uint64_t p2_ptout : 1; /**< Enables NPI_INT_SUM[P2_PTOUT] to generate an interrupt. */ uint64_t p1_ptout : 1; /**< Enables NPI_INT_SUM[P1_PTOUT] to generate an interrupt. */ uint64_t p0_ptout : 1; /**< Enables NPI_INT_SUM[P0_PTOUT] to generate an interrupt. */ uint64_t p3_pperr : 1; /**< Enables NPI_INT_SUM[P3_PPERR] to generate an interrupt. */ uint64_t p2_pperr : 1; /**< Enables NPI_INT_SUM[P2_PPERR] to generate an interrupt. */ uint64_t p1_pperr : 1; /**< Enables NPI_INT_SUM[P1_PPERR] to generate an interrupt. */ uint64_t p0_pperr : 1; /**< Enables NPI_INT_SUM[P0_PPERR] to generate an interrupt. */ uint64_t g3_rtout : 1; /**< Enables NPI_INT_SUM[G3_RTOUT] to generate an interrupt. */ uint64_t g2_rtout : 1; /**< Enables NPI_INT_SUM[G2_RTOUT] to generate an interrupt. */ uint64_t g1_rtout : 1; /**< Enables NPI_INT_SUM[G1_RTOUT] to generate an interrupt. */ uint64_t g0_rtout : 1; /**< Enables NPI_INT_SUM[G0_RTOUT] to generate an interrupt. */ uint64_t p3_perr : 1; /**< Enables NPI_INT_SUM[P3_PERR] to generate an interrupt. */ uint64_t p2_perr : 1; /**< Enables NPI_INT_SUM[P2_PERR] to generate an interrupt. */ uint64_t p1_perr : 1; /**< Enables NPI_INT_SUM[P1_PERR] to generate an interrupt. */ uint64_t p0_perr : 1; /**< Enables NPI_INT_SUM[P0_PERR] to generate an interrupt. */ uint64_t p3_rtout : 1; /**< Enables NPI_INT_SUM[P3_RTOUT] to generate an interrupt. */ uint64_t p2_rtout : 1; /**< Enables NPI_INT_SUM[P2_RTOUT] to generate an interrupt. */ uint64_t p1_rtout : 1; /**< Enables NPI_INT_SUM[P1_RTOUT] to generate an interrupt. */ uint64_t p0_rtout : 1; /**< Enables NPI_INT_SUM[P0_RTOUT] to generate an interrupt. */ uint64_t i3_overf : 1; /**< Enables NPI_INT_SUM[I3_OVERF] to generate an interrupt. */ uint64_t i2_overf : 1; /**< Enables NPI_INT_SUM[I2_OVERF] to generate an interrupt. */ uint64_t i1_overf : 1; /**< Enables NPI_INT_SUM[I1_OVERF] to generate an interrupt. */ uint64_t i0_overf : 1; /**< Enables NPI_INT_SUM[I0_OVERF] to generate an interrupt. */ uint64_t i3_rtout : 1; /**< Enables NPI_INT_SUM[I3_RTOUT] to generate an interrupt. */ uint64_t i2_rtout : 1; /**< Enables NPI_INT_SUM[I2_RTOUT] to generate an interrupt. */ uint64_t i1_rtout : 1; /**< Enables NPI_INT_SUM[I1_RTOUT] to generate an interrupt. */ uint64_t i0_rtout : 1; /**< Enables NPI_INT_SUM[I0_RTOUT] to generate an interrupt. */ uint64_t po3_2sml : 1; /**< Enables NPI_INT_SUM[PO3_2SML] to generate an interrupt. */ uint64_t po2_2sml : 1; /**< Enables NPI_INT_SUM[PO2_2SML] to generate an interrupt. 
*/ uint64_t po1_2sml : 1; /**< Enables NPI_INT_SUM[PO1_2SML] to generate an interrupt. */ uint64_t po0_2sml : 1; /**< Enables NPI_INT_SUM[PO0_2SML] to generate an interrupt. */ uint64_t pci_rsl : 1; /**< Enables NPI_INT_SUM[PCI_RSL] to generate an interrupt. */ uint64_t rml_wto : 1; /**< Enables NPI_INT_SUM[RML_WTO] to generate an interrupt. */ uint64_t rml_rto : 1; /**< Enables NPI_INT_SUM[RML_RTO] to generate an interrupt. */ #else uint64_t rml_rto : 1; uint64_t rml_wto : 1; uint64_t pci_rsl : 1; uint64_t po0_2sml : 1; uint64_t po1_2sml : 1; uint64_t po2_2sml : 1; uint64_t po3_2sml : 1; uint64_t i0_rtout : 1; uint64_t i1_rtout : 1; uint64_t i2_rtout : 1; uint64_t i3_rtout : 1; uint64_t i0_overf : 1; uint64_t i1_overf : 1; uint64_t i2_overf : 1; uint64_t i3_overf : 1; uint64_t p0_rtout : 1; uint64_t p1_rtout : 1; uint64_t p2_rtout : 1; uint64_t p3_rtout : 1; uint64_t p0_perr : 1; uint64_t p1_perr : 1; uint64_t p2_perr : 1; uint64_t p3_perr : 1; uint64_t g0_rtout : 1; uint64_t g1_rtout : 1; uint64_t g2_rtout : 1; uint64_t g3_rtout : 1; uint64_t p0_pperr : 1; uint64_t p1_pperr : 1; uint64_t p2_pperr : 1; uint64_t p3_pperr : 1; uint64_t p0_ptout : 1; uint64_t p1_ptout : 1; uint64_t p2_ptout : 1; uint64_t p3_ptout : 1; uint64_t i0_pperr : 1; uint64_t i1_pperr : 1; uint64_t i2_pperr : 1; uint64_t i3_pperr : 1; uint64_t win_rto : 1; uint64_t p_dperr : 1; uint64_t iobdma : 1; uint64_t fcr_s_e : 1; uint64_t fcr_a_f : 1; uint64_t pcr_s_e : 1; uint64_t pcr_a_f : 1; uint64_t q2_s_e : 1; uint64_t q2_a_f : 1; uint64_t q3_s_e : 1; uint64_t q3_a_f : 1; uint64_t com_s_e : 1; uint64_t com_a_f : 1; uint64_t pnc_s_e : 1; uint64_t pnc_a_f : 1; uint64_t rwx_s_e : 1; uint64_t rdx_s_e : 1; uint64_t pcf_p_e : 1; uint64_t pcf_p_f : 1; uint64_t pdf_p_e : 1; uint64_t pdf_p_f : 1; uint64_t q1_s_e : 1; uint64_t q1_a_f : 1; uint64_t reserved_62_63 : 2; #endif } s; struct cvmx_npi_int_enb_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_62_63 : 2; uint64_t q1_a_f : 1; /**< Enables NPI_INT_SUM[Q1_A_F] to generate an interrupt. */ uint64_t q1_s_e : 1; /**< Enables NPI_INT_SUM[Q1_S_E] to generate an interrupt. */ uint64_t pdf_p_f : 1; /**< Enables NPI_INT_SUM[PDF_P_F] to generate an interrupt. */ uint64_t pdf_p_e : 1; /**< Enables NPI_INT_SUM[PDF_P_E] to generate an interrupt. */ uint64_t pcf_p_f : 1; /**< Enables NPI_INT_SUM[PCF_P_F] to generate an interrupt. */ uint64_t pcf_p_e : 1; /**< Enables NPI_INT_SUM[PCF_P_E] to generate an interrupt. */ uint64_t rdx_s_e : 1; /**< Enables NPI_INT_SUM[RDX_S_E] to generate an interrupt. */ uint64_t rwx_s_e : 1; /**< Enables NPI_INT_SUM[RWX_S_E] to generate an interrupt. */ uint64_t pnc_a_f : 1; /**< Enables NPI_INT_SUM[PNC_A_F] to generate an interrupt. */ uint64_t pnc_s_e : 1; /**< Enables NPI_INT_SUM[PNC_S_E] to generate an interrupt. */ uint64_t com_a_f : 1; /**< Enables NPI_INT_SUM[COM_A_F] to generate an interrupt. */ uint64_t com_s_e : 1; /**< Enables NPI_INT_SUM[COM_S_E] to generate an interrupt. */ uint64_t q3_a_f : 1; /**< Enables NPI_INT_SUM[Q3_A_F] to generate an interrupt. */ uint64_t q3_s_e : 1; /**< Enables NPI_INT_SUM[Q3_S_E] to generate an interrupt. */ uint64_t q2_a_f : 1; /**< Enables NPI_INT_SUM[Q2_A_F] to generate an interrupt. */ uint64_t q2_s_e : 1; /**< Enables NPI_INT_SUM[Q2_S_E] to generate an interrupt. */ uint64_t pcr_a_f : 1; /**< Enables NPI_INT_SUM[PCR_A_F] to generate an interrupt. */ uint64_t pcr_s_e : 1; /**< Enables NPI_INT_SUM[PCR_S_E] to generate an interrupt. 
*/ uint64_t fcr_a_f : 1; /**< Enables NPI_INT_SUM[FCR_A_F] to generate an interrupt. */ uint64_t fcr_s_e : 1; /**< Enables NPI_INT_SUM[FCR_S_E] to generate an interrupt. */ uint64_t iobdma : 1; /**< Enables NPI_INT_SUM[IOBDMA] to generate an interrupt. */ uint64_t p_dperr : 1; /**< Enables NPI_INT_SUM[P_DPERR] to generate an interrupt. */ uint64_t win_rto : 1; /**< Enables NPI_INT_SUM[WIN_RTO] to generate an interrupt. */ uint64_t reserved_36_38 : 3; uint64_t i0_pperr : 1; /**< Enables NPI_INT_SUM[I0_PPERR] to generate an interrupt. */ uint64_t reserved_32_34 : 3; uint64_t p0_ptout : 1; /**< Enables NPI_INT_SUM[P0_PTOUT] to generate an interrupt. */ uint64_t reserved_28_30 : 3; uint64_t p0_pperr : 1; /**< Enables NPI_INT_SUM[P0_PPERR] to generate an interrupt. */ uint64_t reserved_24_26 : 3; uint64_t g0_rtout : 1; /**< Enables NPI_INT_SUM[G0_RTOUT] to generate an interrupt. */ uint64_t reserved_20_22 : 3; uint64_t p0_perr : 1; /**< Enables NPI_INT_SUM[P0_PERR] to generate an interrupt. */ uint64_t reserved_16_18 : 3; uint64_t p0_rtout : 1; /**< Enables NPI_INT_SUM[P0_RTOUT] to generate an interrupt. */ uint64_t reserved_12_14 : 3; uint64_t i0_overf : 1; /**< Enables NPI_INT_SUM[I0_OVERF] to generate an interrupt. */ uint64_t reserved_8_10 : 3; uint64_t i0_rtout : 1; /**< Enables NPI_INT_SUM[I0_RTOUT] to generate an interrupt. */ uint64_t reserved_4_6 : 3; uint64_t po0_2sml : 1; /**< Enables NPI_INT_SUM[PO0_2SML] to generate an interrupt. */ uint64_t pci_rsl : 1; /**< Enables NPI_INT_SUM[PCI_RSL] to generate an interrupt. */ uint64_t rml_wto : 1; /**< Enables NPI_INT_SUM[RML_WTO] to generate an interrupt. */ uint64_t rml_rto : 1; /**< Enables NPI_INT_SUM[RML_RTO] to generate an interrupt. */ #else uint64_t rml_rto : 1; uint64_t rml_wto : 1; uint64_t pci_rsl : 1; uint64_t po0_2sml : 1; uint64_t reserved_4_6 : 3; uint64_t i0_rtout : 1; uint64_t reserved_8_10 : 3; uint64_t i0_overf : 1; uint64_t reserved_12_14 : 3; uint64_t p0_rtout : 1; uint64_t reserved_16_18 : 3; uint64_t p0_perr : 1; uint64_t reserved_20_22 : 3; uint64_t g0_rtout : 1; uint64_t reserved_24_26 : 3; uint64_t p0_pperr : 1; uint64_t reserved_28_30 : 3; uint64_t p0_ptout : 1; uint64_t reserved_32_34 : 3; uint64_t i0_pperr : 1; uint64_t reserved_36_38 : 3; uint64_t win_rto : 1; uint64_t p_dperr : 1; uint64_t iobdma : 1; uint64_t fcr_s_e : 1; uint64_t fcr_a_f : 1; uint64_t pcr_s_e : 1; uint64_t pcr_a_f : 1; uint64_t q2_s_e : 1; uint64_t q2_a_f : 1; uint64_t q3_s_e : 1; uint64_t q3_a_f : 1; uint64_t com_s_e : 1; uint64_t com_a_f : 1; uint64_t pnc_s_e : 1; uint64_t pnc_a_f : 1; uint64_t rwx_s_e : 1; uint64_t rdx_s_e : 1; uint64_t pcf_p_e : 1; uint64_t pcf_p_f : 1; uint64_t pdf_p_e : 1; uint64_t pdf_p_f : 1; uint64_t q1_s_e : 1; uint64_t q1_a_f : 1; uint64_t reserved_62_63 : 2; #endif } cn30xx; struct cvmx_npi_int_enb_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_62_63 : 2; uint64_t q1_a_f : 1; /**< Enables NPI_INT_SUM[Q1_A_F] to generate an interrupt. */ uint64_t q1_s_e : 1; /**< Enables NPI_INT_SUM[Q1_S_E] to generate an interrupt. */ uint64_t pdf_p_f : 1; /**< Enables NPI_INT_SUM[PDF_P_F] to generate an interrupt. */ uint64_t pdf_p_e : 1; /**< Enables NPI_INT_SUM[PDF_P_E] to generate an interrupt. */ uint64_t pcf_p_f : 1; /**< Enables NPI_INT_SUM[PCF_P_F] to generate an interrupt. */ uint64_t pcf_p_e : 1; /**< Enables NPI_INT_SUM[PCF_P_E] to generate an interrupt. */ uint64_t rdx_s_e : 1; /**< Enables NPI_INT_SUM[RDX_S_E] to generate an interrupt. 
*/ uint64_t rwx_s_e : 1; /**< Enables NPI_INT_SUM[RWX_S_E] to generate an interrupt. */ uint64_t pnc_a_f : 1; /**< Enables NPI_INT_SUM[PNC_A_F] to generate an interrupt. */ uint64_t pnc_s_e : 1; /**< Enables NPI_INT_SUM[PNC_S_E] to generate an interrupt. */ uint64_t com_a_f : 1; /**< Enables NPI_INT_SUM[COM_A_F] to generate an interrupt. */ uint64_t com_s_e : 1; /**< Enables NPI_INT_SUM[COM_S_E] to generate an interrupt. */ uint64_t q3_a_f : 1; /**< Enables NPI_INT_SUM[Q3_A_F] to generate an interrupt. */ uint64_t q3_s_e : 1; /**< Enables NPI_INT_SUM[Q3_S_E] to generate an interrupt. */ uint64_t q2_a_f : 1; /**< Enables NPI_INT_SUM[Q2_A_F] to generate an interrupt. */ uint64_t q2_s_e : 1; /**< Enables NPI_INT_SUM[Q2_S_E] to generate an interrupt. */ uint64_t pcr_a_f : 1; /**< Enables NPI_INT_SUM[PCR_A_F] to generate an interrupt. */ uint64_t pcr_s_e : 1; /**< Enables NPI_INT_SUM[PCR_S_E] to generate an interrupt. */ uint64_t fcr_a_f : 1; /**< Enables NPI_INT_SUM[FCR_A_F] to generate an interrupt. */ uint64_t fcr_s_e : 1; /**< Enables NPI_INT_SUM[FCR_S_E] to generate an interrupt. */ uint64_t iobdma : 1; /**< Enables NPI_INT_SUM[IOBDMA] to generate an interrupt. */ uint64_t p_dperr : 1; /**< Enables NPI_INT_SUM[P_DPERR] to generate an interrupt. */ uint64_t win_rto : 1; /**< Enables NPI_INT_SUM[WIN_RTO] to generate an interrupt. */ uint64_t reserved_37_38 : 2; uint64_t i1_pperr : 1; /**< Enables NPI_INT_SUM[I1_PPERR] to generate an interrupt. */ uint64_t i0_pperr : 1; /**< Enables NPI_INT_SUM[I0_PPERR] to generate an interrupt. */ uint64_t reserved_33_34 : 2; uint64_t p1_ptout : 1; /**< Enables NPI_INT_SUM[P1_PTOUT] to generate an interrupt. */ uint64_t p0_ptout : 1; /**< Enables NPI_INT_SUM[P0_PTOUT] to generate an interrupt. */ uint64_t reserved_29_30 : 2; uint64_t p1_pperr : 1; /**< Enables NPI_INT_SUM[P1_PPERR] to generate an interrupt. */ uint64_t p0_pperr : 1; /**< Enables NPI_INT_SUM[P0_PPERR] to generate an interrupt. */ uint64_t reserved_25_26 : 2; uint64_t g1_rtout : 1; /**< Enables NPI_INT_SUM[G1_RTOUT] to generate an interrupt. */ uint64_t g0_rtout : 1; /**< Enables NPI_INT_SUM[G0_RTOUT] to generate an interrupt. */ uint64_t reserved_21_22 : 2; uint64_t p1_perr : 1; /**< Enables NPI_INT_SUM[P1_PERR] to generate an interrupt. */ uint64_t p0_perr : 1; /**< Enables NPI_INT_SUM[P0_PERR] to generate an interrupt. */ uint64_t reserved_17_18 : 2; uint64_t p1_rtout : 1; /**< Enables NPI_INT_SUM[P1_RTOUT] to generate an interrupt. */ uint64_t p0_rtout : 1; /**< Enables NPI_INT_SUM[P0_RTOUT] to generate an interrupt. */ uint64_t reserved_13_14 : 2; uint64_t i1_overf : 1; /**< Enables NPI_INT_SUM[I1_OVERF] to generate an interrupt. */ uint64_t i0_overf : 1; /**< Enables NPI_INT_SUM[I0_OVERF] to generate an interrupt. */ uint64_t reserved_9_10 : 2; uint64_t i1_rtout : 1; /**< Enables NPI_INT_SUM[I1_RTOUT] to generate an interrupt. */ uint64_t i0_rtout : 1; /**< Enables NPI_INT_SUM[I0_RTOUT] to generate an interrupt. */ uint64_t reserved_5_6 : 2; uint64_t po1_2sml : 1; /**< Enables NPI_INT_SUM[PO1_2SML] to generate an interrupt. */ uint64_t po0_2sml : 1; /**< Enables NPI_INT_SUM[PO0_2SML] to generate an interrupt. */ uint64_t pci_rsl : 1; /**< Enables NPI_INT_SUM[PCI_RSL] to generate an interrupt. */ uint64_t rml_wto : 1; /**< Enables NPI_INT_SUM[RML_WTO] to generate an interrupt. */ uint64_t rml_rto : 1; /**< Enables NPI_INT_SUM[RML_RTO] to generate an interrupt. 
*/ #else uint64_t rml_rto : 1; uint64_t rml_wto : 1; uint64_t pci_rsl : 1; uint64_t po0_2sml : 1; uint64_t po1_2sml : 1; uint64_t reserved_5_6 : 2; uint64_t i0_rtout : 1; uint64_t i1_rtout : 1; uint64_t reserved_9_10 : 2; uint64_t i0_overf : 1; uint64_t i1_overf : 1; uint64_t reserved_13_14 : 2; uint64_t p0_rtout : 1; uint64_t p1_rtout : 1; uint64_t reserved_17_18 : 2; uint64_t p0_perr : 1; uint64_t p1_perr : 1; uint64_t reserved_21_22 : 2; uint64_t g0_rtout : 1; uint64_t g1_rtout : 1; uint64_t reserved_25_26 : 2; uint64_t p0_pperr : 1; uint64_t p1_pperr : 1; uint64_t reserved_29_30 : 2; uint64_t p0_ptout : 1; uint64_t p1_ptout : 1; uint64_t reserved_33_34 : 2; uint64_t i0_pperr : 1; uint64_t i1_pperr : 1; uint64_t reserved_37_38 : 2; uint64_t win_rto : 1; uint64_t p_dperr : 1; uint64_t iobdma : 1; uint64_t fcr_s_e : 1; uint64_t fcr_a_f : 1; uint64_t pcr_s_e : 1; uint64_t pcr_a_f : 1; uint64_t q2_s_e : 1; uint64_t q2_a_f : 1; uint64_t q3_s_e : 1; uint64_t q3_a_f : 1; uint64_t com_s_e : 1; uint64_t com_a_f : 1; uint64_t pnc_s_e : 1; uint64_t pnc_a_f : 1; uint64_t rwx_s_e : 1; uint64_t rdx_s_e : 1; uint64_t pcf_p_e : 1; uint64_t pcf_p_f : 1; uint64_t pdf_p_e : 1; uint64_t pdf_p_f : 1; uint64_t q1_s_e : 1; uint64_t q1_a_f : 1; uint64_t reserved_62_63 : 2; #endif } cn31xx; struct cvmx_npi_int_enb_s cn38xx; struct cvmx_npi_int_enb_cn38xxp2 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_42_63 : 22; uint64_t iobdma : 1; /**< Enables NPI_INT_SUM[IOBDMA] to generate an interrupt. */ uint64_t p_dperr : 1; /**< Enables NPI_INT_SUM[P_DPERR] to generate an interrupt. */ uint64_t win_rto : 1; /**< Enables NPI_INT_SUM[WIN_RTO] to generate an interrupt. */ uint64_t i3_pperr : 1; /**< Enables NPI_INT_SUM[I3_PPERR] to generate an interrupt. */ uint64_t i2_pperr : 1; /**< Enables NPI_INT_SUM[I2_PPERR] to generate an interrupt. */ uint64_t i1_pperr : 1; /**< Enables NPI_INT_SUM[I1_PPERR] to generate an interrupt. */ uint64_t i0_pperr : 1; /**< Enables NPI_INT_SUM[I0_PPERR] to generate an interrupt. */ uint64_t p3_ptout : 1; /**< Enables NPI_INT_SUM[P3_PTOUT] to generate an interrupt. */ uint64_t p2_ptout : 1; /**< Enables NPI_INT_SUM[P2_PTOUT] to generate an interrupt. */ uint64_t p1_ptout : 1; /**< Enables NPI_INT_SUM[P1_PTOUT] to generate an interrupt. */ uint64_t p0_ptout : 1; /**< Enables NPI_INT_SUM[P0_PTOUT] to generate an interrupt. */ uint64_t p3_pperr : 1; /**< Enables NPI_INT_SUM[P3_PPERR] to generate an interrupt. */ uint64_t p2_pperr : 1; /**< Enables NPI_INT_SUM[P2_PPERR] to generate an interrupt. */ uint64_t p1_pperr : 1; /**< Enables NPI_INT_SUM[P1_PPERR] to generate an interrupt. */ uint64_t p0_pperr : 1; /**< Enables NPI_INT_SUM[P0_PPERR] to generate an interrupt. */ uint64_t g3_rtout : 1; /**< Enables NPI_INT_SUM[G3_RTOUT] to generate an interrupt. */ uint64_t g2_rtout : 1; /**< Enables NPI_INT_SUM[G2_RTOUT] to generate an interrupt. */ uint64_t g1_rtout : 1; /**< Enables NPI_INT_SUM[G1_RTOUT] to generate an interrupt. */ uint64_t g0_rtout : 1; /**< Enables NPI_INT_SUM[G0_RTOUT] to generate an interrupt. */ uint64_t p3_perr : 1; /**< Enables NPI_INT_SUM[P3_PERR] to generate an interrupt. */ uint64_t p2_perr : 1; /**< Enables NPI_INT_SUM[P2_PERR] to generate an interrupt. */ uint64_t p1_perr : 1; /**< Enables NPI_INT_SUM[P1_PERR] to generate an interrupt. */ uint64_t p0_perr : 1; /**< Enables NPI_INT_SUM[P0_PERR] to generate an interrupt. */ uint64_t p3_rtout : 1; /**< Enables NPI_INT_SUM[P3_RTOUT] to generate an interrupt. 
*/ uint64_t p2_rtout : 1; /**< Enables NPI_INT_SUM[P2_RTOUT] to generate an interrupt. */ uint64_t p1_rtout : 1; /**< Enables NPI_INT_SUM[P1_RTOUT] to generate an interrupt. */ uint64_t p0_rtout : 1; /**< Enables NPI_INT_SUM[P0_RTOUT] to generate an interrupt. */ uint64_t i3_overf : 1; /**< Enables NPI_INT_SUM[I3_OVERF] to generate an interrupt. */ uint64_t i2_overf : 1; /**< Enables NPI_INT_SUM[I2_OVERF] to generate an interrupt. */ uint64_t i1_overf : 1; /**< Enables NPI_INT_SUM[I1_OVERF] to generate an interrupt. */ uint64_t i0_overf : 1; /**< Enables NPI_INT_SUM[I0_OVERF] to generate an interrupt. */ uint64_t i3_rtout : 1; /**< Enables NPI_INT_SUM[I3_RTOUT] to generate an interrupt. */ uint64_t i2_rtout : 1; /**< Enables NPI_INT_SUM[I2_RTOUT] to generate an interrupt. */ uint64_t i1_rtout : 1; /**< Enables NPI_INT_SUM[I1_RTOUT] to generate an interrupt. */ uint64_t i0_rtout : 1; /**< Enables NPI_INT_SUM[I0_RTOUT] to generate an interrupt. */ uint64_t po3_2sml : 1; /**< Enables NPI_INT_SUM[PO3_2SML] to generate an interrupt. */ uint64_t po2_2sml : 1; /**< Enables NPI_INT_SUM[PO2_2SML] to generate an interrupt. */ uint64_t po1_2sml : 1; /**< Enables NPI_INT_SUM[PO1_2SML] to generate an interrupt. */ uint64_t po0_2sml : 1; /**< Enables NPI_INT_SUM[PO0_2SML] to generate an interrupt. */ uint64_t pci_rsl : 1; /**< Enables NPI_INT_SUM[PCI_RSL] to generate an interrupt. */ uint64_t rml_wto : 1; /**< Enables NPI_INT_SUM[RML_WTO] to generate an interrupt. */ uint64_t rml_rto : 1; /**< Enables NPI_INT_SUM[RML_RTO] to generate an interrupt. */ #else uint64_t rml_rto : 1; uint64_t rml_wto : 1; uint64_t pci_rsl : 1; uint64_t po0_2sml : 1; uint64_t po1_2sml : 1; uint64_t po2_2sml : 1; uint64_t po3_2sml : 1; uint64_t i0_rtout : 1; uint64_t i1_rtout : 1; uint64_t i2_rtout : 1; uint64_t i3_rtout : 1; uint64_t i0_overf : 1; uint64_t i1_overf : 1; uint64_t i2_overf : 1; uint64_t i3_overf : 1; uint64_t p0_rtout : 1; uint64_t p1_rtout : 1; uint64_t p2_rtout : 1; uint64_t p3_rtout : 1; uint64_t p0_perr : 1; uint64_t p1_perr : 1; uint64_t p2_perr : 1; uint64_t p3_perr : 1; uint64_t g0_rtout : 1; uint64_t g1_rtout : 1; uint64_t g2_rtout : 1; uint64_t g3_rtout : 1; uint64_t p0_pperr : 1; uint64_t p1_pperr : 1; uint64_t p2_pperr : 1; uint64_t p3_pperr : 1; uint64_t p0_ptout : 1; uint64_t p1_ptout : 1; uint64_t p2_ptout : 1; uint64_t p3_ptout : 1; uint64_t i0_pperr : 1; uint64_t i1_pperr : 1; uint64_t i2_pperr : 1; uint64_t i3_pperr : 1; uint64_t win_rto : 1; uint64_t p_dperr : 1; uint64_t iobdma : 1; uint64_t reserved_42_63 : 22; #endif } cn38xxp2; struct cvmx_npi_int_enb_cn31xx cn50xx; struct cvmx_npi_int_enb_s cn58xx; struct cvmx_npi_int_enb_s cn58xxp1; } cvmx_npi_int_enb_t; /** * cvmx_npi_int_sum * * NPI_INTERRUPT_SUM = NPI Interrupt Summary Register * * Set when an interrupt condition occurs, write '1' to clear. */ typedef union { uint64_t u64; struct cvmx_npi_int_sum_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_62_63 : 2; uint64_t q1_a_f : 1; /**< Attempted to add when Queue-1 FIFO is full. PASS3 Field. */ uint64_t q1_s_e : 1; /**< Attempted to subtract when Queue-1 FIFO is empty. PASS3 Field. */ uint64_t pdf_p_f : 1; /**< Attempted to push a full PCN-DATA-FIFO. PASS3 Field. */ uint64_t pdf_p_e : 1; /**< Attempted to pop an empty PCN-DATA-FIFO. PASS3 Field. */ uint64_t pcf_p_f : 1; /**< Attempted to push a full PCN-CNT-FIFO. PASS3 Field. */ uint64_t pcf_p_e : 1; /**< Attempted to pop an empty PCN-CNT-FIFO. PASS3 Field. 
*/ uint64_t rdx_s_e : 1; /**< Attempted to subtract when DPI-XFR-Wait count is 0. PASS3 Field. */ uint64_t rwx_s_e : 1; /**< Attempted to subtract when RDN-XFR-Wait count is 0. PASS3 Field. */ uint64_t pnc_a_f : 1; /**< Attempted to add when PNI-NPI Credits are max. PASS3 Field. */ uint64_t pnc_s_e : 1; /**< Attempted to subtract when PNI-NPI Credits are 0. PASS3 Field. */ uint64_t com_a_f : 1; /**< Attempted to add when PCN-Commit Counter is max. PASS3 Field. */ uint64_t com_s_e : 1; /**< Attempted to subtract when PCN-Commit Counter is 0. PASS3 Field. */ uint64_t q3_a_f : 1; /**< Attempted to add when Queue-3 FIFO is full. PASS3 Field. */ uint64_t q3_s_e : 1; /**< Attempted to subtract when Queue-3 FIFO is empty. PASS3 Field. */ uint64_t q2_a_f : 1; /**< Attempted to add when Queue-2 FIFO is full. PASS3 Field. */ uint64_t q2_s_e : 1; /**< Attempted to subtract when Queue-2 FIFO is empty. PASS3 Field. */ uint64_t pcr_a_f : 1; /**< Attempted to add when POW Credits is full. PASS3 Field. */ uint64_t pcr_s_e : 1; /**< Attempted to subtract when POW Credits is empty. PASS3 Field. */ uint64_t fcr_a_f : 1; /**< Attempted to add when FPA Credits is full. PASS3 Field. */ uint64_t fcr_s_e : 1; /**< Attempted to subtract when FPA Credits is empty. PASS3 Field. */ uint64_t iobdma : 1; /**< Requested IOBDMA read size exceeded 128 words. */ uint64_t p_dperr : 1; /**< If a parity error occurred on data written to L2C from the PCI, this bit may be set. */ uint64_t win_rto : 1; /**< Windowed Load Timed Out. */ uint64_t i3_pperr : 1; /**< If a parity error occurred on the port's instruction, this bit may be set. */ uint64_t i2_pperr : 1; /**< If a parity error occurred on the port's instruction, this bit may be set. */ uint64_t i1_pperr : 1; /**< If a parity error occurred on the port's instruction, this bit may be set. */ uint64_t i0_pperr : 1; /**< If a parity error occurred on the port's instruction, this bit may be set. */ uint64_t p3_ptout : 1; /**< Port-3 output had a read timeout on a DATA/INFO pair. */ uint64_t p2_ptout : 1; /**< Port-2 output had a read timeout on a DATA/INFO pair. */ uint64_t p1_ptout : 1; /**< Port-1 output had a read timeout on a DATA/INFO pair. */ uint64_t p0_ptout : 1; /**< Port-0 output had a read timeout on a DATA/INFO pair. */ uint64_t p3_pperr : 1; /**< If a parity error occurred on the port DATA/INFO pointer-pair, this bit may be set. */ uint64_t p2_pperr : 1; /**< If a parity error occurred on the port DATA/INFO pointer-pair, this bit may be set. */ uint64_t p1_pperr : 1; /**< If a parity error occurred on the port DATA/INFO pointer-pair, this bit may be set. */ uint64_t p0_pperr : 1; /**< If a parity error occurred on the port DATA/INFO pointer-pair, this bit may be set. */ uint64_t g3_rtout : 1; /**< Port-3 had a read timeout while attempting to read a gather list. */ uint64_t g2_rtout : 1; /**< Port-2 had a read timeout while attempting to read a gather list. */ uint64_t g1_rtout : 1; /**< Port-1 had a read timeout while attempting to read a gather list. */ uint64_t g0_rtout : 1; /**< Port-0 had a read timeout while attempting to read a gather list. */ uint64_t p3_perr : 1; /**< If a parity error occurred on the port's packet data, this bit may be set. */ uint64_t p2_perr : 1; /**< If a parity error occurred on the port's packet data, this bit may be set. */ uint64_t p1_perr : 1; /**< If a parity error occurred on the port's packet data, this bit may be set. */ uint64_t p0_perr : 1; /**< If a parity error occurred on the port's packet data, this bit may be set.
*/ uint64_t p3_rtout : 1; /**< Port-3 had a read timeout while attempting to read packet data. */ uint64_t p2_rtout : 1; /**< Port-2 had a read timeout while attempting to read packet data. */ uint64_t p1_rtout : 1; /**< Port-1 had a read timeout while attempting to read packet data. */ uint64_t p0_rtout : 1; /**< Port-0 had a read timeout while attempting to read packet data. */ uint64_t i3_overf : 1; /**< Port-3 had a doorbell overflow. Bit[31] of the doorbell count was set. */ uint64_t i2_overf : 1; /**< Port-2 had a doorbell overflow. Bit[31] of the doorbell count was set. */ uint64_t i1_overf : 1; /**< Port-1 had a doorbell overflow. Bit[31] of the doorbell count was set. */ uint64_t i0_overf : 1; /**< Port-0 had a doorbell overflow. Bit[31] of the doorbell count was set. */ uint64_t i3_rtout : 1; /**< Port-3 had a read timeout while attempting to read instructions. */ uint64_t i2_rtout : 1; /**< Port-2 had a read timeout while attempting to read instructions. */ uint64_t i1_rtout : 1; /**< Port-1 had a read timeout while attempting to read instructions. */ uint64_t i0_rtout : 1; /**< Port-0 had a read timeout while attempting to read instructions. */ uint64_t po3_2sml : 1; /**< The packet being sent out on Port3 is smaller than the NPI_BUFF_SIZE_OUTPUT3[ISIZE] field. */ uint64_t po2_2sml : 1; /**< The packet being sent out on Port2 is smaller than the NPI_BUFF_SIZE_OUTPUT2[ISIZE] field. */ uint64_t po1_2sml : 1; /**< The packet being sent out on Port1 is smaller than the NPI_BUFF_SIZE_OUTPUT1[ISIZE] field. */ uint64_t po0_2sml : 1; /**< The packet being sent out on Port0 is smaller than the NPI_BUFF_SIZE_OUTPUT0[ISIZE] field. */ uint64_t pci_rsl : 1; /**< Set to '1' when a bit in PCI_INT_SUM2 is SET and the corresponding bit in PCI_INT_ENB2 is SET. */ uint64_t rml_wto : 1; /**< Set '1' when the RML does not receive a commit back from an RSL after sending a write command to an RSL. */ uint64_t rml_rto : 1; /**< Set '1' when the RML does not receive read data back from an RSL after sending a read command to an RSL.
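Note that every cause bit in this register is write-1-to-clear. A typical (illustrative) service sequence reads the summary, handles the reported causes, and writes the same value back to acknowledge them; a sketch under the same assumptions about the SDK executive's CSR accessors and address macros:

    cvmx_npi_int_sum_t sum;
    sum.u64 = cvmx_read_csr(CVMX_NPI_INT_SUM);
    if (sum.s.rml_rto) {
        // e.g. log the RSL read timeout here
    }
    cvmx_write_csr(CVMX_NPI_INT_SUM, sum.u64); // W1C acknowledge of the causes just seen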
*/ #else uint64_t rml_rto : 1; uint64_t rml_wto : 1; uint64_t pci_rsl : 1; uint64_t po0_2sml : 1; uint64_t po1_2sml : 1; uint64_t po2_2sml : 1; uint64_t po3_2sml : 1; uint64_t i0_rtout : 1; uint64_t i1_rtout : 1; uint64_t i2_rtout : 1; uint64_t i3_rtout : 1; uint64_t i0_overf : 1; uint64_t i1_overf : 1; uint64_t i2_overf : 1; uint64_t i3_overf : 1; uint64_t p0_rtout : 1; uint64_t p1_rtout : 1; uint64_t p2_rtout : 1; uint64_t p3_rtout : 1; uint64_t p0_perr : 1; uint64_t p1_perr : 1; uint64_t p2_perr : 1; uint64_t p3_perr : 1; uint64_t g0_rtout : 1; uint64_t g1_rtout : 1; uint64_t g2_rtout : 1; uint64_t g3_rtout : 1; uint64_t p0_pperr : 1; uint64_t p1_pperr : 1; uint64_t p2_pperr : 1; uint64_t p3_pperr : 1; uint64_t p0_ptout : 1; uint64_t p1_ptout : 1; uint64_t p2_ptout : 1; uint64_t p3_ptout : 1; uint64_t i0_pperr : 1; uint64_t i1_pperr : 1; uint64_t i2_pperr : 1; uint64_t i3_pperr : 1; uint64_t win_rto : 1; uint64_t p_dperr : 1; uint64_t iobdma : 1; uint64_t fcr_s_e : 1; uint64_t fcr_a_f : 1; uint64_t pcr_s_e : 1; uint64_t pcr_a_f : 1; uint64_t q2_s_e : 1; uint64_t q2_a_f : 1; uint64_t q3_s_e : 1; uint64_t q3_a_f : 1; uint64_t com_s_e : 1; uint64_t com_a_f : 1; uint64_t pnc_s_e : 1; uint64_t pnc_a_f : 1; uint64_t rwx_s_e : 1; uint64_t rdx_s_e : 1; uint64_t pcf_p_e : 1; uint64_t pcf_p_f : 1; uint64_t pdf_p_e : 1; uint64_t pdf_p_f : 1; uint64_t q1_s_e : 1; uint64_t q1_a_f : 1; uint64_t reserved_62_63 : 2; #endif } s; struct cvmx_npi_int_sum_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_62_63 : 2; uint64_t q1_a_f : 1; /**< Attempted to add when Queue-1 FIFO is full. */ uint64_t q1_s_e : 1; /**< Attempted to subtract when Queue-1 FIFO is empty. */ uint64_t pdf_p_f : 1; /**< Attempted to push a full PCN-DATA-FIFO. */ uint64_t pdf_p_e : 1; /**< Attempted to pop an empty PCN-DATA-FIFO. */ uint64_t pcf_p_f : 1; /**< Attempted to push a full PCN-CNT-FIFO. */ uint64_t pcf_p_e : 1; /**< Attempted to pop an empty PCN-CNT-FIFO. */ uint64_t rdx_s_e : 1; /**< Attempted to subtract when DPI-XFR-Wait count is 0. */ uint64_t rwx_s_e : 1; /**< Attempted to subtract when RDN-XFR-Wait count is 0. */ uint64_t pnc_a_f : 1; /**< Attempted to add when PNI-NPI Credits are max. */ uint64_t pnc_s_e : 1; /**< Attempted to subtract when PNI-NPI Credits are 0. */ uint64_t com_a_f : 1; /**< Attempted to add when PCN-Commit Counter is max. */ uint64_t com_s_e : 1; /**< Attempted to subtract when PCN-Commit Counter is 0. */ uint64_t q3_a_f : 1; /**< Attempted to add when Queue-3 FIFO is full. */ uint64_t q3_s_e : 1; /**< Attempted to subtract when Queue-3 FIFO is empty. */ uint64_t q2_a_f : 1; /**< Attempted to add when Queue-2 FIFO is full. */ uint64_t q2_s_e : 1; /**< Attempted to subtract when Queue-2 FIFO is empty. */ uint64_t pcr_a_f : 1; /**< Attempted to add when POW Credits is full. */ uint64_t pcr_s_e : 1; /**< Attempted to subtract when POW Credits is empty. */ uint64_t fcr_a_f : 1; /**< Attempted to add when FPA Credits is full. */ uint64_t fcr_s_e : 1; /**< Attempted to subtract when FPA Credits is empty. */ uint64_t iobdma : 1; /**< Requested IOBDMA read size exceeded 128 words. */ uint64_t p_dperr : 1; /**< If a parity error occurred on data written to L2C from the PCI, this bit may be set. */ uint64_t win_rto : 1; /**< Windowed Load Timed Out. */ uint64_t reserved_36_38 : 3; uint64_t i0_pperr : 1; /**< If a parity error occurred on the port's instruction, this bit may be set. */ uint64_t reserved_32_34 : 3; uint64_t p0_ptout : 1; /**< Port-0 output had a read timeout on a DATA/INFO pair.
*/ uint64_t reserved_28_30 : 3; uint64_t p0_pperr : 1; /**< If a parity error occurred on the port DATA/INFO pointer-pair, this bit may be set. */ uint64_t reserved_24_26 : 3; uint64_t g0_rtout : 1; /**< Port-0 had a read timeout while attempting to read a gather list. */ uint64_t reserved_20_22 : 3; uint64_t p0_perr : 1; /**< If a parity error occurred on the port's packet data, this bit may be set. */ uint64_t reserved_16_18 : 3; uint64_t p0_rtout : 1; /**< Port-0 had a read timeout while attempting to read packet data. */ uint64_t reserved_12_14 : 3; uint64_t i0_overf : 1; /**< Port-0 had a doorbell overflow. Bit[31] of the doorbell count was set. */ uint64_t reserved_8_10 : 3; uint64_t i0_rtout : 1; /**< Port-0 had a read timeout while attempting to read instructions. */ uint64_t reserved_4_6 : 3; uint64_t po0_2sml : 1; /**< The packet being sent out on Port0 is smaller than the NPI_BUFF_SIZE_OUTPUT0[ISIZE] field. */ uint64_t pci_rsl : 1; /**< Set to '1' when a bit in PCI_INT_SUM2 is SET and the corresponding bit in PCI_INT_ENB2 is SET. */ uint64_t rml_wto : 1; /**< Set '1' when the RML does not receive a commit back from an RSL after sending a write command to an RSL. */ uint64_t rml_rto : 1; /**< Set '1' when the RML does not receive read data back from an RSL after sending a read command to an RSL. */ #else uint64_t rml_rto : 1; uint64_t rml_wto : 1; uint64_t pci_rsl : 1; uint64_t po0_2sml : 1; uint64_t reserved_4_6 : 3; uint64_t i0_rtout : 1; uint64_t reserved_8_10 : 3; uint64_t i0_overf : 1; uint64_t reserved_12_14 : 3; uint64_t p0_rtout : 1; uint64_t reserved_16_18 : 3; uint64_t p0_perr : 1; uint64_t reserved_20_22 : 3; uint64_t g0_rtout : 1; uint64_t reserved_24_26 : 3; uint64_t p0_pperr : 1; uint64_t reserved_28_30 : 3; uint64_t p0_ptout : 1; uint64_t reserved_32_34 : 3; uint64_t i0_pperr : 1; uint64_t reserved_36_38 : 3; uint64_t win_rto : 1; uint64_t p_dperr : 1; uint64_t iobdma : 1; uint64_t fcr_s_e : 1; uint64_t fcr_a_f : 1; uint64_t pcr_s_e : 1; uint64_t pcr_a_f : 1; uint64_t q2_s_e : 1; uint64_t q2_a_f : 1; uint64_t q3_s_e : 1; uint64_t q3_a_f : 1; uint64_t com_s_e : 1; uint64_t com_a_f : 1; uint64_t pnc_s_e : 1; uint64_t pnc_a_f : 1; uint64_t rwx_s_e : 1; uint64_t rdx_s_e : 1; uint64_t pcf_p_e : 1; uint64_t pcf_p_f : 1; uint64_t pdf_p_e : 1; uint64_t pdf_p_f : 1; uint64_t q1_s_e : 1; uint64_t q1_a_f : 1; uint64_t reserved_62_63 : 2; #endif } cn30xx; struct cvmx_npi_int_sum_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_62_63 : 2; uint64_t q1_a_f : 1; /**< Attempted to add when Queue-1 FIFO is full. */ uint64_t q1_s_e : 1; /**< Attempted to subtract when Queue-1 FIFO is empty. */ uint64_t pdf_p_f : 1; /**< Attempted to push a full PCN-DATA-FIFO. */ uint64_t pdf_p_e : 1; /**< Attempted to pop an empty PCN-DATA-FIFO. */ uint64_t pcf_p_f : 1; /**< Attempted to push a full PCN-CNT-FIFO. */ uint64_t pcf_p_e : 1; /**< Attempted to pop an empty PCN-CNT-FIFO. */ uint64_t rdx_s_e : 1; /**< Attempted to subtract when DPI-XFR-Wait count is 0. */ uint64_t rwx_s_e : 1; /**< Attempted to subtract when RDN-XFR-Wait count is 0. */ uint64_t pnc_a_f : 1; /**< Attempted to add when PNI-NPI Credits are max. */ uint64_t pnc_s_e : 1; /**< Attempted to subtract when PNI-NPI Credits are 0. */ uint64_t com_a_f : 1; /**< Attempted to add when PCN-Commit Counter is max. */ uint64_t com_s_e : 1; /**< Attempted to subtract when PCN-Commit Counter is 0. */ uint64_t q3_a_f : 1; /**< Attempted to add when Queue-3 FIFO is full.
*/ uint64_t q3_s_e : 1; /**< Attempted to subtract when Queue-3 FIFO is empty. */ uint64_t q2_a_f : 1; /**< Attempted to add when Queue-2 FIFO is full. */ uint64_t q2_s_e : 1; /**< Attempted to subtract when Queue-2 FIFO is empty. */ uint64_t pcr_a_f : 1; /**< Attempted to add when POW Credits is full. */ uint64_t pcr_s_e : 1; /**< Attempted to subtract when POW Credits is empty. */ uint64_t fcr_a_f : 1; /**< Attempted to add when FPA Credits is full. */ uint64_t fcr_s_e : 1; /**< Attempted to subtract when FPA Credits is empty. */ uint64_t iobdma : 1; /**< Requested IOBDMA read size exceeded 128 words. */ uint64_t p_dperr : 1; /**< If a parity error occurred on data written to L2C from the PCI, this bit may be set. */ uint64_t win_rto : 1; /**< Windowed Load Timed Out. */ uint64_t reserved_37_38 : 2; uint64_t i1_pperr : 1; /**< If a parity error occurred on the port's instruction, this bit may be set. */ uint64_t i0_pperr : 1; /**< If a parity error occurred on the port's instruction, this bit may be set. */ uint64_t reserved_33_34 : 2; uint64_t p1_ptout : 1; /**< Port-1 output had a read timeout on a DATA/INFO pair. */ uint64_t p0_ptout : 1; /**< Port-0 output had a read timeout on a DATA/INFO pair. */ uint64_t reserved_29_30 : 2; uint64_t p1_pperr : 1; /**< If a parity error occurred on the port DATA/INFO pointer-pair, this bit may be set. */ uint64_t p0_pperr : 1; /**< If a parity error occurred on the port DATA/INFO pointer-pair, this bit may be set. */ uint64_t reserved_25_26 : 2; uint64_t g1_rtout : 1; /**< Port-1 had a read timeout while attempting to read a gather list. */ uint64_t g0_rtout : 1; /**< Port-0 had a read timeout while attempting to read a gather list. */ uint64_t reserved_21_22 : 2; uint64_t p1_perr : 1; /**< If a parity error occurred on the port's packet data, this bit may be set. */ uint64_t p0_perr : 1; /**< If a parity error occurred on the port's packet data, this bit may be set. */ uint64_t reserved_17_18 : 2; uint64_t p1_rtout : 1; /**< Port-1 had a read timeout while attempting to read packet data. */ uint64_t p0_rtout : 1; /**< Port-0 had a read timeout while attempting to read packet data. */ uint64_t reserved_13_14 : 2; uint64_t i1_overf : 1; /**< Port-1 had a doorbell overflow. Bit[31] of the doorbell count was set. */ uint64_t i0_overf : 1; /**< Port-0 had a doorbell overflow. Bit[31] of the doorbell count was set. */ uint64_t reserved_9_10 : 2; uint64_t i1_rtout : 1; /**< Port-1 had a read timeout while attempting to read instructions. */ uint64_t i0_rtout : 1; /**< Port-0 had a read timeout while attempting to read instructions. */ uint64_t reserved_5_6 : 2; uint64_t po1_2sml : 1; /**< The packet being sent out on Port1 is smaller than the NPI_BUFF_SIZE_OUTPUT1[ISIZE] field. */ uint64_t po0_2sml : 1; /**< The packet being sent out on Port0 is smaller than the NPI_BUFF_SIZE_OUTPUT0[ISIZE] field. */ uint64_t pci_rsl : 1; /**< Set to '1' when a bit in PCI_INT_SUM2 is SET and the corresponding bit in PCI_INT_ENB2 is SET. */ uint64_t rml_wto : 1; /**< Set '1' when the RML does not receive a commit back from an RSL after sending a write command to an RSL. */ uint64_t rml_rto : 1; /**< Set '1' when the RML does not receive read data back from an RSL after sending a read command to an RSL.
*/ #else uint64_t rml_rto : 1; uint64_t rml_wto : 1; uint64_t pci_rsl : 1; uint64_t po0_2sml : 1; uint64_t po1_2sml : 1; uint64_t reserved_5_6 : 2; uint64_t i0_rtout : 1; uint64_t i1_rtout : 1; uint64_t reserved_9_10 : 2; uint64_t i0_overf : 1; uint64_t i1_overf : 1; uint64_t reserved_13_14 : 2; uint64_t p0_rtout : 1; uint64_t p1_rtout : 1; uint64_t reserved_17_18 : 2; uint64_t p0_perr : 1; uint64_t p1_perr : 1; uint64_t reserved_21_22 : 2; uint64_t g0_rtout : 1; uint64_t g1_rtout : 1; uint64_t reserved_25_26 : 2; uint64_t p0_pperr : 1; uint64_t p1_pperr : 1; uint64_t reserved_29_30 : 2; uint64_t p0_ptout : 1; uint64_t p1_ptout : 1; uint64_t reserved_33_34 : 2; uint64_t i0_pperr : 1; uint64_t i1_pperr : 1; uint64_t reserved_37_38 : 2; uint64_t win_rto : 1; uint64_t p_dperr : 1; uint64_t iobdma : 1; uint64_t fcr_s_e : 1; uint64_t fcr_a_f : 1; uint64_t pcr_s_e : 1; uint64_t pcr_a_f : 1; uint64_t q2_s_e : 1; uint64_t q2_a_f : 1; uint64_t q3_s_e : 1; uint64_t q3_a_f : 1; uint64_t com_s_e : 1; uint64_t com_a_f : 1; uint64_t pnc_s_e : 1; uint64_t pnc_a_f : 1; uint64_t rwx_s_e : 1; uint64_t rdx_s_e : 1; uint64_t pcf_p_e : 1; uint64_t pcf_p_f : 1; uint64_t pdf_p_e : 1; uint64_t pdf_p_f : 1; uint64_t q1_s_e : 1; uint64_t q1_a_f : 1; uint64_t reserved_62_63 : 2; #endif } cn31xx; struct cvmx_npi_int_sum_s cn38xx; struct cvmx_npi_int_sum_cn38xxp2 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_42_63 : 22; uint64_t iobdma : 1; /**< Requested IOBDMA read size exceeded 128 words. */ uint64_t p_dperr : 1; /**< If a parity error occurred on data written to L2C from the PCI, this bit may be set. */ uint64_t win_rto : 1; /**< Windowed Load Timed Out. */ uint64_t i3_pperr : 1; /**< If a parity error occurred on the port's instruction, this bit may be set. */ uint64_t i2_pperr : 1; /**< If a parity error occurred on the port's instruction, this bit may be set. */ uint64_t i1_pperr : 1; /**< If a parity error occurred on the port's instruction, this bit may be set. */ uint64_t i0_pperr : 1; /**< If a parity error occurred on the port's instruction, this bit may be set. */ uint64_t p3_ptout : 1; /**< Port-3 output had a read timeout on a DATA/INFO pair. */ uint64_t p2_ptout : 1; /**< Port-2 output had a read timeout on a DATA/INFO pair. */ uint64_t p1_ptout : 1; /**< Port-1 output had a read timeout on a DATA/INFO pair. */ uint64_t p0_ptout : 1; /**< Port-0 output had a read timeout on a DATA/INFO pair. */ uint64_t p3_pperr : 1; /**< If a parity error occurred on the port DATA/INFO pointer-pair, this bit may be set. */ uint64_t p2_pperr : 1; /**< If a parity error occurred on the port DATA/INFO pointer-pair, this bit may be set. */ uint64_t p1_pperr : 1; /**< If a parity error occurred on the port DATA/INFO pointer-pair, this bit may be set. */ uint64_t p0_pperr : 1; /**< If a parity error occurred on the port DATA/INFO pointer-pair, this bit may be set. */ uint64_t g3_rtout : 1; /**< Port-3 had a read timeout while attempting to read a gather list. */ uint64_t g2_rtout : 1; /**< Port-2 had a read timeout while attempting to read a gather list. */ uint64_t g1_rtout : 1; /**< Port-1 had a read timeout while attempting to read a gather list. */ uint64_t g0_rtout : 1; /**< Port-0 had a read timeout while attempting to read a gather list. */ uint64_t p3_perr : 1; /**< If a parity error occurred on the port's packet data, this bit may be set. */ uint64_t p2_perr : 1; /**< If a parity error occurred on the port's packet data, this bit may be set.
*/ uint64_t p1_perr : 1; /**< If a parity error occurred on the port's packet data, this bit may be set. */ uint64_t p0_perr : 1; /**< If a parity error occurred on the port's packet data, this bit may be set. */ uint64_t p3_rtout : 1; /**< Port-3 had a read timeout while attempting to read packet data. */ uint64_t p2_rtout : 1; /**< Port-2 had a read timeout while attempting to read packet data. */ uint64_t p1_rtout : 1; /**< Port-1 had a read timeout while attempting to read packet data. */ uint64_t p0_rtout : 1; /**< Port-0 had a read timeout while attempting to read packet data. */ uint64_t i3_overf : 1; /**< Port-3 had a doorbell overflow. Bit[31] of the doorbell count was set. */ uint64_t i2_overf : 1; /**< Port-2 had a doorbell overflow. Bit[31] of the doorbell count was set. */ uint64_t i1_overf : 1; /**< Port-1 had a doorbell overflow. Bit[31] of the doorbell count was set. */ uint64_t i0_overf : 1; /**< Port-0 had a doorbell overflow. Bit[31] of the doorbell count was set. */ uint64_t i3_rtout : 1; /**< Port-3 had a read timeout while attempting to read instructions. */ uint64_t i2_rtout : 1; /**< Port-2 had a read timeout while attempting to read instructions. */ uint64_t i1_rtout : 1; /**< Port-1 had a read timeout while attempting to read instructions. */ uint64_t i0_rtout : 1; /**< Port-0 had a read timeout while attempting to read instructions. */ uint64_t po3_2sml : 1; /**< The packet being sent out on Port3 is smaller than the NPI_BUFF_SIZE_OUTPUT3[ISIZE] field. */ uint64_t po2_2sml : 1; /**< The packet being sent out on Port2 is smaller than the NPI_BUFF_SIZE_OUTPUT2[ISIZE] field. */ uint64_t po1_2sml : 1; /**< The packet being sent out on Port1 is smaller than the NPI_BUFF_SIZE_OUTPUT1[ISIZE] field. */ uint64_t po0_2sml : 1; /**< The packet being sent out on Port0 is smaller than the NPI_BUFF_SIZE_OUTPUT0[ISIZE] field. */ uint64_t pci_rsl : 1; /**< Set to '1' when a bit in PCI_INT_SUM2 is SET and the corresponding bit in PCI_INT_ENB2 is SET. */ uint64_t rml_wto : 1; /**< Set '1' when the RML does not receive a commit back from an RSL after sending a write command to an RSL. */ uint64_t rml_rto : 1; /**< Set '1' when the RML does not receive read data back from an RSL after sending a read command to an RSL. */ #else uint64_t rml_rto : 1; uint64_t rml_wto : 1; uint64_t pci_rsl : 1; uint64_t po0_2sml : 1; uint64_t po1_2sml : 1; uint64_t po2_2sml : 1; uint64_t po3_2sml : 1; uint64_t i0_rtout : 1; uint64_t i1_rtout : 1; uint64_t i2_rtout : 1; uint64_t i3_rtout : 1; uint64_t i0_overf : 1; uint64_t i1_overf : 1; uint64_t i2_overf : 1; uint64_t i3_overf : 1; uint64_t p0_rtout : 1; uint64_t p1_rtout : 1; uint64_t p2_rtout : 1; uint64_t p3_rtout : 1; uint64_t p0_perr : 1; uint64_t p1_perr : 1; uint64_t p2_perr : 1; uint64_t p3_perr : 1; uint64_t g0_rtout : 1; uint64_t g1_rtout : 1; uint64_t g2_rtout : 1; uint64_t g3_rtout : 1; uint64_t p0_pperr : 1; uint64_t p1_pperr : 1; uint64_t p2_pperr : 1; uint64_t p3_pperr : 1; uint64_t p0_ptout : 1; uint64_t p1_ptout : 1; uint64_t p2_ptout : 1; uint64_t p3_ptout : 1; uint64_t i0_pperr : 1; uint64_t i1_pperr : 1; uint64_t i2_pperr : 1; uint64_t i3_pperr : 1; uint64_t win_rto : 1; uint64_t p_dperr : 1; uint64_t iobdma : 1; uint64_t reserved_42_63 : 22; #endif } cn38xxp2; struct cvmx_npi_int_sum_cn31xx cn50xx; struct cvmx_npi_int_sum_s cn58xx; struct cvmx_npi_int_sum_s cn58xxp1; } cvmx_npi_int_sum_t; /** * cvmx_npi_lowp_dbell * * NPI_LOWP_DBELL = Low Priority Door Bell * * The door bell register for the low priority DMA queue.
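As a hedged usage sketch (the CVMX_NPI_LOWP_DBELL address macro and the cvmx_write_csr() accessor are assumed from the SDK executive, not defined in this file): after queueing DMA instructions totalling some number of 8-byte words, software rings the doorbell with that word count:

    cvmx_npi_lowp_dbell_t db;
    db.u64 = 0;
    db.s.dbell = words_queued; // 8-byte words just added for the low-priority queue
    cvmx_write_csr(CVMX_NPI_LOWP_DBELL, db.u64);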
*/ typedef union { uint64_t u64; struct cvmx_npi_lowp_dbell_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t dbell : 16; /**< The value written to this register is added to the number of 8-byte words to be read and processed for the low-priority DMA queue. */ #else uint64_t dbell : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_npi_lowp_dbell_s cn30xx; struct cvmx_npi_lowp_dbell_s cn31xx; struct cvmx_npi_lowp_dbell_s cn38xx; struct cvmx_npi_lowp_dbell_s cn38xxp2; struct cvmx_npi_lowp_dbell_s cn50xx; struct cvmx_npi_lowp_dbell_s cn58xx; struct cvmx_npi_lowp_dbell_s cn58xxp1; } cvmx_npi_lowp_dbell_t; /** * cvmx_npi_lowp_ibuff_saddr * * NPI_LOWP_IBUFF_SADDR = DMA Low Priority's Instruction Buffer Starting Address * * The address to start reading Instructions from for LOWP. */ typedef union { uint64_t u64; struct cvmx_npi_lowp_ibuff_saddr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t saddr : 36; /**< The starting address to read the first instruction. */ #else uint64_t saddr : 36; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_npi_lowp_ibuff_saddr_s cn30xx; struct cvmx_npi_lowp_ibuff_saddr_s cn31xx; struct cvmx_npi_lowp_ibuff_saddr_s cn38xx; struct cvmx_npi_lowp_ibuff_saddr_s cn38xxp2; struct cvmx_npi_lowp_ibuff_saddr_s cn50xx; struct cvmx_npi_lowp_ibuff_saddr_s cn58xx; struct cvmx_npi_lowp_ibuff_saddr_s cn58xxp1; } cvmx_npi_lowp_ibuff_saddr_t; /** * cvmx_npi_mem_access_subid# * * NPI_MEM_ACCESS_SUBID3 = Memory Access SubId 3 Register * * Carries Read/Write parameters for PP accesses to PCI memory that use NPI SubId3. * Writes to this register are not ordered with writes/reads to the PCI Memory space. * To ensure that a write has completed, the user must read the register before * making an access (i.e. PCI memory space) that requires the value of this register to be updated. */ typedef union { uint64_t u64; struct cvmx_npi_mem_access_subidx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_38_63 : 26; uint64_t shortl : 1; /**< Generate CMD-6 on PCI(x) when '1'. Loads from the cores to the corresponding subid that are 32-bits or smaller: - Will generate the PCI-X "Memory Read DWORD" command in PCI-X mode. (Note that "Memory Read DWORD" appears much like an IO read on the PCI-X bus.) - Will generate the PCI "Memory Read" command in PCI mode, irrespective of the NPI_PCI_READ_CMD[CMD_SIZE] value. NOT IN PASS 1 NOR PASS 2 */ uint64_t nmerge : 1; /**< No Merge. (NOT IN PASS 1 NOR PASS 2) */ uint64_t esr : 2; /**< Endian-Swap on read. */ uint64_t esw : 2; /**< Endian-Swap on write. */ uint64_t nsr : 1; /**< No-Snoop on read. */ uint64_t nsw : 1; /**< No-Snoop on write. */ uint64_t ror : 1; /**< Relax Read on read. */ uint64_t row : 1; /**< Relax Order on write. */ uint64_t ba : 28; /**< PCI Address bits [63:36]. */ #else uint64_t ba : 28; uint64_t row : 1; uint64_t ror : 1; uint64_t nsw : 1; uint64_t nsr : 1; uint64_t esw : 2; uint64_t esr : 2; uint64_t nmerge : 1; uint64_t shortl : 1; uint64_t reserved_38_63 : 26; #endif } s; struct cvmx_npi_mem_access_subidx_s cn30xx; struct cvmx_npi_mem_access_subidx_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t esr : 2; /**< Endian-Swap on read. */ uint64_t esw : 2; /**< Endian-Swap on write. */ uint64_t nsr : 1; /**< No-Snoop on read. */ uint64_t nsw : 1; /**< No-Snoop on write. */ uint64_t ror : 1; /**< Relax Read on read. */ uint64_t row : 1; /**< Relax Order on write. */ uint64_t ba : 28; /**< PCI Address bits [63:36].
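To illustrate the windowing (a sketch, not normative): a core access through this SubId supplies the low 36 address bits, and the hardware prepends BA, so the resulting PCI address is ((uint64_t)ba << 36) | offset. For example, BA = 0x2 with a 36-bit offset of 0x1000 yields PCI address 0x0000002000001000.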
*/ #else uint64_t ba : 28; uint64_t row : 1; uint64_t ror : 1; uint64_t nsw : 1; uint64_t nsr : 1; uint64_t esw : 2; uint64_t esr : 2; uint64_t reserved_36_63 : 28; #endif } cn31xx; struct cvmx_npi_mem_access_subidx_s cn38xx; struct cvmx_npi_mem_access_subidx_cn31xx cn38xxp2; struct cvmx_npi_mem_access_subidx_s cn50xx; struct cvmx_npi_mem_access_subidx_s cn58xx; struct cvmx_npi_mem_access_subidx_s cn58xxp1; } cvmx_npi_mem_access_subidx_t; /** * cvmx_npi_msi_rcv * * NPI_MSI_RCV = NPI MSI Receive Vector Register * * A bit is set in this register corresponding to the vector received during an MSI, and is cleared by writing a '1' to that bit. */ typedef union { uint64_t u64; struct cvmx_npi_msi_rcv_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t int_vec : 64; /**< Refer to PCI_MSI_RCV */ #else uint64_t int_vec : 64; #endif } s; struct cvmx_npi_msi_rcv_s cn30xx; struct cvmx_npi_msi_rcv_s cn31xx; struct cvmx_npi_msi_rcv_s cn38xx; struct cvmx_npi_msi_rcv_s cn38xxp2; struct cvmx_npi_msi_rcv_s cn50xx; struct cvmx_npi_msi_rcv_s cn58xx; struct cvmx_npi_msi_rcv_s cn58xxp1; } cvmx_npi_msi_rcv_t; /** * cvmx_npi_num_desc_output# * * NUM_DESC_OUTPUT0 = Number Of Descriptors Available For Output 0 * * The size of the Buffer/Info Pointer Pair ring for Output-0. */ typedef union { uint64_t u64; struct cvmx_npi_num_desc_outputx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t size : 32; /**< The size of the Buffer/Info Pointer Pair ring. */ #else uint64_t size : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_npi_num_desc_outputx_s cn30xx; struct cvmx_npi_num_desc_outputx_s cn31xx; struct cvmx_npi_num_desc_outputx_s cn38xx; struct cvmx_npi_num_desc_outputx_s cn38xxp2; struct cvmx_npi_num_desc_outputx_s cn50xx; struct cvmx_npi_num_desc_outputx_s cn58xx; struct cvmx_npi_num_desc_outputx_s cn58xxp1; } cvmx_npi_num_desc_outputx_t; /** * cvmx_npi_output_control * * NPI_OUTPUT_CONTROL = NPI's Output Control Register * * Controls the behavior of the NPI packet output ports. */ typedef union { uint64_t u64; struct cvmx_npi_output_control_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_49_63 : 15; uint64_t pkt_rr : 1; /**< When set '1' the output packet selection will be made with a Round Robin arbitration. When '0' the output packet port is fixed in priority, where the lower port number has higher priority. PASS3 Field */ uint64_t p3_bmode : 1; /**< When set '1' PCI_PKTS_SENT3 register will be updated with the number of bytes in the packet sent, when '0' the register will have a value of '1' added. */ uint64_t p2_bmode : 1; /**< When set '1' PCI_PKTS_SENT2 register will be updated with the number of bytes in the packet sent, when '0' the register will have a value of '1' added. */ uint64_t p1_bmode : 1; /**< When set '1' PCI_PKTS_SENT1 register will be updated with the number of bytes in the packet sent, when '0' the register will have a value of '1' added. */ uint64_t p0_bmode : 1; /**< When set '1' PCI_PKTS_SENT0 register will be updated with the number of bytes in the packet sent, when '0' the register will have a value of '1' added. */ uint64_t o3_es : 2; /**< Endian Swap for Output3 Data. */ uint64_t o3_ns : 1; /**< NoSnoop Enable for Output3 Data. */ uint64_t o3_ro : 1; /**< Relaxed Ordering Enable for Output3 Data. */ uint64_t o2_es : 2; /**< Endian Swap for Output2 Data. */ uint64_t o2_ns : 1; /**< NoSnoop Enable for Output2 Data. */ uint64_t o2_ro : 1; /**< Relaxed Ordering Enable for Output2 Data. */ uint64_t o1_es : 2; /**< Endian Swap for Output1 Data.
*/ uint64_t o1_ns : 1; /**< NoSnoop Enable for Output1 Data. */ uint64_t o1_ro : 1; /**< Relaxed Ordering Enable for Output1 Data. */ uint64_t o0_es : 2; /**< Endian Swap for Output0 Data. */ uint64_t o0_ns : 1; /**< NoSnoop Enable for Output0 Data. */ uint64_t o0_ro : 1; /**< Relaxed Ordering Enable for Output0 Data. */ uint64_t o3_csrm : 1; /**< When '1' the address[63:60] to write packet data, comes from the DPTR[63:60] in the scatter-list pair, and the RO, NS, ES values come from the O3_ES, O3_NS, O3_RO. When '0' the RO == DPTR[60], NS == DPTR[61], ES == DPTR[63:62], the address the packet will be written to is ADDR[63:60] == O3_ES[1:0], O3_NS, O3_RO. For Output Port-3. */ uint64_t o2_csrm : 1; /**< When '1' the address[63:60] to write packet data, comes from the DPTR[63:60] in the scatter-list pair, and the RO, NS, ES values come from the O2_ES, O2_NS, O2_RO. When '0' the RO == DPTR[60], NS == DPTR[61], ES == DPTR[63:62], the address the packet will be written to is ADDR[63:60] == O2_ES[1:0], O2_NS, O2_RO. For Output Port-2. */ uint64_t o1_csrm : 1; /**< When '1' the address[63:60] to write packet data, comes from the DPTR[63:60] in the scatter-list pair, and the RO, NS, ES values come from the O1_ES, O1_NS, O1_RO. When '0' the RO == DPTR[60], NS == DPTR[61], ES == DPTR[63:62], the address the packet will be written to is ADDR[63:60] == O1_ES[1:0], O1_NS, O1_RO. For Output Port-1. */ uint64_t o0_csrm : 1; /**< When '1' the address[63:60] to write packet data, comes from the DPTR[63:60] in the scatter-list pair, and the RO, NS, ES values come from the O0_ES, O0_NS, O0_RO. When '0' the RO == DPTR[60], NS == DPTR[61], ES == DPTR[63:62], the address the packet will be written to is ADDR[63:60] == O0_ES[1:0], O0_NS, O0_RO. For Output Port-0. */ uint64_t reserved_20_23 : 4; uint64_t iptr_o3 : 1; /**< Uses the Info-Pointer to store length and data for output-3. */ uint64_t iptr_o2 : 1; /**< Uses the Info-Pointer to store length and data for output-2. */ uint64_t iptr_o1 : 1; /**< Uses the Info-Pointer to store length and data for output-1. */ uint64_t iptr_o0 : 1; /**< Uses the Info-Pointer to store length and data for output-0. */ uint64_t esr_sl3 : 2; /**< The Endian-Swap-Mode for Slist3 reads. */ uint64_t nsr_sl3 : 1; /**< Enables '1' NoSnoop for Slist3 reads. */ uint64_t ror_sl3 : 1; /**< Enables '1' Relaxed Ordering for Slist3 reads. */ uint64_t esr_sl2 : 2; /**< The Endian-Swap-Mode for Slist2 reads. */ uint64_t nsr_sl2 : 1; /**< Enables '1' NoSnoop for Slist2 reads. */ uint64_t ror_sl2 : 1; /**< Enables '1' Relaxed Ordering for Slist2 reads. */ uint64_t esr_sl1 : 2; /**< The Endian-Swap-Mode for Slist1 reads. */ uint64_t nsr_sl1 : 1; /**< Enables '1' NoSnoop for Slist1 reads. */ uint64_t ror_sl1 : 1; /**< Enables '1' Relaxed Ordering for Slist1 reads. */ uint64_t esr_sl0 : 2; /**< The Endian-Swap-Mode for Slist0 reads. */ uint64_t nsr_sl0 : 1; /**< Enables '1' NoSnoop for Slist0 reads. */ uint64_t ror_sl0 : 1; /**< Enables '1' Relaxed Ordering for Slist0 reads. 
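These per-port and per-Slist attributes are normally programmed once during initialization, most conveniently through the union rather than with raw masks. A hedged sketch (the address macro and CSR accessors are assumed from the SDK executive; the chosen field values are examples only):

    cvmx_npi_output_control_t oc;
    oc.u64 = cvmx_read_csr(CVMX_NPI_OUTPUT_CONTROL);
    oc.s.p0_bmode = 1; // PCI_PKTS_SENT0 counts bytes instead of packets
    oc.s.iptr_o0 = 1;  // use the Info-Pointer for output-0
    cvmx_write_csr(CVMX_NPI_OUTPUT_CONTROL, oc.u64);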
*/ #else uint64_t ror_sl0 : 1; uint64_t nsr_sl0 : 1; uint64_t esr_sl0 : 2; uint64_t ror_sl1 : 1; uint64_t nsr_sl1 : 1; uint64_t esr_sl1 : 2; uint64_t ror_sl2 : 1; uint64_t nsr_sl2 : 1; uint64_t esr_sl2 : 2; uint64_t ror_sl3 : 1; uint64_t nsr_sl3 : 1; uint64_t esr_sl3 : 2; uint64_t iptr_o0 : 1; uint64_t iptr_o1 : 1; uint64_t iptr_o2 : 1; uint64_t iptr_o3 : 1; uint64_t reserved_20_23 : 4; uint64_t o0_csrm : 1; uint64_t o1_csrm : 1; uint64_t o2_csrm : 1; uint64_t o3_csrm : 1; uint64_t o0_ro : 1; uint64_t o0_ns : 1; uint64_t o0_es : 2; uint64_t o1_ro : 1; uint64_t o1_ns : 1; uint64_t o1_es : 2; uint64_t o2_ro : 1; uint64_t o2_ns : 1; uint64_t o2_es : 2; uint64_t o3_ro : 1; uint64_t o3_ns : 1; uint64_t o3_es : 2; uint64_t p0_bmode : 1; uint64_t p1_bmode : 1; uint64_t p2_bmode : 1; uint64_t p3_bmode : 1; uint64_t pkt_rr : 1; uint64_t reserved_49_63 : 15; #endif } s; struct cvmx_npi_output_control_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_45_63 : 19; uint64_t p0_bmode : 1; /**< When set '1' PCI_PKTS_SENT0 register will be updated with the number of bytes in the packet sent, when '0' the register will have a value of '1' added. */ uint64_t reserved_32_43 : 12; uint64_t o0_es : 2; /**< Endian Swap for Output0 Data. */ uint64_t o0_ns : 1; /**< NoSnoop Enable for Output0 Data. */ uint64_t o0_ro : 1; /**< Relaxed Ordering Enable for Output0 Data. */ uint64_t reserved_25_27 : 3; uint64_t o0_csrm : 1; /**< When '1' the address[63:60] to write packet data, comes from the DPTR[63:60] in the scatter-list pair, and the RO, NS, ES values come from the O0_ES, O0_NS, O0_RO. When '0' the RO == DPTR[60], NS == DPTR[61], ES == DPTR[63:62], the address the packet will be written to is ADDR[63:60] == O0_ES[1:0], O0_NS, O0_RO. For Output Port-0. */ uint64_t reserved_17_23 : 7; uint64_t iptr_o0 : 1; /**< Uses the Info-Pointer to store length and data for output-0. */ uint64_t reserved_4_15 : 12; uint64_t esr_sl0 : 2; /**< The Endian-Swap-Mode for Slist0 reads. */ uint64_t nsr_sl0 : 1; /**< Enables '1' NoSnoop for Slist0 reads. */ uint64_t ror_sl0 : 1; /**< Enables '1' Relaxed Ordering for Slist0 reads. */ #else uint64_t ror_sl0 : 1; uint64_t nsr_sl0 : 1; uint64_t esr_sl0 : 2; uint64_t reserved_4_15 : 12; uint64_t iptr_o0 : 1; uint64_t reserved_17_23 : 7; uint64_t o0_csrm : 1; uint64_t reserved_25_27 : 3; uint64_t o0_ro : 1; uint64_t o0_ns : 1; uint64_t o0_es : 2; uint64_t reserved_32_43 : 12; uint64_t p0_bmode : 1; uint64_t reserved_45_63 : 19; #endif } cn30xx; struct cvmx_npi_output_control_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_46_63 : 18; uint64_t p1_bmode : 1; /**< When set '1' PCI_PKTS_SENT1 register will be updated with the number of bytes in the packet sent, when '0' the register will have a value of '1' added. */ uint64_t p0_bmode : 1; /**< When set '1' PCI_PKTS_SENT0 register will be updated with the number of bytes in the packet sent, when '0' the register will have a value of '1' added. */ uint64_t reserved_36_43 : 8; uint64_t o1_es : 2; /**< Endian Swap for Output1 Data. */ uint64_t o1_ns : 1; /**< NoSnoop Enable for Output1 Data. */ uint64_t o1_ro : 1; /**< Relaxed Ordering Enable for Output1 Data. */ uint64_t o0_es : 2; /**< Endian Swap for Output0 Data. */ uint64_t o0_ns : 1; /**< NoSnoop Enable for Output0 Data. */ uint64_t o0_ro : 1; /**< Relaxed Ordering Enable for Output0 Data. 
*/ uint64_t reserved_26_27 : 2; uint64_t o1_csrm : 1; /**< When '1' the address[63:60] to write packet data, comes from the DPTR[63:60] in the scatter-list pair, and the RO, NS, ES values come from the O1_ES, O1_NS, O1_RO. When '0' the RO == DPTR[60], NS == DPTR[61], ES == DPTR[63:62], the address the packet will be written to is ADDR[63:60] == O1_ES[1:0], O1_NS, O1_RO. For Output Port-1. */ uint64_t o0_csrm : 1; /**< When '1' the address[63:60] to write packet data, comes from the DPTR[63:60] in the scatter-list pair, and the RO, NS, ES values come from the O0_ES, O0_NS, O0_RO. When '0' the RO == DPTR[60], NS == DPTR[61], ES == DPTR[63:62], the address the packet will be written to is ADDR[63:60] == O0_ES[1:0], O0_NS, O0_RO. For Output Port-0. */ uint64_t reserved_18_23 : 6; uint64_t iptr_o1 : 1; /**< Uses the Info-Pointer to store length and data for output-1. */ uint64_t iptr_o0 : 1; /**< Uses the Info-Pointer to store length and data for output-0. */ uint64_t reserved_8_15 : 8; uint64_t esr_sl1 : 2; /**< The Endian-Swap-Mode for Slist1 reads. */ uint64_t nsr_sl1 : 1; /**< Enables '1' NoSnoop for Slist1 reads. */ uint64_t ror_sl1 : 1; /**< Enables '1' Relaxed Ordering for Slist1 reads. */ uint64_t esr_sl0 : 2; /**< The Endian-Swap-Mode for Slist0 reads. */ uint64_t nsr_sl0 : 1; /**< Enables '1' NoSnoop for Slist0 reads. */ uint64_t ror_sl0 : 1; /**< Enables '1' Relaxed Ordering for Slist0 reads. */ #else uint64_t ror_sl0 : 1; uint64_t nsr_sl0 : 1; uint64_t esr_sl0 : 2; uint64_t ror_sl1 : 1; uint64_t nsr_sl1 : 1; uint64_t esr_sl1 : 2; uint64_t reserved_8_15 : 8; uint64_t iptr_o0 : 1; uint64_t iptr_o1 : 1; uint64_t reserved_18_23 : 6; uint64_t o0_csrm : 1; uint64_t o1_csrm : 1; uint64_t reserved_26_27 : 2; uint64_t o0_ro : 1; uint64_t o0_ns : 1; uint64_t o0_es : 2; uint64_t o1_ro : 1; uint64_t o1_ns : 1; uint64_t o1_es : 2; uint64_t reserved_36_43 : 8; uint64_t p0_bmode : 1; uint64_t p1_bmode : 1; uint64_t reserved_46_63 : 18; #endif } cn31xx; struct cvmx_npi_output_control_s cn38xx; struct cvmx_npi_output_control_cn38xxp2 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_48_63 : 16; uint64_t p3_bmode : 1; /**< When set '1' PCI_PKTS_SENT3 register will be updated with the number of bytes in the packet sent, when '0' the register will have a value of '1' added. */ uint64_t p2_bmode : 1; /**< When set '1' PCI_PKTS_SENT2 register will be updated with the number of bytes in the packet sent, when '0' the register will have a value of '1' added. */ uint64_t p1_bmode : 1; /**< When set '1' PCI_PKTS_SENT1 register will be updated with the number of bytes in the packet sent, when '0' the register will have a value of '1' added. */ uint64_t p0_bmode : 1; /**< When set '1' PCI_PKTS_SENT0 register will be updated with the number of bytes in the packet sent, when '0' the register will have a value of '1' added. */ uint64_t o3_es : 2; /**< Endian Swap for Output3 Data. */ uint64_t o3_ns : 1; /**< NoSnoop Enable for Output3 Data. */ uint64_t o3_ro : 1; /**< Relaxed Ordering Enable for Output3 Data. */ uint64_t o2_es : 2; /**< Endian Swap for Output2 Data. */ uint64_t o2_ns : 1; /**< NoSnoop Enable for Output2 Data. */ uint64_t o2_ro : 1; /**< Relaxed Ordering Enable for Output2 Data. */ uint64_t o1_es : 2; /**< Endian Swap for Output1 Data. */ uint64_t o1_ns : 1; /**< NoSnoop Enable for Output1 Data. */ uint64_t o1_ro : 1; /**< Relaxed Ordering Enable for Output1 Data. */ uint64_t o0_es : 2; /**< Endian Swap for Output0 Data. */ uint64_t o0_ns : 1; /**< NoSnoop Enable for Output0 Data. 
*/ uint64_t o0_ro : 1; /**< Relaxed Ordering Enable for Output0 Data. */ uint64_t o3_csrm : 1; /**< When '1' the address[63:60] to write packet data, comes from the DPTR[63:60] in the scatter-list pair, and the RO, NS, ES values come from the O3_ES, O3_NS, O3_RO. When '0' the RO == DPTR[60], NS == DPTR[61], ES == DPTR[63:62], the address the packet will be written to is ADDR[63:60] == O3_ES[1:0], O3_NS, O3_RO. For Output Port-3. */ uint64_t o2_csrm : 1; /**< When '1' the address[63:60] to write packet data, comes from the DPTR[63:60] in the scatter-list pair, and the RO, NS, ES values come from the O2_ES, O2_NS, O2_RO. When '0' the RO == DPTR[60], NS == DPTR[61], ES == DPTR[63:62], the address the packet will be written to is ADDR[63:60] == O2_ES[1:0], O2_NS, O2_RO. For Output Port-2. */ uint64_t o1_csrm : 1; /**< When '1' the address[63:60] to write packet data, comes from the DPTR[63:60] in the scatter-list pair, and the RO, NS, ES values come from the O1_ES, O1_NS, O1_RO. When '0' the RO == DPTR[60], NS == DPTR[61], ES == DPTR[63:62], the address the packet will be written to is ADDR[63:60] == O1_ES[1:0], O1_NS, O1_RO. For Output Port-1. */ uint64_t o0_csrm : 1; /**< When '1' the address[63:60] to write packet data, comes from the DPTR[63:60] in the scatter-list pair, and the RO, NS, ES values come from the O0_ES, O0_NS, O0_RO. When '0' the RO == DPTR[60], NS == DPTR[61], ES == DPTR[63:62], the address the packet will be written to is ADDR[63:60] == O0_ES[1:0], O0_NS, O0_RO. For Output Port-0. */ uint64_t reserved_20_23 : 4; uint64_t iptr_o3 : 1; /**< Uses the Info-Pointer to store length and data for output-3. */ uint64_t iptr_o2 : 1; /**< Uses the Info-Pointer to store length and data for output-2. */ uint64_t iptr_o1 : 1; /**< Uses the Info-Pointer to store length and data for output-1. */ uint64_t iptr_o0 : 1; /**< Uses the Info-Pointer to store length and data for output-0. */ uint64_t esr_sl3 : 2; /**< The Endian-Swap-Mode for Slist3 reads. */ uint64_t nsr_sl3 : 1; /**< Enables '1' NoSnoop for Slist3 reads. */ uint64_t ror_sl3 : 1; /**< Enables '1' Relaxed Ordering for Slist3 reads. */ uint64_t esr_sl2 : 2; /**< The Endian-Swap-Mode for Slist2 reads. */ uint64_t nsr_sl2 : 1; /**< Enables '1' NoSnoop for Slist2 reads. */ uint64_t ror_sl2 : 1; /**< Enables '1' Relaxed Ordering for Slist2 reads. */ uint64_t esr_sl1 : 2; /**< The Endian-Swap-Mode for Slist1 reads. */ uint64_t nsr_sl1 : 1; /**< Enables '1' NoSnoop for Slist1 reads. */ uint64_t ror_sl1 : 1; /**< Enables '1' Relaxed Ordering for Slist1 reads. */ uint64_t esr_sl0 : 2; /**< The Endian-Swap-Mode for Slist0 reads. */ uint64_t nsr_sl0 : 1; /**< Enables '1' NoSnoop for Slist0 reads. */ uint64_t ror_sl0 : 1; /**< Enables '1' Relaxed Ordering for Slist0 reads. 
*/ #else uint64_t ror_sl0 : 1; uint64_t nsr_sl0 : 1; uint64_t esr_sl0 : 2; uint64_t ror_sl1 : 1; uint64_t nsr_sl1 : 1; uint64_t esr_sl1 : 2; uint64_t ror_sl2 : 1; uint64_t nsr_sl2 : 1; uint64_t esr_sl2 : 2; uint64_t ror_sl3 : 1; uint64_t nsr_sl3 : 1; uint64_t esr_sl3 : 2; uint64_t iptr_o0 : 1; uint64_t iptr_o1 : 1; uint64_t iptr_o2 : 1; uint64_t iptr_o3 : 1; uint64_t reserved_20_23 : 4; uint64_t o0_csrm : 1; uint64_t o1_csrm : 1; uint64_t o2_csrm : 1; uint64_t o3_csrm : 1; uint64_t o0_ro : 1; uint64_t o0_ns : 1; uint64_t o0_es : 2; uint64_t o1_ro : 1; uint64_t o1_ns : 1; uint64_t o1_es : 2; uint64_t o2_ro : 1; uint64_t o2_ns : 1; uint64_t o2_es : 2; uint64_t o3_ro : 1; uint64_t o3_ns : 1; uint64_t o3_es : 2; uint64_t p0_bmode : 1; uint64_t p1_bmode : 1; uint64_t p2_bmode : 1; uint64_t p3_bmode : 1; uint64_t reserved_48_63 : 16; #endif } cn38xxp2; struct cvmx_npi_output_control_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_49_63 : 15; uint64_t pkt_rr : 1; /**< When set '1' the output packet selection will be made with a Round Robin arbitration. When '0' the output packet port is fixed in priority, where the lower port number has higher priority. PASS2 Field */ uint64_t reserved_46_47 : 2; uint64_t p1_bmode : 1; /**< When set '1' PCI_PKTS_SENT1 register will be updated with the number of bytes in the packet sent, when '0' the register will have a value of '1' added. */ uint64_t p0_bmode : 1; /**< When set '1' PCI_PKTS_SENT0 register will be updated with the number of bytes in the packet sent, when '0' the register will have a value of '1' added. */ uint64_t reserved_36_43 : 8; uint64_t o1_es : 2; /**< Endian Swap for Output1 Data. */ uint64_t o1_ns : 1; /**< NoSnoop Enable for Output1 Data. */ uint64_t o1_ro : 1; /**< Relaxed Ordering Enable for Output1 Data. */ uint64_t o0_es : 2; /**< Endian Swap for Output0 Data. */ uint64_t o0_ns : 1; /**< NoSnoop Enable for Output0 Data. */ uint64_t o0_ro : 1; /**< Relaxed Ordering Enable for Output0 Data. */ uint64_t reserved_26_27 : 2; uint64_t o1_csrm : 1; /**< When '1' the address[63:60] to write packet data, comes from the DPTR[63:60] in the scatter-list pair, and the RO, NS, ES values come from the O1_ES, O1_NS, O1_RO. When '0' the RO == DPTR[60], NS == DPTR[61], ES == DPTR[63:62], the address the packet will be written to is ADDR[63:60] == O1_ES[1:0], O1_NS, O1_RO. For Output Port-1. */ uint64_t o0_csrm : 1; /**< When '1' the address[63:60] to write packet data, comes from the DPTR[63:60] in the scatter-list pair, and the RO, NS, ES values come from the O0_ES, O0_NS, O0_RO. When '0' the RO == DPTR[60], NS == DPTR[61], ES == DPTR[63:62], the address the packet will be written to is ADDR[63:60] == O0_ES[1:0], O0_NS, O0_RO. For Output Port-0. */ uint64_t reserved_18_23 : 6; uint64_t iptr_o1 : 1; /**< Uses the Info-Pointer to store length and data for output-1. */ uint64_t iptr_o0 : 1; /**< Uses the Info-Pointer to store length and data for output-0. */ uint64_t reserved_8_15 : 8; uint64_t esr_sl1 : 2; /**< The Endian-Swap-Mode for Slist1 reads. */ uint64_t nsr_sl1 : 1; /**< Enables '1' NoSnoop for Slist1 reads. */ uint64_t ror_sl1 : 1; /**< Enables '1' Relaxed Ordering for Slist1 reads. */ uint64_t esr_sl0 : 2; /**< The Endian-Swap-Mode for Slist0 reads. */ uint64_t nsr_sl0 : 1; /**< Enables '1' NoSnoop for Slist0 reads. */ uint64_t ror_sl0 : 1; /**< Enables '1' Relaxed Ordering for Slist0 reads. 
*/ #else uint64_t ror_sl0 : 1; uint64_t nsr_sl0 : 1; uint64_t esr_sl0 : 2; uint64_t ror_sl1 : 1; uint64_t nsr_sl1 : 1; uint64_t esr_sl1 : 2; uint64_t reserved_8_15 : 8; uint64_t iptr_o0 : 1; uint64_t iptr_o1 : 1; uint64_t reserved_18_23 : 6; uint64_t o0_csrm : 1; uint64_t o1_csrm : 1; uint64_t reserved_26_27 : 2; uint64_t o0_ro : 1; uint64_t o0_ns : 1; uint64_t o0_es : 2; uint64_t o1_ro : 1; uint64_t o1_ns : 1; uint64_t o1_es : 2; uint64_t reserved_36_43 : 8; uint64_t p0_bmode : 1; uint64_t p1_bmode : 1; uint64_t reserved_46_47 : 2; uint64_t pkt_rr : 1; uint64_t reserved_49_63 : 15; #endif } cn50xx; struct cvmx_npi_output_control_s cn58xx; struct cvmx_npi_output_control_s cn58xxp1; } cvmx_npi_output_control_t; /** * cvmx_npi_p#_dbpair_addr * * NPI_P0_DBPAIR_ADDR = NPI's Port-0 DATA-BUFFER Pair Next Read Address. * * Contains the next address to read for Port-0's Data/Buffer Pair. */ typedef union { uint64_t u64; struct cvmx_npi_px_dbpair_addr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_63_63 : 1; uint64_t state : 2; /**< POS state machine vector. Used to tell when NADDR is valid (when STATE == 0). */ uint64_t naddr : 61; /**< Bits [63:3] of the next Data-Info Pair to read. Value is only valid when STATE == 0. */ #else uint64_t naddr : 61; uint64_t state : 2; uint64_t reserved_63_63 : 1; #endif } s; struct cvmx_npi_px_dbpair_addr_s cn30xx; struct cvmx_npi_px_dbpair_addr_s cn31xx; struct cvmx_npi_px_dbpair_addr_s cn38xx; struct cvmx_npi_px_dbpair_addr_s cn38xxp2; struct cvmx_npi_px_dbpair_addr_s cn50xx; struct cvmx_npi_px_dbpair_addr_s cn58xx; struct cvmx_npi_px_dbpair_addr_s cn58xxp1; } cvmx_npi_px_dbpair_addr_t; /** * cvmx_npi_p#_instr_addr * * NPI_P0_INSTR_ADDR = NPI's Port-0 Instruction Next Read Address. * * Contains the next address to read for Port-0's Instructions. */ typedef union { uint64_t u64; struct cvmx_npi_px_instr_addr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t state : 3; /**< Gather engine state vector. Used to tell when NADDR is valid (when STATE == 0). */ uint64_t naddr : 61; /**< Bits [63:3] of the next Instruction to read. Value is only valid when STATE == 0. */ #else uint64_t naddr : 61; uint64_t state : 3; #endif } s; struct cvmx_npi_px_instr_addr_s cn30xx; struct cvmx_npi_px_instr_addr_s cn31xx; struct cvmx_npi_px_instr_addr_s cn38xx; struct cvmx_npi_px_instr_addr_s cn38xxp2; struct cvmx_npi_px_instr_addr_s cn50xx; struct cvmx_npi_px_instr_addr_s cn58xx; struct cvmx_npi_px_instr_addr_s cn58xxp1; } cvmx_npi_px_instr_addr_t; /** * cvmx_npi_p#_instr_cnts * * NPI_P0_INSTR_CNTS = NPI's Port-0 Instruction Counts For Packets In. * * Used to determine the number of instructions in the NPI and still to be fetched for Input-Packets. */ typedef union { uint64_t u64; struct cvmx_npi_px_instr_cnts_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_38_63 : 26; uint64_t fcnt : 6; /**< Number of entries in the Instruction FIFO. */ uint64_t avail : 32; /**< Doorbell count to be read. */ #else uint64_t avail : 32; uint64_t fcnt : 6; uint64_t reserved_38_63 : 26; #endif } s; struct cvmx_npi_px_instr_cnts_s cn30xx; struct cvmx_npi_px_instr_cnts_s cn31xx; struct cvmx_npi_px_instr_cnts_s cn38xx; struct cvmx_npi_px_instr_cnts_s cn38xxp2; struct cvmx_npi_px_instr_cnts_s cn50xx; struct cvmx_npi_px_instr_cnts_s cn58xx; struct cvmx_npi_px_instr_cnts_s cn58xxp1; } cvmx_npi_px_instr_cnts_t; /** * cvmx_npi_p#_pair_cnts * * NPI_P0_PAIR_CNTS = NPI's Port-0 Instruction Counts For Packets Out. * * Used to determine the number of instructions in the NPI and still to be fetched for Output-Packets.
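An illustrative polling sketch (CVMX_NPI_P0_PAIR_CNTS is an assumed per-port address macro; CSR accessors as in the SDK executive):

    cvmx_npi_px_pair_cnts_t pc;
    pc.u64 = cvmx_read_csr(CVMX_NPI_P0_PAIR_CNTS);
    uint32_t avail = pc.s.avail; // doorbell count still to be read
    uint32_t fcnt  = pc.s.fcnt;  // 16 minus the entries in the D/I Pair FIFO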
*/ typedef union { uint64_t u64; struct cvmx_npi_px_pair_cnts_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_37_63 : 27; uint64_t fcnt : 5; /**< 16 minus the number of entries in the D/I Pair FIFO. */ uint64_t avail : 32; /**< Doorbell count to be read. */ #else uint64_t avail : 32; uint64_t fcnt : 5; uint64_t reserved_37_63 : 27; #endif } s; struct cvmx_npi_px_pair_cnts_s cn30xx; struct cvmx_npi_px_pair_cnts_s cn31xx; struct cvmx_npi_px_pair_cnts_s cn38xx; struct cvmx_npi_px_pair_cnts_s cn38xxp2; struct cvmx_npi_px_pair_cnts_s cn50xx; struct cvmx_npi_px_pair_cnts_s cn58xx; struct cvmx_npi_px_pair_cnts_s cn58xxp1; } cvmx_npi_px_pair_cnts_t; /** * cvmx_npi_pci_burst_size * * NPI_PCI_BURST_SIZE = NPI PCI Burst Size Register * * Controls the number of words the NPI will attempt to read / write to/from the PCI. */ typedef union { uint64_t u64; struct cvmx_npi_pci_burst_size_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_14_63 : 50; uint64_t wr_brst : 7; /**< The number of 8B words to write to PCI in any one write operation. A zero is equal to 128. This value is used for packet reads and is clamped at a max of 112 for DMA writes. */ uint64_t rd_brst : 7; /**< Number of 8B words to read from PCI in any one read operation. Legal values are 1 to 127, where a 0 will be treated as a 1. For reading of packet data, the value is limited to 64 in PASS-2. This value does not control the size of a read caused by an IOBDMA from a PP. */ #else uint64_t rd_brst : 7; uint64_t wr_brst : 7; uint64_t reserved_14_63 : 50; #endif } s; struct cvmx_npi_pci_burst_size_s cn30xx; struct cvmx_npi_pci_burst_size_s cn31xx; struct cvmx_npi_pci_burst_size_s cn38xx; struct cvmx_npi_pci_burst_size_s cn38xxp2; struct cvmx_npi_pci_burst_size_s cn50xx; struct cvmx_npi_pci_burst_size_s cn58xx; struct cvmx_npi_pci_burst_size_s cn58xxp1; } cvmx_npi_pci_burst_size_t; /** * cvmx_npi_pci_int_arb_cfg * * NPI_PCI_INT_ARB_CFG = Configuration For PCI Arbiter * * Controls operation of the Internal PCI Arbiter. This register should * only be written when PRST# is asserted. NPI_PCI_INT_ARB_CFG[EN] should * only be set when Octane is a host. */ typedef union { uint64_t u64; struct cvmx_npi_pci_int_arb_cfg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_13_63 : 51; uint64_t hostmode : 1; /**< PCI Host Mode Pin (sampled for use by software). This bit reflects the sampled PCI_HOSTMODE pin. In HOST Mode, OCTEON drives the PCI_CLK_OUT and PCI initialization pattern during PCI_RST_N deassertion. *** NOTE: O9N PASS1 Addition */ uint64_t pci_ovr : 4; /**< PCI Host Mode Bus Speed/Type Override When in Host Mode (PCI_HOSTMODE pin = 1), OCTEON, acting as the PCI Central Agent, samples the PCI_PCI100, PCI_M66EN and PCI_PCIXCAP pins to determine the 'sampled' PCI Bus speed and Bus Type (PCI or PCIX). (see: PCI_CNT_REG[HM_SPEED,HM_PCIX]) However, in some cases, SW may want to override the 'sampled' PCI Bus Type/Speed, and use some SLOWER Bus frequency. The PCI_OVR field encoding represents the 'override' PCI Bus Type/Speed which will be used to generate the PCI_CLK_OUT and determines the PCI initialization pattern driven during PCI_RST_N deassertion.
PCI_OVR[3]:   OVERRIDE (0:DISABLE/1:ENABLE)
PCI_OVR[2]:   BUS TYPE (0:PCI/1:PCIX)
PCI_OVR[1:0]: BUS SPEED (0:33/1:66/2:100/3:133)

OVERRIDE TYPE SPEED | Override Configuration
 [3]     [2]  [1:0] |   TYPE       SPEED
--------------------+-------------------------------
  0       x    xx   | No override (uses 'sampled'
                    | Bus Speed(HM_SPEED) and
                    | Bus Type(HM_PCIX))
  1       0    00   | PCI Mode    33MHz
  1       0    01   | PCI Mode    66MHz
  1       0    10   | RESERVED (DO NOT USE)
  1       0    11   | RESERVED (DO NOT USE)
  1       1    00   | RESERVED (DO NOT USE)
  1       1    01   | PCIX Mode   66MHz
  1       1    10   | PCIX Mode   100MHz
  1       1    11   | PCIX Mode   133MHz

NOTES:
- NPI_PCI_INT_ARB_CFG[PCI_OVR] has NO EFFECT on PCI_CNT_REG[HM_SPEED,HM_PCIX] (ie: the sampled PCI Bus Type/Speed), but WILL AFFECT PCI_CTL_STATUS_2[AP_PCIX], which reflects the actual PCI Bus Type (0:PCI/1:PCIX).
- Software should never 'up' configure the recommended values. In other words, if the 'sampled' Bus Type=PCI(HM_PCIX=0), then SW should NOT attempt to set TYPE[2]=1 for PCIX Mode. Likewise, if the sampled Bus Speed=66MHz(HM_SPEED=01), then SW should NOT attempt to 'speed up' the bus [ie: SPEED[1:0]=10(100MHz)].
- If PCI_OVR<3> is set prior to PCI reset de-assertion in host mode, NPI_PCI_INT_ARB_CFG[PCI_OVR] indicates the Bus Type/Speed that OCTEON drove on the DEVSEL/STOP/TRDY pins during reset de-assertion. (The user should then ignore the 'sampled' Bus Type/Speed contained in the PCI_CNT_REG[HM_PCIX,HM_SPEED] fields.)
- If PCI_OVR<3> is clear prior to PCI reset de-assertion in host mode, PCI_CNT_REG[HM_PCIX,HM_SPEED] indicates the Bus Type/Speed that OCTEON drove on the DEVSEL/STOP/TRDY pins during reset de-assertion.
*** NOTE: O9N PASS1 Addition */ uint64_t reserved_5_7 : 3; uint64_t en : 1; /**< Internal arbiter enable. */ uint64_t park_mod : 1; /**< Bus park mode. 0=park on last, 1=park on device. */ uint64_t park_dev : 3; /**< Bus park device. 0-3 External device, 4 = Octane. */ #else uint64_t park_dev : 3; uint64_t park_mod : 1; uint64_t en : 1; uint64_t reserved_5_7 : 3; uint64_t pci_ovr : 4; uint64_t hostmode : 1; uint64_t reserved_13_63 : 51; #endif } s; struct cvmx_npi_pci_int_arb_cfg_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t en : 1; /**< Internal arbiter enable. */ uint64_t park_mod : 1; /**< Bus park mode. 0=park on last, 1=park on device. */ uint64_t park_dev : 3; /**< Bus park device. 0-3 External device, 4 = Octane. */ #else uint64_t park_dev : 3; uint64_t park_mod : 1; uint64_t en : 1; uint64_t reserved_5_63 : 59; #endif } cn30xx; struct cvmx_npi_pci_int_arb_cfg_cn30xx cn31xx; struct cvmx_npi_pci_int_arb_cfg_cn30xx cn38xx; struct cvmx_npi_pci_int_arb_cfg_cn30xx cn38xxp2; struct cvmx_npi_pci_int_arb_cfg_s cn50xx; struct cvmx_npi_pci_int_arb_cfg_s cn58xx; struct cvmx_npi_pci_int_arb_cfg_s cn58xxp1; } cvmx_npi_pci_int_arb_cfg_t; /** * cvmx_npi_pci_read_cmd * * NPI_PCI_READ_CMD = NPI PCI Read Command Register * * Controls the type of read command sent. * Writes to this register are not ordered with writes/reads to the PCI Memory space. * To ensure that a write has completed, the user must read the register before * making an access (i.e. PCI memory space) that requires the value of this register to be updated. * Also, any previously issued reads/writes to PCI memory space still stored in the outbound * FIFO will use the value of this register after it has been updated.
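A conservative update sequence that honors this ordering rule (illustrative; the accessor and address macro names are assumed from the SDK executive):

    cvmx_npi_pci_read_cmd_t rc;
    rc.u64 = 0;
    rc.s.cmd_size = 256; // example: reads of 256 bytes or more use Memory-Read-Multiple
    cvmx_write_csr(CVMX_NPI_PCI_READ_CMD, rc.u64);
    cvmx_read_csr(CVMX_NPI_PCI_READ_CMD); // read back before issuing dependent PCI accesses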
*/ typedef union { uint64_t u64; struct cvmx_npi_pci_read_cmd_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_11_63 : 53; uint64_t cmd_size : 11; /**< When the number of bytes to be read is equal to or exceeds this size, the PCI (in PCI mode) will use a Memory-Read-Multiple command. This register holds a value from 8 to 2048; a value of 0-7 will be treated as a value of 2048. */ #else uint64_t cmd_size : 11; uint64_t reserved_11_63 : 53; #endif } s; struct cvmx_npi_pci_read_cmd_s cn30xx; struct cvmx_npi_pci_read_cmd_s cn31xx; struct cvmx_npi_pci_read_cmd_s cn38xx; struct cvmx_npi_pci_read_cmd_s cn38xxp2; struct cvmx_npi_pci_read_cmd_s cn50xx; struct cvmx_npi_pci_read_cmd_s cn58xx; struct cvmx_npi_pci_read_cmd_s cn58xxp1; } cvmx_npi_pci_read_cmd_t; /** * cvmx_npi_port32_instr_hdr * * NPI_PORT32_INSTR_HDR = NPI Port 32 Instruction Header * * Contains bits [62:42] of the Instruction Header for port 32. */ typedef union { uint64_t u64; struct cvmx_npi_port32_instr_hdr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_44_63 : 20; uint64_t pbp : 1; /**< Enable Packet-by-packet mode. */ uint64_t rsv_f : 5; /**< Reserved */ uint64_t rparmode : 2; /**< Parse Mode. Used when packet is raw and PBP==0. */ uint64_t rsv_e : 1; /**< Reserved */ uint64_t rskp_len : 7; /**< Skip Length. Used when packet is raw and PBP==0. */ uint64_t rsv_d : 6; /**< Reserved */ uint64_t use_ihdr : 1; /**< When set '1' the instruction header will be sent as part of the packet data, regardless of the value of bit [63] of the instruction header. USE_IHDR must be set whenever PBP is set. */ uint64_t rsv_c : 5; /**< Reserved */ uint64_t par_mode : 2; /**< Parse Mode. Used when USE_IHDR is set and packet is not raw and PBP is not set. */ uint64_t rsv_b : 1; /**< Reserved instruction header sent to IPD. */ uint64_t skp_len : 7; /**< Skip Length. Used when USE_IHDR is set and packet is not raw and PBP is not set. */ uint64_t rsv_a : 6; /**< Reserved */ #else uint64_t rsv_a : 6; uint64_t skp_len : 7; uint64_t rsv_b : 1; uint64_t par_mode : 2; uint64_t rsv_c : 5; uint64_t use_ihdr : 1; uint64_t rsv_d : 6; uint64_t rskp_len : 7; uint64_t rsv_e : 1; uint64_t rparmode : 2; uint64_t rsv_f : 5; uint64_t pbp : 1; uint64_t reserved_44_63 : 20; #endif } s; struct cvmx_npi_port32_instr_hdr_s cn30xx; struct cvmx_npi_port32_instr_hdr_s cn31xx; struct cvmx_npi_port32_instr_hdr_s cn38xx; struct cvmx_npi_port32_instr_hdr_s cn38xxp2; struct cvmx_npi_port32_instr_hdr_s cn50xx; struct cvmx_npi_port32_instr_hdr_s cn58xx; struct cvmx_npi_port32_instr_hdr_s cn58xxp1; } cvmx_npi_port32_instr_hdr_t; /** * cvmx_npi_port33_instr_hdr * * NPI_PORT33_INSTR_HDR = NPI Port 33 Instruction Header * * Contains bits [62:42] of the Instruction Header for port 33. */ typedef union { uint64_t u64; struct cvmx_npi_port33_instr_hdr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_44_63 : 20; uint64_t pbp : 1; /**< Enable Packet-by-packet mode. */ uint64_t rsv_f : 5; /**< Reserved */ uint64_t rparmode : 2; /**< Parse Mode. Used when packet is raw and PBP==0. */ uint64_t rsv_e : 1; /**< Reserved */ uint64_t rskp_len : 7; /**< Skip Length. Used when packet is raw and PBP==0. */ uint64_t rsv_d : 6; /**< Reserved */ uint64_t use_ihdr : 1; /**< When set '1' the instruction header will be sent as part of the packet data, regardless of the value of bit [63] of the instruction header. USE_IHDR must be set whenever PBP is set. */ uint64_t rsv_c : 5; /**< Reserved */ uint64_t par_mode : 2; /**< Parse Mode. Used when USE_IHDR is set and packet is not raw and PBP is not set.
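The same programming rules apply to every NPI_PORT*_INSTR_HDR register; in particular, USE_IHDR must accompany PBP. A hedged sketch for this port (the address macro and accessor are assumed from the SDK executive):

    cvmx_npi_port33_instr_hdr_t ih;
    ih.u64 = 0;
    ih.s.pbp = 1;      // packet-by-packet mode
    ih.s.use_ihdr = 1; // required whenever PBP is set
    cvmx_write_csr(CVMX_NPI_PORT33_INSTR_HDR, ih.u64);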
*/ uint64_t rsv_b : 1; /**< Reserved instruction header sent to IPD. */ uint64_t skp_len : 7; /**< Skip Length. Used when USE_IHDR is set and packet is not raw and PBP is not set. */ uint64_t rsv_a : 6; /**< Reserved */ #else uint64_t rsv_a : 6; uint64_t skp_len : 7; uint64_t rsv_b : 1; uint64_t par_mode : 2; uint64_t rsv_c : 5; uint64_t use_ihdr : 1; uint64_t rsv_d : 6; uint64_t rskp_len : 7; uint64_t rsv_e : 1; uint64_t rparmode : 2; uint64_t rsv_f : 5; uint64_t pbp : 1; uint64_t reserved_44_63 : 20; #endif } s; struct cvmx_npi_port33_instr_hdr_s cn31xx; struct cvmx_npi_port33_instr_hdr_s cn38xx; struct cvmx_npi_port33_instr_hdr_s cn38xxp2; struct cvmx_npi_port33_instr_hdr_s cn50xx; struct cvmx_npi_port33_instr_hdr_s cn58xx; struct cvmx_npi_port33_instr_hdr_s cn58xxp1; } cvmx_npi_port33_instr_hdr_t; /** * cvmx_npi_port34_instr_hdr * * NPI_PORT34_INSTR_HDR = NPI Port 34 Instruction Header * * Contains bits [62:42] of the Instruction Header for port 34. Added for PASS-2. */ typedef union { uint64_t u64; struct cvmx_npi_port34_instr_hdr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_44_63 : 20; uint64_t pbp : 1; /**< Enable Packet-by-packet mode. */ uint64_t rsv_f : 5; /**< Reserved */ uint64_t rparmode : 2; /**< Parse Mode. Used when packet is raw and PBP==0. */ uint64_t rsv_e : 1; /**< Reserved */ uint64_t rskp_len : 7; /**< Skip Length. Used when packet is raw and PBP==0. */ uint64_t rsv_d : 6; /**< Reserved */ uint64_t use_ihdr : 1; /**< When set '1' the instruction header will be sent as part of the packet data, regardless of the value of bit [63] of the instruction header. USE_IHDR must be set whenever PBP is set. */ uint64_t rsv_c : 5; /**< Reserved */ uint64_t par_mode : 2; /**< Parse Mode. Used when USE_IHDR is set and packet is not raw and PBP is not set. */ uint64_t rsv_b : 1; /**< Reserved instruction header sent to IPD. */ uint64_t skp_len : 7; /**< Skip Length. Used when USE_IHDR is set and packet is not raw and PBP is not set. */ uint64_t rsv_a : 6; /**< Reserved */ #else uint64_t rsv_a : 6; uint64_t skp_len : 7; uint64_t rsv_b : 1; uint64_t par_mode : 2; uint64_t rsv_c : 5; uint64_t use_ihdr : 1; uint64_t rsv_d : 6; uint64_t rskp_len : 7; uint64_t rsv_e : 1; uint64_t rparmode : 2; uint64_t rsv_f : 5; uint64_t pbp : 1; uint64_t reserved_44_63 : 20; #endif } s; struct cvmx_npi_port34_instr_hdr_s cn38xx; struct cvmx_npi_port34_instr_hdr_s cn38xxp2; struct cvmx_npi_port34_instr_hdr_s cn58xx; struct cvmx_npi_port34_instr_hdr_s cn58xxp1; } cvmx_npi_port34_instr_hdr_t; /** * cvmx_npi_port35_instr_hdr * * NPI_PORT35_INSTR_HDR = NPI Port 35 Instruction Header * * Contains bits [62:42] of the Instruction Header for port 35. Added for PASS-2. */ typedef union { uint64_t u64; struct cvmx_npi_port35_instr_hdr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_44_63 : 20; uint64_t pbp : 1; /**< Enable Packet-by-packet mode. */ uint64_t rsv_f : 5; /**< Reserved */ uint64_t rparmode : 2; /**< Parse Mode. Used when packet is raw and PBP==0. */ uint64_t rsv_e : 1; /**< Reserved */ uint64_t rskp_len : 7; /**< Skip Length. Used when packet is raw and PBP==0. */ uint64_t rsv_d : 6; /**< Reserved */ uint64_t use_ihdr : 1; /**< When set '1' the instruction header will be sent as part of the packet data, regardless of the value of bit [63] of the instruction header. USE_IHDR must be set whenever PBP is set. */ uint64_t rsv_c : 5; /**< Reserved */ uint64_t par_mode : 2; /**< Parse Mode. Used when USE_IHDR is set and packet is not raw and PBP is not set. 
*/ uint64_t rsv_b : 1; /**< Reserved instruction header sent to IPD. */ uint64_t skp_len : 7; /**< Skip Length. Used when USE_IHDR is set and packet is not raw and PBP is not set. */ uint64_t rsv_a : 6; /**< Reserved */ #else uint64_t rsv_a : 6; uint64_t skp_len : 7; uint64_t rsv_b : 1; uint64_t par_mode : 2; uint64_t rsv_c : 5; uint64_t use_ihdr : 1; uint64_t rsv_d : 6; uint64_t rskp_len : 7; uint64_t rsv_e : 1; uint64_t rparmode : 2; uint64_t rsv_f : 5; uint64_t pbp : 1; uint64_t reserved_44_63 : 20; #endif } s; struct cvmx_npi_port35_instr_hdr_s cn38xx; struct cvmx_npi_port35_instr_hdr_s cn38xxp2; struct cvmx_npi_port35_instr_hdr_s cn58xx; struct cvmx_npi_port35_instr_hdr_s cn58xxp1; } cvmx_npi_port35_instr_hdr_t; /** * cvmx_npi_port_bp_control * * NPI_PORT_BP_CONTROL = Port Backpressure Control * * Enables Port Level Backpressure */ typedef union { uint64_t u64; struct cvmx_npi_port_bp_control_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t bp_on : 4; /**< Port 35-32 port level backpressure applied. */ uint64_t enb : 4; /**< Enables port level backpressure from the IPD. */ #else uint64_t enb : 4; uint64_t bp_on : 4; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_npi_port_bp_control_s cn30xx; struct cvmx_npi_port_bp_control_s cn31xx; struct cvmx_npi_port_bp_control_s cn38xx; struct cvmx_npi_port_bp_control_s cn38xxp2; struct cvmx_npi_port_bp_control_s cn50xx; struct cvmx_npi_port_bp_control_s cn58xx; struct cvmx_npi_port_bp_control_s cn58xxp1; } cvmx_npi_port_bp_control_t; /** * cvmx_npi_rsl_int_blocks * * RSL_INT_BLOCKS = RSL Interrupt Blocks Register * * Reading this register will return a vector with a bit set '1' for a corresponding RSL block * that presently has an interrupt pending. The Field Description below supplies the name of the * register that software should read to find out why that interrupt bit is set. */ typedef union { uint64_t u64; struct cvmx_npi_rsl_int_blocks_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t rint_31 : 1; /**< Set '1' when RSL block has an interrupt. */ uint64_t iob : 1; /**< IOB_INT_SUM */ uint64_t reserved_28_29 : 2; uint64_t rint_27 : 1; /**< Set '1' when RSL block has an interrupt. */ uint64_t rint_26 : 1; /**< Set '1' when RSL block has an interrupt. */ uint64_t rint_25 : 1; /**< Set '1' when RSL block has an interrupt. */ uint64_t rint_24 : 1; /**< Set '1' when RSL block has an interrupt. */ uint64_t asx1 : 1; /**< ASX1_INT_REG */ uint64_t asx0 : 1; /**< ASX0_INT_REG */ uint64_t rint_21 : 1; /**< Set '1' when RSL block has an interrupt. */ uint64_t pip : 1; /**< PIP_INT_REG. */ uint64_t spx1 : 1; /**< SPX1_INT_REG & STX1_INT_REG */ uint64_t spx0 : 1; /**< SPX0_INT_REG & STX0_INT_REG */ uint64_t lmc : 1; /**< LMC_MEM_CFG0 */ uint64_t l2c : 1; /**< L2T_ERR & L2D_ERR */ uint64_t rint_15 : 1; /**< Set '1' when RSL block has an interrupt. */ uint64_t reserved_13_14 : 2; uint64_t pow : 1; /**< POW_ECC_ERR */ uint64_t tim : 1; /**< TIM_REG_ERROR */ uint64_t pko : 1; /**< PKO_REG_ERROR */ uint64_t ipd : 1; /**< IPD_INT_SUM */ uint64_t rint_8 : 1; /**< Set '1' when RSL block has an interrupt.
*/ uint64_t zip : 1; /**< ZIP_ERROR */ uint64_t dfa : 1; /**< DFA_ERR */ uint64_t fpa : 1; /**< FPA_INT_SUM */ uint64_t key : 1; /**< KEY_INT_SUM */ uint64_t npi : 1; /**< NPI_INT_SUM */ uint64_t gmx1 : 1; /**< GMX1_RX*_INT_REG & GMX1_TX_INT_REG */ uint64_t gmx0 : 1; /**< GMX0_RX*_INT_REG & GMX0_TX_INT_REG */ uint64_t mio : 1; /**< MIO_BOOT_ERR */ #else uint64_t mio : 1; uint64_t gmx0 : 1; uint64_t gmx1 : 1; uint64_t npi : 1; uint64_t key : 1; uint64_t fpa : 1; uint64_t dfa : 1; uint64_t zip : 1; uint64_t rint_8 : 1; uint64_t ipd : 1; uint64_t pko : 1; uint64_t tim : 1; uint64_t pow : 1; uint64_t reserved_13_14 : 2; uint64_t rint_15 : 1; uint64_t l2c : 1; uint64_t lmc : 1; uint64_t spx0 : 1; uint64_t spx1 : 1; uint64_t pip : 1; uint64_t rint_21 : 1; uint64_t asx0 : 1; uint64_t asx1 : 1; uint64_t rint_24 : 1; uint64_t rint_25 : 1; uint64_t rint_26 : 1; uint64_t rint_27 : 1; uint64_t reserved_28_29 : 2; uint64_t iob : 1; uint64_t rint_31 : 1; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_npi_rsl_int_blocks_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t rint_31 : 1; /**< Set '1' when RSL block has an interrupt. */ uint64_t iob : 1; /**< IOB_INT_SUM */ uint64_t rint_29 : 1; /**< Set '1' when RSL block has an interrupt. */ uint64_t rint_28 : 1; /**< Set '1' when RSL block has an interrupt. */ uint64_t rint_27 : 1; /**< Set '1' when RSL block has an interrupt. */ uint64_t rint_26 : 1; /**< Set '1' when RSL block has an interrupt. */ uint64_t rint_25 : 1; /**< Set '1' when RSL block has an interrupt. */ uint64_t rint_24 : 1; /**< Set '1' when RSL block has an interrupt. */ uint64_t asx1 : 1; /**< ASX1_INT_REG */ uint64_t asx0 : 1; /**< ASX0_INT_REG */ uint64_t rint_21 : 1; /**< Set '1' when RSL block has an interrupt. */ uint64_t pip : 1; /**< PIP_INT_REG. */ uint64_t spx1 : 1; /**< SPX1_INT_REG & STX1_INT_REG */ uint64_t spx0 : 1; /**< SPX0_INT_REG & STX0_INT_REG */ uint64_t lmc : 1; /**< LMC_MEM_CFG0 */ uint64_t l2c : 1; /**< L2T_ERR & L2D_ERR */ uint64_t rint_15 : 1; /**< Set '1' when RSL block has an interrupt. */ uint64_t rint_14 : 1; /**< Set '1' when RSL block has an interrupt. */ uint64_t usb : 1; /**< USBN_INT_SUM */ uint64_t pow : 1; /**< POW_ECC_ERR */ uint64_t tim : 1; /**< TIM_REG_ERROR */ uint64_t pko : 1; /**< PKO_REG_ERROR */ uint64_t ipd : 1; /**< IPD_INT_SUM */ uint64_t rint_8 : 1; /**< Set '1' when RSL block has an interrupt. */ uint64_t zip : 1; /**< ZIP_ERROR */ uint64_t dfa : 1; /**< DFA_ERR */ uint64_t fpa : 1; /**< FPA_INT_SUM */ uint64_t key : 1; /**< Set '1' when RSL block has an interrupt.
*/ uint64_t npi : 1; /**< NPI_INT_SUM */ uint64_t gmx1 : 1; /**< GMX1_RX*_INT_REG & GMX1_TX_INT_REG */ uint64_t gmx0 : 1; /**< GMX0_RX*_INT_REG & GMX0_TX_INT_REG */ uint64_t mio : 1; /**< MIO_BOOT_ERR */ #else uint64_t mio : 1; uint64_t gmx0 : 1; uint64_t gmx1 : 1; uint64_t npi : 1; uint64_t key : 1; uint64_t fpa : 1; uint64_t dfa : 1; uint64_t zip : 1; uint64_t rint_8 : 1; uint64_t ipd : 1; uint64_t pko : 1; uint64_t tim : 1; uint64_t pow : 1; uint64_t usb : 1; uint64_t rint_14 : 1; uint64_t rint_15 : 1; uint64_t l2c : 1; uint64_t lmc : 1; uint64_t spx0 : 1; uint64_t spx1 : 1; uint64_t pip : 1; uint64_t rint_21 : 1; uint64_t asx0 : 1; uint64_t asx1 : 1; uint64_t rint_24 : 1; uint64_t rint_25 : 1; uint64_t rint_26 : 1; uint64_t rint_27 : 1; uint64_t rint_28 : 1; uint64_t rint_29 : 1; uint64_t iob : 1; uint64_t rint_31 : 1; uint64_t reserved_32_63 : 32; #endif } cn30xx; struct cvmx_npi_rsl_int_blocks_cn30xx cn31xx; struct cvmx_npi_rsl_int_blocks_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t rint_31 : 1; /**< Set '1' when RSL block has an interrupt. */ uint64_t iob : 1; /**< IOB_INT_SUM */ uint64_t rint_29 : 1; /**< Set '1' when RSL block has an interrupt. */ uint64_t rint_28 : 1; /**< Set '1' when RSL block has an interrupt. */ uint64_t rint_27 : 1; /**< Set '1' when RSL block has an interrupt. */ uint64_t rint_26 : 1; /**< Set '1' when RSL block has an interrupt. */ uint64_t rint_25 : 1; /**< Set '1' when RSL block has an interrupt. */ uint64_t rint_24 : 1; /**< Set '1' when RSL block has an interrupt. */ uint64_t asx1 : 1; /**< ASX1_INT_REG */ uint64_t asx0 : 1; /**< ASX0_INT_REG */ uint64_t rint_21 : 1; /**< Set '1' when RSL block has an interrupt. */ uint64_t pip : 1; /**< PIP_INT_REG. */ uint64_t spx1 : 1; /**< SPX1_INT_REG & STX1_INT_REG */ uint64_t spx0 : 1; /**< SPX0_INT_REG & STX0_INT_REG */ uint64_t lmc : 1; /**< LMC_MEM_CFG0 */ uint64_t l2c : 1; /**< L2T_ERR & L2D_ERR */ uint64_t rint_15 : 1; /**< Set '1' when RSL block has an interrupt. */ uint64_t rint_14 : 1; /**< Set '1' when RSL block has an interrupt. */ uint64_t rint_13 : 1; /**< Set '1' when RSL block has an interrupt. */ uint64_t pow : 1; /**< POW_ECC_ERR */ uint64_t tim : 1; /**< TIM_REG_ERROR */ uint64_t pko : 1; /**< PKO_REG_ERROR */ uint64_t ipd : 1; /**< IPD_INT_SUM */ uint64_t rint_8 : 1; /**< Set '1' when RSL block has an interrupt.
*/ uint64_t zip : 1; /**< ZIP_ERROR */ uint64_t dfa : 1; /**< DFA_ERR */ uint64_t fpa : 1; /**< FPA_INT_SUM */ uint64_t key : 1; /**< KEY_INT_SUM */ uint64_t npi : 1; /**< NPI_INT_SUM */ uint64_t gmx1 : 1; /**< GMX1_RX*_INT_REG & GMX1_TX_INT_REG */ uint64_t gmx0 : 1; /**< GMX0_RX*_INT_REG & GMX0_TX_INT_REG */ uint64_t mio : 1; /**< MIO_BOOT_ERR */ #else uint64_t mio : 1; uint64_t gmx0 : 1; uint64_t gmx1 : 1; uint64_t npi : 1; uint64_t key : 1; uint64_t fpa : 1; uint64_t dfa : 1; uint64_t zip : 1; uint64_t rint_8 : 1; uint64_t ipd : 1; uint64_t pko : 1; uint64_t tim : 1; uint64_t pow : 1; uint64_t rint_13 : 1; uint64_t rint_14 : 1; uint64_t rint_15 : 1; uint64_t l2c : 1; uint64_t lmc : 1; uint64_t spx0 : 1; uint64_t spx1 : 1; uint64_t pip : 1; uint64_t rint_21 : 1; uint64_t asx0 : 1; uint64_t asx1 : 1; uint64_t rint_24 : 1; uint64_t rint_25 : 1; uint64_t rint_26 : 1; uint64_t rint_27 : 1; uint64_t rint_28 : 1; uint64_t rint_29 : 1; uint64_t iob : 1; uint64_t rint_31 : 1; uint64_t reserved_32_63 : 32; #endif } cn38xx; struct cvmx_npi_rsl_int_blocks_cn38xx cn38xxp2; struct cvmx_npi_rsl_int_blocks_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_31_63 : 33; uint64_t iob : 1; /**< IOB_INT_SUM */ uint64_t lmc1 : 1; /**< Always reads as zero */ uint64_t agl : 1; /**< Always reads as zero */ uint64_t reserved_24_27 : 4; uint64_t asx1 : 1; /**< Always reads as zero */ uint64_t asx0 : 1; /**< ASX0_INT_REG */ uint64_t reserved_21_21 : 1; uint64_t pip : 1; /**< PIP_INT_REG. */ uint64_t spx1 : 1; /**< Always reads as zero */ uint64_t spx0 : 1; /**< Always reads as zero */ uint64_t lmc : 1; /**< LMC_MEM_CFG0 */ uint64_t l2c : 1; /**< L2T_ERR & L2D_ERR */ uint64_t reserved_15_15 : 1; uint64_t rad : 1; /**< Always reads as zero */ uint64_t usb : 1; /**< USBN_INT_SUM */ uint64_t pow : 1; /**< POW_ECC_ERR */ uint64_t tim : 1; /**< TIM_REG_ERROR */ uint64_t pko : 1; /**< PKO_REG_ERROR */ uint64_t ipd : 1; /**< IPD_INT_SUM */ uint64_t reserved_8_8 : 1; uint64_t zip : 1; /**< Always reads as zero */ uint64_t dfa : 1; /**< Always reads as zero */ uint64_t fpa : 1; /**< FPA_INT_SUM */ uint64_t key : 1; /**< Always reads as zero */ uint64_t npi : 1; /**< NPI_INT_SUM */ uint64_t gmx1 : 1; /**< Always reads as zero */ uint64_t gmx0 : 1; /**< GMX0_RX*_INT_REG & GMX0_TX_INT_REG */ uint64_t mio : 1; /**< MIO_BOOT_ERR */ #else uint64_t mio : 1; uint64_t gmx0 : 1; uint64_t gmx1 : 1; uint64_t npi : 1; uint64_t key : 1; uint64_t fpa : 1; uint64_t dfa : 1; uint64_t zip : 1; uint64_t reserved_8_8 : 1; uint64_t ipd : 1; uint64_t pko : 1; uint64_t tim : 1; uint64_t pow : 1; uint64_t usb : 1; uint64_t rad : 1; uint64_t reserved_15_15 : 1; uint64_t l2c : 1; uint64_t lmc : 1; uint64_t spx0 : 1; uint64_t spx1 : 1; uint64_t pip : 1; uint64_t reserved_21_21 : 1; uint64_t asx0 : 1; uint64_t asx1 : 1; uint64_t reserved_24_27 : 4; uint64_t agl : 1; uint64_t lmc1 : 1; uint64_t iob : 1; uint64_t reserved_31_63 : 33; #endif } cn50xx; struct cvmx_npi_rsl_int_blocks_cn38xx cn58xx; struct cvmx_npi_rsl_int_blocks_cn38xx cn58xxp1; } cvmx_npi_rsl_int_blocks_t; /** * cvmx_npi_size_input# * * NPI_SIZE_INPUT0 = NPI's Size for Input 0 Register * * The size (in instructions) of Instruction Queue-0. */ typedef union { uint64_t u64; struct cvmx_npi_size_inputx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t size : 32; /**< The size of the Instruction Queue used by Octane. The value [SIZE] is in Instructions. A value of 0 in this field is illegal. 
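                                                         Illustrative sketch (assumes cvmx_write_csr() and a
                                                         CVMX_NPI_SIZE_INPUT0 address macro from the companion
                                                         address header):

                                                           cvmx_npi_size_inputx_t qsize;
                                                           qsize.u64 = 0;
                                                           qsize.s.size = 1024;  // queue depth in instructions; must be non-zero
                                                           cvmx_write_csr(CVMX_NPI_SIZE_INPUT0, qsize.u64);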
*/ #else uint64_t size : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_npi_size_inputx_s cn30xx; struct cvmx_npi_size_inputx_s cn31xx; struct cvmx_npi_size_inputx_s cn38xx; struct cvmx_npi_size_inputx_s cn38xxp2; struct cvmx_npi_size_inputx_s cn50xx; struct cvmx_npi_size_inputx_s cn58xx; struct cvmx_npi_size_inputx_s cn58xxp1; } cvmx_npi_size_inputx_t; /** * cvmx_npi_win_read_to * * NPI_WIN_READ_TO = NPI WINDOW READ Timeout Register * * Number of core clocks to wait before timing out on a WINDOW-READ to the NCB. */ typedef union { uint64_t u64; struct cvmx_npi_win_read_to_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t time : 32; /**< Time to wait in core clocks. A value of 0 will cause no timeouts. */ #else uint64_t time : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_npi_win_read_to_s cn30xx; struct cvmx_npi_win_read_to_s cn31xx; struct cvmx_npi_win_read_to_s cn38xx; struct cvmx_npi_win_read_to_s cn38xxp2; struct cvmx_npi_win_read_to_s cn50xx; struct cvmx_npi_win_read_to_s cn58xx; struct cvmx_npi_win_read_to_s cn58xxp1; } cvmx_npi_win_read_to_t; /** * cvmx_pci_bar1_index# * * PCI_BAR1_INDEXX = PCI IndexX Register * * Contains address index and control bits for access to memory ranges of Bar-1, * when PCI supplied address-bits [26:22] == X. */ typedef union { uint32_t u32; struct cvmx_pci_bar1_indexx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_18_31 : 14; uint32_t addr_idx : 14; /**< Address bits [35:22] sent to L2C */ uint32_t ca : 1; /**< Set '1' when access is not to be cached in L2. */ uint32_t end_swp : 2; /**< Endian Swap Mode */ uint32_t addr_v : 1; /**< Set '1' when the selected address range is valid. */ #else uint32_t addr_v : 1; uint32_t end_swp : 2; uint32_t ca : 1; uint32_t addr_idx : 14; uint32_t reserved_18_31 : 14; #endif } s; struct cvmx_pci_bar1_indexx_s cn30xx; struct cvmx_pci_bar1_indexx_s cn31xx; struct cvmx_pci_bar1_indexx_s cn38xx; struct cvmx_pci_bar1_indexx_s cn38xxp2; struct cvmx_pci_bar1_indexx_s cn50xx; struct cvmx_pci_bar1_indexx_s cn58xx; struct cvmx_pci_bar1_indexx_s cn58xxp1; } cvmx_pci_bar1_indexx_t; /** * cvmx_pci_bist_reg * * PCI_BIST_REG = PCI PNI BIST Status Register * * Contains the bist results for the PNI memories. 
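 *
 * After the documented post-reset delay, software can sanity-check all ten
 * status bits in one read. Sketch (illustrative only; assumes the usual
 * set-on-failure polarity, cvmx_read_csr(), and a CVMX_PCI_BIST_REG address
 * macro from the companion address header; report_bist_failure() is a
 * hypothetical error hook):
 *
 *   cvmx_pci_bist_reg_t bist;
 *   bist.u64 = cvmx_read_csr(CVMX_PCI_BIST_REG);
 *   if (bist.u64 & 0x3ffull)  // any status bit set => a PNI memory failed BIST
 *       report_bist_failure(bist.u64);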
*/ typedef union { uint64_t u64; struct cvmx_pci_bist_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_10_63 : 54; uint64_t rsp_bs : 1; /**< Bist Status For b12_rsp_fifo_bist The value of this register is available 100,000 core clocks + 21,000 pclks after: Host Mode - deassertion of pci_rst_n Non Host Mode - deassertion of pci_rst_n */ uint64_t dma0_bs : 1; /**< Bist Status For dmao_count The value of this register is available 100,000 core clocks + 21,000 pclks after: Host Mode - deassertion of pci_rst_n Non Host Mode - deassertion of pci_rst_n */ uint64_t cmd0_bs : 1; /**< Bist Status For npi_cmd0_pni_am0 The value of this register is available 100,000 core clocks + 21,000 pclks after: Host Mode - deassertion of pci_rst_n Non Host Mode - deassertion of pci_rst_n */ uint64_t cmd_bs : 1; /**< Bist Status For npi_cmd_pni_am1 The value of this register is available 100,000 core clocks + 21,000 pclks after: Host Mode - deassertion of pci_rst_n Non Host Mode - deassertion of pci_rst_n */ uint64_t csr2p_bs : 1; /**< Bist Status For npi_csr_2_pni_am The value of this register is available 100,000 core clocks + 21,000 pclks after: Host Mode - deassertion of pci_rst_n Non Host Mode - deassertion of pci_rst_n */ uint64_t csrr_bs : 1; /**< Bist Status For npi_csr_rsp_2_pni_am The value of this register is available 100,000 core clocks + 21,000 pclks after: Host Mode - deassertion of pci_rst_n Non Host Mode - deassertion of pci_rst_n */ uint64_t rsp2p_bs : 1; /**< Bist Status For npi_rsp_2_pni_am The value of this register is available 100,000 core clocks + 21,000 pclks after: Host Mode - deassertion of pci_rst_n Non Host Mode - deassertion of pci_rst_n */ uint64_t csr2n_bs : 1; /**< Bist Status For pni_csr_2_npi_am The value of this register is available 100,000 core clocks + 21,000 pclks after: Host Mode - deassertion of pci_rst_n Non Host Mode - deassertion of pci_rst_n */ uint64_t dat2n_bs : 1; /**< Bist Status For pni_data_2_npi_am The value of this register is available 100,000 core clocks + 21,000 pclks after: Host Mode - deassertion of pci_rst_n Non Host Mode - deassertion of pci_rst_n */ uint64_t dbg2n_bs : 1; /**< Bist Status For pni_dbg_data_2_npi_am The value of this register is available 100,000 core clocks + 21,000 pclks after: Host Mode - deassertion of pci_rst_n Non Host Mode - deassertion of pci_rst_n */ #else uint64_t dbg2n_bs : 1; uint64_t dat2n_bs : 1; uint64_t csr2n_bs : 1; uint64_t rsp2p_bs : 1; uint64_t csrr_bs : 1; uint64_t csr2p_bs : 1; uint64_t cmd_bs : 1; uint64_t cmd0_bs : 1; uint64_t dma0_bs : 1; uint64_t rsp_bs : 1; uint64_t reserved_10_63 : 54; #endif } s; struct cvmx_pci_bist_reg_s cn50xx; } cvmx_pci_bist_reg_t; /** * cvmx_pci_cfg00 * * Registers at address 0x1000 -> 0x17FF are PNI * Start at 0x100 into range * these are shifted by 2 to the left to make address * Registers at address 0x1800 -> 0x18FF are CFG * these are shifted by 2 to the left to make address * * PCI_CFG00 = First 32-bits of PCI config space (PCI Vendor + Device) * * This register contains the first 32-bits of the PCI config space registers */ typedef union { uint32_t u32; struct cvmx_pci_cfg00_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t devid : 16; /**< This is the device ID for OCTEON (90nm shrink) */ uint32_t vendid : 16; /**< This is Cavium's vendor ID */ #else uint32_t vendid : 16; uint32_t devid : 16; #endif } s; struct cvmx_pci_cfg00_s cn30xx; struct cvmx_pci_cfg00_s cn31xx; struct cvmx_pci_cfg00_s cn38xx; struct cvmx_pci_cfg00_s cn38xxp2; struct cvmx_pci_cfg00_s cn50xx; struct
cvmx_pci_cfg00_s cn58xx; struct cvmx_pci_cfg00_s cn58xxp1; } cvmx_pci_cfg00_t; /** * cvmx_pci_cfg01 * * PCI_CFG01 = Second 32-bits of PCI config space (Command/Status Register) * */ typedef union { uint32_t u32; struct cvmx_pci_cfg01_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t dpe : 1; /**< Detected Parity Error */ uint32_t sse : 1; /**< Signaled System Error */ uint32_t rma : 1; /**< Received Master Abort */ uint32_t rta : 1; /**< Received Target Abort */ uint32_t sta : 1; /**< Signaled Target Abort */ uint32_t devt : 2; /**< DEVSEL# timing (for PCI only/for PCIX = don't care) */ uint32_t mdpe : 1; /**< Master Data Parity Error */ uint32_t fbb : 1; /**< Fast Back-to-Back Transactions Capable Mode Dependent (1 = PCI Mode / 0 = PCIX Mode) */ uint32_t reserved_22_22 : 1; uint32_t m66 : 1; /**< 66MHz Capable */ uint32_t cle : 1; /**< Capabilities List Enable */ uint32_t i_stat : 1; /**< When INTx# is asserted by OCTEON this bit will be set. When deasserted by OCTEON this bit will be cleared. */ uint32_t reserved_11_18 : 8; uint32_t i_dis : 1; /**< When asserted '1' disables the generation of INTx# by OCTEON. When disabled '0' allows assertion of INTx# by OCTEON. */ uint32_t fbbe : 1; /**< Fast Back to Back Transaction Enable */ uint32_t see : 1; /**< System Error Enable */ uint32_t ads : 1; /**< Address/Data Stepping NOTE: Octeon does NOT support address/data stepping. */ uint32_t pee : 1; /**< PERR# Enable */ uint32_t vps : 1; /**< VGA Palette Snooping */ uint32_t mwice : 1; /**< Memory Write & Invalidate Command Enable */ uint32_t scse : 1; /**< Special Cycle Snooping Enable */ uint32_t me : 1; /**< Master Enable Must be set for OCTEON to master a PCI/PCI-X transaction. This should always be set any time that OCTEON is connected to a PCI/PCI-X bus. */ uint32_t msae : 1; /**< Memory Space Access Enable Must be set to receive a PCI/PCI-X memory space transaction. This must always be set any time that OCTEON is connected to a PCI/PCI-X bus. */ uint32_t isae : 1; /**< I/O Space Access Enable NOTE: For OCTEON, this bit MUST NEVER be set (it is read-only and OCTEON does not respond to I/O Space accesses).
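                                                         Taken together with ME/MSAE above, software would
                                                         typically enable only the memory/master bits and leave
                                                         ISAE clear. Sketch (illustrative only;
                                                         pci_cfg_read32()/pci_cfg_write32() stand in for whatever
                                                         config-space accessor the platform provides):

                                                           cvmx_pci_cfg01_t cmd;
                                                           cmd.u32 = pci_cfg_read32(1 * 4);  // config dword 1 (offset 0x04)
                                                           cmd.s.me   = 1;  // allow OCTEON to master the bus
                                                           cmd.s.msae = 1;  // accept memory-space transactions
                                                           cmd.s.isae = 0;  // read-only on OCTEON; never set
                                                           pci_cfg_write32(1 * 4, cmd.u32);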
*/ #else uint32_t isae : 1; uint32_t msae : 1; uint32_t me : 1; uint32_t scse : 1; uint32_t mwice : 1; uint32_t vps : 1; uint32_t pee : 1; uint32_t ads : 1; uint32_t see : 1; uint32_t fbbe : 1; uint32_t i_dis : 1; uint32_t reserved_11_18 : 8; uint32_t i_stat : 1; uint32_t cle : 1; uint32_t m66 : 1; uint32_t reserved_22_22 : 1; uint32_t fbb : 1; uint32_t mdpe : 1; uint32_t devt : 2; uint32_t sta : 1; uint32_t rta : 1; uint32_t rma : 1; uint32_t sse : 1; uint32_t dpe : 1; #endif } s; struct cvmx_pci_cfg01_s cn30xx; struct cvmx_pci_cfg01_s cn31xx; struct cvmx_pci_cfg01_s cn38xx; struct cvmx_pci_cfg01_s cn38xxp2; struct cvmx_pci_cfg01_s cn50xx; struct cvmx_pci_cfg01_s cn58xx; struct cvmx_pci_cfg01_s cn58xxp1; } cvmx_pci_cfg01_t; /** * cvmx_pci_cfg02 * * PCI_CFG02 = Third 32-bits of PCI config space (Class Code / Revision ID) * */ typedef union { uint32_t u32; struct cvmx_pci_cfg02_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t cc : 24; /**< Class Code (Processor/MIPS) (was 0x100000 in pass 1 and pass 2) */ uint32_t rid : 8; /**< Revision ID (0 in pass 1, 1 in pass 1.1, 8 in pass 2.0) */ #else uint32_t rid : 8; uint32_t cc : 24; #endif } s; struct cvmx_pci_cfg02_s cn30xx; struct cvmx_pci_cfg02_s cn31xx; struct cvmx_pci_cfg02_s cn38xx; struct cvmx_pci_cfg02_s cn38xxp2; struct cvmx_pci_cfg02_s cn50xx; struct cvmx_pci_cfg02_s cn58xx; struct cvmx_pci_cfg02_s cn58xxp1; } cvmx_pci_cfg02_t; /** * cvmx_pci_cfg03 * * PCI_CFG03 = Fourth 32-bits of PCI config space (BIST, HEADER Type, Latency timer, line size) * */ typedef union { uint32_t u32; struct cvmx_pci_cfg03_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t bcap : 1; /**< BIST Capable */ uint32_t brb : 1; /**< BIST Request/busy bit Note: OCTEON does not support PCI BIST, therefore this bit should remain zero. */ uint32_t reserved_28_29 : 2; uint32_t bcod : 4; /**< BIST Code */ uint32_t ht : 8; /**< Header Type (Type 0) */ uint32_t lt : 8; /**< Latency Timer (0=PCI) (0x40=PCIX) */ uint32_t cls : 8; /**< Cache Line Size */ #else uint32_t cls : 8; uint32_t lt : 8; uint32_t ht : 8; uint32_t bcod : 4; uint32_t reserved_28_29 : 2; uint32_t brb : 1; uint32_t bcap : 1; #endif } s; struct cvmx_pci_cfg03_s cn30xx; struct cvmx_pci_cfg03_s cn31xx; struct cvmx_pci_cfg03_s cn38xx; struct cvmx_pci_cfg03_s cn38xxp2; struct cvmx_pci_cfg03_s cn50xx; struct cvmx_pci_cfg03_s cn58xx; struct cvmx_pci_cfg03_s cn58xxp1; } cvmx_pci_cfg03_t; /** * cvmx_pci_cfg04 * * PCI_CFG04 = Fifth 32-bits of PCI config space (Base Address Register 0 - Low) * * Description: BAR0: 4KB 64-bit Prefetchable Memory Space * [0]: 0 (Memory Space) * [2:1]: 2 (64bit memory decoder) * [3]: 1 (Prefetchable) * [11:4]: RAZ (to imply 4KB space) * [31:12]: RW (User may define base address) */ typedef union { uint32_t u32; struct cvmx_pci_cfg04_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t lbase : 20; /**< Base Address[31:12] Base Address[30:12] read as zero if PCI_CTL_STATUS_2[BB0] is set (in pass 3+) */ uint32_t lbasez : 8; /**< Base Address[11:4] (Read as Zero) */ uint32_t pf : 1; /**< Prefetchable Space */ uint32_t typ : 2; /**< Type (00=32b/01=below 1MB/10=64b/11=RSV) */ uint32_t mspc : 1; /**< Memory Space Indicator */ #else uint32_t mspc : 1; uint32_t typ : 2; uint32_t pf : 1; uint32_t lbasez : 8; uint32_t lbase : 20; #endif } s; struct cvmx_pci_cfg04_s cn30xx; struct cvmx_pci_cfg04_s cn31xx; struct cvmx_pci_cfg04_s cn38xx; struct cvmx_pci_cfg04_s cn38xxp2; struct cvmx_pci_cfg04_s cn50xx; struct cvmx_pci_cfg04_s cn58xx; struct cvmx_pci_cfg04_s cn58xxp1; } cvmx_pci_cfg04_t; /** *
cvmx_pci_cfg05 * * PCI_CFG05 = Sixth 32-bits of PCI config space (Base Address Register 0 - High) * */ typedef union { uint32_t u32; struct cvmx_pci_cfg05_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t hbase : 32; /**< Base Address[63:32] */ #else uint32_t hbase : 32; #endif } s; struct cvmx_pci_cfg05_s cn30xx; struct cvmx_pci_cfg05_s cn31xx; struct cvmx_pci_cfg05_s cn38xx; struct cvmx_pci_cfg05_s cn38xxp2; struct cvmx_pci_cfg05_s cn50xx; struct cvmx_pci_cfg05_s cn58xx; struct cvmx_pci_cfg05_s cn58xxp1; } cvmx_pci_cfg05_t; /** * cvmx_pci_cfg06 * * PCI_CFG06 = Seventh 32-bits of PCI config space (Base Address Register 1 - Low) * * Description: BAR1: 128MB 64-bit Prefetchable Memory Space * [0]: 0 (Memory Space) * [2:1]: 2 (64bit memory decoder) * [3]: 1 (Prefetchable) * [26:4]: RAZ (to imply 128MB space) * [31:27]: RW (User may define base address) */ typedef union { uint32_t u32; struct cvmx_pci_cfg06_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t lbase : 5; /**< Base Address[31:27] In pass 3+: Base Address[29:27] read as zero if PCI_CTL_STATUS_2[BB1] is set Base Address[30] reads as zero if PCI_CTL_STATUS_2[BB1] is set and PCI_CTL_STATUS_2[BB1_SIZE] is set */ uint32_t lbasez : 23; /**< Base Address[26:4] (Read as Zero) */ uint32_t pf : 1; /**< Prefetchable Space */ uint32_t typ : 2; /**< Type (00=32b/01=below 1MB/10=64b/11=RSV) */ uint32_t mspc : 1; /**< Memory Space Indicator */ #else uint32_t mspc : 1; uint32_t typ : 2; uint32_t pf : 1; uint32_t lbasez : 23; uint32_t lbase : 5; #endif } s; struct cvmx_pci_cfg06_s cn30xx; struct cvmx_pci_cfg06_s cn31xx; struct cvmx_pci_cfg06_s cn38xx; struct cvmx_pci_cfg06_s cn38xxp2; struct cvmx_pci_cfg06_s cn50xx; struct cvmx_pci_cfg06_s cn58xx; struct cvmx_pci_cfg06_s cn58xxp1; } cvmx_pci_cfg06_t; /** * cvmx_pci_cfg07 * * PCI_CFG07 = Eighth 32-bits of PCI config space (Base Address Register 1 - High) * */ typedef union { uint32_t u32; struct cvmx_pci_cfg07_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t hbase : 32; /**< Base Address[63:32] */ #else uint32_t hbase : 32; #endif } s; struct cvmx_pci_cfg07_s cn30xx; struct cvmx_pci_cfg07_s cn31xx; struct cvmx_pci_cfg07_s cn38xx; struct cvmx_pci_cfg07_s cn38xxp2; struct cvmx_pci_cfg07_s cn50xx; struct cvmx_pci_cfg07_s cn58xx; struct cvmx_pci_cfg07_s cn58xxp1; } cvmx_pci_cfg07_t; /** * cvmx_pci_cfg08 * * PCI_CFG08 = Ninth 32-bits of PCI config space (Base Address Register 2 - Low) * * Description: BAR2: 2^39 (512GB) 64-bit Prefetchable Memory Space * [0]: 0 (Memory Space) * [2:1]: 2 (64bit memory decoder) * [3]: 1 (Prefetchable) * [31:4]: RAZ */ typedef union { uint32_t u32; struct cvmx_pci_cfg08_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t lbasez : 28; /**< Base Address[31:4] (Read as Zero) */ uint32_t pf : 1; /**< Prefetchable Space */ uint32_t typ : 2; /**< Type (00=32b/01=below 1MB/10=64b/11=RSV) */ uint32_t mspc : 1; /**< Memory Space Indicator */ #else uint32_t mspc : 1; uint32_t typ : 2; uint32_t pf : 1; uint32_t lbasez : 28; #endif } s; struct cvmx_pci_cfg08_s cn30xx; struct cvmx_pci_cfg08_s cn31xx; struct cvmx_pci_cfg08_s cn38xx; struct cvmx_pci_cfg08_s cn38xxp2; struct cvmx_pci_cfg08_s cn50xx; struct cvmx_pci_cfg08_s cn58xx; struct cvmx_pci_cfg08_s cn58xxp1; } cvmx_pci_cfg08_t; /** * cvmx_pci_cfg09 * * PCI_CFG09 = Tenth 32-bits of PCI config space (Base Address Register 2 - High) * */ typedef union { uint32_t u32; struct cvmx_pci_cfg09_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t hbase : 25; /**< Base Address[63:39] */ uint32_t hbasez : 7; /**< Base Address[38:32] (Read as Zero) */ #else uint32_t
hbasez : 7; uint32_t hbase : 25; #endif } s; struct cvmx_pci_cfg09_s cn30xx; struct cvmx_pci_cfg09_s cn31xx; struct cvmx_pci_cfg09_s cn38xx; struct cvmx_pci_cfg09_s cn38xxp2; struct cvmx_pci_cfg09_s cn50xx; struct cvmx_pci_cfg09_s cn58xx; struct cvmx_pci_cfg09_s cn58xxp1; } cvmx_pci_cfg09_t; /** * cvmx_pci_cfg10 * * PCI_CFG10 = Eleventh 32-bits of PCI config space (Card Bus CIS Pointer) * */ typedef union { uint32_t u32; struct cvmx_pci_cfg10_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t cisp : 32; /**< CardBus CIS Pointer (UNUSED) */ #else uint32_t cisp : 32; #endif } s; struct cvmx_pci_cfg10_s cn30xx; struct cvmx_pci_cfg10_s cn31xx; struct cvmx_pci_cfg10_s cn38xx; struct cvmx_pci_cfg10_s cn38xxp2; struct cvmx_pci_cfg10_s cn50xx; struct cvmx_pci_cfg10_s cn58xx; struct cvmx_pci_cfg10_s cn58xxp1; } cvmx_pci_cfg10_t; /** * cvmx_pci_cfg11 * * PCI_CFG11 = Twelfth 32-bits of PCI config space (SubSystem ID/Subsystem Vendor ID Register) * */ typedef union { uint32_t u32; struct cvmx_pci_cfg11_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t ssid : 16; /**< SubSystem ID */ uint32_t ssvid : 16; /**< Subsystem Vendor ID */ #else uint32_t ssvid : 16; uint32_t ssid : 16; #endif } s; struct cvmx_pci_cfg11_s cn30xx; struct cvmx_pci_cfg11_s cn31xx; struct cvmx_pci_cfg11_s cn38xx; struct cvmx_pci_cfg11_s cn38xxp2; struct cvmx_pci_cfg11_s cn50xx; struct cvmx_pci_cfg11_s cn58xx; struct cvmx_pci_cfg11_s cn58xxp1; } cvmx_pci_cfg11_t; /** * cvmx_pci_cfg12 * * PCI_CFG12 = Thirteenth 32-bits of PCI config space (Expansion ROM Base Address Register) * */ typedef union { uint32_t u32; struct cvmx_pci_cfg12_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t erbar : 16; /**< Expansion ROM Base Address[31:16] 64KB in size */ uint32_t erbarz : 5; /**< Expansion ROM Base Address (Read as Zero) */ uint32_t reserved_1_10 : 10; uint32_t erbar_en : 1; /**< Expansion ROM Address Decode Enable */ #else uint32_t erbar_en : 1; uint32_t reserved_1_10 : 10; uint32_t erbarz : 5; uint32_t erbar : 16; #endif } s; struct cvmx_pci_cfg12_s cn30xx; struct cvmx_pci_cfg12_s cn31xx; struct cvmx_pci_cfg12_s cn38xx; struct cvmx_pci_cfg12_s cn38xxp2; struct cvmx_pci_cfg12_s cn50xx; struct cvmx_pci_cfg12_s cn58xx; struct cvmx_pci_cfg12_s cn58xxp1; } cvmx_pci_cfg12_t; /** * cvmx_pci_cfg13 * * PCI_CFG13 = Fourteenth 32-bits of PCI config space (Capabilities Pointer Register) * */ typedef union { uint32_t u32; struct cvmx_pci_cfg13_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_8_31 : 24; uint32_t cp : 8; /**< Capabilities Pointer */ #else uint32_t cp : 8; uint32_t reserved_8_31 : 24; #endif } s; struct cvmx_pci_cfg13_s cn30xx; struct cvmx_pci_cfg13_s cn31xx; struct cvmx_pci_cfg13_s cn38xx; struct cvmx_pci_cfg13_s cn38xxp2; struct cvmx_pci_cfg13_s cn50xx; struct cvmx_pci_cfg13_s cn58xx; struct cvmx_pci_cfg13_s cn58xxp1; } cvmx_pci_cfg13_t; /** * cvmx_pci_cfg15 * * PCI_CFG15 = Sixteenth 32-bits of PCI config space (INT/ARB/LATENCY Register) * */ typedef union { uint32_t u32; struct cvmx_pci_cfg15_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t ml : 8; /**< Maximum Latency */ uint32_t mg : 8; /**< Minimum Grant */ uint32_t inta : 8; /**< Interrupt Pin (INTA#) */ uint32_t il : 8; /**< Interrupt Line */ #else uint32_t il : 8; uint32_t inta : 8; uint32_t mg : 8; uint32_t ml : 8; #endif } s; struct cvmx_pci_cfg15_s cn30xx; struct cvmx_pci_cfg15_s cn31xx; struct cvmx_pci_cfg15_s cn38xx; struct cvmx_pci_cfg15_s cn38xxp2; struct cvmx_pci_cfg15_s cn50xx; struct cvmx_pci_cfg15_s cn58xx; struct cvmx_pci_cfg15_s cn58xxp1; } cvmx_pci_cfg15_t; /** *
cvmx_pci_cfg16 * * PCI_CFG16 = Seventeenth 32-bits of PCI config space (Target Implementation Register) * */ typedef union { uint32_t u32; struct cvmx_pci_cfg16_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t trdnpr : 1; /**< Target Read Delayed Transaction for I/O and non-prefetchable regions discarded. */ uint32_t trdard : 1; /**< Target Read Delayed Transaction for all regions discarded. */ uint32_t rdsati : 1; /**< Target(I/O and Memory) Read Delayed/Split at timeout/immediately (default timeout). Note: OCTEON requires that this bit MBZ(must be zero). */ uint32_t trdrs : 1; /**< Target(I/O and Memory) Read Delayed/Split or Retry select (if the application interface is not ready) 0 = Delayed Split Transaction 1 = Retry Transaction (always Immediate Retry, no AT_REQ to application). */ uint32_t trtae : 1; /**< Target(I/O and Memory) Read Target Abort Enable (if application interface is not ready at the latency timeout). Note: OCTEON as target will never target-abort, therefore this bit should never be set. */ uint32_t twsei : 1; /**< Target(I/O) Write Split Enable (at timeout / immediately; default timeout) */ uint32_t twsen : 1; /**< Target(I/O) write split Enable (if the application interface is not ready) */ uint32_t twtae : 1; /**< Target(I/O and Memory) Write Target Abort Enable (if the application interface is not ready at the start of the cycle). Note: OCTEON as target will never target-abort, therefore this bit should never be set. */ uint32_t tmae : 1; /**< Target(Read/Write) Master Abort Enable; check at the start of each transaction. Note: This bit can be used to force a Master Abort when OCTEON is acting as the intended target device. */ uint32_t tslte : 3; /**< Target Subsequent(2nd-last) Latency Timeout Enable. Valid range: [1..7] and 0=8. */ uint32_t tilt : 4; /**< Target Initial(1st data) Latency Timeout in PCI Mode. Valid range: [8..15] and 0=16. */ uint32_t pbe : 12; /**< Programmable Boundary Enable to disconnect/prefetch for target burst read cycles to prefetchable region in PCI. A value of 1 indicates end of boundary (64 KB down to 16 Bytes). */ uint32_t dppmr : 1; /**< Disconnect/Prefetch to prefetchable memory regions Enable. Prefetchable memory regions are always disconnected on a region boundary. Non-prefetchable regions for PCI are always disconnected on the first transfer. Note: OCTEON as target will never target-disconnect, therefore this bit should never be set. */ uint32_t reserved_2_2 : 1; uint32_t tswc : 1; /**< Target Split Write Control 0 = Blocks all requests except PMW 1 = Blocks all requests including PMW until split completion occurs. */ uint32_t mltd : 1; /**< Master Latency Timer Disable Note: For OCTEON, it is recommended that this bit be set(to disable the Master Latency timer).
*/ #else uint32_t mltd : 1; uint32_t tswc : 1; uint32_t reserved_2_2 : 1; uint32_t dppmr : 1; uint32_t pbe : 12; uint32_t tilt : 4; uint32_t tslte : 3; uint32_t tmae : 1; uint32_t twtae : 1; uint32_t twsen : 1; uint32_t twsei : 1; uint32_t trtae : 1; uint32_t trdrs : 1; uint32_t rdsati : 1; uint32_t trdard : 1; uint32_t trdnpr : 1; #endif } s; struct cvmx_pci_cfg16_s cn30xx; struct cvmx_pci_cfg16_s cn31xx; struct cvmx_pci_cfg16_s cn38xx; struct cvmx_pci_cfg16_s cn38xxp2; struct cvmx_pci_cfg16_s cn50xx; struct cvmx_pci_cfg16_s cn58xx; struct cvmx_pci_cfg16_s cn58xxp1; } cvmx_pci_cfg16_t; /** * cvmx_pci_cfg17 * * PCI_CFG17 = Eighteenth 32-bits of PCI config space (Target Split Completion Message * Enable Register) */ typedef union { uint32_t u32; struct cvmx_pci_cfg17_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t tscme : 32; /**< Target Split Completion Message Enable [31:30]: 00 [29]: Split Completion Error Indication [28]: 0 [27:20]: Split Completion Message Index [19:0]: 0x00000 For OCTEON, this register is intended for debug use only. (as such, it is recommended NOT to be written with anything other than ZEROES). */ #else uint32_t tscme : 32; #endif } s; struct cvmx_pci_cfg17_s cn30xx; struct cvmx_pci_cfg17_s cn31xx; struct cvmx_pci_cfg17_s cn38xx; struct cvmx_pci_cfg17_s cn38xxp2; struct cvmx_pci_cfg17_s cn50xx; struct cvmx_pci_cfg17_s cn58xx; struct cvmx_pci_cfg17_s cn58xxp1; } cvmx_pci_cfg17_t; /** * cvmx_pci_cfg18 * * PCI_CFG18 = Nineteenth 32-bits of PCI config space (Target Delayed/Split Request * Pending Sequences) */ typedef union { uint32_t u32; struct cvmx_pci_cfg18_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t tdsrps : 32; /**< Target Delayed/Split Request Pending Sequences The application uses this address to remove a pending split sequence from the target queue by clearing the appropriate bit. Example: Clearing [14] clears the pending sequence \#14. An application or configuration write to this address can clear this register. For OCTEON, this register is intended for debug use only and MUST NEVER be written with anything other than ZEROES. */ #else uint32_t tdsrps : 32; #endif } s; struct cvmx_pci_cfg18_s cn30xx; struct cvmx_pci_cfg18_s cn31xx; struct cvmx_pci_cfg18_s cn38xx; struct cvmx_pci_cfg18_s cn38xxp2; struct cvmx_pci_cfg18_s cn50xx; struct cvmx_pci_cfg18_s cn58xx; struct cvmx_pci_cfg18_s cn58xxp1; } cvmx_pci_cfg18_t; /** * cvmx_pci_cfg19 * * PCI_CFG19 = Twentieth 32-bits of PCI config space (Master/Target Implementation Register) * */ typedef union { uint32_t u32; struct cvmx_pci_cfg19_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t mrbcm : 1; /**< Master Request (Memory Read) Byte Count/Byte Enable select. 0 = Byte Enables valid. In PCI mode, a burst transaction cannot be performed using Memory Read command=4'h6. 1 = DWORD Byte Count valid (default). In PCI Mode, the memory read byte enables are automatically generated by the core. NOTE: For OCTEON, this bit must always be one for proper operation. */ uint32_t mrbci : 1; /**< Master Request (I/O and CR cycles) byte count/byte enable select. 0 = Byte Enables valid (default) 1 = DWORD byte count valid NOTE: For OCTEON, this bit must always be zero for proper operation (in support of Type0/1 Cfg Space accesses which require byte enable generation directly from a read mask). */ uint32_t mdwe : 1; /**< Master (Retry) Deferred Write Enable (allow read requests to pass). NOTE: Applicable to PCI Mode I/O and memory transactions only. 0 = New read requests are NOT accepted until the current write cycle completes. 
[Reads cannot pass writes] 1 = New read requests are accepted, even when there is a write cycle pending [Reads can pass writes]. NOTE: For OCTEON, this bit must always be zero for proper operation. */ uint32_t mdre : 1; /**< Master (Retry) Deferred Read Enable (Allows read/write requests to pass). NOTE: Applicable to PCI mode I/O and memory transactions only. 0 = New read/write requests are NOT accepted until the current read cycle completes. [Read/write requests CANNOT pass reads] 1 = New read/write requests are accepted, even when there is a read cycle pending. [Read/write requests CAN pass reads] NOTE: For OCTEON, this bit must always be zero for proper operation. */ uint32_t mdrimc : 1; /**< Master I/O Deferred/Split Request Outstanding Maximum Count 0 = MDRRMC[26:24] 1 = 1 */ uint32_t mdrrmc : 3; /**< Master Deferred Read Request Outstanding Max Count (PCI only). CR4C[26:24] Max SAC cycles MAX DAC cycles 000 8 4 001 1 0 010 2 1 011 3 1 100 4 2 101 5 2 110 6 3 111 7 3 For example, if these bits are programmed to 100, the core can support 2 DAC cycles, 4 SAC cycles or a combination of 1 DAC and 2 SAC cycles. NOTE: For the PCI-X maximum outstanding split transactions, refer to CRE0[22:20] */ uint32_t tmes : 8; /**< Target/Master Error Sequence \# */ uint32_t teci : 1; /**< Target Error Command Indication 0 = Delayed/Split 1 = Others */ uint32_t tmei : 1; /**< Target/Master Error Indication 0 = Target 1 = Master */ uint32_t tmse : 1; /**< Target/Master System Error. This bit is set whenever ATM_SERR_O is active. */ uint32_t tmdpes : 1; /**< Target/Master Data PERR# error status. This bit is set whenever ATM_DATA_PERR_O is active. */ uint32_t tmapes : 1; /**< Target/Master Address PERR# error status. This bit is set whenever ATM_ADDR_PERR_O is active. */ uint32_t reserved_9_10 : 2; uint32_t tibcd : 1; /**< Target Illegal I/O DWORD byte combinations detected. */ uint32_t tibde : 1; /**< Target Illegal I/O DWORD byte detection enable */ uint32_t reserved_6_6 : 1; uint32_t tidomc : 1; /**< Target I/O Delayed/Split request outstanding maximum count. 0 = TDOMC[4:0] 1 = 1 */ uint32_t tdomc : 5; /**< Target Delayed/Split request outstanding maximum count. [1..31] and 0=32. NOTE: If the user programs these bits beyond the Designed Maximum outstanding count, then the designed maximum table depth will be used instead. No additional Deferred/Split transactions will be accepted if this outstanding maximum count is reached. Furthermore, no additional deferred/split transactions will be accepted if the I/O delay/ I/O Split Request outstanding maximum is reached. NOTE: For OCTEON in PCI Mode, this field MUST BE programmed to 1. (OCTEON can only handle 1 delayed read at a time). For OCTEON in PCIX Mode, this field can range from 1-4. (The designed maximum table depth is 4 for PCIX mode splits). 
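                                                         Illustrative sketch of honoring this rule
                                                         (pci_cfg_read32()/pci_cfg_write32() are hypothetical
                                                         config-space accessors; pcix_mode is a hypothetical flag):

                                                           cvmx_pci_cfg19_t impl;
                                                           impl.u32 = pci_cfg_read32(19 * 4);  // config dword 19 (offset 0x4C)
                                                           impl.s.tdomc = pcix_mode ? 4 : 1;   // 1 in PCI mode, up to 4 in PCI-X
                                                           pci_cfg_write32(19 * 4, impl.u32);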
*/ #else uint32_t tdomc : 5; uint32_t tidomc : 1; uint32_t reserved_6_6 : 1; uint32_t tibde : 1; uint32_t tibcd : 1; uint32_t reserved_9_10 : 2; uint32_t tmapes : 1; uint32_t tmdpes : 1; uint32_t tmse : 1; uint32_t tmei : 1; uint32_t teci : 1; uint32_t tmes : 8; uint32_t mdrrmc : 3; uint32_t mdrimc : 1; uint32_t mdre : 1; uint32_t mdwe : 1; uint32_t mrbci : 1; uint32_t mrbcm : 1; #endif } s; struct cvmx_pci_cfg19_s cn30xx; struct cvmx_pci_cfg19_s cn31xx; struct cvmx_pci_cfg19_s cn38xx; struct cvmx_pci_cfg19_s cn38xxp2; struct cvmx_pci_cfg19_s cn50xx; struct cvmx_pci_cfg19_s cn58xx; struct cvmx_pci_cfg19_s cn58xxp1; } cvmx_pci_cfg19_t; /** * cvmx_pci_cfg20 * * PCI_CFG20 = Twenty-first 32-bits of PCI config space (Master Deferred/Split Sequence Pending) * */ typedef union { uint32_t u32; struct cvmx_pci_cfg20_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t mdsp : 32; /**< Master Deferred/Split sequence Pending For OCTEON, this register is intended for debug use only and MUST NEVER be written with anything other than ZEROES. */ #else uint32_t mdsp : 32; #endif } s; struct cvmx_pci_cfg20_s cn30xx; struct cvmx_pci_cfg20_s cn31xx; struct cvmx_pci_cfg20_s cn38xx; struct cvmx_pci_cfg20_s cn38xxp2; struct cvmx_pci_cfg20_s cn50xx; struct cvmx_pci_cfg20_s cn58xx; struct cvmx_pci_cfg20_s cn58xxp1; } cvmx_pci_cfg20_t; /** * cvmx_pci_cfg21 * * PCI_CFG21 = Twenty-second 32-bits of PCI config space (Master Split Completion Message Register) * */ typedef union { uint32_t u32; struct cvmx_pci_cfg21_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t scmre : 32; /**< Master Split Completion message received with error message. For OCTEON, this register is intended for debug use only and MUST NEVER be written with anything other than ZEROES. */ #else uint32_t scmre : 32; #endif } s; struct cvmx_pci_cfg21_s cn30xx; struct cvmx_pci_cfg21_s cn31xx; struct cvmx_pci_cfg21_s cn38xx; struct cvmx_pci_cfg21_s cn38xxp2; struct cvmx_pci_cfg21_s cn50xx; struct cvmx_pci_cfg21_s cn58xx; struct cvmx_pci_cfg21_s cn58xxp1; } cvmx_pci_cfg21_t; /** * cvmx_pci_cfg22 * * PCI_CFG22 = Twenty-third 32-bits of PCI config space (Master Arbiter Control Register) * */ typedef union { uint32_t u32; struct cvmx_pci_cfg22_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t mac : 7; /**< Master Arbiter Control [31:26]: Used only in Fixed Priority mode (when [25]=1) [31:30]: MSI Request 00 = Highest Priority 01 = Medium Priority 10 = Lowest Priority 11 = RESERVED [29:28]: Target Split Completion 00 = Highest Priority 01 = Medium Priority 10 = Lowest Priority 11 = RESERVED [27:26]: New Request; Deferred Read,Deferred Write 00 = Highest Priority 01 = Medium Priority 10 = Lowest Priority 11 = RESERVED [25]: Fixed/Round Robin Priority Selector 0 = Round Robin 1 = Fixed NOTE: When [25]=1(fixed priority), the three levels [31:26] MUST BE programmed to have mutually exclusive priority levels for proper operation. (Failure to do so may result in PCI hangs). */ uint32_t reserved_19_24 : 6; uint32_t flush : 1; /**< AM_DO_FLUSH_I control NOTE: This bit MUST BE ONE for proper OCTEON operation */ uint32_t mra : 1; /**< Master Retry Aborted */ uint32_t mtta : 1; /**< Master TRDY timeout aborted */ uint32_t mrv : 8; /**< Master Retry Value [1..255] and 0=infinite */ uint32_t mttv : 8; /**< Master TRDY timeout value [1..255] and 0=disabled NOTE: For OCTEON, this bit must always be zero for proper operation. (OCTEON does not support master TRDY timeout - target is expected to be well behaved). 
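                                                         The recommended OCTEON settings above (FLUSH=1, MTTV=0)
                                                         can be applied in one read-modify-write (illustrative
                                                         only; pci_cfg_read32()/pci_cfg_write32() are hypothetical
                                                         config-space accessors):

                                                           cvmx_pci_cfg22_t arb;
                                                           arb.u32 = pci_cfg_read32(22 * 4);  // config dword 22 (offset 0x58)
                                                           arb.s.flush = 1;  // required for proper OCTEON operation
                                                           arb.s.mttv  = 0;  // master TRDY timeout unsupported; keep disabled
                                                           pci_cfg_write32(22 * 4, arb.u32);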
*/ #else uint32_t mttv : 8; uint32_t mrv : 8; uint32_t mtta : 1; uint32_t mra : 1; uint32_t flush : 1; uint32_t reserved_19_24 : 6; uint32_t mac : 7; #endif } s; struct cvmx_pci_cfg22_s cn30xx; struct cvmx_pci_cfg22_s cn31xx; struct cvmx_pci_cfg22_s cn38xx; struct cvmx_pci_cfg22_s cn38xxp2; struct cvmx_pci_cfg22_s cn50xx; struct cvmx_pci_cfg22_s cn58xx; struct cvmx_pci_cfg22_s cn58xxp1; } cvmx_pci_cfg22_t; /** * cvmx_pci_cfg56 * * PCI_CFG56 = Fifty-seventh 32-bits of PCI config space (PCIX Capabilities Register) * */ typedef union { uint32_t u32; struct cvmx_pci_cfg56_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_23_31 : 9; uint32_t most : 3; /**< Maximum outstanding Split transactions Encoded Value \#Max outstanding splits 000 1 001 2 010 3 011 4 100 8 101 8(clamped) 110 8(clamped) 111 8(clamped) NOTE: OCTEON only supports up to a MAXIMUM of 8 outstanding master split transactions. */ uint32_t mmbc : 2; /**< Maximum Memory Byte Count [0=512B,1=1024B,2=2048B,3=4096B] NOTE: OCTEON does not support this field; it has no effect on limiting the maximum memory byte count. */ uint32_t roe : 1; /**< Relaxed Ordering Enable */ uint32_t dpere : 1; /**< Data Parity Error Recovery Enable */ uint32_t ncp : 8; /**< Next Capability Pointer */ uint32_t pxcid : 8; /**< PCI-X Capability ID */ #else uint32_t pxcid : 8; uint32_t ncp : 8; uint32_t dpere : 1; uint32_t roe : 1; uint32_t mmbc : 2; uint32_t most : 3; uint32_t reserved_23_31 : 9; #endif } s; struct cvmx_pci_cfg56_s cn30xx; struct cvmx_pci_cfg56_s cn31xx; struct cvmx_pci_cfg56_s cn38xx; struct cvmx_pci_cfg56_s cn38xxp2; struct cvmx_pci_cfg56_s cn50xx; struct cvmx_pci_cfg56_s cn58xx; struct cvmx_pci_cfg56_s cn58xxp1; } cvmx_pci_cfg56_t; /** * cvmx_pci_cfg57 * * PCI_CFG57 = Fifty-eighth 32-bits of PCI config space (PCIX Status Register) * */ typedef union { uint32_t u32; struct cvmx_pci_cfg57_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_30_31 : 2; uint32_t scemr : 1; /**< Split Completion Error Message Received */ uint32_t mcrsd : 3; /**< Maximum Cumulative Read Size designed */ uint32_t mostd : 3; /**< Maximum Outstanding Split transaction designed */ uint32_t mmrbcd : 2; /**< Maximum Memory Read byte count designed */ uint32_t dc : 1; /**< Device Complexity 0 = Simple Device 1 = Bridge Device */ uint32_t usc : 1; /**< Unexpected Split Completion */ uint32_t scd : 1; /**< Split Completion Discarded */ uint32_t m133 : 1; /**< 133MHz Capable */ uint32_t w64 : 1; /**< Indicates a 32b(=0) or 64b(=1) device */ uint32_t bn : 8; /**< Bus Number. Updated on all configuration write cycles. Its value is dependent upon the PCI/X mode (0x11=PCI, 0xFF=PCIX). */ uint32_t dn : 5; /**< Device Number. Updated on all configuration write cycles.
*/ uint32_t fn : 3; /**< Function Number */ #else uint32_t fn : 3; uint32_t dn : 5; uint32_t bn : 8; uint32_t w64 : 1; uint32_t m133 : 1; uint32_t scd : 1; uint32_t usc : 1; uint32_t dc : 1; uint32_t mmrbcd : 2; uint32_t mostd : 3; uint32_t mcrsd : 3; uint32_t scemr : 1; uint32_t reserved_30_31 : 2; #endif } s; struct cvmx_pci_cfg57_s cn30xx; struct cvmx_pci_cfg57_s cn31xx; struct cvmx_pci_cfg57_s cn38xx; struct cvmx_pci_cfg57_s cn38xxp2; struct cvmx_pci_cfg57_s cn50xx; struct cvmx_pci_cfg57_s cn58xx; struct cvmx_pci_cfg57_s cn58xxp1; } cvmx_pci_cfg57_t; /** * cvmx_pci_cfg58 * * PCI_CFG58 = Fifty-ninth 32-bits of PCI config space (Power Management Capabilities Register) * */ typedef union { uint32_t u32; struct cvmx_pci_cfg58_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t pmes : 5; /**< PME Support (D0 to D3cold) */ uint32_t d2s : 1; /**< D2_Support */ uint32_t d1s : 1; /**< D1_Support */ uint32_t auxc : 3; /**< AUX_Current (0..375mA) */ uint32_t dsi : 1; /**< Device Specific Initialization */ uint32_t reserved_20_20 : 1; uint32_t pmec : 1; /**< PME Clock */ uint32_t pcimiv : 3; /**< Indicates the version of the PCI Management Interface Specification with which the core complies. 010b = Complies with PCI Management Interface Specification Revision 1.1 */ uint32_t ncp : 8; /**< Next Capability Pointer */ uint32_t pmcid : 8; /**< Power Management Capability ID */ #else uint32_t pmcid : 8; uint32_t ncp : 8; uint32_t pcimiv : 3; uint32_t pmec : 1; uint32_t reserved_20_20 : 1; uint32_t dsi : 1; uint32_t auxc : 3; uint32_t d1s : 1; uint32_t d2s : 1; uint32_t pmes : 5; #endif } s; struct cvmx_pci_cfg58_s cn30xx; struct cvmx_pci_cfg58_s cn31xx; struct cvmx_pci_cfg58_s cn38xx; struct cvmx_pci_cfg58_s cn38xxp2; struct cvmx_pci_cfg58_s cn50xx; struct cvmx_pci_cfg58_s cn58xx; struct cvmx_pci_cfg58_s cn58xxp1; } cvmx_pci_cfg58_t; /** * cvmx_pci_cfg59 * * PCI_CFG59 = Sixtieth 32-bits of PCI config space (Power Management Data/PMCSR Register(s)) * */ typedef union { uint32_t u32; struct cvmx_pci_cfg59_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t pmdia : 8; /**< Power Management data input from application (PME_DATA) */ uint32_t bpccen : 1; /**< BPCC_En (bus power/clock control) enable */ uint32_t bd3h : 1; /**< B2_B3\#, B2/B3 Support for D3hot */ uint32_t reserved_16_21 : 6; uint32_t pmess : 1; /**< PME_Status sticky bit */ uint32_t pmedsia : 2; /**< PME_Data_Scale input from application (PME_DATA_SCALE[1:0]). Device Specific. */ uint32_t pmds : 4; /**< Power Management Data_select */ uint32_t pmeens : 1; /**< PME_En sticky bit */ uint32_t reserved_2_7 : 6; uint32_t ps : 2; /**< Power State (D0 to D3) The N2 DOES NOT support D1/D2 Power Management states, therefore writing to this register has no effect (please refer to the PCI Power Management Specification v1.1 for further details about its R/W nature). This is not a conventional R/W style register.
*/ #else uint32_t ps : 2; uint32_t reserved_2_7 : 6; uint32_t pmeens : 1; uint32_t pmds : 4; uint32_t pmedsia : 2; uint32_t pmess : 1; uint32_t reserved_16_21 : 6; uint32_t bd3h : 1; uint32_t bpccen : 1; uint32_t pmdia : 8; #endif } s; struct cvmx_pci_cfg59_s cn30xx; struct cvmx_pci_cfg59_s cn31xx; struct cvmx_pci_cfg59_s cn38xx; struct cvmx_pci_cfg59_s cn38xxp2; struct cvmx_pci_cfg59_s cn50xx; struct cvmx_pci_cfg59_s cn58xx; struct cvmx_pci_cfg59_s cn58xxp1; } cvmx_pci_cfg59_t; /** * cvmx_pci_cfg60 * * PCI_CFG60 = Sixty-first 32-bits of PCI config space (MSI Capabilities Register) * */ typedef union { uint32_t u32; struct cvmx_pci_cfg60_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_24_31 : 8; uint32_t m64 : 1; /**< 32/64 b message */ uint32_t mme : 3; /**< Multiple Message Enable(1,2,4,8,16,32) */ uint32_t mmc : 3; /**< Multiple Message Capable(0=1,1=2,2=4,3=8,4=16,5=32) */ uint32_t msien : 1; /**< MSI Enable */ uint32_t ncp : 8; /**< Next Capability Pointer */ uint32_t msicid : 8; /**< MSI Capability ID */ #else uint32_t msicid : 8; uint32_t ncp : 8; uint32_t msien : 1; uint32_t mmc : 3; uint32_t mme : 3; uint32_t m64 : 1; uint32_t reserved_24_31 : 8; #endif } s; struct cvmx_pci_cfg60_s cn30xx; struct cvmx_pci_cfg60_s cn31xx; struct cvmx_pci_cfg60_s cn38xx; struct cvmx_pci_cfg60_s cn38xxp2; struct cvmx_pci_cfg60_s cn50xx; struct cvmx_pci_cfg60_s cn58xx; struct cvmx_pci_cfg60_s cn58xxp1; } cvmx_pci_cfg60_t; /** * cvmx_pci_cfg61 * * PCI_CFG61 = Sixty-second 32-bits of PCI config space (MSI Lower Address Register) * */ typedef union { uint32_t u32; struct cvmx_pci_cfg61_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t msi31t2 : 30; /**< App Specific MSI Address [31:2] */ uint32_t reserved_0_1 : 2; #else uint32_t reserved_0_1 : 2; uint32_t msi31t2 : 30; #endif } s; struct cvmx_pci_cfg61_s cn30xx; struct cvmx_pci_cfg61_s cn31xx; struct cvmx_pci_cfg61_s cn38xx; struct cvmx_pci_cfg61_s cn38xxp2; struct cvmx_pci_cfg61_s cn50xx; struct cvmx_pci_cfg61_s cn58xx; struct cvmx_pci_cfg61_s cn58xxp1; } cvmx_pci_cfg61_t; /** * cvmx_pci_cfg62 * * PCI_CFG62 = Sixty-third 32-bits of PCI config space (MSI Upper Address Register) * */ typedef union { uint32_t u32; struct cvmx_pci_cfg62_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t msi : 32; /**< MSI Address [63:32] */ #else uint32_t msi : 32; #endif } s; struct cvmx_pci_cfg62_s cn30xx; struct cvmx_pci_cfg62_s cn31xx; struct cvmx_pci_cfg62_s cn38xx; struct cvmx_pci_cfg62_s cn38xxp2; struct cvmx_pci_cfg62_s cn50xx; struct cvmx_pci_cfg62_s cn58xx; struct cvmx_pci_cfg62_s cn58xxp1; } cvmx_pci_cfg62_t; /** * cvmx_pci_cfg63 * * PCI_CFG63 = Sixty-fourth 32-bits of PCI config space (MSI Message Data Register) * */ typedef union { uint32_t u32; struct cvmx_pci_cfg63_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_16_31 : 16; uint32_t msimd : 16; /**< MSI Message Data */ #else uint32_t msimd : 16; uint32_t reserved_16_31 : 16; #endif } s; struct cvmx_pci_cfg63_s cn30xx; struct cvmx_pci_cfg63_s cn31xx; struct cvmx_pci_cfg63_s cn38xx; struct cvmx_pci_cfg63_s cn38xxp2; struct cvmx_pci_cfg63_s cn50xx; struct cvmx_pci_cfg63_s cn58xx; struct cvmx_pci_cfg63_s cn58xxp1; } cvmx_pci_cfg63_t; /** * cvmx_pci_cnt_reg * * PCI_CNT_REG = PCI Clock Count Register * * This register is provided to software as a means to determine PCI Bus Type/Speed. 
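 *
 * The PCICNT field below supports a simple frequency measurement: note the
 * counter value, wait a known number of core clocks, and read back the
 * elapsed PCI clock count. Sketch (illustrative only; assumes
 * cvmx_read_csr(), a CVMX_PCI_CNT_REG address macro from the companion
 * address header, and a hypothetical wait_core_clocks() delay helper):
 *
 *   cvmx_pci_cnt_reg_t cnt;
 *   cnt.u64 = cvmx_read_csr(CVMX_PCI_CNT_REG);
 *   uint32_t x = cnt.s.pcicnt;
 *   wait_core_clocks(1000000);  // n core clocks at a known eclk frequency
 *   cnt.u64 = cvmx_read_csr(CVMX_PCI_CNT_REG);
 *   uint32_t y = cnt.s.pcicnt;
 *   // PCI clock (Hz) ~= (y - x) * eclk_hz / 1000000; unsigned subtraction
 *   // also handles a single wrap of the 32-bit counter.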
*/ typedef union { uint64_t u64; struct cvmx_pci_cnt_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_38_63 : 26; uint64_t hm_pcix : 1; /**< PCI Host Mode Sampled Bus Type (0:PCI/1:PCIX) This field represents what OCTEON(in Host mode) sampled as the 'intended' PCI Bus Type based on the PCI_PCIXCAP pin. (see HM_SPEED Bus Type/Speed encoding table). */ uint64_t hm_speed : 2; /**< PCI Host Mode Sampled Bus Speed This field represents what OCTEON(in Host mode) sampled as the 'intended' PCI Bus Speed based on the PCI100, PCI_M66EN and PCI_PCIXCAP pins. NOTE: This DOES NOT reflect what the actual PCI Bus Type/Speed values are. They only indicate what OCTEON sampled as the 'intended' values. PCI Host Mode Sampled Bus Type/Speed Table: M66EN | PCIXCAP | PCI100 | HM_PCIX | HM_SPEED[1:0] ---------+---------+---------+----------+------------- 0 | 0 | 0 | 0=PCI | 00=33 MHz 0 | 0 | 1 | 0=PCI | 00=33 MHz 0 | Z | 0 | 0=PCI | 01=66 MHz 0 | Z | 1 | 0=PCI | 01=66 MHz 1 | 0 | 0 | 0=PCI | 01=66 MHz 1 | 0 | 1 | 0=PCI | 01=66 MHz 1 | Z | 0 | 0=PCI | 01=66 MHz 1 | Z | 1 | 0=PCI | 01=66 MHz 0 | 1 | 1 | 1=PCIX | 10=100 MHz 1 | 1 | 1 | 1=PCIX | 10=100 MHz 0 | 1 | 0 | 1=PCIX | 11=133 MHz 1 | 1 | 0 | 1=PCIX | 11=133 MHz NOTE: PCIXCAP has tri-level value (0,1,Z). See PCI specification for more details on board level hookup to achieve these values. NOTE: Software can use the NPI_PCI_INT_ARB_CFG[PCI_OVR] to override the 'sampled' PCI Bus Type/Speed. NOTE: Software can also use the PCI_CNT_REG[PCICNT] to determine the exact PCI(X) Bus speed. Example: PCI_REF_CLKIN=133MHz PCI_HOST_MODE=1 PCI_M66EN=0 PCI_PCIXCAP=1 PCI_PCI100=1 For this example, OCTEON will generate PCI_CLK_OUT=100MHz and drive the proper PCI Initialization sequence (DEVSEL#=Deasserted, STOP#=Asserted, TRDY#=Asserted) during PCI_RST_N deassertion. NOTE: The HM_SPEED field is only valid after PLL_REF_CLK is active and PLL_DCOK is asserted. (see HRM description for power-on/reset sequence). NOTE: PCI_REF_CLKIN input must be 133MHz (and is used to generate the PCI_CLK_OUT pin in Host Mode). *** NOTE: O9N PASS1 Addition */ uint64_t ap_pcix : 1; /**< PCI(X) Bus Type (0:PCI/1:PCIX) At PCI_RST_N de-assertion, the PCI Initialization pattern(PCI_DEVSEL_N, PCI_STOP_N, PCI_TRDY_N) is captured to provide information to software regarding the PCI Bus Type(PCI/PCIX) and PCI Bus Speed Range. */ uint64_t ap_speed : 2; /**< PCI(X) Bus Speed (0:33/1:66/2:100/3:133) At PCI_RST_N de-assertion, the PCI Initialization pattern(PCI_DEVSEL_N, PCI_STOP_N, PCI_TRDY_N) is captured to provide information to software regarding the PCI Bus Type(PCI/PCIX) and PCI Bus Speed Range. PCI-X Initialization Pattern(see PCIX Spec): PCI_DEVSEL_N PCI_STOP_N PCI_TRDY_N Mode MaxClk(ns) MinClk(ns) MinClk(MHz) MaxClk(MHz) -------------+----------+----------+-------+---------+----------+----------+------------------ Deasserted Deasserted Deasserted PCI 33 -- 30 0 33 PCI 66 30 15 33 66 Deasserted Deasserted Asserted PCI-X 20 15 50 66 Deasserted Asserted Deasserted PCI-X 15 10 66 100 Deasserted Asserted Asserted PCI-X 10 7.5 100 133 Asserted Deasserted Deasserted PCI-X Reserved Reserved Reserved Reserved Asserted Deasserted Asserted PCI-X Reserved Reserved Reserved Reserved Asserted Asserted Deasserted PCI-X Reserved Reserved Reserved Reserved Asserted Asserted Asserted PCI-X Reserved Reserved Reserved Reserved NOTE: The PCI Bus speed 'assumed' from the initialization pattern is really intended for an operational range. For example: If PINIT=100, this indicates PCI-X in the 100-133MHz range. 
The PCI_CNT field can be used to further determine a more exact PCI Bus frequency value if required. *** NOTE: O9N PASS1 Addition */ uint64_t pcicnt : 32; /**< Free Running PCI Clock counter. At PCI Reset, the PCICNT=0, and is auto-incremented on every PCI clock and will auto-wrap back to zero when saturated. NOTE: Writes override the auto-increment to allow software to preload any initial value. The PCICNT field is provided to software as a means to determine the PCI Bus Speed. Assuming software has knowledge of the core frequency (eclk), this register can be written with a value X, wait 'n' core clocks(eclk) and then read later(Y) to determine \#PCI clocks(Y-X) have elapsed within 'n' core clocks to determine the PCI input Clock frequency. *** NOTE: O9N PASS1 Addition */ #else uint64_t pcicnt : 32; uint64_t ap_speed : 2; uint64_t ap_pcix : 1; uint64_t hm_speed : 2; uint64_t hm_pcix : 1; uint64_t reserved_38_63 : 26; #endif } s; struct cvmx_pci_cnt_reg_s cn50xx; struct cvmx_pci_cnt_reg_s cn58xx; struct cvmx_pci_cnt_reg_s cn58xxp1; } cvmx_pci_cnt_reg_t; /** * cvmx_pci_ctl_status_2 * * PCI_CTL_STATUS_2 = PCI Control Status 2 Register * * Control status register accessible from both PCI and NCB. */ typedef union { uint32_t u32; struct cvmx_pci_ctl_status_2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_29_31 : 3; uint32_t bb1_hole : 3; /**< Big BAR 1 Hole NOT IN PASS 1 NOR PASS 2 When PCI_CTL_STATUS_2[BB1]=1, this field defines an encoded size of the upper BAR1 region which OCTEON will mask out (ie: not respond to). (see definition of BB1_HOLE and BB1_SIZ encodings in the PCI_CTL_STATUS_2[BB1] definition below). */ uint32_t bb1_siz : 1; /**< Big BAR 1 Size NOT IN PASS 1 NOR PASS 2 When PCI_CTL_STATUS_2[BB1]=1, this field defines the programmable SIZE of BAR 1. - 0: 1GB / 1: 2GB */ uint32_t bb_ca : 1; /**< Set to '1' for Big Bar Mode to do STT/LDT L2C operations. NOT IN PASS 1 NOR PASS 2 */ uint32_t bb_es : 2; /**< Big Bar Node Endian Swap Mode - 0: No Swizzle - 1: Byte Swizzle (per-QW) - 2: Byte Swizzle (per-LW) - 3: LongWord Swizzle NOT IN PASS 1 NOR PASS 2 */ uint32_t bb1 : 1; /**< Big Bar 1 Enable NOT IN PASS 1 NOR PASS 2 When PCI_CTL_STATUS_2[BB1] is set, the following differences occur: - OCTEON's BAR1 becomes somewhere in the range 512-2048 MB rather than the default 128MB. - The following table indicates the effective size of BAR1 when BB1 is set: BB1_SIZ BB1_HOLE Effective size Comment +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 0 0 1024 MB Normal 1GB BAR 0 1 1008 MB 1 GB, 16 MB hole 0 2 992 MB 1 GB, 32 MB hole 0 3 960 MB 1 GB, 64 MB hole 0 4 896 MB 1 GB,128 MB hole 0 5 768 MB 1 GB,256 MB hole 0 6 512 MB 1 GB,512 MB hole 0 7 Illegal 1 0 2048 MB Normal 2GB BAR 1 1 2032 MB 2 GB, 16 MB hole 1 2 2016 MB 2 GB, 32 MB hole 1 3 1984 MB 2 GB, 64 MB hole 1 4 1920 MB 2 GB,128 MB hole 1 5 1792 MB 2 GB,256 MB hole 1 6 1536 MB 2 GB,512 MB hole 1 7 Illegal - When BB1_SIZ is 0: PCI_CFG06[LBASE<2:0>] read as zero and are ignored on write. BAR1 is an entirely ordinary 1 GB (power-of-two) BAR in all aspects when BB1_HOLE is 0. When BB1_HOLE is not zero, BAR1 addresses are programmed as if the BAR were 1GB, but OCTEON does not respond to addresses in the programmed holes. - When BB1_SIZ is 1: PCI_CFG06[LBASE<3:0>] read as zero and are ignored on write. BAR1 is an entirely ordinary 2 GB (power-of-two) BAR in all aspects when BB1_HOLE is 0.
When BB1_HOLE is not zero, BAR1 addresses are programmed as if the BAR were 2GB, but OCTEON does not respond to addresses in the programmed holes. - Note that the BB1_HOLE value has no effect on the PCI_CFG06[LBASE] behavior. BB1_HOLE only affects whether OCTEON accepts an address. BB1_SIZ does affect PCI_CFG06[LBASE] behavior, however. - The first 128MB, i.e. addresses on the PCI bus in the range BAR1+0 .. BAR1+0x07FFFFFF access OCTEON's DRAM addresses with PCI_BAR1_INDEX CSR's as before - The remaining address space, i.e. addresses on the PCI bus in the range BAR1+0x08000000 .. BAR1+size-1, where size is the size of BAR1 as selected by the above table (based on the BB1_SIZ and BB1_HOLE values), are mapped to OCTEON physical DRAM addresses as follows: PCI Address Range OCTEON Physical Address Range ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ BAR1+0x08000000 .. BAR1+size-1 | 0x88000000 .. 0x7FFFFFFF+size and PCI_CTL_STATUS_2[BB_ES] is the endian-swap and PCI_CTL_STATUS_2[BB_CA] is the L2 cache allocation bit for these references. The consequences of any burst that crosses the end of the PCI Address Range for BAR1 are unpredictable. - The consequences of any burst access that crosses the boundary between BAR1+0x07FFFFFF and BAR1+0x08000000 are unpredictable in PCI-X mode. OCTEON may disconnect PCI references at this boundary. */ uint32_t bb0 : 1; /**< Big Bar 0 Enable NOT IN PASS 1 NOR PASS 2 When PCI_CTL_STATUS_2[BB0] is set, the following differences occur: - OCTEON's BAR0 becomes 2GB rather than the default 4KB. PCI_CFG04[LBASE<18:0>] reads as zero and is ignored on write. - OCTEON's BAR0 becomes burstable. (When BB0 is clear, OCTEON single-phase disconnects PCI BAR0 reads and PCI/PCI-X BAR0 writes, and splits (burstably) PCI-X BAR0 reads.) - The first 4KB, i.e. addresses on the PCI bus in the range BAR0+0 .. BAR0+0xFFF access OCTEON's PCI-type CSR's as when BB0 is clear. - The remaining address space, i.e. addresses on the PCI bus in the range BAR0+0x1000 .. BAR0+0x7FFFFFFF are mapped to OCTEON physical DRAM addresses as follows: PCI Address Range OCTEON Physical Address Range ------------------------------------+------------------------------ BAR0+0x00001000 .. BAR0+0x0FFFFFFF | 0x000001000 .. 0x00FFFFFFF BAR0+0x10000000 .. BAR0+0x1FFFFFFF | 0x410000000 .. 0x41FFFFFFF BAR0+0x20000000 .. BAR0+0x7FFFFFFF | 0x020000000 .. 0x07FFFFFFF and PCI_CTL_STATUS_2[BB_ES] is the endian-swap and PCI_CTL_STATUS_2[BB_CA] is the L2 cache allocation bit for these references. The consequences of any burst that crosses the end of the PCI Address Range for BAR0 are unpredictable. - The consequences of any burst access that crosses the boundary between BAR0+0xFFF and BAR0+0x1000 are unpredictable in PCI-X mode. OCTEON may disconnect PCI references at this boundary. - The results of any burst read that crosses the boundary between BAR0+0x0FFFFFFF and BAR0+0x10000000 are unpredictable. The consequences of any burst write that crosses this same boundary are unpredictable. - The results of any burst read that crosses the boundary between BAR0+0x1FFFFFFF and BAR0+0x20000000 are unpredictable. The consequences of any burst write that crosses this same boundary are unpredictable. */ uint32_t erst_n : 1; /**< Reset active Low. PASS-2 */ uint32_t bar2pres : 1; /**< From fuse block. When fuse(MIO_FUS_DAT3[BAR2_EN]) is NOT blown the value of this field is '0' after reset and BAR2 is NOT present. When the fuse IS blown the value of this field is '1' after reset and BAR2 is present.
Note that SW can change this field after reset. This is a PASS-2 field. */ uint32_t scmtyp : 1; /**< Split Completion Message CMD Type (0=RD/1=WR) When SCM=1, SCMTYP specifies the CMD intent (R/W) */ uint32_t scm : 1; /**< Split Completion Message Detected (Read or Write) */ uint32_t en_wfilt : 1; /**< When '1' the window-access filter is enabled. Unfiltered writes are: MIO, SubId0 MIO, SubId7 NPI, SubId0 NPI, SubId7 POW, SubId7 DFA, SubId7 IPD, SubId7 Unfiltered Reads are: MIO, SubId0 MIO, SubId7 NPI, SubId0 NPI, SubId7 POW, SubId1 POW, SubId2 POW, SubId3 POW, SubId7 DFA, SubId7 IPD, SubId7 */ uint32_t reserved_14_14 : 1; uint32_t ap_pcix : 1; /**< PCX Core Mode status (0=PCI Bus/1=PCIX) If one or more of PCI_DEVSEL_N, PCI_STOP_N, and PCI_TRDY_N are asserted at the rising edge of PCI_RST_N, the device enters PCI-X mode. Otherwise, the device enters conventional PCI mode at the rising edge of RST#. */ uint32_t ap_64ad : 1; /**< PCX Core Bus status (0=32b Bus/1=64b Bus) When PCI_RST_N pin is de-asserted, the state of PCI_REQ64_N(driven by central agent) determines the width of the PCI/X bus. */ uint32_t b12_bist : 1; /**< Bist Status For Memory In B12 */ uint32_t pmo_amod : 1; /**< PMO-ARB Mode (0=FP[HP=CMD1,LP=CMD0]/1=RR) */ uint32_t pmo_fpc : 3; /**< PMO-ARB Fixed Priority Counter When PMO_AMOD=0 (FP mode), this field represents the \# of CMD1 requests that are issued (at higher priority) before a single lower priority CMD0 is allowed to issue (to ensure forward progress). - 0: 1 CMD1 Request issued before CMD0 allowed - ... - 7: 8 CMD1 Requests issued before CMD0 allowed */ uint32_t tsr_hwm : 3; /**< Target Split-Read ADB(allowable disconnect boundary) High Water Mark. Specifies the number of ADBs(128 Byte aligned chunks) that are accumulated(pending) BEFORE the Target Split completion is attempted on the PCI bus. - 0: RESERVED/ILLEGAL - 1: 2 Pending ADBs (129B-256B) - 2: 3 Pending ADBs (257B-384B) - 3: 4 Pending ADBs (385B-512B) - 4: 5 Pending ADBs (513B-640B) - 5: 6 Pending ADBs (641B-768B) - 6: 7 Pending ADBs (769B-896B) - 7: 8 Pending ADBs (897B-1024B) Example: Suppose a 1KB target memory request with starting byte offset address[6:0]=0x7F is split by the OCTEON and the TSR_HWM=1(2 ADBs). The OCTEON will start the target split completion on the PCI Bus after 1B(1st ADB)+128B(2nd ADB)=129B of data have been received from memory (even though the remaining 895B has not yet been received). The OCTEON will continue the split completion until it has consumed all of the pended split data. If the full transaction length(1KB) of data was NOT entirely transferred, then OCTEON will terminate the split completion and again wait for another 2 ADB-aligned data chunks(256B) of pended split data to be received from memory before starting another split completion request. This allows Octeon (as split completer) to send back multiple split completions for a given large split transaction without having to wait for the entire transaction length to be received from memory. NOTE: For split transaction sizes 'smaller' than the specified TSR_HWM value, the split completion is started when the last datum has been received from memory. NOTE: It is IMPERATIVE that this field NEVER BE written to a ZERO value. A value of zero is reserved/illegal and can result in PCIX bus hangs. */ uint32_t bar2_enb : 1; /**< When set '1' BAR2 is enabled and will respond; when clear '0', BAR2 accesses will be target-aborted. */ uint32_t bar2_esx : 2; /**< Value will be XORed with pci-address[37:36] to determine the endian swap mode.
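For illustration only (a sketch, not generated register documentation): the effective swap mode for a BAR2 reference at 64-bit PCI address 'a' would be computed as

  int swap_mode = bar2_esx ^ (int)((a >> 36) & 0x3); // XOR with addr[37:36]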
*/ uint32_t bar2_cax : 1; /**< Value will be XORed with pci-address[38] to determine the L2 cache attribute. When XOR result is 1, not cached in L2 */ #else uint32_t bar2_cax : 1; uint32_t bar2_esx : 2; uint32_t bar2_enb : 1; uint32_t tsr_hwm : 3; uint32_t pmo_fpc : 3; uint32_t pmo_amod : 1; uint32_t b12_bist : 1; uint32_t ap_64ad : 1; uint32_t ap_pcix : 1; uint32_t reserved_14_14 : 1; uint32_t en_wfilt : 1; uint32_t scm : 1; uint32_t scmtyp : 1; uint32_t bar2pres : 1; uint32_t erst_n : 1; uint32_t bb0 : 1; uint32_t bb1 : 1; uint32_t bb_es : 2; uint32_t bb_ca : 1; uint32_t bb1_siz : 1; uint32_t bb1_hole : 3; uint32_t reserved_29_31 : 3; #endif } s; struct cvmx_pci_ctl_status_2_s cn30xx; struct cvmx_pci_ctl_status_2_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_20_31 : 12; uint32_t erst_n : 1; /**< Reset active Low. */ uint32_t bar2pres : 1; /**< From fuse block. When fuse(MIO_FUS_DAT3[BAR2_EN]) is NOT blown the value of this field is '0' after reset and BAR2 is NOT present. When the fuse IS blown the value of this field is '1' after reset and BAR2 is present. Note that SW can change this field after reset. */ uint32_t scmtyp : 1; /**< Split Completion Message CMD Type (0=RD/1=WR) When SCM=1, SCMTYP specifies the CMD intent (R/W) */ uint32_t scm : 1; /**< Split Completion Message Detected (Read or Write) */ uint32_t en_wfilt : 1; /**< When '1' the window-access filter is enabled. Unfiltered writes are: MIO, SubId0 MIO, SubId7 NPI, SubId0 NPI, SubId7 POW, SubId7 DFA, SubId7 IPD, SubId7 USBN, SubId7 Unfiltered Reads are: MIO, SubId0 MIO, SubId7 NPI, SubId0 NPI, SubId7 POW, SubId1 POW, SubId2 POW, SubId3 POW, SubId7 DFA, SubId7 IPD, SubId7 USBN, SubId7 */ uint32_t reserved_14_14 : 1; uint32_t ap_pcix : 1; /**< PCX Core Mode status (0=PCI Bus/1=PCIX) */ uint32_t ap_64ad : 1; /**< PCX Core Bus status (0=32b Bus/1=64b Bus) */ uint32_t b12_bist : 1; /**< Bist Status For Memory In B12 */ uint32_t pmo_amod : 1; /**< PMO-ARB Mode (0=FP[HP=CMD1,LP=CMD0]/1=RR) */ uint32_t pmo_fpc : 3; /**< PMO-ARB Fixed Priority Counter When PMO_AMOD=0 (FP mode), this field represents the \# of CMD1 requests that are issued (at higher priority) before a single lower priority CMD0 is allowed to issue (to ensure forward progress). - 0: 1 CMD1 Request issued before CMD0 allowed - ... - 7: 8 CMD1 Requests issued before CMD0 allowed */ uint32_t tsr_hwm : 3; /**< Target Split-Read ADB(allowable disconnect boundary) High Water Mark. Specifies the number of ADBs(128 Byte aligned chunks) that are accumulated(pending) BEFORE the Target Split completion is attempted on the PCI bus. - 0: RESERVED/ILLEGAL - 1: 2 Pending ADBs (129B-256B) - 2: 3 Pending ADBs (257B-384B) - 3: 4 Pending ADBs (385B-512B) - 4: 5 Pending ADBs (513B-640B) - 5: 6 Pending ADBs (641B-768B) - 6: 7 Pending ADBs (769B-896B) - 7: 8 Pending ADBs (897B-1024B) Example: Suppose a 1KB target memory request with starting byte offset address[6:0]=0x7F is split by the OCTEON and the TSR_HWM=1(2 ADBs). The OCTEON will start the target split completion on the PCI Bus after 1B(1st ADB)+128B(2nd ADB)=129B of data have been received from memory (even though the remaining 895B has not yet been received). The OCTEON will continue the split completion until it has consumed all of the pended split data.
If the full transaction length(1KB) of data was NOT entirely transferred, then OCTEON will terminate the split completion and again wait for another 2 ADB-aligned data chunks(256B) of pended split data to be received from memory before starting another split completion request. This allows Octeon (as split completer) to send back multiple split completions for a given large split transaction without having to wait for the entire transaction length to be received from memory. NOTE: For split transaction sizes 'smaller' than the specified TSR_HWM value, the split completion is started when the last datum has been received from memory. NOTE: It is IMPERATIVE that this field NEVER BE written to a ZERO value. A value of zero is reserved/illegal and can result in PCIX bus hangs. */ uint32_t bar2_enb : 1; /**< When set '1' BAR2 is enabled and will respond; when clear '0', BAR2 accesses will be target-aborted. */ uint32_t bar2_esx : 2; /**< Value will be XORed with pci-address[37:36] to determine the endian swap mode. */ uint32_t bar2_cax : 1; /**< Value will be XORed with pci-address[38] to determine the L2 cache attribute. When XOR result is 1, not allocated in L2 cache */ #else uint32_t bar2_cax : 1; uint32_t bar2_esx : 2; uint32_t bar2_enb : 1; uint32_t tsr_hwm : 3; uint32_t pmo_fpc : 3; uint32_t pmo_amod : 1; uint32_t b12_bist : 1; uint32_t ap_64ad : 1; uint32_t ap_pcix : 1; uint32_t reserved_14_14 : 1; uint32_t en_wfilt : 1; uint32_t scm : 1; uint32_t scmtyp : 1; uint32_t bar2pres : 1; uint32_t erst_n : 1; uint32_t reserved_20_31 : 12; #endif } cn31xx; struct cvmx_pci_ctl_status_2_s cn38xx; struct cvmx_pci_ctl_status_2_cn31xx cn38xxp2; struct cvmx_pci_ctl_status_2_s cn50xx; struct cvmx_pci_ctl_status_2_s cn58xx; struct cvmx_pci_ctl_status_2_s cn58xxp1; } cvmx_pci_ctl_status_2_t; /** * cvmx_pci_dbell# * * PCI_DBELL0 = PCI Doorbell-0 * * The value to write to the doorbell 0 register. The value in this register is acted upon when the * least-significant-byte of this register is written. */ typedef union { uint32_t u32; struct cvmx_pci_dbellx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_16_31 : 16; uint32_t inc_val : 16; /**< Software writes this register with the number of new Instructions to be processed on the Instruction Queue. When read this register contains the last write value. */ #else uint32_t inc_val : 16; uint32_t reserved_16_31 : 16; #endif } s; struct cvmx_pci_dbellx_s cn30xx; struct cvmx_pci_dbellx_s cn31xx; struct cvmx_pci_dbellx_s cn38xx; struct cvmx_pci_dbellx_s cn38xxp2; struct cvmx_pci_dbellx_s cn50xx; struct cvmx_pci_dbellx_s cn58xx; struct cvmx_pci_dbellx_s cn58xxp1; } cvmx_pci_dbellx_t; /** * cvmx_pci_dma_cnt# * * PCI_DMA_CNT0 = PCI DMA Count0 * * Keeps track of the number of DMAs or bytes sent by DMAs. The value in this register is acted upon when the * least-significant-byte of this register is written. */ typedef union { uint32_t u32; struct cvmx_pci_dma_cntx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t dma_cnt : 32; /**< Updated with the number of DMAs completed or the number of bytes sent for DMAs associated with this counter. When this register is written, the value written to [15:0] will be subtracted from the value in this register.
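Acknowledgement sketch (illustrative; CVMX_PCI_DMA_CNT0 and the cvmx_read_csr()/cvmx_write_csr() accessor names are assumptions):

  uint32_t cnt = (uint32_t)cvmx_read_csr(CVMX_PCI_DMA_CNT0);
  uint32_t ack = (cnt > 0xffff) ? 0xffff : cnt;  // only [15:0] is subtracted
  cvmx_write_csr(CVMX_PCI_DMA_CNT0, ack);        // retire the observed work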
*/ #else uint32_t dma_cnt : 32; #endif } s; struct cvmx_pci_dma_cntx_s cn30xx; struct cvmx_pci_dma_cntx_s cn31xx; struct cvmx_pci_dma_cntx_s cn38xx; struct cvmx_pci_dma_cntx_s cn38xxp2; struct cvmx_pci_dma_cntx_s cn50xx; struct cvmx_pci_dma_cntx_s cn58xx; struct cvmx_pci_dma_cntx_s cn58xxp1; } cvmx_pci_dma_cntx_t; /** * cvmx_pci_dma_int_lev# * * PCI_DMA_INT_LEV0 = PCI DMA Sent Interrupt Level For DMA 0 * * Interrupt when the value in PCI_DMA_CNT0 is equal to or greater than the register value. */ typedef union { uint32_t u32; struct cvmx_pci_dma_int_levx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t pkt_cnt : 32; /**< When PCI_DMA_CNT0 exceeds the value in this register, DCNT0 will be set in PCI_INT_SUM and PCI_INT_SUM2. */ #else uint32_t pkt_cnt : 32; #endif } s; struct cvmx_pci_dma_int_levx_s cn30xx; struct cvmx_pci_dma_int_levx_s cn31xx; struct cvmx_pci_dma_int_levx_s cn38xx; struct cvmx_pci_dma_int_levx_s cn38xxp2; struct cvmx_pci_dma_int_levx_s cn50xx; struct cvmx_pci_dma_int_levx_s cn58xx; struct cvmx_pci_dma_int_levx_s cn58xxp1; } cvmx_pci_dma_int_levx_t; /** * cvmx_pci_dma_time# * * PCI_DMA_TIME0 = PCI DMA Sent Timer For DMA0 * * Time to wait from DMA being sent before issuing an interrupt. */ typedef union { uint32_t u32; struct cvmx_pci_dma_timex_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t dma_time : 32; /**< Number of PCI clock cycles to wait, after PCI_DMA_CNT0 becomes non-zero, before setting DTIME0 in PCI_INT_SUM and PCI_INT_SUM2. The timer is reset when the PCI_INT_SUM[27] bit is cleared. */ #else uint32_t dma_time : 32; #endif } s; struct cvmx_pci_dma_timex_s cn30xx; struct cvmx_pci_dma_timex_s cn31xx; struct cvmx_pci_dma_timex_s cn38xx; struct cvmx_pci_dma_timex_s cn38xxp2; struct cvmx_pci_dma_timex_s cn50xx; struct cvmx_pci_dma_timex_s cn58xx; struct cvmx_pci_dma_timex_s cn58xxp1; } cvmx_pci_dma_timex_t; /** * cvmx_pci_instr_count# * * PCI_INSTR_COUNT0 = PCI Instructions Outstanding Request Count * * The number of instructions to be fetched by the Instruction-0 Engine. */ typedef union { uint32_t u32; struct cvmx_pci_instr_countx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t icnt : 32; /**< Number of Instructions to be fetched by the Instruction Engine. A write of any non-zero value to this register will clear the value of this register. */ #else uint32_t icnt : 32; #endif } s; struct cvmx_pci_instr_countx_s cn30xx; struct cvmx_pci_instr_countx_s cn31xx; struct cvmx_pci_instr_countx_s cn38xx; struct cvmx_pci_instr_countx_s cn38xxp2; struct cvmx_pci_instr_countx_s cn50xx; struct cvmx_pci_instr_countx_s cn58xx; struct cvmx_pci_instr_countx_s cn58xxp1; } cvmx_pci_instr_countx_t; /** * cvmx_pci_int_enb * * PCI_INT_ENB = PCI Interrupt Enable * * Enables interrupt bits in the PCI_INT_SUM register.
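Enable sketch using the union defined below (illustrative; CVMX_PCI_INT_ENB and cvmx_write_csr() are assumed accessor/address names):

  cvmx_pci_int_enb_t enb;
  enb.u64 = 0;
  enb.s.imr_abt = 1;  // INTA# on PCI_INT_SUM[4] (master abort, master-read)
  enb.s.itr_abt = 1;  // INTA# on PCI_INT_SUM[3] (target abort, master-read)
  cvmx_write_csr(CVMX_PCI_INT_ENB, enb.u64);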
*/ typedef union { uint64_t u64; struct cvmx_pci_int_enb_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_34_63 : 30; uint64_t ill_rd : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[33] */ uint64_t ill_wr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[32] */ uint64_t win_wr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[31] */ uint64_t dma1_fi : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[30] */ uint64_t dma0_fi : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[29] */ uint64_t idtime1 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[28] */ uint64_t idtime0 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[27] */ uint64_t idcnt1 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[26] */ uint64_t idcnt0 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[25] */ uint64_t iptime3 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[24] */ uint64_t iptime2 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[23] */ uint64_t iptime1 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[22] */ uint64_t iptime0 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[21] */ uint64_t ipcnt3 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[20] */ uint64_t ipcnt2 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[19] */ uint64_t ipcnt1 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[18] */ uint64_t ipcnt0 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[17] */ uint64_t irsl_int : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[16] */ uint64_t ill_rrd : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[15] */ uint64_t ill_rwr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[14] */ uint64_t idperr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[13] */ uint64_t iaperr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[12] */ uint64_t iserr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[11] */ uint64_t itsr_abt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[10] */ uint64_t imsc_msg : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[9] */ uint64_t imsi_mabt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[8] */ uint64_t imsi_tabt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[7] */ uint64_t imsi_per : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[6] */ uint64_t imr_tto : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[5] */ uint64_t imr_abt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[4] */ uint64_t itr_abt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[3] */ uint64_t imr_wtto : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[2] */ uint64_t imr_wabt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[1] */ uint64_t itr_wabt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[0] */ #else uint64_t itr_wabt : 1; uint64_t imr_wabt : 1; uint64_t imr_wtto : 1; uint64_t itr_abt : 1; uint64_t imr_abt : 1; uint64_t imr_tto : 1; uint64_t imsi_per : 1; uint64_t imsi_tabt : 1; uint64_t imsi_mabt : 1; uint64_t imsc_msg : 1; uint64_t itsr_abt : 1; uint64_t iserr : 1; uint64_t iaperr : 1; uint64_t idperr : 1; uint64_t ill_rwr : 1; uint64_t ill_rrd : 1; uint64_t irsl_int : 1; uint64_t ipcnt0 : 1; uint64_t ipcnt1 : 1; uint64_t ipcnt2 : 1; uint64_t ipcnt3 : 1; uint64_t iptime0 : 1; uint64_t iptime1 : 1; uint64_t iptime2 : 1; uint64_t iptime3 : 1; uint64_t idcnt0 : 1; uint64_t idcnt1 : 1; uint64_t idtime0 : 1; uint64_t idtime1 : 1; uint64_t dma0_fi : 1; uint64_t dma1_fi : 1; uint64_t win_wr : 1; uint64_t ill_wr : 1; uint64_t ill_rd : 1; uint64_t reserved_34_63 : 30; #endif } s; struct cvmx_pci_int_enb_cn30xx 
{ #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_34_63 : 30; uint64_t ill_rd : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[33] */ uint64_t ill_wr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[32] */ uint64_t win_wr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[31] */ uint64_t dma1_fi : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[30] */ uint64_t dma0_fi : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[29] */ uint64_t idtime1 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[28] */ uint64_t idtime0 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[27] */ uint64_t idcnt1 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[26] */ uint64_t idcnt0 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[25] */ uint64_t reserved_22_24 : 3; uint64_t iptime0 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[21] */ uint64_t reserved_18_20 : 3; uint64_t ipcnt0 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[17] */ uint64_t irsl_int : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[16] */ uint64_t ill_rrd : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[15] */ uint64_t ill_rwr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[14] */ uint64_t idperr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[13] */ uint64_t iaperr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[12] */ uint64_t iserr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[11] */ uint64_t itsr_abt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[10] */ uint64_t imsc_msg : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[9] */ uint64_t imsi_mabt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[8] */ uint64_t imsi_tabt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[7] */ uint64_t imsi_per : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[6] */ uint64_t imr_tto : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[5] */ uint64_t imr_abt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[4] */ uint64_t itr_abt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[3] */ uint64_t imr_wtto : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[2] */ uint64_t imr_wabt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[1] */ uint64_t itr_wabt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[0] */ #else uint64_t itr_wabt : 1; uint64_t imr_wabt : 1; uint64_t imr_wtto : 1; uint64_t itr_abt : 1; uint64_t imr_abt : 1; uint64_t imr_tto : 1; uint64_t imsi_per : 1; uint64_t imsi_tabt : 1; uint64_t imsi_mabt : 1; uint64_t imsc_msg : 1; uint64_t itsr_abt : 1; uint64_t iserr : 1; uint64_t iaperr : 1; uint64_t idperr : 1; uint64_t ill_rwr : 1; uint64_t ill_rrd : 1; uint64_t irsl_int : 1; uint64_t ipcnt0 : 1; uint64_t reserved_18_20 : 3; uint64_t iptime0 : 1; uint64_t reserved_22_24 : 3; uint64_t idcnt0 : 1; uint64_t idcnt1 : 1; uint64_t idtime0 : 1; uint64_t idtime1 : 1; uint64_t dma0_fi : 1; uint64_t dma1_fi : 1; uint64_t win_wr : 1; uint64_t ill_wr : 1; uint64_t ill_rd : 1; uint64_t reserved_34_63 : 30; #endif } cn30xx; struct cvmx_pci_int_enb_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_34_63 : 30; uint64_t ill_rd : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[33] */ uint64_t ill_wr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[32] */ uint64_t win_wr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[31] */ uint64_t dma1_fi : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[30] */ uint64_t dma0_fi : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[29] */ uint64_t idtime1 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[28] */ 
uint64_t idtime0 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[27] */ uint64_t idcnt1 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[26] */ uint64_t idcnt0 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[25] */ uint64_t reserved_23_24 : 2; uint64_t iptime1 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[22] */ uint64_t iptime0 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[21] */ uint64_t reserved_19_20 : 2; uint64_t ipcnt1 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[18] */ uint64_t ipcnt0 : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[17] */ uint64_t irsl_int : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[16] */ uint64_t ill_rrd : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[15] */ uint64_t ill_rwr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[14] */ uint64_t idperr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[13] */ uint64_t iaperr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[12] */ uint64_t iserr : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[11] */ uint64_t itsr_abt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[10] */ uint64_t imsc_msg : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[9] */ uint64_t imsi_mabt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[8] */ uint64_t imsi_tabt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[7] */ uint64_t imsi_per : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[6] */ uint64_t imr_tto : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[5] */ uint64_t imr_abt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[4] */ uint64_t itr_abt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[3] */ uint64_t imr_wtto : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[2] */ uint64_t imr_wabt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[1] */ uint64_t itr_wabt : 1; /**< INTA# Pin Interrupt Enable for PCI_INT_SUM[0] */ #else uint64_t itr_wabt : 1; uint64_t imr_wabt : 1; uint64_t imr_wtto : 1; uint64_t itr_abt : 1; uint64_t imr_abt : 1; uint64_t imr_tto : 1; uint64_t imsi_per : 1; uint64_t imsi_tabt : 1; uint64_t imsi_mabt : 1; uint64_t imsc_msg : 1; uint64_t itsr_abt : 1; uint64_t iserr : 1; uint64_t iaperr : 1; uint64_t idperr : 1; uint64_t ill_rwr : 1; uint64_t ill_rrd : 1; uint64_t irsl_int : 1; uint64_t ipcnt0 : 1; uint64_t ipcnt1 : 1; uint64_t reserved_19_20 : 2; uint64_t iptime0 : 1; uint64_t iptime1 : 1; uint64_t reserved_23_24 : 2; uint64_t idcnt0 : 1; uint64_t idcnt1 : 1; uint64_t idtime0 : 1; uint64_t idtime1 : 1; uint64_t dma0_fi : 1; uint64_t dma1_fi : 1; uint64_t win_wr : 1; uint64_t ill_wr : 1; uint64_t ill_rd : 1; uint64_t reserved_34_63 : 30; #endif } cn31xx; struct cvmx_pci_int_enb_s cn38xx; struct cvmx_pci_int_enb_s cn38xxp2; struct cvmx_pci_int_enb_cn31xx cn50xx; struct cvmx_pci_int_enb_s cn58xx; struct cvmx_pci_int_enb_s cn58xxp1; } cvmx_pci_int_enb_t; /** * cvmx_pci_int_enb2 * * PCI_INT_ENB2 = PCI Interrupt Enable2 Register * * Enables interrupt bits in the PCI_INT_SUM2 register. 
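PCI_INT_ENB2 follows the same layout as PCI_INT_ENB but gates the RSL chain interrupt rather than the INTA# pin; the same pattern applies (sketch only, CVMX_PCI_INT_ENB2 and cvmx_write_csr() names assumed):

  cvmx_pci_int_enb2_t enb2;
  enb2.u64 = 0;
  enb2.s.rmr_abt = 1; // RSL interrupt on PCI_INT_SUM2[4]
  cvmx_write_csr(CVMX_PCI_INT_ENB2, enb2.u64);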
*/ typedef union { uint64_t u64; struct cvmx_pci_int_enb2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_34_63 : 30; uint64_t ill_rd : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[33] */ uint64_t ill_wr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[32] */ uint64_t win_wr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[31] */ uint64_t dma1_fi : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[30] */ uint64_t dma0_fi : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[29] */ uint64_t rdtime1 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[28] */ uint64_t rdtime0 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[27] */ uint64_t rdcnt1 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[26] */ uint64_t rdcnt0 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[25] */ uint64_t rptime3 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[24] */ uint64_t rptime2 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[23] */ uint64_t rptime1 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[22] */ uint64_t rptime0 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[21] */ uint64_t rpcnt3 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[20] */ uint64_t rpcnt2 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[19] */ uint64_t rpcnt1 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[18] */ uint64_t rpcnt0 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[17] */ uint64_t rrsl_int : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[16] */ uint64_t ill_rrd : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[15] */ uint64_t ill_rwr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[14] */ uint64_t rdperr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[13] */ uint64_t raperr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[12] */ uint64_t rserr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[11] */ uint64_t rtsr_abt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[10] */ uint64_t rmsc_msg : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[9] */ uint64_t rmsi_mabt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[8] */ uint64_t rmsi_tabt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[7] */ uint64_t rmsi_per : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[6] */ uint64_t rmr_tto : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[5] */ uint64_t rmr_abt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[4] */ uint64_t rtr_abt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[3] */ uint64_t rmr_wtto : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[2] */ uint64_t rmr_wabt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[1] */ uint64_t rtr_wabt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[0] */ #else uint64_t rtr_wabt : 1; uint64_t rmr_wabt : 1; uint64_t rmr_wtto : 1; uint64_t rtr_abt : 1; uint64_t rmr_abt : 1; uint64_t rmr_tto : 1; uint64_t rmsi_per : 1; uint64_t rmsi_tabt : 1; uint64_t rmsi_mabt : 1; uint64_t rmsc_msg : 1; uint64_t rtsr_abt : 1; uint64_t rserr : 1; uint64_t raperr : 1; uint64_t rdperr : 1; uint64_t ill_rwr : 1; uint64_t ill_rrd : 1; uint64_t rrsl_int : 1; uint64_t rpcnt0 : 1; uint64_t rpcnt1 : 1; uint64_t rpcnt2 : 1; uint64_t rpcnt3 : 1; uint64_t rptime0 : 1; uint64_t rptime1 : 1; uint64_t rptime2 : 1; uint64_t rptime3 : 1; uint64_t rdcnt0 : 1; uint64_t rdcnt1 : 1; uint64_t rdtime0 : 1; uint64_t rdtime1 : 1; uint64_t dma0_fi : 1; uint64_t dma1_fi : 1; uint64_t win_wr : 1; uint64_t ill_wr : 1; uint64_t ill_rd : 1; uint64_t reserved_34_63 : 30; #endif 
} s; struct cvmx_pci_int_enb2_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_34_63 : 30; uint64_t ill_rd : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[33] */ uint64_t ill_wr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[32] */ uint64_t win_wr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[31] */ uint64_t dma1_fi : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[30] */ uint64_t dma0_fi : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[29] */ uint64_t rdtime1 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[28] */ uint64_t rdtime0 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[27] */ uint64_t rdcnt1 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[26] */ uint64_t rdcnt0 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[25] */ uint64_t reserved_22_24 : 3; uint64_t rptime0 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[21] */ uint64_t reserved_18_20 : 3; uint64_t rpcnt0 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[17] */ uint64_t rrsl_int : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[16] */ uint64_t ill_rrd : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[15] */ uint64_t ill_rwr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[14] */ uint64_t rdperr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[13] */ uint64_t raperr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[12] */ uint64_t rserr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[11] */ uint64_t rtsr_abt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[10] */ uint64_t rmsc_msg : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[9] */ uint64_t rmsi_mabt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[8] */ uint64_t rmsi_tabt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[7] */ uint64_t rmsi_per : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[6] */ uint64_t rmr_tto : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[5] */ uint64_t rmr_abt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[4] */ uint64_t rtr_abt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[3] */ uint64_t rmr_wtto : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[2] */ uint64_t rmr_wabt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[1] */ uint64_t rtr_wabt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[0] */ #else uint64_t rtr_wabt : 1; uint64_t rmr_wabt : 1; uint64_t rmr_wtto : 1; uint64_t rtr_abt : 1; uint64_t rmr_abt : 1; uint64_t rmr_tto : 1; uint64_t rmsi_per : 1; uint64_t rmsi_tabt : 1; uint64_t rmsi_mabt : 1; uint64_t rmsc_msg : 1; uint64_t rtsr_abt : 1; uint64_t rserr : 1; uint64_t raperr : 1; uint64_t rdperr : 1; uint64_t ill_rwr : 1; uint64_t ill_rrd : 1; uint64_t rrsl_int : 1; uint64_t rpcnt0 : 1; uint64_t reserved_18_20 : 3; uint64_t rptime0 : 1; uint64_t reserved_22_24 : 3; uint64_t rdcnt0 : 1; uint64_t rdcnt1 : 1; uint64_t rdtime0 : 1; uint64_t rdtime1 : 1; uint64_t dma0_fi : 1; uint64_t dma1_fi : 1; uint64_t win_wr : 1; uint64_t ill_wr : 1; uint64_t ill_rd : 1; uint64_t reserved_34_63 : 30; #endif } cn30xx; struct cvmx_pci_int_enb2_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_34_63 : 30; uint64_t ill_rd : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[33] */ uint64_t ill_wr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[32] */ uint64_t win_wr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[31] */ uint64_t dma1_fi : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[30] */ uint64_t dma0_fi : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[29] */ 
uint64_t rdtime1 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[28] */ uint64_t rdtime0 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[27] */ uint64_t rdcnt1 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[26] */ uint64_t rdcnt0 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[25] */ uint64_t reserved_23_24 : 2; uint64_t rptime1 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[22] */ uint64_t rptime0 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[21] */ uint64_t reserved_19_20 : 2; uint64_t rpcnt1 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[18] */ uint64_t rpcnt0 : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[17] */ uint64_t rrsl_int : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[16] */ uint64_t ill_rrd : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[15] */ uint64_t ill_rwr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[14] */ uint64_t rdperr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[13] */ uint64_t raperr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[12] */ uint64_t rserr : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[11] */ uint64_t rtsr_abt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[10] */ uint64_t rmsc_msg : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[9] */ uint64_t rmsi_mabt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[8] */ uint64_t rmsi_tabt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[7] */ uint64_t rmsi_per : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[6] */ uint64_t rmr_tto : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[5] */ uint64_t rmr_abt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[4] */ uint64_t rtr_abt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[3] */ uint64_t rmr_wtto : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[2] */ uint64_t rmr_wabt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[1] */ uint64_t rtr_wabt : 1; /**< RSL Chain Interrupt Enable for PCI_INT_SUM2[0] */ #else uint64_t rtr_wabt : 1; uint64_t rmr_wabt : 1; uint64_t rmr_wtto : 1; uint64_t rtr_abt : 1; uint64_t rmr_abt : 1; uint64_t rmr_tto : 1; uint64_t rmsi_per : 1; uint64_t rmsi_tabt : 1; uint64_t rmsi_mabt : 1; uint64_t rmsc_msg : 1; uint64_t rtsr_abt : 1; uint64_t rserr : 1; uint64_t raperr : 1; uint64_t rdperr : 1; uint64_t ill_rwr : 1; uint64_t ill_rrd : 1; uint64_t rrsl_int : 1; uint64_t rpcnt0 : 1; uint64_t rpcnt1 : 1; uint64_t reserved_19_20 : 2; uint64_t rptime0 : 1; uint64_t rptime1 : 1; uint64_t reserved_23_24 : 2; uint64_t rdcnt0 : 1; uint64_t rdcnt1 : 1; uint64_t rdtime0 : 1; uint64_t rdtime1 : 1; uint64_t dma0_fi : 1; uint64_t dma1_fi : 1; uint64_t win_wr : 1; uint64_t ill_wr : 1; uint64_t ill_rd : 1; uint64_t reserved_34_63 : 30; #endif } cn31xx; struct cvmx_pci_int_enb2_s cn38xx; struct cvmx_pci_int_enb2_s cn38xxp2; struct cvmx_pci_int_enb2_cn31xx cn50xx; struct cvmx_pci_int_enb2_s cn58xx; struct cvmx_pci_int_enb2_s cn58xxp1; } cvmx_pci_int_enb2_t; /** * cvmx_pci_int_sum * * PCI_INT_SUM = PCI Interrupt Summary * * The PCI Interrupt Summary Register. */ typedef union { uint64_t u64; struct cvmx_pci_int_sum_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_34_63 : 30; uint64_t ill_rd : 1; /**< A read to a disabled area of bar1 or bar2, when the mem area is disabled. */ uint64_t ill_wr : 1; /**< A write to a disabled area of bar1 or bar2, when the mem area is disabled. */ uint64_t win_wr : 1; /**< A write to the disabled Window Write Data or Read-Address Register took place. 
*/ uint64_t dma1_fi : 1; /**< A DMA operation finished that was required to set the FORCE-INT bit for counter 1. */ uint64_t dma0_fi : 1; /**< A DMA operation finished that was required to set the FORCE-INT bit for counter 0. */ uint64_t dtime1 : 1; /**< When the value in the PCI_DMA_CNT1 register is not 0 the DMA_CNT1 timer counts. When the DMA1_CNT timer has a value greater than the PCI_DMA_TIME1 register this bit is set. The timer is reset when bit is written with a one. */ uint64_t dtime0 : 1; /**< When the value in the PCI_DMA_CNT0 register is not 0 the DMA_CNT0 timer counts. When the DMA0_CNT timer has a value greater than the PCI_DMA_TIME0 register this bit is set. The timer is reset when bit is written with a one. */ uint64_t dcnt1 : 1; /**< This bit indicates that PCI_DMA_CNT1 value is greater than the value in the PCI_DMA_INT_LEV1 register. */ uint64_t dcnt0 : 1; /**< This bit indicates that PCI_DMA_CNT0 value is greater than the value in the PCI_DMA_INT_LEV0 register. */ uint64_t ptime3 : 1; /**< When the value in the PCI_PKTS_SENT3 register is not 0 the Sent-3 timer counts. When the Sent-3 timer has a value greater than the PCI_PKTS_SENT_TIME3 register this bit is set. The timer is reset when bit is written with a one. */ uint64_t ptime2 : 1; /**< When the value in the PCI_PKTS_SENT2 register is not 0 the Sent-2 timer counts. When the Sent-2 timer has a value greater than the PCI_PKTS_SENT_TIME2 register this bit is set. The timer is reset when bit is written with a one. */ uint64_t ptime1 : 1; /**< When the value in the PCI_PKTS_SENT1 register is not 0 the Sent-1 timer counts. When the Sent-1 timer has a value greater than the PCI_PKTS_SENT_TIME1 register this bit is set. The timer is reset when bit is written with a one. */ uint64_t ptime0 : 1; /**< When the value in the PCI_PKTS_SENT0 register is not 0 the Sent-0 timer counts. When the Sent-0 timer has a value greater than the PCI_PKTS_SENT_TIME0 register this bit is set. The timer is reset when bit is written with a one. */ uint64_t pcnt3 : 1; /**< This bit indicates that PCI_PKTS_SENT3 value is greater than the value in the PCI_PKTS_SENT_INT_LEV3 register. */ uint64_t pcnt2 : 1; /**< This bit indicates that PCI_PKTS_SENT2 value is greater than the value in the PCI_PKTS_SENT_INT_LEV2 register. */ uint64_t pcnt1 : 1; /**< This bit indicates that PCI_PKTS_SENT1 value is greater than the value in the PCI_PKTS_SENT_INT_LEV1 register. */ uint64_t pcnt0 : 1; /**< This bit indicates that PCI_PKTS_SENT0 value is greater than the value in the PCI_PKTS_SENT_INT_LEV0 register. */ uint64_t rsl_int : 1; /**< This bit is set when the mio_pci_inta_dr wire is asserted by the MIO. */ uint64_t ill_rrd : 1; /**< A read to the disabled PCI registers took place. */ uint64_t ill_rwr : 1; /**< A write to the disabled PCI registers took place. */ uint64_t dperr : 1; /**< Data Parity Error detected by PCX Core */ uint64_t aperr : 1; /**< Address Parity Error detected by PCX Core */ uint64_t serr : 1; /**< SERR# detected by PCX Core */ uint64_t tsr_abt : 1; /**< Target Split-Read Abort Detected O9N (as completer), has encountered an error which prevents the split transaction from completing. In this event, the O9N (as completer), sends a SCM (Split Completion Message) to the initiator. See: PCIX Spec v1.0a Fig 2-40.
[31:28]: Message Class = 2(completer error) [27:20]: Message Index = 0x80 [18:12]: Remaining Lower Address [11:0]: Remaining Byte Count */ uint64_t msc_msg : 1; /**< Master Split Completion Message (SCM) Detected for either a Split-Read/Write error case. Set if: a) A Split-Write SCM is detected with SCE=1. b) A Split-Read SCM is detected (regardless of SCE status). The Split completion message(SCM) is also latched into the PCI_SCM_REG[SCM] to assist SW with error recovery. */ uint64_t msi_mabt : 1; /**< PCI Master Abort on Master MSI */ uint64_t msi_tabt : 1; /**< PCI Target-Abort on Master MSI */ uint64_t msi_per : 1; /**< PCI Parity Error on Master MSI */ uint64_t mr_tto : 1; /**< PCI Master Retry Timeout On Master-Read */ uint64_t mr_abt : 1; /**< PCI Master Abort On Master-Read */ uint64_t tr_abt : 1; /**< PCI Target Abort On Master-Read */ uint64_t mr_wtto : 1; /**< PCI Master Retry Timeout on Master-write */ uint64_t mr_wabt : 1; /**< PCI Master Abort detected on Master-write */ uint64_t tr_wabt : 1; /**< PCI Target Abort detected on Master-write */ #else uint64_t tr_wabt : 1; uint64_t mr_wabt : 1; uint64_t mr_wtto : 1; uint64_t tr_abt : 1; uint64_t mr_abt : 1; uint64_t mr_tto : 1; uint64_t msi_per : 1; uint64_t msi_tabt : 1; uint64_t msi_mabt : 1; uint64_t msc_msg : 1; uint64_t tsr_abt : 1; uint64_t serr : 1; uint64_t aperr : 1; uint64_t dperr : 1; uint64_t ill_rwr : 1; uint64_t ill_rrd : 1; uint64_t rsl_int : 1; uint64_t pcnt0 : 1; uint64_t pcnt1 : 1; uint64_t pcnt2 : 1; uint64_t pcnt3 : 1; uint64_t ptime0 : 1; uint64_t ptime1 : 1; uint64_t ptime2 : 1; uint64_t ptime3 : 1; uint64_t dcnt0 : 1; uint64_t dcnt1 : 1; uint64_t dtime0 : 1; uint64_t dtime1 : 1; uint64_t dma0_fi : 1; uint64_t dma1_fi : 1; uint64_t win_wr : 1; uint64_t ill_wr : 1; uint64_t ill_rd : 1; uint64_t reserved_34_63 : 30; #endif } s; struct cvmx_pci_int_sum_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_34_63 : 30; uint64_t ill_rd : 1; /**< A read to a disabled area of bar1 or bar2, when the mem area is disabled. */ uint64_t ill_wr : 1; /**< A write to a disabled area of bar1 or bar2, when the mem area is disabled. */ uint64_t win_wr : 1; /**< A write to the disabled Window Write Data or Read-Address Register took place. */ uint64_t dma1_fi : 1; /**< A DMA operation finished that was required to set the FORCE-INT bit for counter 1. */ uint64_t dma0_fi : 1; /**< A DMA operation finished that was required to set the FORCE-INT bit for counter 0. */ uint64_t dtime1 : 1; /**< When the value in the PCI_DMA_CNT1 register is not 0 the DMA_CNT1 timer counts. When the DMA1_CNT timer has a value greater than the PCI_DMA_TIME1 register this bit is set. The timer is reset when bit is written with a one. */ uint64_t dtime0 : 1; /**< When the value in the PCI_DMA_CNT0 register is not 0 the DMA_CNT0 timer counts. When the DMA0_CNT timer has a value greater than the PCI_DMA_TIME0 register this bit is set. The timer is reset when bit is written with a one. */ uint64_t dcnt1 : 1; /**< This bit indicates that PCI_DMA_CNT1 value is greater than the value in the PCI_DMA_INT_LEV1 register. */ uint64_t dcnt0 : 1; /**< This bit indicates that PCI_DMA_CNT0 value is greater than the value in the PCI_DMA_INT_LEV0 register. */ uint64_t reserved_22_24 : 3; uint64_t ptime0 : 1; /**< When the value in the PCI_PKTS_SENT0 register is not 0 the Sent-0 timer counts. When the Sent-0 timer has a value greater than the PCI_PKTS_SENT_TIME0 register this bit is set. The timer is reset when bit is written with a one.
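A write-one-to-clear acknowledge sketch for these timer bits (illustrative; CVMX_PCI_INT_SUM and cvmx_write_csr() are assumed names):

  cvmx_pci_int_sum_t ack;
  ack.u64 = 0;
  ack.cn30xx.ptime0 = 1;                     // writing 1 resets the timer
  cvmx_write_csr(CVMX_PCI_INT_SUM, ack.u64);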
*/ uint64_t reserved_18_20 : 3; uint64_t pcnt0 : 1; /**< This bit indicates that PCI_PKTS_SENT0 value is greater than the value in the PCI_PKTS_SENT_INT_LEV0 register. */ uint64_t rsl_int : 1; /**< This bit is set when the mio_pci_inta_dr wire is asserted by the MIO */ uint64_t ill_rrd : 1; /**< A read to the disabled PCI registers took place. */ uint64_t ill_rwr : 1; /**< A write to the disabled PCI registers took place. */ uint64_t dperr : 1; /**< Data Parity Error detected by PCX Core */ uint64_t aperr : 1; /**< Address Parity Error detected by PCX Core */ uint64_t serr : 1; /**< SERR# detected by PCX Core */ uint64_t tsr_abt : 1; /**< Target Split-Read Abort Detected N3K (as completer), has encountered an error which prevents the split transaction from completing. In this event, the N3K (as completer), sends a SCM (Split Completion Message) to the initiator. See: PCIX Spec v1.0a Fig 2-40. [31:28]: Message Class = 2(completer error) [27:20]: Message Index = 0x80 [18:12]: Remaining Lower Address [11:0]: Remaining Byte Count */ uint64_t msc_msg : 1; /**< Master Split Completion Message (SCM) Detected for either a Split-Read/Write error case. Set if: a) A Split-Write SCM is detected with SCE=1. b) A Split-Read SCM is detected (regardless of SCE status). The Split completion message(SCM) is also latched into the PCI_SCM_REG[SCM] to assist SW with error recovery. */ uint64_t msi_mabt : 1; /**< PCI Master Abort on Master MSI */ uint64_t msi_tabt : 1; /**< PCI Target-Abort on Master MSI */ uint64_t msi_per : 1; /**< PCI Parity Error on Master MSI */ uint64_t mr_tto : 1; /**< PCI Master Retry Timeout On Master-Read */ uint64_t mr_abt : 1; /**< PCI Master Abort On Master-Read */ uint64_t tr_abt : 1; /**< PCI Target Abort On Master-Read */ uint64_t mr_wtto : 1; /**< PCI Master Retry Timeout on Master-write */ uint64_t mr_wabt : 1; /**< PCI Master Abort detected on Master-write */ uint64_t tr_wabt : 1; /**< PCI Target Abort detected on Master-write */ #else uint64_t tr_wabt : 1; uint64_t mr_wabt : 1; uint64_t mr_wtto : 1; uint64_t tr_abt : 1; uint64_t mr_abt : 1; uint64_t mr_tto : 1; uint64_t msi_per : 1; uint64_t msi_tabt : 1; uint64_t msi_mabt : 1; uint64_t msc_msg : 1; uint64_t tsr_abt : 1; uint64_t serr : 1; uint64_t aperr : 1; uint64_t dperr : 1; uint64_t ill_rwr : 1; uint64_t ill_rrd : 1; uint64_t rsl_int : 1; uint64_t pcnt0 : 1; uint64_t reserved_18_20 : 3; uint64_t ptime0 : 1; uint64_t reserved_22_24 : 3; uint64_t dcnt0 : 1; uint64_t dcnt1 : 1; uint64_t dtime0 : 1; uint64_t dtime1 : 1; uint64_t dma0_fi : 1; uint64_t dma1_fi : 1; uint64_t win_wr : 1; uint64_t ill_wr : 1; uint64_t ill_rd : 1; uint64_t reserved_34_63 : 30; #endif } cn30xx; struct cvmx_pci_int_sum_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_34_63 : 30; uint64_t ill_rd : 1; /**< A read to a disabled area of bar1 or bar2, when the mem area is disabled. */ uint64_t ill_wr : 1; /**< A write to a disabled area of bar1 or bar2, when the mem area is disabled. */ uint64_t win_wr : 1; /**< A write to the disabled Window Write Data or Read-Address Register took place. */ uint64_t dma1_fi : 1; /**< A DMA operation finished that was required to set the FORCE-INT bit for counter 1. */ uint64_t dma0_fi : 1; /**< A DMA operation finished that was required to set the FORCE-INT bit for counter 0. */ uint64_t dtime1 : 1; /**< When the value in the PCI_DMA_CNT1 register is not 0 the DMA_CNT1 timer counts. When the DMA1_CNT timer has a value greater than the PCI_DMA_TIME1 register this bit is set.
The timer is reset when bit is written with a one. */ uint64_t dtime0 : 1; /**< When the value in the PCI_DMA_CNT0 register is not 0 the DMA_CNT0 timer counts. When the DMA0_CNT timer has a value greater than the PCI_DMA_TIME0 register this bit is set. The timer is reset when bit is written with a one. */ uint64_t dcnt1 : 1; /**< This bit indicates that PCI_DMA_CNT1 value is greater than the value in the PCI_DMA_INT_LEV1 register. */ uint64_t dcnt0 : 1; /**< This bit indicates that PCI_DMA_CNT0 value is greater than the value in the PCI_DMA_INT_LEV0 register. */ uint64_t reserved_23_24 : 2; uint64_t ptime1 : 1; /**< When the value in the PCI_PKTS_SENT1 register is not 0 the Sent-1 timer counts. When the Sent-1 timer has a value greater than the PCI_PKTS_SENT_TIME1 register this bit is set. The timer is reset when bit is written with a one. */ uint64_t ptime0 : 1; /**< When the value in the PCI_PKTS_SENT0 register is not 0 the Sent-0 timer counts. When the Sent-0 timer has a value greater than the PCI_PKTS_SENT_TIME0 register this bit is set. The timer is reset when bit is written with a one. */ uint64_t reserved_19_20 : 2; uint64_t pcnt1 : 1; /**< This bit indicates that PCI_PKTS_SENT1 value is greater than the value in the PCI_PKTS_SENT_INT_LEV1 register. */ uint64_t pcnt0 : 1; /**< This bit indicates that PCI_PKTS_SENT0 value is greater than the value in the PCI_PKTS_SENT_INT_LEV0 register. */ uint64_t rsl_int : 1; /**< This bit is set when the mio_pci_inta_dr wire is asserted by the MIO */ uint64_t ill_rrd : 1; /**< A read to the disabled PCI registers took place. */ uint64_t ill_rwr : 1; /**< A write to the disabled PCI registers took place. */ uint64_t dperr : 1; /**< Data Parity Error detected by PCX Core */ uint64_t aperr : 1; /**< Address Parity Error detected by PCX Core */ uint64_t serr : 1; /**< SERR# detected by PCX Core */ uint64_t tsr_abt : 1; /**< Target Split-Read Abort Detected N3K (as completer), has encountered an error which prevents the split transaction from completing. In this event, the N3K (as completer), sends a SCM (Split Completion Message) to the initiator. See: PCIX Spec v1.0a Fig 2-40. [31:28]: Message Class = 2(completer error) [27:20]: Message Index = 0x80 [18:12]: Remaining Lower Address [11:0]: Remaining Byte Count */ uint64_t msc_msg : 1; /**< Master Split Completion Message (SCM) Detected for either a Split-Read/Write error case. Set if: a) A Split-Write SCM is detected with SCE=1. b) A Split-Read SCM is detected (regardless of SCE status). The Split completion message(SCM) is also latched into the PCI_SCM_REG[SCM] to assist SW with error recovery. 
*/ uint64_t msi_mabt : 1; /**< PCI Master Abort on Master MSI */ uint64_t msi_tabt : 1; /**< PCI Target-Abort on Master MSI */ uint64_t msi_per : 1; /**< PCI Parity Error on Master MSI */ uint64_t mr_tto : 1; /**< PCI Master Retry Timeout On Master-Read */ uint64_t mr_abt : 1; /**< PCI Master Abort On Master-Read */ uint64_t tr_abt : 1; /**< PCI Target Abort On Master-Read */ uint64_t mr_wtto : 1; /**< PCI Master Retry Timeout on Master-write */ uint64_t mr_wabt : 1; /**< PCI Master Abort detected on Master-write */ uint64_t tr_wabt : 1; /**< PCI Target Abort detected on Master-write */ #else uint64_t tr_wabt : 1; uint64_t mr_wabt : 1; uint64_t mr_wtto : 1; uint64_t tr_abt : 1; uint64_t mr_abt : 1; uint64_t mr_tto : 1; uint64_t msi_per : 1; uint64_t msi_tabt : 1; uint64_t msi_mabt : 1; uint64_t msc_msg : 1; uint64_t tsr_abt : 1; uint64_t serr : 1; uint64_t aperr : 1; uint64_t dperr : 1; uint64_t ill_rwr : 1; uint64_t ill_rrd : 1; uint64_t rsl_int : 1; uint64_t pcnt0 : 1; uint64_t pcnt1 : 1; uint64_t reserved_19_20 : 2; uint64_t ptime0 : 1; uint64_t ptime1 : 1; uint64_t reserved_23_24 : 2; uint64_t dcnt0 : 1; uint64_t dcnt1 : 1; uint64_t dtime0 : 1; uint64_t dtime1 : 1; uint64_t dma0_fi : 1; uint64_t dma1_fi : 1; uint64_t win_wr : 1; uint64_t ill_wr : 1; uint64_t ill_rd : 1; uint64_t reserved_34_63 : 30; #endif } cn31xx; struct cvmx_pci_int_sum_s cn38xx; struct cvmx_pci_int_sum_s cn38xxp2; struct cvmx_pci_int_sum_cn31xx cn50xx; struct cvmx_pci_int_sum_s cn58xx; struct cvmx_pci_int_sum_s cn58xxp1; } cvmx_pci_int_sum_t; /** * cvmx_pci_int_sum2 * * PCI_INT_SUM2 = PCI Interrupt Summary2 Register * * The PCI Interrupt Summary2 Register copy used for RSL interrupts. */ typedef union { uint64_t u64; struct cvmx_pci_int_sum2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_34_63 : 30; uint64_t ill_rd : 1; /**< A read to a disabled area of bar1 or bar2, when the mem area is disabled. */ uint64_t ill_wr : 1; /**< A write to a disabled area of bar1 or bar2, when the mem area is disabled. */ uint64_t win_wr : 1; /**< A write to the disabled Window Write Data or Read-Address Register took place. */ uint64_t dma1_fi : 1; /**< A DMA operation finished that was required to set the FORCE-INT bit for counter 1. */ uint64_t dma0_fi : 1; /**< A DMA operation finished that was required to set the FORCE-INT bit for counter 0. */ uint64_t dtime1 : 1; /**< When the value in the PCI_DMA_CNT1 register is not 0 the DMA_CNT1 timer counts. When the DMA1_CNT timer has a value greater than the PCI_DMA_TIME1 register this bit is set. The timer is reset when bit is written with a one. */ uint64_t dtime0 : 1; /**< When the value in the PCI_DMA_CNT0 register is not 0 the DMA_CNT0 timer counts. When the DMA0_CNT timer has a value greater than the PCI_DMA_TIME0 register this bit is set. The timer is reset when bit is written with a one. */ uint64_t dcnt1 : 1; /**< This bit indicates that PCI_DMA_CNT1 value is greater than the value in the PCI_DMA_INT_LEV1 register. */ uint64_t dcnt0 : 1; /**< This bit indicates that PCI_DMA_CNT0 value is greater than the value in the PCI_DMA_INT_LEV0 register. */ uint64_t ptime3 : 1; /**< When the value in the PCI_PKTS_SENT3 register is not 0 the Sent-3 timer counts. When the Sent-3 timer has a value greater than the PCI_PKTS_SENT_TIME3 register this bit is set. The timer is reset when bit is written with a one. */ uint64_t ptime2 : 1; /**< When the value in the PCI_PKTS_SENT2 register is not 0 the Sent-2 timer counts.
When the Sent-2 timer has a value greater than the PCI_PKTS_SENT_TIME2 register this bit is set. The timer is reset when bit is written with a one. */ uint64_t ptime1 : 1; /**< When the value in the PCI_PKTS_SENT1 register is not 0 the Sent-1 timer counts. When the Sent-1 timer has a value greater than the PCI_PKTS_SENT_TIME1 register this bit is set. The timer is reset when bit is written with a one. */ uint64_t ptime0 : 1; /**< When the value in the PCI_PKTS_SENT0 register is not 0 the Sent-0 timer counts. When the Sent-0 timer has a value greater than the PCI_PKTS_SENT_TIME0 register this bit is set. The timer is reset when bit is written with a one. */ uint64_t pcnt3 : 1; /**< This bit indicates that PCI_PKTS_SENT3 value is greater than the value in the PCI_PKTS_SENT_INT_LEV3 register. */ uint64_t pcnt2 : 1; /**< This bit indicates that PCI_PKTS_SENT2 value is greater than the value in the PCI_PKTS_SENT_INT_LEV2 register. */ uint64_t pcnt1 : 1; /**< This bit indicates that PCI_PKTS_SENT1 value is greater than the value in the PCI_PKTS_SENT_INT_LEV1 register. */ uint64_t pcnt0 : 1; /**< This bit indicates that PCI_PKTS_SENT0 value is greater than the value in the PCI_PKTS_SENT_INT_LEV0 register. */ uint64_t rsl_int : 1; /**< This bit is set when the RSL Chain has generated an interrupt. */ uint64_t ill_rrd : 1; /**< A read to the disabled PCI registers took place. */ uint64_t ill_rwr : 1; /**< A write to the disabled PCI registers took place. */ uint64_t dperr : 1; /**< Data Parity Error detected by PCX Core */ uint64_t aperr : 1; /**< Address Parity Error detected by PCX Core */ uint64_t serr : 1; /**< SERR# detected by PCX Core */ uint64_t tsr_abt : 1; /**< Target Split-Read Abort Detected */ uint64_t msc_msg : 1; /**< Master Split Completion Message Detected */ uint64_t msi_mabt : 1; /**< PCI MSI Master Abort. */ uint64_t msi_tabt : 1; /**< PCI MSI Target Abort. */ uint64_t msi_per : 1; /**< PCI MSI Parity Error. */ uint64_t mr_tto : 1; /**< PCI Master Retry Timeout On Read. */ uint64_t mr_abt : 1; /**< PCI Master Abort On Read. */ uint64_t tr_abt : 1; /**< PCI Target Abort On Read. */ uint64_t mr_wtto : 1; /**< PCI Master Retry Timeout on write. */ uint64_t mr_wabt : 1; /**< PCI Master Abort detected on write. */ uint64_t tr_wabt : 1; /**< PCI Target Abort detected on write. */ #else uint64_t tr_wabt : 1; uint64_t mr_wabt : 1; uint64_t mr_wtto : 1; uint64_t tr_abt : 1; uint64_t mr_abt : 1; uint64_t mr_tto : 1; uint64_t msi_per : 1; uint64_t msi_tabt : 1; uint64_t msi_mabt : 1; uint64_t msc_msg : 1; uint64_t tsr_abt : 1; uint64_t serr : 1; uint64_t aperr : 1; uint64_t dperr : 1; uint64_t ill_rwr : 1; uint64_t ill_rrd : 1; uint64_t rsl_int : 1; uint64_t pcnt0 : 1; uint64_t pcnt1 : 1; uint64_t pcnt2 : 1; uint64_t pcnt3 : 1; uint64_t ptime0 : 1; uint64_t ptime1 : 1; uint64_t ptime2 : 1; uint64_t ptime3 : 1; uint64_t dcnt0 : 1; uint64_t dcnt1 : 1; uint64_t dtime0 : 1; uint64_t dtime1 : 1; uint64_t dma0_fi : 1; uint64_t dma1_fi : 1; uint64_t win_wr : 1; uint64_t ill_wr : 1; uint64_t ill_rd : 1; uint64_t reserved_34_63 : 30; #endif } s; struct cvmx_pci_int_sum2_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_34_63 : 30; uint64_t ill_rd : 1; /**< A read to a disabled area of bar1 or bar2, when the mem area is disabled. */ uint64_t ill_wr : 1; /**< A write to a disabled area of bar1 or bar2, when the mem area is disabled. */ uint64_t win_wr : 1; /**< A write to the disabled Window Write Data or Read-Address Register took place. 
*/
uint64_t dma1_fi : 1; /**< A DMA operation finished that was required to set the FORCE-INT bit for counter 1. */
uint64_t dma0_fi : 1; /**< A DMA operation finished that was required to set the FORCE-INT bit for counter 0. */
uint64_t dtime1 : 1; /**< When the value in the PCI_DMA_CNT1 register is not 0 the DMA_CNT1 timer counts. When the DMA1_CNT timer has a value greater than the PCI_DMA_TIME1 register this bit is set. The timer is reset when bit is written with a one. */
uint64_t dtime0 : 1; /**< When the value in the PCI_DMA_CNT0 register is not 0 the DMA_CNT0 timer counts. When the DMA0_CNT timer has a value greater than the PCI_DMA_TIME0 register this bit is set. The timer is reset when bit is written with a one. */
uint64_t dcnt1 : 1; /**< This bit indicates that PCI_DMA_CNT1 value is greater than the value in the PCI_DMA_INT_LEV1 register. */
uint64_t dcnt0 : 1; /**< This bit indicates that PCI_DMA_CNT0 value is greater than the value in the PCI_DMA_INT_LEV0 register. */
uint64_t reserved_22_24 : 3;
uint64_t ptime0 : 1; /**< When the value in the PCI_PKTS_SENT0 register is not 0 the Sent-0 timer counts. When the Sent-0 timer has a value greater than the PCI_PKTS_SENT_TIME0 register this bit is set. The timer is reset when bit is written with a one. */
uint64_t reserved_18_20 : 3;
uint64_t pcnt0 : 1; /**< This bit indicates that PCI_PKTS_SENT0 value is greater than the value in the PCI_PKTS_SENT_INT_LEV0 register. */
uint64_t rsl_int : 1; /**< This bit is set when the RSL Chain has generated an interrupt. */
uint64_t ill_rrd : 1; /**< A read to the disabled PCI registers took place. */
uint64_t ill_rwr : 1; /**< A write to the disabled PCI registers took place. */
uint64_t dperr : 1; /**< Data Parity Error detected by PCX Core */
uint64_t aperr : 1; /**< Address Parity Error detected by PCX Core */
uint64_t serr : 1; /**< SERR# detected by PCX Core */
uint64_t tsr_abt : 1; /**< Target Split-Read Abort Detected */
uint64_t msc_msg : 1; /**< Master Split Completion Message Detected */
uint64_t msi_mabt : 1; /**< PCI MSI Master Abort. */
uint64_t msi_tabt : 1; /**< PCI MSI Target Abort. */
uint64_t msi_per : 1; /**< PCI MSI Parity Error. */
uint64_t mr_tto : 1; /**< PCI Master Retry Timeout On Read. */
uint64_t mr_abt : 1; /**< PCI Master Abort On Read. */
uint64_t tr_abt : 1; /**< PCI Target Abort On Read. */
uint64_t mr_wtto : 1; /**< PCI Master Retry Timeout on write. */
uint64_t mr_wabt : 1; /**< PCI Master Abort detected on write. */
uint64_t tr_wabt : 1; /**< PCI Target Abort detected on write. */
#else
uint64_t tr_wabt : 1;
uint64_t mr_wabt : 1;
uint64_t mr_wtto : 1;
uint64_t tr_abt : 1;
uint64_t mr_abt : 1;
uint64_t mr_tto : 1;
uint64_t msi_per : 1;
uint64_t msi_tabt : 1;
uint64_t msi_mabt : 1;
uint64_t msc_msg : 1;
uint64_t tsr_abt : 1;
uint64_t serr : 1;
uint64_t aperr : 1;
uint64_t dperr : 1;
uint64_t ill_rwr : 1;
uint64_t ill_rrd : 1;
uint64_t rsl_int : 1;
uint64_t pcnt0 : 1;
uint64_t reserved_18_20 : 3;
uint64_t ptime0 : 1;
uint64_t reserved_22_24 : 3;
uint64_t dcnt0 : 1;
uint64_t dcnt1 : 1;
uint64_t dtime0 : 1;
uint64_t dtime1 : 1;
uint64_t dma0_fi : 1;
uint64_t dma1_fi : 1;
uint64_t win_wr : 1;
uint64_t ill_wr : 1;
uint64_t ill_rd : 1;
uint64_t reserved_34_63 : 30;
#endif
} cn30xx;
struct cvmx_pci_int_sum2_cn31xx {
#if __BYTE_ORDER == __BIG_ENDIAN
uint64_t reserved_34_63 : 30;
uint64_t ill_rd : 1; /**< A read to a disabled area of bar1 or bar2, when the mem area is disabled.
*/
uint64_t ill_wr : 1; /**< A write to a disabled area of bar1 or bar2, when the mem area is disabled. */
uint64_t win_wr : 1; /**< A write to the disabled Window Write Data or Read-Address Register took place. */
uint64_t dma1_fi : 1; /**< A DMA operation finished that was required to set the FORCE-INT bit for counter 1. */
uint64_t dma0_fi : 1; /**< A DMA operation finished that was required to set the FORCE-INT bit for counter 0. */
uint64_t dtime1 : 1; /**< When the value in the PCI_DMA_CNT1 register is not 0 the DMA_CNT1 timer counts. When the DMA1_CNT timer has a value greater than the PCI_DMA_TIME1 register this bit is set. The timer is reset when bit is written with a one. */
uint64_t dtime0 : 1; /**< When the value in the PCI_DMA_CNT0 register is not 0 the DMA_CNT0 timer counts. When the DMA0_CNT timer has a value greater than the PCI_DMA_TIME0 register this bit is set. The timer is reset when bit is written with a one. */
uint64_t dcnt1 : 1; /**< This bit indicates that PCI_DMA_CNT1 value is greater than the value in the PCI_DMA_INT_LEV1 register. */
uint64_t dcnt0 : 1; /**< This bit indicates that PCI_DMA_CNT0 value is greater than the value in the PCI_DMA_INT_LEV0 register. */
uint64_t reserved_23_24 : 2;
uint64_t ptime1 : 1; /**< When the value in the PCI_PKTS_SENT1 register is not 0 the Sent-1 timer counts. When the Sent-1 timer has a value greater than the PCI_PKTS_SENT_TIME1 register this bit is set. The timer is reset when bit is written with a one. */
uint64_t ptime0 : 1; /**< When the value in the PCI_PKTS_SENT0 register is not 0 the Sent-0 timer counts. When the Sent-0 timer has a value greater than the PCI_PKTS_SENT_TIME0 register this bit is set. The timer is reset when bit is written with a one. */
uint64_t reserved_19_20 : 2;
uint64_t pcnt1 : 1; /**< This bit indicates that PCI_PKTS_SENT1 value is greater than the value in the PCI_PKTS_SENT_INT_LEV1 register. */
uint64_t pcnt0 : 1; /**< This bit indicates that PCI_PKTS_SENT0 value is greater than the value in the PCI_PKTS_SENT_INT_LEV0 register. */
uint64_t rsl_int : 1; /**< This bit is set when the RSL Chain has generated an interrupt. */
uint64_t ill_rrd : 1; /**< A read to the disabled PCI registers took place. */
uint64_t ill_rwr : 1; /**< A write to the disabled PCI registers took place. */
uint64_t dperr : 1; /**< Data Parity Error detected by PCX Core */
uint64_t aperr : 1; /**< Address Parity Error detected by PCX Core */
uint64_t serr : 1; /**< SERR# detected by PCX Core */
uint64_t tsr_abt : 1; /**< Target Split-Read Abort Detected */
uint64_t msc_msg : 1; /**< Master Split Completion Message Detected */
uint64_t msi_mabt : 1; /**< PCI MSI Master Abort. */
uint64_t msi_tabt : 1; /**< PCI MSI Target Abort. */
uint64_t msi_per : 1; /**< PCI MSI Parity Error. */
uint64_t mr_tto : 1; /**< PCI Master Retry Timeout On Read. */
uint64_t mr_abt : 1; /**< PCI Master Abort On Read. */
uint64_t tr_abt : 1; /**< PCI Target Abort On Read. */
uint64_t mr_wtto : 1; /**< PCI Master Retry Timeout on write. */
uint64_t mr_wabt : 1; /**< PCI Master Abort detected on write. */
uint64_t tr_wabt : 1; /**< PCI Target Abort detected on write.
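Bits in this register that are documented as being cleared when written
with a one can be acknowledged by writing the captured value back, e.g.
(an illustrative sketch; pci_reg_read64()/pci_reg_write64() and
PCI_INT_SUM2_ADDR are hypothetical host-side helpers, not defined here):
  cvmx_pci_int_sum2_t isr;
  isr.u64 = pci_reg_read64(PCI_INT_SUM2_ADDR);  // capture pending causes
  pci_reg_write64(PCI_INT_SUM2_ADDR, isr.u64);  // write ones back to clear them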
*/
#else
uint64_t tr_wabt : 1;
uint64_t mr_wabt : 1;
uint64_t mr_wtto : 1;
uint64_t tr_abt : 1;
uint64_t mr_abt : 1;
uint64_t mr_tto : 1;
uint64_t msi_per : 1;
uint64_t msi_tabt : 1;
uint64_t msi_mabt : 1;
uint64_t msc_msg : 1;
uint64_t tsr_abt : 1;
uint64_t serr : 1;
uint64_t aperr : 1;
uint64_t dperr : 1;
uint64_t ill_rwr : 1;
uint64_t ill_rrd : 1;
uint64_t rsl_int : 1;
uint64_t pcnt0 : 1;
uint64_t pcnt1 : 1;
uint64_t reserved_19_20 : 2;
uint64_t ptime0 : 1;
uint64_t ptime1 : 1;
uint64_t reserved_23_24 : 2;
uint64_t dcnt0 : 1;
uint64_t dcnt1 : 1;
uint64_t dtime0 : 1;
uint64_t dtime1 : 1;
uint64_t dma0_fi : 1;
uint64_t dma1_fi : 1;
uint64_t win_wr : 1;
uint64_t ill_wr : 1;
uint64_t ill_rd : 1;
uint64_t reserved_34_63 : 30;
#endif
} cn31xx;
struct cvmx_pci_int_sum2_s cn38xx;
struct cvmx_pci_int_sum2_s cn38xxp2;
struct cvmx_pci_int_sum2_cn31xx cn50xx;
struct cvmx_pci_int_sum2_s cn58xx;
struct cvmx_pci_int_sum2_s cn58xxp1;
} cvmx_pci_int_sum2_t;
/**
 * cvmx_pci_msi_rcv
 *
 * PCI_MSI_RCV = PCI's MSI Received Vector Register
 *
 * A bit is set in this register relative to the vector received during an MSI. The value in this
 * register is acted upon when the least-significant-byte of this register is written.
 */
typedef union { uint32_t u32; struct cvmx_pci_msi_rcv_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t reserved_6_31 : 26;
uint32_t intr : 6; /**< When an MSI is received on the PCI the bit selected by data [5:0] will be set in this register. To clear this bit a write must take place to the NPI_MSI_RCV register where any bit set to 1 is cleared. Reading this address will return an unpredictable value. */
#else
uint32_t intr : 6;
uint32_t reserved_6_31 : 26;
#endif
} s;
struct cvmx_pci_msi_rcv_s cn30xx;
struct cvmx_pci_msi_rcv_s cn31xx;
struct cvmx_pci_msi_rcv_s cn38xx;
struct cvmx_pci_msi_rcv_s cn38xxp2;
struct cvmx_pci_msi_rcv_s cn50xx;
struct cvmx_pci_msi_rcv_s cn58xx;
struct cvmx_pci_msi_rcv_s cn58xxp1;
} cvmx_pci_msi_rcv_t;
/**
 * cvmx_pci_pkt_credits#
 *
 * PCI_PKT_CREDITS0 = PCI Packet Credits For Output 0
 *
 * Used to decrease the number of packets to be processed by the host from Output-0 and return
 * buffer/info pointer pairs to OCTEON Output-0. The value in this register is acted upon when the
 * least-significant-byte of this register is written.
 */
typedef union { uint32_t u32; struct cvmx_pci_pkt_creditsx_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t pkt_cnt : 16; /**< The value written to this field will be subtracted from PCI_PKTS_SENT0[PKT_CNT]. */
uint32_t ptr_cnt : 16; /**< This field value is added to the NPI's internal Buffer/Info Pointer Pair count. */
#else
uint32_t ptr_cnt : 16;
uint32_t pkt_cnt : 16;
#endif
} s;
struct cvmx_pci_pkt_creditsx_s cn30xx;
struct cvmx_pci_pkt_creditsx_s cn31xx;
struct cvmx_pci_pkt_creditsx_s cn38xx;
struct cvmx_pci_pkt_creditsx_s cn38xxp2;
struct cvmx_pci_pkt_creditsx_s cn50xx;
struct cvmx_pci_pkt_creditsx_s cn58xx;
struct cvmx_pci_pkt_creditsx_s cn58xxp1;
} cvmx_pci_pkt_creditsx_t;
/**
 * cvmx_pci_pkts_sent#
 *
 * PCI_PKTS_SENT0 = PCI Packets Sent 0
 *
 * Number of packets sent to the host memory from PCI Output 0
 */
typedef union { uint32_t u32; struct cvmx_pci_pkts_sentx_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t pkt_cnt : 32; /**< Each time a packet is written to the memory via PCI from PCI Output 0, this counter is incremented by 1 or the byte count of the packet as set in NPI_OUTPUT_CONTROL[P0_BMODE].
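After the host consumes packets it is expected to acknowledge them
through PCI_PKT_CREDITS0 (see above). A host-side sketch (illustrative
only; pci_reg_write32() and PCI_PKT_CREDITS0_ADDR are hypothetical
helpers, not defined in this file):
  cvmx_pci_pkt_creditsx_t credits;
  credits.s.pkt_cnt = pkts_handled;    // subtracted from PCI_PKTS_SENT0[PKT_CNT]
  credits.s.ptr_cnt = pairs_returned;  // buffer/info pointer pairs returned to the NPI
  pci_reg_write32(PCI_PKT_CREDITS0_ADDR, credits.u32); // LSB write triggers the update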
*/
#else
uint32_t pkt_cnt : 32;
#endif
} s;
struct cvmx_pci_pkts_sentx_s cn30xx;
struct cvmx_pci_pkts_sentx_s cn31xx;
struct cvmx_pci_pkts_sentx_s cn38xx;
struct cvmx_pci_pkts_sentx_s cn38xxp2;
struct cvmx_pci_pkts_sentx_s cn50xx;
struct cvmx_pci_pkts_sentx_s cn58xx;
struct cvmx_pci_pkts_sentx_s cn58xxp1;
} cvmx_pci_pkts_sentx_t;
/**
 * cvmx_pci_pkts_sent_int_lev#
 *
 * PCI_PKTS_SENT_INT_LEV0 = PCI Packets Sent Interrupt Level For Output 0
 *
 * Interrupt when number of packets sent is equal to or greater than the register value.
 */
typedef union { uint32_t u32; struct cvmx_pci_pkts_sent_int_levx_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t pkt_cnt : 32; /**< When corresponding port's PCI_PKTS_SENT0 value exceeds the value in this register, PCNT0 of the PCI_INT_SUM and PCI_INT_SUM2 will be set. */
#else
uint32_t pkt_cnt : 32;
#endif
} s;
struct cvmx_pci_pkts_sent_int_levx_s cn30xx;
struct cvmx_pci_pkts_sent_int_levx_s cn31xx;
struct cvmx_pci_pkts_sent_int_levx_s cn38xx;
struct cvmx_pci_pkts_sent_int_levx_s cn38xxp2;
struct cvmx_pci_pkts_sent_int_levx_s cn50xx;
struct cvmx_pci_pkts_sent_int_levx_s cn58xx;
struct cvmx_pci_pkts_sent_int_levx_s cn58xxp1;
} cvmx_pci_pkts_sent_int_levx_t;
/**
 * cvmx_pci_pkts_sent_time#
 *
 * PCI_PKTS_SENT_TIME0 = PCI Packets Sent Timer For Output-0
 *
 * Time to wait from packet being sent to host from Output-0 before issuing an interrupt.
 */
typedef union { uint32_t u32; struct cvmx_pci_pkts_sent_timex_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t pkt_time : 32; /**< Number of PCI clock cycles to wait before issuing an interrupt to the host when a packet from this port has been sent to the host. The timer is reset when the PCI_INT_SUM[21] bit is cleared. */
#else
uint32_t pkt_time : 32;
#endif
} s;
struct cvmx_pci_pkts_sent_timex_s cn30xx;
struct cvmx_pci_pkts_sent_timex_s cn31xx;
struct cvmx_pci_pkts_sent_timex_s cn38xx;
struct cvmx_pci_pkts_sent_timex_s cn38xxp2;
struct cvmx_pci_pkts_sent_timex_s cn50xx;
struct cvmx_pci_pkts_sent_timex_s cn58xx;
struct cvmx_pci_pkts_sent_timex_s cn58xxp1;
} cvmx_pci_pkts_sent_timex_t;
/**
 * cvmx_pci_read_cmd_6
 *
 * PCI_READ_CMD_6 = PCI Read Command 6 Register
 *
 * Contains control information related to a received PCI Command 6.
 */
typedef union { uint32_t u32; struct cvmx_pci_read_cmd_6_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t reserved_9_31 : 23;
uint32_t min_data : 6; /**< The number of words to have buffered in the PNI before informing the PCIX-Core that we have read data available for the outstanding Delayed read. 0 is treated as a 64. For reads to the expansion this value is not used. */
uint32_t prefetch : 3; /**< Control the amount of data to be prefetched when this type of READ command is received. 0 = 1 32/64 bit word. 1 = From address to end of 128B block. 2 = From address to end of 128B block plus 128B. 3 = From address to end of 128B block plus 256B. 4 = From address to end of 128B block plus 384B. For reads to the expansion this value is not used. */
#else
uint32_t prefetch : 3;
uint32_t min_data : 6;
uint32_t reserved_9_31 : 23;
#endif
} s;
struct cvmx_pci_read_cmd_6_s cn30xx;
struct cvmx_pci_read_cmd_6_s cn31xx;
struct cvmx_pci_read_cmd_6_s cn38xx;
struct cvmx_pci_read_cmd_6_s cn38xxp2;
struct cvmx_pci_read_cmd_6_s cn50xx;
struct cvmx_pci_read_cmd_6_s cn58xx;
struct cvmx_pci_read_cmd_6_s cn58xxp1;
} cvmx_pci_read_cmd_6_t;
/**
 * cvmx_pci_read_cmd_c
 *
 * PCI_READ_CMD_C = PCI Read Command C Register
 *
 * Contains control information related to a received PCI Command C.
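 *
 * For example, to request a 32-word threshold and prefetch to the end of the
 * current 128B block plus 128B (an illustrative sketch; pci_reg_write32() and
 * PCI_READ_CMD_C_ADDR are hypothetical host-side helpers, not defined here):
 *   cvmx_pci_read_cmd_c_t cmd;
 *   cmd.u32 = 0;
 *   cmd.s.min_data = 32;  // words buffered in the PNI before data is signaled
 *   cmd.s.prefetch = 2;   // rest of the 128B block plus 128B
 *   pci_reg_write32(PCI_READ_CMD_C_ADDR, cmd.u32);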
*/
typedef union { uint32_t u32; struct cvmx_pci_read_cmd_c_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t reserved_9_31 : 23;
uint32_t min_data : 6; /**< The number of words to have buffered in the PNI before informing the PCIX-Core that we have read data available for the outstanding Delayed read. 0 is treated as a 64. For reads to the expansion this value is not used. */
uint32_t prefetch : 3; /**< Control the amount of data to be prefetched when this type of READ command is received. 0 = 1 32/64 bit word. 1 = From address to end of 128B block. 2 = From address to end of 128B block plus 128B. 3 = From address to end of 128B block plus 256B. 4 = From address to end of 128B block plus 384B. For reads to the expansion this value is not used. */
#else
uint32_t prefetch : 3;
uint32_t min_data : 6;
uint32_t reserved_9_31 : 23;
#endif
} s;
struct cvmx_pci_read_cmd_c_s cn30xx;
struct cvmx_pci_read_cmd_c_s cn31xx;
struct cvmx_pci_read_cmd_c_s cn38xx;
struct cvmx_pci_read_cmd_c_s cn38xxp2;
struct cvmx_pci_read_cmd_c_s cn50xx;
struct cvmx_pci_read_cmd_c_s cn58xx;
struct cvmx_pci_read_cmd_c_s cn58xxp1;
} cvmx_pci_read_cmd_c_t;
/**
 * cvmx_pci_read_cmd_e
 *
 * PCI_READ_CMD_E = PCI Read Command E Register
 *
 * Contains control information related to a received PCI Command E.
 */
typedef union { uint32_t u32; struct cvmx_pci_read_cmd_e_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t reserved_9_31 : 23;
uint32_t min_data : 6; /**< The number of words to have buffered in the PNI before informing the PCIX-Core that we have read data available for the outstanding Delayed read. 0 is treated as a 64. For reads to the expansion this value is not used. */
uint32_t prefetch : 3; /**< Control the amount of data to be prefetched when this type of READ command is received. 0 = 1 32/64 bit word. 1 = From address to end of 128B block. 2 = From address to end of 128B block plus 128B. 3 = From address to end of 128B block plus 256B. 4 = From address to end of 128B block plus 384B. For reads to the expansion this value is not used. */
#else
uint32_t prefetch : 3;
uint32_t min_data : 6;
uint32_t reserved_9_31 : 23;
#endif
} s;
struct cvmx_pci_read_cmd_e_s cn30xx;
struct cvmx_pci_read_cmd_e_s cn31xx;
struct cvmx_pci_read_cmd_e_s cn38xx;
struct cvmx_pci_read_cmd_e_s cn38xxp2;
struct cvmx_pci_read_cmd_e_s cn50xx;
struct cvmx_pci_read_cmd_e_s cn58xx;
struct cvmx_pci_read_cmd_e_s cn58xxp1;
} cvmx_pci_read_cmd_e_t;
/**
 * cvmx_pci_read_timeout
 *
 * PCI_READ_TIMEOUT = PCI Read Timeout Register
 *
 * Enables and sets the timeout, in eclk cycles, for PCI read operations.
 */
typedef union { uint64_t u64; struct cvmx_pci_read_timeout_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint64_t reserved_32_63 : 32;
uint64_t enb : 1; /**< Enable the use of the Timeout function. */
uint64_t cnt : 31; /**< The number of eclk cycles to wait after issuing a read request to the PNI before setting a timeout and not expecting the data to return. This is considered a fatal condition by the NPI.
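Enabling the timeout might look like this on the host (an illustrative
sketch; pci_reg_write64() and PCI_READ_TIMEOUT_ADDR are hypothetical
helpers, not defined in this file):
  cvmx_pci_read_timeout_t to;
  to.u64 = 0;
  to.s.enb = 1;        // enable the timeout function
  to.s.cnt = 0x10000;  // example: eclk cycles to wait before declaring a timeout
  pci_reg_write64(PCI_READ_TIMEOUT_ADDR, to.u64);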
*/
#else
uint64_t cnt : 31;
uint64_t enb : 1;
uint64_t reserved_32_63 : 32;
#endif
} s;
struct cvmx_pci_read_timeout_s cn30xx;
struct cvmx_pci_read_timeout_s cn31xx;
struct cvmx_pci_read_timeout_s cn38xx;
struct cvmx_pci_read_timeout_s cn38xxp2;
struct cvmx_pci_read_timeout_s cn50xx;
struct cvmx_pci_read_timeout_s cn58xx;
struct cvmx_pci_read_timeout_s cn58xxp1;
} cvmx_pci_read_timeout_t;
/**
 * cvmx_pci_scm_reg
 *
 * PCI_SCM_REG = PCI Master Split Completion Message Register
 *
 * This register contains the Master Split Completion Message (SCM) generated when a master split
 * transaction is aborted.
 */
typedef union { uint64_t u64; struct cvmx_pci_scm_reg_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint64_t reserved_32_63 : 32;
uint64_t scm : 32; /**< Contains the Split Completion Message (SCM) driven when a master-split transaction is aborted. [31:28]: Message Class [27:20]: Message Index [19]: Reserved [18:12]: Remaining Lower Address [11:8]: Upper Remaining Byte Count [7:0]: Lower Remaining Byte Count Refer to the PCIX1.0a specification, Fig 2-40 for additional details for the split completion message format. */
#else
uint64_t scm : 32;
uint64_t reserved_32_63 : 32;
#endif
} s;
struct cvmx_pci_scm_reg_s cn30xx;
struct cvmx_pci_scm_reg_s cn31xx;
struct cvmx_pci_scm_reg_s cn38xx;
struct cvmx_pci_scm_reg_s cn38xxp2;
struct cvmx_pci_scm_reg_s cn50xx;
struct cvmx_pci_scm_reg_s cn58xx;
struct cvmx_pci_scm_reg_s cn58xxp1;
} cvmx_pci_scm_reg_t;
/**
 * cvmx_pci_tsr_reg
 *
 * PCI_TSR_REG = PCI Target Split Attribute Register
 *
 * This register contains the attribute field of the Split Completion Message (SCM) captured when a
 * target-split transaction is aborted.
 */
typedef union { uint64_t u64; struct cvmx_pci_tsr_reg_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint64_t reserved_36_63 : 28;
uint64_t tsr : 36; /**< Contains the Target Split Attribute field when a target-split transaction is aborted. [35:32]: Upper Byte Count [31]: BCM=Byte Count Modified [30]: SCE=Split Completion Error [29]: SCM=Split Completion Message [28:24]: RESERVED [23:16]: Completer Bus Number [15:11]: Completer Device Number [10:8]: Completer Function Number [7:0]: Lower Byte Count Refer to the PCIX1.0a specification, Fig 2-39 for additional details on the completer attribute bit assignments. */
#else
uint64_t tsr : 36;
uint64_t reserved_36_63 : 28;
#endif
} s;
struct cvmx_pci_tsr_reg_s cn30xx;
struct cvmx_pci_tsr_reg_s cn31xx;
struct cvmx_pci_tsr_reg_s cn38xx;
struct cvmx_pci_tsr_reg_s cn38xxp2;
struct cvmx_pci_tsr_reg_s cn50xx;
struct cvmx_pci_tsr_reg_s cn58xx;
struct cvmx_pci_tsr_reg_s cn58xxp1;
} cvmx_pci_tsr_reg_t;
/**
 * cvmx_pci_win_rd_addr
 *
 * PCI_WIN_RD_ADDR = PCI Window Read Address Register
 *
 * Writing the least-significant-byte of this register will cause a read operation to take place,
 * UNLESS a read operation is already taking place. A read is considered to end when the PCI_WIN_RD_DATA
 * register is read.
 */
typedef union { uint64_t u64; struct cvmx_pci_win_rd_addr_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint64_t reserved_49_63 : 15;
uint64_t iobit : 1; /**< A 1 or 0 can be written here but this will always read as '0'. */
uint64_t reserved_0_47 : 48;
#else
uint64_t reserved_0_47 : 48;
uint64_t iobit : 1;
uint64_t reserved_49_63 : 15;
#endif
} s;
struct cvmx_pci_win_rd_addr_cn30xx {
#if __BYTE_ORDER == __BIG_ENDIAN
uint64_t reserved_49_63 : 15;
uint64_t iobit : 1; /**< A 1 or 0 can be written here but this will always read as '0'. */
uint64_t rd_addr : 46; /**< The address to be read from.
Whenever the LSB of this register is written, the Read Operation will take place. [47:40] = NCB_ID [39:3] = Address When [47:43] == NPI & [42:0] == 0 bits [39:0] are: [39:32] == x, Not Used [31:27] == RSL_ID [12:2] == RSL Register Offset [1:0] == x, Not Used */
uint64_t reserved_0_1 : 2;
#else
uint64_t reserved_0_1 : 2;
uint64_t rd_addr : 46;
uint64_t iobit : 1;
uint64_t reserved_49_63 : 15;
#endif
} cn30xx;
struct cvmx_pci_win_rd_addr_cn30xx cn31xx;
struct cvmx_pci_win_rd_addr_cn38xx {
#if __BYTE_ORDER == __BIG_ENDIAN
uint64_t reserved_49_63 : 15;
uint64_t iobit : 1; /**< A 1 or 0 can be written here but this will always read as '0'. */
uint64_t rd_addr : 45; /**< The address to be read from. Whenever the LSB of this register is written, the Read Operation will take place. [47:40] = NCB_ID [39:3] = Address When [47:43] == NPI & [42:0] == 0 bits [39:0] are: [39:32] == x, Not Used [31:27] == RSL_ID [12:3] == RSL Register Offset [2:0] == x, Not Used */
uint64_t reserved_0_2 : 3;
#else
uint64_t reserved_0_2 : 3;
uint64_t rd_addr : 45;
uint64_t iobit : 1;
uint64_t reserved_49_63 : 15;
#endif
} cn38xx;
struct cvmx_pci_win_rd_addr_cn38xx cn38xxp2;
struct cvmx_pci_win_rd_addr_cn30xx cn50xx;
struct cvmx_pci_win_rd_addr_cn38xx cn58xx;
struct cvmx_pci_win_rd_addr_cn38xx cn58xxp1;
} cvmx_pci_win_rd_addr_t;
/**
 * cvmx_pci_win_rd_data
 *
 * PCI_WIN_RD_DATA = PCI Window Read Data Register
 *
 * Contains the result from the read operation that took place when the LSB of the PCI_WIN_RD_ADDR
 * register was written.
 */
typedef union { uint64_t u64; struct cvmx_pci_win_rd_data_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint64_t rd_data : 64; /**< The read data. */
#else
uint64_t rd_data : 64;
#endif
} s;
struct cvmx_pci_win_rd_data_s cn30xx;
struct cvmx_pci_win_rd_data_s cn31xx;
struct cvmx_pci_win_rd_data_s cn38xx;
struct cvmx_pci_win_rd_data_s cn38xxp2;
struct cvmx_pci_win_rd_data_s cn50xx;
struct cvmx_pci_win_rd_data_s cn58xx;
struct cvmx_pci_win_rd_data_s cn58xxp1;
} cvmx_pci_win_rd_data_t;
/**
 * cvmx_pci_win_wr_addr
 *
 * PCI_WIN_WR_ADDR = PCI Window Write Address Register
 *
 * Contains the address to be written to when a write operation is started by writing the
 * PCI_WIN_WR_DATA register (see below).
 */
typedef union { uint64_t u64; struct cvmx_pci_win_wr_addr_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint64_t reserved_49_63 : 15;
uint64_t iobit : 1; /**< A 1 or 0 can be written here but this will always read as '0'. */
uint64_t wr_addr : 45; /**< The address that will be written to when the PCI_WIN_WR_DATA register is written. [47:40] = NCB_ID [39:3] = Address When [47:43] == NPI & [42:0] == 0 bits [39:0] are: [39:32] == x, Not Used [31:27] == RSL_ID [12:3] == RSL Register Offset [2:0] == x, Not Used */
uint64_t reserved_0_2 : 3;
#else
uint64_t reserved_0_2 : 3;
uint64_t wr_addr : 45;
uint64_t iobit : 1;
uint64_t reserved_49_63 : 15;
#endif
} s;
struct cvmx_pci_win_wr_addr_s cn30xx;
struct cvmx_pci_win_wr_addr_s cn31xx;
struct cvmx_pci_win_wr_addr_s cn38xx;
struct cvmx_pci_win_wr_addr_s cn38xxp2;
struct cvmx_pci_win_wr_addr_s cn50xx;
struct cvmx_pci_win_wr_addr_s cn58xx;
struct cvmx_pci_win_wr_addr_s cn58xxp1;
} cvmx_pci_win_wr_addr_t;
/**
 * cvmx_pci_win_wr_data
 *
 * PCI_WIN_WR_DATA = PCI Window Write Data Register
 *
 * Contains the data to write to the address located in the PCI_WIN_WR_ADDR Register.
 * Writing the least-significant-byte of this register will cause a write operation to take place.
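 *
 * A complete window write might therefore look like this from the host (an
 * illustrative sketch; pci_reg_write64() and the *_ADDR constants are
 * hypothetical helpers, not defined in this file):
 *   pci_reg_write64(PCI_WIN_WR_ADDR_ADDR, target);  // latch destination address
 *   pci_reg_write64(PCI_WIN_WR_MASK_ADDR, 0x0);     // write all eight bytes
 *   pci_reg_write64(PCI_WIN_WR_DATA_ADDR, value);   // LSB write starts the operation
 * The read flow is symmetric: write PCI_WIN_RD_ADDR, then read PCI_WIN_RD_DATA
 * to complete the operation.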
*/
typedef union { uint64_t u64; struct cvmx_pci_win_wr_data_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint64_t wr_data : 64; /**< The data to be written. Whenever the LSB of this register is written, the Window Write will take place. */
#else
uint64_t wr_data : 64;
#endif
} s;
struct cvmx_pci_win_wr_data_s cn30xx;
struct cvmx_pci_win_wr_data_s cn31xx;
struct cvmx_pci_win_wr_data_s cn38xx;
struct cvmx_pci_win_wr_data_s cn38xxp2;
struct cvmx_pci_win_wr_data_s cn50xx;
struct cvmx_pci_win_wr_data_s cn58xx;
struct cvmx_pci_win_wr_data_s cn58xxp1;
} cvmx_pci_win_wr_data_t;
/**
 * cvmx_pci_win_wr_mask
 *
 * PCI_WIN_WR_MASK = PCI Window Write Mask Register
 *
 * Contains the mask for the data in the PCI_WIN_WR_DATA Register.
 */
typedef union { uint64_t u64; struct cvmx_pci_win_wr_mask_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint64_t reserved_8_63 : 56;
uint64_t wr_mask : 8; /**< The write mask. When a bit is set to '1' the corresponding byte will not be written. */
#else
uint64_t wr_mask : 8;
uint64_t reserved_8_63 : 56;
#endif
} s;
struct cvmx_pci_win_wr_mask_s cn30xx;
struct cvmx_pci_win_wr_mask_s cn31xx;
struct cvmx_pci_win_wr_mask_s cn38xx;
struct cvmx_pci_win_wr_mask_s cn38xxp2;
struct cvmx_pci_win_wr_mask_s cn50xx;
struct cvmx_pci_win_wr_mask_s cn58xx;
struct cvmx_pci_win_wr_mask_s cn58xxp1;
} cvmx_pci_win_wr_mask_t;
/**
 * cvmx_pcieep_cfg000
 *
 * PCIE_CFG000 = First 32-bits of PCIE type 0 config space (Device ID and Vendor ID Register)
 *
 */
typedef union { uint32_t u32; struct cvmx_pcieep_cfg000_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t devid : 16; /**< Device ID, writable through the DBI However, the application must not change this field. For EEPROM loads also see VENDID of this register. */
uint32_t vendid : 16; /**< Vendor ID, writable through the DBI However, the application must not change this field. During an EEPROM load, if a value of 0xFFFF is loaded to this field and a value of 0xFFFF is loaded to the DEVID field of this register, the value will not be loaded, the EEPROM load will stop, and the FastLinkEnable bit will be set in the PCIE_CFG452 register. */
#else
uint32_t vendid : 16;
uint32_t devid : 16;
#endif
} s;
struct cvmx_pcieep_cfg000_s cn52xx;
struct cvmx_pcieep_cfg000_s cn52xxp1;
struct cvmx_pcieep_cfg000_s cn56xx;
struct cvmx_pcieep_cfg000_s cn56xxp1;
} cvmx_pcieep_cfg000_t;
/**
 * cvmx_pcieep_cfg001
 *
 * PCIE_CFG001 = Second 32-bits of PCIE type 0 config space (Command/Status Register)
 *
 */
typedef union { uint32_t u32; struct cvmx_pcieep_cfg001_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t dpe : 1; /**< Detected Parity Error */
uint32_t sse : 1; /**< Signaled System Error */
uint32_t rma : 1; /**< Received Master Abort */
uint32_t rta : 1; /**< Received Target Abort */
uint32_t sta : 1; /**< Signaled Target Abort */
uint32_t devt : 2; /**< DEVSEL Timing Not applicable for PCI Express. Hardwired to 0. */
uint32_t mdpe : 1; /**< Master Data Parity Error */
uint32_t fbb : 1; /**< Fast Back-to-Back Capable Not applicable for PCI Express. Hardwired to 0. */
uint32_t reserved_22_22 : 1;
uint32_t m66 : 1; /**< 66 MHz Capable Not applicable for PCI Express. Hardwired to 0. */
uint32_t cl : 1; /**< Capabilities List Indicates presence of an extended capability item. Hardwired to 1. */
uint32_t i_stat : 1; /**< INTx Status */
uint32_t reserved_11_18 : 8;
uint32_t i_dis : 1; /**< INTx Assertion Disable */
uint32_t fbbe : 1; /**< Fast Back-to-Back Enable Not applicable for PCI Express. Must be hardwired to 0.
*/ uint32_t see : 1; /**< SERR# Enable */ uint32_t ids_wcc : 1; /**< IDSEL Stepping/Wait Cycle Control Not applicable for PCI Express. Must be hardwired to 0 */ uint32_t per : 1; /**< Parity Error Response */ uint32_t vps : 1; /**< VGA Palette Snoop Not applicable for PCI Express. Must be hardwired to 0. */ uint32_t mwice : 1; /**< Memory Write and Invalidate Not applicable for PCI Express. Must be hardwired to 0. */ uint32_t scse : 1; /**< Special Cycle Enable Not applicable for PCI Express. Must be hardwired to 0. */ uint32_t me : 1; /**< Bus Master Enable */ uint32_t msae : 1; /**< Memory Space Enable */ uint32_t isae : 1; /**< I/O Space Enable */ #else uint32_t isae : 1; uint32_t msae : 1; uint32_t me : 1; uint32_t scse : 1; uint32_t mwice : 1; uint32_t vps : 1; uint32_t per : 1; uint32_t ids_wcc : 1; uint32_t see : 1; uint32_t fbbe : 1; uint32_t i_dis : 1; uint32_t reserved_11_18 : 8; uint32_t i_stat : 1; uint32_t cl : 1; uint32_t m66 : 1; uint32_t reserved_22_22 : 1; uint32_t fbb : 1; uint32_t mdpe : 1; uint32_t devt : 2; uint32_t sta : 1; uint32_t rta : 1; uint32_t rma : 1; uint32_t sse : 1; uint32_t dpe : 1; #endif } s; struct cvmx_pcieep_cfg001_s cn52xx; struct cvmx_pcieep_cfg001_s cn52xxp1; struct cvmx_pcieep_cfg001_s cn56xx; struct cvmx_pcieep_cfg001_s cn56xxp1; } cvmx_pcieep_cfg001_t; /** * cvmx_pcieep_cfg002 * * PCIE_CFG002 = Third 32-bits of PCIE type 0 config space (Revision ID/Class Code Register) * */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg002_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t bcc : 8; /**< Base Class Code, writable through the DBI However, the application must not change this field. */ uint32_t sc : 8; /**< Subclass Code, writable through the DBI However, the application must not change this field. */ uint32_t pi : 8; /**< Programming Interface, writable through the DBI However, the application must not change this field. */ uint32_t rid : 8; /**< Revision ID, writable through the DBI However, the application must not change this field. */ #else uint32_t rid : 8; uint32_t pi : 8; uint32_t sc : 8; uint32_t bcc : 8; #endif } s; struct cvmx_pcieep_cfg002_s cn52xx; struct cvmx_pcieep_cfg002_s cn52xxp1; struct cvmx_pcieep_cfg002_s cn56xx; struct cvmx_pcieep_cfg002_s cn56xxp1; } cvmx_pcieep_cfg002_t; /** * cvmx_pcieep_cfg003 * * PCIE_CFG003 = Fourth 32-bits of PCIE type 0 config space (Cache Line Size/Master Latency Timer/Header Type Register/BIST Register) * */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg003_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t bist : 8; /**< The BIST register functions are not supported. All 8 bits of the BIST register are hardwired to 0. */ uint32_t mfd : 1; /**< Multi Function Device The Multi Function Device bit is writable through the DBI. However, this is a single function device. Therefore, the application must not write a 1 to this bit. */ uint32_t chf : 7; /**< Configuration Header Format Hardwired to 0 for type 0. */ uint32_t lt : 8; /**< Master Latency Timer Not applicable for PCI Express, hardwired to 0. */ uint32_t cls : 8; /**< Cache Line Size The Cache Line Size register is RW for legacy compatibility purposes and is not applicable to PCI Express device functionality. Writing to the Cache Line Size register does not impact functionality. 
*/ #else uint32_t cls : 8; uint32_t lt : 8; uint32_t chf : 7; uint32_t mfd : 1; uint32_t bist : 8; #endif } s; struct cvmx_pcieep_cfg003_s cn52xx; struct cvmx_pcieep_cfg003_s cn52xxp1; struct cvmx_pcieep_cfg003_s cn56xx; struct cvmx_pcieep_cfg003_s cn56xxp1; } cvmx_pcieep_cfg003_t; /** * cvmx_pcieep_cfg004 * * PCIE_CFG004 = Fifth 32-bits of PCIE type 0 config space (Base Address Register 0 - Low) * */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg004_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t lbab : 18; /**< Lower bits of the BAR 0 base address */ uint32_t reserved_4_13 : 10; uint32_t pf : 1; /**< Prefetchable This field is writable through the DBI. However, the application must not change this field. */ uint32_t typ : 2; /**< BAR type o 00 = 32-bit BAR o 10 = 64-bit BAR This field is writable through the DBI. However, the application must not change this field. */ uint32_t mspc : 1; /**< Memory Space Indicator o 0 = BAR 0 is a memory BAR o 1 = BAR 0 is an I/O BAR This field is writable through the DBI. However, the application must not change this field. */ #else uint32_t mspc : 1; uint32_t typ : 2; uint32_t pf : 1; uint32_t reserved_4_13 : 10; uint32_t lbab : 18; #endif } s; struct cvmx_pcieep_cfg004_s cn52xx; struct cvmx_pcieep_cfg004_s cn52xxp1; struct cvmx_pcieep_cfg004_s cn56xx; struct cvmx_pcieep_cfg004_s cn56xxp1; } cvmx_pcieep_cfg004_t; /** * cvmx_pcieep_cfg004_mask * * PCIE_CFG004_MASK (BAR Mask 0 - Low) * The BAR 0 Mask register is invisible to host software and not readable from the application. * The BAR 0 Mask register is only writable through the DBI. */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg004_mask_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t lmask : 31; /**< Bar Mask Low */ uint32_t enb : 1; /**< Bar Enable o 0: BAR 0 is disabled o 1: BAR 0 is enabled Bit 0 is interpreted as BAR Enable when writing to the BAR Mask register rather than as a mask bit because bit 0 of a BAR is always masked from writing by host software. Bit 0 must be written prior to writing the other mask bits. */ #else uint32_t enb : 1; uint32_t lmask : 31; #endif } s; struct cvmx_pcieep_cfg004_mask_s cn52xx; struct cvmx_pcieep_cfg004_mask_s cn52xxp1; struct cvmx_pcieep_cfg004_mask_s cn56xx; struct cvmx_pcieep_cfg004_mask_s cn56xxp1; } cvmx_pcieep_cfg004_mask_t; /** * cvmx_pcieep_cfg005 * * PCIE_CFG005 = Sixth 32-bits of PCIE type 0 config space (Base Address Register 0 - High) * */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg005_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t ubab : 32; /**< Contains the upper 32 bits of the BAR 0 base address. */ #else uint32_t ubab : 32; #endif } s; struct cvmx_pcieep_cfg005_s cn52xx; struct cvmx_pcieep_cfg005_s cn52xxp1; struct cvmx_pcieep_cfg005_s cn56xx; struct cvmx_pcieep_cfg005_s cn56xxp1; } cvmx_pcieep_cfg005_t; /** * cvmx_pcieep_cfg005_mask * * PCIE_CFG005_MASK = (BAR Mask 0 - High) * The BAR 0 Mask register is invisible to host software and not readable from the application. * The BAR 0 Mask register is only writable through the DBI. 
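 *
 * A sketch of programming the BAR 0 mask through the DBI (illustrative only;
 * dbi_write32() and the PCIE_CFG00*_MASK_OFF offsets are hypothetical helpers,
 * and the mask value is just an example):
 *   cvmx_pcieep_cfg004_mask_t lo;
 *   lo.u32 = 0;
 *   lo.s.enb = 1;                          // bit 0 (BAR Enable) written first
 *   dbi_write32(PCIE_CFG004_MASK_OFF, lo.u32);
 *   lo.s.lmask = 0xFFFFF;                  // example low mask bits
 *   dbi_write32(PCIE_CFG004_MASK_OFF, lo.u32);
 *   dbi_write32(PCIE_CFG005_MASK_OFF, 0);  // upper mask bits (this register)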
*/
typedef union { uint32_t u32; struct cvmx_pcieep_cfg005_mask_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t umask : 32; /**< Bar Mask High */
#else
uint32_t umask : 32;
#endif
} s;
struct cvmx_pcieep_cfg005_mask_s cn52xx;
struct cvmx_pcieep_cfg005_mask_s cn52xxp1;
struct cvmx_pcieep_cfg005_mask_s cn56xx;
struct cvmx_pcieep_cfg005_mask_s cn56xxp1;
} cvmx_pcieep_cfg005_mask_t;
/**
 * cvmx_pcieep_cfg006
 *
 * PCIE_CFG006 = Seventh 32-bits of PCIE type 0 config space (Base Address Register 1 - Low)
 *
 */
typedef union { uint32_t u32; struct cvmx_pcieep_cfg006_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t lbab : 6; /**< Lower bits of the BAR 1 base address */
uint32_t reserved_4_25 : 22;
uint32_t pf : 1; /**< Prefetchable This field is writable through the DBI. However, the application must not change this field. */
uint32_t typ : 2; /**< BAR type o 00 = 32-bit BAR o 10 = 64-bit BAR This field is writable through the DBI. However, the application must not change this field. */
uint32_t mspc : 1; /**< Memory Space Indicator o 0 = BAR 1 is a memory BAR o 1 = BAR 1 is an I/O BAR This field is writable through the DBI. However, the application must not change this field. */
#else
uint32_t mspc : 1;
uint32_t typ : 2;
uint32_t pf : 1;
uint32_t reserved_4_25 : 22;
uint32_t lbab : 6;
#endif
} s;
struct cvmx_pcieep_cfg006_s cn52xx;
struct cvmx_pcieep_cfg006_s cn52xxp1;
struct cvmx_pcieep_cfg006_s cn56xx;
struct cvmx_pcieep_cfg006_s cn56xxp1;
} cvmx_pcieep_cfg006_t;
/**
 * cvmx_pcieep_cfg006_mask
 *
 * PCIE_CFG006_MASK (BAR Mask 1 - Low)
 * The BAR 1 Mask register is invisible to host software and not readable from the application.
 * The BAR 1 Mask register is only writable through the DBI.
 */
typedef union { uint32_t u32; struct cvmx_pcieep_cfg006_mask_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t lmask : 31; /**< Bar Mask Low */
uint32_t enb : 1; /**< Bar Enable o 0: BAR 1 is disabled o 1: BAR 1 is enabled Bit 0 is interpreted as BAR Enable when writing to the BAR Mask register rather than as a mask bit because bit 0 of a BAR is always masked from writing by host software. Bit 0 must be written prior to writing the other mask bits. */
#else
uint32_t enb : 1;
uint32_t lmask : 31;
#endif
} s;
struct cvmx_pcieep_cfg006_mask_s cn52xx;
struct cvmx_pcieep_cfg006_mask_s cn52xxp1;
struct cvmx_pcieep_cfg006_mask_s cn56xx;
struct cvmx_pcieep_cfg006_mask_s cn56xxp1;
} cvmx_pcieep_cfg006_mask_t;
/**
 * cvmx_pcieep_cfg007
 *
 * PCIE_CFG007 = Eighth 32-bits of PCIE type 0 config space (Base Address Register 1 - High)
 *
 */
typedef union { uint32_t u32; struct cvmx_pcieep_cfg007_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t ubab : 32; /**< Contains the upper 32 bits of the BAR 1 base address. */
#else
uint32_t ubab : 32;
#endif
} s;
struct cvmx_pcieep_cfg007_s cn52xx;
struct cvmx_pcieep_cfg007_s cn52xxp1;
struct cvmx_pcieep_cfg007_s cn56xx;
struct cvmx_pcieep_cfg007_s cn56xxp1;
} cvmx_pcieep_cfg007_t;
/**
 * cvmx_pcieep_cfg007_mask
 *
 * PCIE_CFG007_MASK (BAR Mask 1 - High)
 * The BAR 1 Mask register is invisible to host software and not readable from the application.
 * The BAR 1 Mask register is only writable through the DBI.
*/
typedef union { uint32_t u32; struct cvmx_pcieep_cfg007_mask_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t umask : 32; /**< Bar Mask High */
#else
uint32_t umask : 32;
#endif
} s;
struct cvmx_pcieep_cfg007_mask_s cn52xx;
struct cvmx_pcieep_cfg007_mask_s cn52xxp1;
struct cvmx_pcieep_cfg007_mask_s cn56xx;
struct cvmx_pcieep_cfg007_mask_s cn56xxp1;
} cvmx_pcieep_cfg007_mask_t;
/**
 * cvmx_pcieep_cfg008
 *
 * PCIE_CFG008 = Ninth 32-bits of PCIE type 0 config space (Base Address Register 2 - Low)
 *
 */
typedef union { uint32_t u32; struct cvmx_pcieep_cfg008_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t reserved_4_31 : 28;
uint32_t pf : 1; /**< Prefetchable This field is writable through the DBI. However, the application must not change this field. */
uint32_t typ : 2; /**< BAR type o 00 = 32-bit BAR o 10 = 64-bit BAR This field is writable through the DBI. However, the application must not change this field. */
uint32_t mspc : 1; /**< Memory Space Indicator o 0 = BAR 2 is a memory BAR o 1 = BAR 2 is an I/O BAR This field is writable through the DBI. However, the application must not change this field. */
#else
uint32_t mspc : 1;
uint32_t typ : 2;
uint32_t pf : 1;
uint32_t reserved_4_31 : 28;
#endif
} s;
struct cvmx_pcieep_cfg008_s cn52xx;
struct cvmx_pcieep_cfg008_s cn52xxp1;
struct cvmx_pcieep_cfg008_s cn56xx;
struct cvmx_pcieep_cfg008_s cn56xxp1;
} cvmx_pcieep_cfg008_t;
/**
 * cvmx_pcieep_cfg008_mask
 *
 * PCIE_CFG008_MASK (BAR Mask 2 - Low)
 * The BAR 2 Mask register is invisible to host software and not readable from the application.
 * The BAR 2 Mask register is only writable through the DBI.
 */
typedef union { uint32_t u32; struct cvmx_pcieep_cfg008_mask_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t lmask : 31; /**< Bar Mask Low */
uint32_t enb : 1; /**< Bar Enable o 0: BAR 2 is disabled o 1: BAR 2 is enabled Bit 0 is interpreted as BAR Enable when writing to the BAR Mask register rather than as a mask bit because bit 0 of a BAR is always masked from writing by host software. Bit 0 must be written prior to writing the other mask bits. */
#else
uint32_t enb : 1;
uint32_t lmask : 31;
#endif
} s;
struct cvmx_pcieep_cfg008_mask_s cn52xx;
struct cvmx_pcieep_cfg008_mask_s cn52xxp1;
struct cvmx_pcieep_cfg008_mask_s cn56xx;
struct cvmx_pcieep_cfg008_mask_s cn56xxp1;
} cvmx_pcieep_cfg008_mask_t;
/**
 * cvmx_pcieep_cfg009
 *
 * PCIE_CFG009 = Tenth 32-bits of PCIE type 0 config space (Base Address Register 2 - High)
 *
 */
typedef union { uint32_t u32; struct cvmx_pcieep_cfg009_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t ubab : 25; /**< Contains the upper 25 bits of the BAR 2 base address. */
uint32_t reserved_0_6 : 7;
#else
uint32_t reserved_0_6 : 7;
uint32_t ubab : 25;
#endif
} s;
struct cvmx_pcieep_cfg009_s cn52xx;
struct cvmx_pcieep_cfg009_s cn52xxp1;
struct cvmx_pcieep_cfg009_s cn56xx;
struct cvmx_pcieep_cfg009_s cn56xxp1;
} cvmx_pcieep_cfg009_t;
/**
 * cvmx_pcieep_cfg009_mask
 *
 * PCIE_CFG009_MASK (BAR Mask 2 - High)
 * The BAR 2 Mask register is invisible to host software and not readable from the application.
 * The BAR 2 Mask register is only writable through the DBI.
*/
typedef union { uint32_t u32; struct cvmx_pcieep_cfg009_mask_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t umask : 32; /**< Bar Mask High */
#else
uint32_t umask : 32;
#endif
} s;
struct cvmx_pcieep_cfg009_mask_s cn52xx;
struct cvmx_pcieep_cfg009_mask_s cn52xxp1;
struct cvmx_pcieep_cfg009_mask_s cn56xx;
struct cvmx_pcieep_cfg009_mask_s cn56xxp1;
} cvmx_pcieep_cfg009_mask_t;
/**
 * cvmx_pcieep_cfg010
 *
 * PCIE_CFG010 = Eleventh 32-bits of PCIE type 0 config space (CardBus CIS Pointer Register)
 *
 */
typedef union { uint32_t u32; struct cvmx_pcieep_cfg010_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t cisp : 32; /**< CardBus CIS Pointer Optional, writable through the DBI. */
#else
uint32_t cisp : 32;
#endif
} s;
struct cvmx_pcieep_cfg010_s cn52xx;
struct cvmx_pcieep_cfg010_s cn52xxp1;
struct cvmx_pcieep_cfg010_s cn56xx;
struct cvmx_pcieep_cfg010_s cn56xxp1;
} cvmx_pcieep_cfg010_t;
/**
 * cvmx_pcieep_cfg011
 *
 * PCIE_CFG011 = Twelfth 32-bits of PCIE type 0 config space (Subsystem ID and Subsystem Vendor ID Register)
 *
 */
typedef union { uint32_t u32; struct cvmx_pcieep_cfg011_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t ssid : 16; /**< Subsystem ID Assigned by PCI-SIG, writable through the DBI. However, the application must not change this field. */
uint32_t ssvid : 16; /**< Subsystem Vendor ID Assigned by PCI-SIG, writable through the DBI. However, the application must not change this field. */
#else
uint32_t ssvid : 16;
uint32_t ssid : 16;
#endif
} s;
struct cvmx_pcieep_cfg011_s cn52xx;
struct cvmx_pcieep_cfg011_s cn52xxp1;
struct cvmx_pcieep_cfg011_s cn56xx;
struct cvmx_pcieep_cfg011_s cn56xxp1;
} cvmx_pcieep_cfg011_t;
/**
 * cvmx_pcieep_cfg012
 *
 * PCIE_CFG012 = Thirteenth 32-bits of PCIE type 0 config space (Expansion ROM Base Address Register)
 *
 */
typedef union { uint32_t u32; struct cvmx_pcieep_cfg012_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t eraddr : 16; /**< Expansion ROM Address */
uint32_t reserved_1_15 : 15;
uint32_t er_en : 1; /**< Expansion ROM Enable */
#else
uint32_t er_en : 1;
uint32_t reserved_1_15 : 15;
uint32_t eraddr : 16;
#endif
} s;
struct cvmx_pcieep_cfg012_s cn52xx;
struct cvmx_pcieep_cfg012_s cn52xxp1;
struct cvmx_pcieep_cfg012_s cn56xx;
struct cvmx_pcieep_cfg012_s cn56xxp1;
} cvmx_pcieep_cfg012_t;
/**
 * cvmx_pcieep_cfg012_mask
 *
 * PCIE_CFG012_MASK (Expansion ROM BAR Mask)
 * The ROM Mask register is invisible to host software and not readable from the application.
 * The ROM Mask register is only writable through the DBI.
 */
typedef union { uint32_t u32; struct cvmx_pcieep_cfg012_mask_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t mask : 31; /**< Bar Mask Low */
uint32_t enb : 1; /**< Bar Enable o 0: BAR ROM is disabled o 1: BAR ROM is enabled Bit 0 is interpreted as BAR Enable when writing to the BAR Mask register rather than as a mask bit because bit 0 of a BAR is always masked from writing by host software. Bit 0 must be written prior to writing the other mask bits. */
#else
uint32_t enb : 1;
uint32_t mask : 31;
#endif
} s;
struct cvmx_pcieep_cfg012_mask_s cn52xx;
struct cvmx_pcieep_cfg012_mask_s cn52xxp1;
struct cvmx_pcieep_cfg012_mask_s cn56xx;
struct cvmx_pcieep_cfg012_mask_s cn56xxp1;
} cvmx_pcieep_cfg012_mask_t;
/**
 * cvmx_pcieep_cfg013
 *
 * PCIE_CFG013 = Fourteenth 32-bits of PCIE type 0 config space (Capability Pointer Register)
 *
 */
typedef union { uint32_t u32; struct cvmx_pcieep_cfg013_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t reserved_8_31 : 24;
uint32_t cp : 8; /**< First Capability Pointer.
Points to Power Management Capability structure by default, writable through the DBI. However, the application must not change this field. */ #else uint32_t cp : 8; uint32_t reserved_8_31 : 24; #endif } s; struct cvmx_pcieep_cfg013_s cn52xx; struct cvmx_pcieep_cfg013_s cn52xxp1; struct cvmx_pcieep_cfg013_s cn56xx; struct cvmx_pcieep_cfg013_s cn56xxp1; } cvmx_pcieep_cfg013_t; /** * cvmx_pcieep_cfg015 * * PCIE_CFG015 = Sixteenth 32-bits of PCIE type 0 config space (Interrupt Line Register/Interrupt Pin/Bridge Control Register) * */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg015_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t ml : 8; /**< Maximum Latency (Hardwired to 0) */ uint32_t mg : 8; /**< Minimum Grant (Hardwired to 0) */ uint32_t inta : 8; /**< Interrupt Pin Identifies the legacy interrupt Message that the device (or device function) uses. The Interrupt Pin register is writable through the DBI. In a single-function configuration, only INTA is used. Therefore, the application must not change this field. */ uint32_t il : 8; /**< Interrupt Line */ #else uint32_t il : 8; uint32_t inta : 8; uint32_t mg : 8; uint32_t ml : 8; #endif } s; struct cvmx_pcieep_cfg015_s cn52xx; struct cvmx_pcieep_cfg015_s cn52xxp1; struct cvmx_pcieep_cfg015_s cn56xx; struct cvmx_pcieep_cfg015_s cn56xxp1; } cvmx_pcieep_cfg015_t; /** * cvmx_pcieep_cfg016 * * PCIE_CFG016 = Seventeenth 32-bits of PCIE type 0 config space * (Power Management Capability ID/ * Power Management Next Item Pointer/ * Power Management Capabilities Register) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg016_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t pmes : 5; /**< PME_Support o Bit 11: If set, PME Messages can be generated from D0 o Bit 12: If set, PME Messages can be generated from D1 o Bit 13: If set, PME Messages can be generated from D2 o Bit 14: If set, PME Messages can be generated from D3hot o Bit 15: If set, PME Messages can be generated from D3cold The PME_Support field is writable through the DBI. However, the application must not change this field. */ uint32_t d2s : 1; /**< D2 Support, writable through the DBI However, the application must not change this field. */ uint32_t d1s : 1; /**< D1 Support, writable through the DBI However, the application must not change this field. */ uint32_t auxc : 3; /**< AUX Current, writable through the DBI However, the application must not change this field. */ uint32_t dsi : 1; /**< Device Specific Initialization (DSI), writable through the DBI However, the application must not change this field. */ uint32_t reserved_20_20 : 1; uint32_t pme_clock : 1; /**< PME Clock, hardwired to 0 */ uint32_t pmsv : 3; /**< Power Management Specification Version, writable through the DBI However, the application must not change this field. */ uint32_t ncp : 8; /**< Next Capability Pointer Points to the MSI capabilities by default, writable through the DBI. However, the application must not change this field. 
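The CP/NCP bytes form the standard PCI capability chain, which software
can walk as in this sketch (cfg_read32() is a hypothetical config-space
read helper; 0x34 is the standard Capability Pointer offset, i.e.
PCIE_CFG013):
  uint8_t ptr = cfg_read32(0x34) & 0xFF;  // PCIE_CFG013[CP]
  while (ptr != 0) {
      uint32_t hdr = cfg_read32(ptr);
      uint8_t cap_id = hdr & 0xFF;        // e.g. 0x01 PM, 0x05 MSI, 0x10 PCIe
      ptr = (hdr >> 8) & 0xFF;            // next capability pointer
  }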
*/ uint32_t pmcid : 8; /**< Power Management Capability ID */ #else uint32_t pmcid : 8; uint32_t ncp : 8; uint32_t pmsv : 3; uint32_t pme_clock : 1; uint32_t reserved_20_20 : 1; uint32_t dsi : 1; uint32_t auxc : 3; uint32_t d1s : 1; uint32_t d2s : 1; uint32_t pmes : 5; #endif } s; struct cvmx_pcieep_cfg016_s cn52xx; struct cvmx_pcieep_cfg016_s cn52xxp1; struct cvmx_pcieep_cfg016_s cn56xx; struct cvmx_pcieep_cfg016_s cn56xxp1; } cvmx_pcieep_cfg016_t; /** * cvmx_pcieep_cfg017 * * PCIE_CFG017 = Eighteenth 32-bits of PCIE type 0 config space (Power Management Control and Status Register) * */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg017_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t pmdia : 8; /**< Data register for additional information (not supported) */ uint32_t bpccee : 1; /**< Bus Power/Clock Control Enable, hardwired to 0 */ uint32_t bd3h : 1; /**< B2/B3 Support, hardwired to 0 */ uint32_t reserved_16_21 : 6; uint32_t pmess : 1; /**< PME Status Indicates if a previously enabled PME event occurred or not. */ uint32_t pmedsia : 2; /**< Data Scale (not supported) */ uint32_t pmds : 4; /**< Data Select (not supported) */ uint32_t pmeens : 1; /**< PME Enable A value of 1 indicates that the device is enabled to generate PME. */ uint32_t reserved_4_7 : 4; uint32_t nsr : 1; /**< No Soft Reset, writable through the DBI However, the application must not change this field. */ uint32_t reserved_2_2 : 1; uint32_t ps : 2; /**< Power State Controls the device power state: o 00b: D0 o 01b: D1 o 10b: D2 o 11b: D3 The written value is ignored if the specific state is not supported. */ #else uint32_t ps : 2; uint32_t reserved_2_2 : 1; uint32_t nsr : 1; uint32_t reserved_4_7 : 4; uint32_t pmeens : 1; uint32_t pmds : 4; uint32_t pmedsia : 2; uint32_t pmess : 1; uint32_t reserved_16_21 : 6; uint32_t bd3h : 1; uint32_t bpccee : 1; uint32_t pmdia : 8; #endif } s; struct cvmx_pcieep_cfg017_s cn52xx; struct cvmx_pcieep_cfg017_s cn52xxp1; struct cvmx_pcieep_cfg017_s cn56xx; struct cvmx_pcieep_cfg017_s cn56xxp1; } cvmx_pcieep_cfg017_t; /** * cvmx_pcieep_cfg020 * * PCIE_CFG020 = Twenty-first 32-bits of PCIE type 0 config space * (MSI Capability ID/ * MSI Next Item Pointer/ * MSI Control Register) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg020_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_24_31 : 8; uint32_t m64 : 1; /**< 64-bit Address Capable, writable through the DBI However, the application must not change this field. */ uint32_t mme : 3; /**< Multiple Message Enabled Indicates that multiple Message mode is enabled by system software. The number of Messages enabled must be less than or equal to the Multiple Message Capable value. */ uint32_t mmc : 3; /**< Multiple Message Capable, writable through the DBI However, the application must not change this field. */ uint32_t msien : 1; /**< MSI Enabled When set, INTx must be disabled. */ uint32_t ncp : 8; /**< Next Capability Pointer Points to PCI Express Capabilities by default, writable through the DBI. However, the application must not change this field. 
*/ uint32_t msicid : 8; /**< MSI Capability ID */ #else uint32_t msicid : 8; uint32_t ncp : 8; uint32_t msien : 1; uint32_t mmc : 3; uint32_t mme : 3; uint32_t m64 : 1; uint32_t reserved_24_31 : 8; #endif } s; struct cvmx_pcieep_cfg020_s cn52xx; struct cvmx_pcieep_cfg020_s cn52xxp1; struct cvmx_pcieep_cfg020_s cn56xx; struct cvmx_pcieep_cfg020_s cn56xxp1; } cvmx_pcieep_cfg020_t; /** * cvmx_pcieep_cfg021 * * PCIE_CFG021 = Twenty-second 32-bits of PCIE type 0 config space (MSI Lower 32 Bits Address Register) * */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg021_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t lmsi : 30; /**< Lower 32-bit Address */ uint32_t reserved_0_1 : 2; #else uint32_t reserved_0_1 : 2; uint32_t lmsi : 30; #endif } s; struct cvmx_pcieep_cfg021_s cn52xx; struct cvmx_pcieep_cfg021_s cn52xxp1; struct cvmx_pcieep_cfg021_s cn56xx; struct cvmx_pcieep_cfg021_s cn56xxp1; } cvmx_pcieep_cfg021_t; /** * cvmx_pcieep_cfg022 * * PCIE_CFG022 = Twenty-third 32-bits of PCIE type 0 config space (MSI Upper 32 bits Address Register) * */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg022_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t umsi : 32; /**< Upper 32-bit Address */ #else uint32_t umsi : 32; #endif } s; struct cvmx_pcieep_cfg022_s cn52xx; struct cvmx_pcieep_cfg022_s cn52xxp1; struct cvmx_pcieep_cfg022_s cn56xx; struct cvmx_pcieep_cfg022_s cn56xxp1; } cvmx_pcieep_cfg022_t; /** * cvmx_pcieep_cfg023 * * PCIE_CFG023 = Twenty-fourth 32-bits of PCIE type 0 config space (MSI Data Register) * */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg023_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_16_31 : 16; uint32_t msimd : 16; /**< MSI Data Pattern assigned by system software, bits [4:0] are Or-ed with MSI_VECTOR to generate 32 MSI Messages per function. */ #else uint32_t msimd : 16; uint32_t reserved_16_31 : 16; #endif } s; struct cvmx_pcieep_cfg023_s cn52xx; struct cvmx_pcieep_cfg023_s cn52xxp1; struct cvmx_pcieep_cfg023_s cn56xx; struct cvmx_pcieep_cfg023_s cn56xxp1; } cvmx_pcieep_cfg023_t; /** * cvmx_pcieep_cfg028 * * PCIE_CFG028 = Twenty-ninth 32-bits of PCIE type 0 config space * (PCI Express Capabilities List Register/ * PCI Express Capabilities Register) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg028_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_30_31 : 2; uint32_t imn : 5; /**< Interrupt Message Number Updated by hardware, writable through the DBI. However, the application must not change this field. */ uint32_t si : 1; /**< Slot Implemented This bit is writable through the DBI. However, it must be 0 for an Endpoint device. Therefore, the application must not write a 1 to this bit. */ uint32_t dpt : 4; /**< Device Port Type */ uint32_t pciecv : 4; /**< PCI Express Capability Version */ uint32_t ncp : 8; /**< Next Capability Pointer Writable through the DBI. However, the application must not change this field. 
*/
uint32_t pcieid : 8; /**< PCIE Capability ID */
#else
uint32_t pcieid : 8;
uint32_t ncp : 8;
uint32_t pciecv : 4;
uint32_t dpt : 4;
uint32_t si : 1;
uint32_t imn : 5;
uint32_t reserved_30_31 : 2;
#endif
} s;
struct cvmx_pcieep_cfg028_s cn52xx;
struct cvmx_pcieep_cfg028_s cn52xxp1;
struct cvmx_pcieep_cfg028_s cn56xx;
struct cvmx_pcieep_cfg028_s cn56xxp1;
} cvmx_pcieep_cfg028_t;
/**
 * cvmx_pcieep_cfg029
 *
 * PCIE_CFG029 = Thirtieth 32-bits of PCIE type 0 config space (Device Capabilities Register)
 *
 */
typedef union { uint32_t u32; struct cvmx_pcieep_cfg029_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t reserved_28_31 : 4;
uint32_t cspls : 2; /**< Captured Slot Power Limit Scale From Message from RC, upstream port only. */
uint32_t csplv : 8; /**< Captured Slot Power Limit Value From Message from RC, upstream port only. */
uint32_t reserved_16_17 : 2;
uint32_t rber : 1; /**< Role-Based Error Reporting, writable through the DBI However, the application must not change this field. */
uint32_t reserved_12_14 : 3;
uint32_t el1al : 3; /**< Endpoint L1 Acceptable Latency, writable through the DBI However, the application must not change this field. */
uint32_t el0al : 3; /**< Endpoint L0s Acceptable Latency, writable through the DBI However, the application must not change this field. */
uint32_t etfs : 1; /**< Extended Tag Field Supported This bit is writable through the DBI. However, the application must not write a 1 to this bit. */
uint32_t pfs : 2; /**< Phantom Function Supported This field is writable through the DBI. However, Phantom Function is not supported. Therefore, the application must not write any value other than 0x0 to this field. */
uint32_t mpss : 3; /**< Max_Payload_Size Supported, writable through the DBI However, the application must not change this field. */
#else
uint32_t mpss : 3;
uint32_t pfs : 2;
uint32_t etfs : 1;
uint32_t el0al : 3;
uint32_t el1al : 3;
uint32_t reserved_12_14 : 3;
uint32_t rber : 1;
uint32_t reserved_16_17 : 2;
uint32_t csplv : 8;
uint32_t cspls : 2;
uint32_t reserved_28_31 : 4;
#endif
} s;
struct cvmx_pcieep_cfg029_s cn52xx;
struct cvmx_pcieep_cfg029_s cn52xxp1;
struct cvmx_pcieep_cfg029_s cn56xx;
struct cvmx_pcieep_cfg029_s cn56xxp1;
} cvmx_pcieep_cfg029_t;
/**
 * cvmx_pcieep_cfg030
 *
 * PCIE_CFG030 = Thirty-first 32-bits of PCIE type 0 config space
 * (Device Control Register/Device Status Register)
 */
typedef union { uint32_t u32; struct cvmx_pcieep_cfg030_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t reserved_22_31 : 10;
uint32_t tp : 1; /**< Transaction Pending Set to 1 when Non-Posted Requests are not yet completed and cleared when they are completed. */
uint32_t ap_d : 1; /**< Aux Power Detected Set to 1 if Aux power detected. */
uint32_t ur_d : 1; /**< Unsupported Request Detected Errors are logged in this register regardless of whether error reporting is enabled in the Device Control register. UR_D occurs when we receive something we don't support. Unsupported requests are Nonfatal errors, so UR_D should cause NFE_D. Receiving a vendor defined message should cause an unsupported request. */
uint32_t fe_d : 1; /**< Fatal Error Detected Errors are logged in this register regardless of whether error reporting is enabled in the Device Control register. FE_D is set if we receive any of the errors in PCIE_CFG066 that has a severity set to Fatal. Malformed TLP's generally fit into this category.
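Per the PCIe specification these detected-error status bits are
typically write-one-to-clear; a handler sketch (cfg_read32()/cfg_write32()
and PCIE_CFG030_OFF are hypothetical config-space helpers, not defined
in this file):
  uint32_t cs = cfg_read32(PCIE_CFG030_OFF);
  if (cs & (1u << 18))       // FE_D
      handle_fatal_error();  // hypothetical handler
  cfg_write32(PCIE_CFG030_OFF, cs);  // write ones back to clear status bits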
*/ uint32_t nfe_d : 1; /**< Non-Fatal Error Detected Errors are logged in this register regardless of whether error reporting is enabled in the Device Control register. NFE_D is set if we receive any of the errors in PCIE_CFG066 that have a severity set to Nonfatal and do NOT meet Advisory Nonfatal criteria (PCIe 1.1 spec, Section 6.2.3.2.4), which most poisoned TLPs should be. */ uint32_t ce_d : 1; /**< Correctable Error Detected Errors are logged in this register regardless of whether error reporting is enabled in the Device Control register. CE_D is set if we receive any of the errors in PCIE_CFG068, for example a Replay Timer Timeout. Also, it can be set if we get any of the errors in PCIE_CFG066 that have a severity set to Nonfatal and meet the Advisory Nonfatal criteria (PCIe 1.1 spec, Section 6.2.3.2.4), which most ECRC errors should be. */ uint32_t reserved_15_15 : 1; uint32_t mrrs : 3; /**< Max Read Request Size 0 = 128B 1 = 256B 2 = 512B 3 = 1024B 4 = 2048B 5 = 4096B Note: NPEI_CTL_STATUS2[MRRS] also must be set properly. NPEI_CTL_STATUS2[MRRS] must not exceed the desired max read request size. */ uint32_t ns_en : 1; /**< Enable No Snoop */ uint32_t ap_en : 1; /**< AUX Power PM Enable */ uint32_t pf_en : 1; /**< Phantom Function Enable This bit should never be set - OCTEON requests never use phantom functions. */ uint32_t etf_en : 1; /**< Extended Tag Field Enable This bit should never be set - OCTEON requests never use extended tags. */ uint32_t mps : 3; /**< Max Payload Size Legal values: 0 = 128B 1 = 256B Larger sizes not supported by OCTEON. Note: NPEI_CTL_STATUS2[MPS] must be set to the same value for proper functionality. */ uint32_t ro_en : 1; /**< Enable Relaxed Ordering */ uint32_t ur_en : 1; /**< Unsupported Request Reporting Enable */ uint32_t fe_en : 1; /**< Fatal Error Reporting Enable */ uint32_t nfe_en : 1; /**< Non-Fatal Error Reporting Enable */ uint32_t ce_en : 1; /**< Correctable Error Reporting Enable */ #else uint32_t ce_en : 1; uint32_t nfe_en : 1; uint32_t fe_en : 1; uint32_t ur_en : 1; uint32_t ro_en : 1; uint32_t mps : 3; uint32_t etf_en : 1; uint32_t pf_en : 1; uint32_t ap_en : 1; uint32_t ns_en : 1; uint32_t mrrs : 3; uint32_t reserved_15_15 : 1; uint32_t ce_d : 1; uint32_t nfe_d : 1; uint32_t fe_d : 1; uint32_t ur_d : 1; uint32_t ap_d : 1; uint32_t tp : 1; uint32_t reserved_22_31 : 10; #endif } s; struct cvmx_pcieep_cfg030_s cn52xx; struct cvmx_pcieep_cfg030_s cn52xxp1; struct cvmx_pcieep_cfg030_s cn56xx; struct cvmx_pcieep_cfg030_s cn56xxp1; } cvmx_pcieep_cfg030_t; /** * cvmx_pcieep_cfg031 * * PCIE_CFG031 = Thirty-second 32-bits of PCIE type 0 config space * (Link Capabilities Register) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg031_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t pnum : 8; /**< Port Number, writable through the DBI However, the application must not change this field. */ uint32_t reserved_22_23 : 2; uint32_t lbnc : 1; /**< Link Bandwidth Notification Capability */ uint32_t dllarc : 1; /**< Data Link Layer Active Reporting Capable */ uint32_t sderc : 1; /**< Surprise Down Error Reporting Capable Not supported, hardwired to 0x0. */ uint32_t cpm : 1; /**< Clock Power Management The default value is the value you specify during hardware configuration, writable through the DBI. However, the application must not change this field. */ uint32_t l1el : 3; /**< L1 Exit Latency The default value is the value you specify during hardware configuration, writable through the DBI. However, the application must not change this field.
*/ uint32_t l0el : 3; /**< L0s Exit Latency The default value is the value you specify during hardware configuration, writable through the DBI. However, the application must not change this field. */ uint32_t aslpms : 2; /**< Active State Link PM Support The default value is the value you specify during hardware configuration, writable through the DBI. However, the application must not change this field. */ uint32_t mlw : 6; /**< Maximum Link Width The default value is the value you specify during hardware configuration (x1, x4, x8, or x16), writable through the DBI. */ uint32_t mls : 4; /**< Maximum Link Speed Default value is 0x1 for 2.5 Gbps Link. This field is writable through the DBI. However, 0x1 is the only supported value. Therefore, the application must not write any value other than 0x1 to this field. */ #else uint32_t mls : 4; uint32_t mlw : 6; uint32_t aslpms : 2; uint32_t l0el : 3; uint32_t l1el : 3; uint32_t cpm : 1; uint32_t sderc : 1; uint32_t dllarc : 1; uint32_t lbnc : 1; uint32_t reserved_22_23 : 2; uint32_t pnum : 8; #endif } s; struct cvmx_pcieep_cfg031_s cn52xx; struct cvmx_pcieep_cfg031_s cn52xxp1; struct cvmx_pcieep_cfg031_s cn56xx; struct cvmx_pcieep_cfg031_s cn56xxp1; } cvmx_pcieep_cfg031_t; /** * cvmx_pcieep_cfg032 * * PCIE_CFG032 = Thirty-third 32-bits of PCIE type 0 config space * (Link Control Register/Link Status Register) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg032_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_30_31 : 2; uint32_t dlla : 1; /**< Data Link Layer Active Not applicable for an upstream Port or Endpoint device, hardwired to 0. */ uint32_t scc : 1; /**< Slot Clock Configuration Indicates that the component uses the same physical reference clock that the platform provides on the connector. Writable through the DBI. However, the application must not change this field. */ uint32_t lt : 1; /**< Link Training Not applicable for an upstream Port or Endpoint device, hardwired to 0. */ uint32_t reserved_26_26 : 1; uint32_t nlw : 6; /**< Negotiated Link Width Set automatically by hardware after Link initialization. */ uint32_t ls : 4; /**< Link Speed The negotiated Link speed: 2.5 Gbps */ uint32_t reserved_10_15 : 6; uint32_t hawd : 1; /**< Hardware Autonomous Width Disable (Not Supported) */ uint32_t ecpm : 1; /**< Enable Clock Power Management Hardwired to 0 if Clock Power Management is disabled in the Link Capabilities register. */ uint32_t es : 1; /**< Extended Synch */ uint32_t ccc : 1; /**< Common Clock Configuration */ uint32_t rl : 1; /**< Retrain Link Not applicable for an upstream Port or Endpoint device, hardwired to 0. */ uint32_t ld : 1; /**< Link Disable Not applicable for an upstream Port or Endpoint device, hardwired to 0. 
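(Illustrative use of this register as a whole, not of this bit: after link training completes, software commonly samples the negotiated width and speed through the u32 view. A sketch assuming the SDK's cvmx_pcie_cfgx_read helper from cvmx-pcie.h and the implied byte offset, word 32 => 0x80:
    cvmx_pcieep_cfg032_t cfg032;
    cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, 0x80);
    int lanes = cfg032.s.nlw;   negotiated link width
    int speed = cfg032.s.ls;    0x1 => 2.5 Gbps
)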
*/ uint32_t rcb : 1; /**< Read Completion Boundary (RCB) */ uint32_t reserved_2_2 : 1; uint32_t aslpc : 2; /**< Active State Link PM Control */ #else uint32_t aslpc : 2; uint32_t reserved_2_2 : 1; uint32_t rcb : 1; uint32_t ld : 1; uint32_t rl : 1; uint32_t ccc : 1; uint32_t es : 1; uint32_t ecpm : 1; uint32_t hawd : 1; uint32_t reserved_10_15 : 6; uint32_t ls : 4; uint32_t nlw : 6; uint32_t reserved_26_26 : 1; uint32_t lt : 1; uint32_t scc : 1; uint32_t dlla : 1; uint32_t reserved_30_31 : 2; #endif } s; struct cvmx_pcieep_cfg032_s cn52xx; struct cvmx_pcieep_cfg032_s cn52xxp1; struct cvmx_pcieep_cfg032_s cn56xx; struct cvmx_pcieep_cfg032_s cn56xxp1; } cvmx_pcieep_cfg032_t; /** * cvmx_pcieep_cfg033 * * PCIE_CFG033 = Thirty-fourth 32-bits of PCIE type 0 config space * (Slot Capabilities Register) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg033_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t ps_num : 13; /**< Physical Slot Number, writable through the DBI However, the application must not change this field. */ uint32_t nccs : 1; /**< No Command Complete Support, writable through the DBI However, the application must not change this field. */ uint32_t emip : 1; /**< Electromechanical Interlock Present, writable through the DBI However, the application must not change this field. */ uint32_t sp_ls : 2; /**< Slot Power Limit Scale, writable through the DBI However, the application must not change this field. */ uint32_t sp_lv : 8; /**< Slot Power Limit Value, writable through the DBI However, the application must not change this field. */ uint32_t hp_c : 1; /**< Hot-Plug Capable, writable through the DBI However, the application must not change this field. */ uint32_t hp_s : 1; /**< Hot-Plug Surprise, writable through the DBI However, the application must not change this field. */ uint32_t pip : 1; /**< Power Indicator Present, writable through the DBI However, the application must not change this field. */ uint32_t aip : 1; /**< Attention Indicator Present, writable through the DBI However, the application must not change this field. */ uint32_t mrlsp : 1; /**< MRL Sensor Present, writable through the DBI However, the application must not change this field. */ uint32_t pcp : 1; /**< Power Controller Present, writable through the DBI However, the application must not change this field. */ uint32_t abp : 1; /**< Attention Button Present, writable through the DBI However, the application must not change this field. */ #else uint32_t abp : 1; uint32_t pcp : 1; uint32_t mrlsp : 1; uint32_t aip : 1; uint32_t pip : 1; uint32_t hp_s : 1; uint32_t hp_c : 1; uint32_t sp_lv : 8; uint32_t sp_ls : 2; uint32_t emip : 1; uint32_t nccs : 1; uint32_t ps_num : 13; #endif } s; struct cvmx_pcieep_cfg033_s cn52xx; struct cvmx_pcieep_cfg033_s cn52xxp1; struct cvmx_pcieep_cfg033_s cn56xx; struct cvmx_pcieep_cfg033_s cn56xxp1; } cvmx_pcieep_cfg033_t; /** * cvmx_pcieep_cfg034 * * PCIE_CFG034 = Thirty-fifth 32-bits of PCIE type 0 config space * (Slot Control Register/Slot Status Register) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg034_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_25_31 : 7; uint32_t dlls_c : 1; /**< Data Link Layer State Changed Not applicable for an upstream Port or Endpoint device, hardwired to 0. 
*/ uint32_t emis : 1; /**< Electromechanical Interlock Status */ uint32_t pds : 1; /**< Presence Detect State */ uint32_t mrlss : 1; /**< MRL Sensor State */ uint32_t ccint_d : 1; /**< Command Completed */ uint32_t pd_c : 1; /**< Presence Detect Changed */ uint32_t mrls_c : 1; /**< MRL Sensor Changed */ uint32_t pf_d : 1; /**< Power Fault Detected */ uint32_t abp_d : 1; /**< Attention Button Pressed */ uint32_t reserved_13_15 : 3; uint32_t dlls_en : 1; /**< Data Link Layer State Changed Enable Not applicable for an upstream Port or Endpoint device, hardwired to 0. */ uint32_t emic : 1; /**< Electromechanical Interlock Control */ uint32_t pcc : 1; /**< Power Controller Control */ uint32_t pic : 2; /**< Power Indicator Control */ uint32_t aic : 2; /**< Attention Indicator Control */ uint32_t hpint_en : 1; /**< Hot-Plug Interrupt Enable */ uint32_t ccint_en : 1; /**< Command Completed Interrupt Enable */ uint32_t pd_en : 1; /**< Presence Detect Changed Enable */ uint32_t mrls_en : 1; /**< MRL Sensor Changed Enable */ uint32_t pf_en : 1; /**< Power Fault Detected Enable */ uint32_t abp_en : 1; /**< Attention Button Pressed Enable */ #else uint32_t abp_en : 1; uint32_t pf_en : 1; uint32_t mrls_en : 1; uint32_t pd_en : 1; uint32_t ccint_en : 1; uint32_t hpint_en : 1; uint32_t aic : 2; uint32_t pic : 2; uint32_t pcc : 1; uint32_t emic : 1; uint32_t dlls_en : 1; uint32_t reserved_13_15 : 3; uint32_t abp_d : 1; uint32_t pf_d : 1; uint32_t mrls_c : 1; uint32_t pd_c : 1; uint32_t ccint_d : 1; uint32_t mrlss : 1; uint32_t pds : 1; uint32_t emis : 1; uint32_t dlls_c : 1; uint32_t reserved_25_31 : 7; #endif } s; struct cvmx_pcieep_cfg034_s cn52xx; struct cvmx_pcieep_cfg034_s cn52xxp1; struct cvmx_pcieep_cfg034_s cn56xx; struct cvmx_pcieep_cfg034_s cn56xxp1; } cvmx_pcieep_cfg034_t; /** * cvmx_pcieep_cfg037 * * PCIE_CFG037 = Thirty-eighth 32-bits of PCIE type 0 config space * (Device Capabilities 2 Register) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg037_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_5_31 : 27; uint32_t ctds : 1; /**< Completion Timeout Disable Supported */ uint32_t ctrs : 4; /**< Completion Timeout Ranges Supported Value of 0 indicates that Completion Timeout Programming is not supported Completion timeout is 16.7ms. */ #else uint32_t ctrs : 4; uint32_t ctds : 1; uint32_t reserved_5_31 : 27; #endif } s; struct cvmx_pcieep_cfg037_s cn52xx; struct cvmx_pcieep_cfg037_s cn52xxp1; struct cvmx_pcieep_cfg037_s cn56xx; struct cvmx_pcieep_cfg037_s cn56xxp1; } cvmx_pcieep_cfg037_t; /** * cvmx_pcieep_cfg038 * * PCIE_CFG038 = Thirty-ninth 32-bits of PCIE type 0 config space * (Device Control 2 Register/Device Status 2 Register) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg038_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_5_31 : 27; uint32_t ctd : 1; /**< Completion Timeout Disable */ uint32_t ctv : 4; /**< Completion Timeout Value Completion Timeout Programming is not supported Completion timeout is 16.7ms. 
*/ #else uint32_t ctv : 4; uint32_t ctd : 1; uint32_t reserved_5_31 : 27; #endif } s; struct cvmx_pcieep_cfg038_s cn52xx; struct cvmx_pcieep_cfg038_s cn52xxp1; struct cvmx_pcieep_cfg038_s cn56xx; struct cvmx_pcieep_cfg038_s cn56xxp1; } cvmx_pcieep_cfg038_t; /** * cvmx_pcieep_cfg039 * * PCIE_CFG039 = Fortieth 32-bits of PCIE type 0 config space * (Link Capabilities 2 Register) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg039_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_0_31 : 32; #else uint32_t reserved_0_31 : 32; #endif } s; struct cvmx_pcieep_cfg039_s cn52xx; struct cvmx_pcieep_cfg039_s cn52xxp1; struct cvmx_pcieep_cfg039_s cn56xx; struct cvmx_pcieep_cfg039_s cn56xxp1; } cvmx_pcieep_cfg039_t; /** * cvmx_pcieep_cfg040 * * PCIE_CFG040 = Forty-first 32-bits of PCIE type 0 config space * (Link Control 2 Register/Link Status 2 Register) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg040_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_0_31 : 32; #else uint32_t reserved_0_31 : 32; #endif } s; struct cvmx_pcieep_cfg040_s cn52xx; struct cvmx_pcieep_cfg040_s cn52xxp1; struct cvmx_pcieep_cfg040_s cn56xx; struct cvmx_pcieep_cfg040_s cn56xxp1; } cvmx_pcieep_cfg040_t; /** * cvmx_pcieep_cfg041 * * PCIE_CFG041 = Forty-second 32-bits of PCIE type 0 config space * (Slot Capabilities 2 Register) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg041_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_0_31 : 32; #else uint32_t reserved_0_31 : 32; #endif } s; struct cvmx_pcieep_cfg041_s cn52xx; struct cvmx_pcieep_cfg041_s cn52xxp1; struct cvmx_pcieep_cfg041_s cn56xx; struct cvmx_pcieep_cfg041_s cn56xxp1; } cvmx_pcieep_cfg041_t; /** * cvmx_pcieep_cfg042 * * PCIE_CFG042 = Forty-third 32-bits of PCIE type 0 config space * (Slot Control 2 Register/Slot Status 2 Register) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg042_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_0_31 : 32; #else uint32_t reserved_0_31 : 32; #endif } s; struct cvmx_pcieep_cfg042_s cn52xx; struct cvmx_pcieep_cfg042_s cn52xxp1; struct cvmx_pcieep_cfg042_s cn56xx; struct cvmx_pcieep_cfg042_s cn56xxp1; } cvmx_pcieep_cfg042_t; /** * cvmx_pcieep_cfg064 * * PCIE_CFG064 = Sixty-fifth 32-bits of PCIE type 0 config space * (PCI Express Enhanced Capability Header) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg064_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t nco : 12; /**< Next Capability Offset */ uint32_t cv : 4; /**< Capability Version */ uint32_t pcieec : 16; /**< PCI Express Extended Capability */ #else uint32_t pcieec : 16; uint32_t cv : 4; uint32_t nco : 12; #endif } s; struct cvmx_pcieep_cfg064_s cn52xx; struct cvmx_pcieep_cfg064_s cn52xxp1; struct cvmx_pcieep_cfg064_s cn56xx; struct cvmx_pcieep_cfg064_s cn56xxp1; } cvmx_pcieep_cfg064_t; /** * cvmx_pcieep_cfg065 * * PCIE_CFG065 = Sixty-sixth 32-bits of PCIE type 0 config space * (Uncorrectable Error Status Register) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg065_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_21_31 : 11; uint32_t ures : 1; /**< Unsupported Request Error Status */ uint32_t ecrces : 1; /**< ECRC Error Status */ uint32_t mtlps : 1; /**< Malformed TLP Status */ uint32_t ros : 1; /**< Receiver Overflow Status */ uint32_t ucs : 1; /**< Unexpected Completion Status */ uint32_t cas : 1; /**< Completer Abort Status */ uint32_t cts : 1; /**< Completion Timeout Status */ uint32_t fcpes : 1; /**< Flow Control Protocol Error Status */ uint32_t ptlps : 1; /**< Poisoned TLP Status */ uint32_t
reserved_6_11 : 6; uint32_t sdes : 1; /**< Surprise Down Error Status (not supported) */ uint32_t dlpes : 1; /**< Data Link Protocol Error Status */ uint32_t reserved_0_3 : 4; #else uint32_t reserved_0_3 : 4; uint32_t dlpes : 1; uint32_t sdes : 1; uint32_t reserved_6_11 : 6; uint32_t ptlps : 1; uint32_t fcpes : 1; uint32_t cts : 1; uint32_t cas : 1; uint32_t ucs : 1; uint32_t ros : 1; uint32_t mtlps : 1; uint32_t ecrces : 1; uint32_t ures : 1; uint32_t reserved_21_31 : 11; #endif } s; struct cvmx_pcieep_cfg065_s cn52xx; struct cvmx_pcieep_cfg065_s cn52xxp1; struct cvmx_pcieep_cfg065_s cn56xx; struct cvmx_pcieep_cfg065_s cn56xxp1; } cvmx_pcieep_cfg065_t; /** * cvmx_pcieep_cfg066 * * PCIE_CFG066 = Sixty-seventh 32-bits of PCIE type 0 config space * (Uncorrectable Error Mask Register) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg066_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_21_31 : 11; uint32_t urem : 1; /**< Unsupported Request Error Mask */ uint32_t ecrcem : 1; /**< ECRC Error Mask */ uint32_t mtlpm : 1; /**< Malformed TLP Mask */ uint32_t rom : 1; /**< Receiver Overflow Mask */ uint32_t ucm : 1; /**< Unexpected Completion Mask */ uint32_t cam : 1; /**< Completer Abort Mask */ uint32_t ctm : 1; /**< Completion Timeout Mask */ uint32_t fcpem : 1; /**< Flow Control Protocol Error Mask */ uint32_t ptlpm : 1; /**< Poisoned TLP Mask */ uint32_t reserved_6_11 : 6; uint32_t sdem : 1; /**< Surprise Down Error Mask (not supported) */ uint32_t dlpem : 1; /**< Data Link Protocol Error Mask */ uint32_t reserved_0_3 : 4; #else uint32_t reserved_0_3 : 4; uint32_t dlpem : 1; uint32_t sdem : 1; uint32_t reserved_6_11 : 6; uint32_t ptlpm : 1; uint32_t fcpem : 1; uint32_t ctm : 1; uint32_t cam : 1; uint32_t ucm : 1; uint32_t rom : 1; uint32_t mtlpm : 1; uint32_t ecrcem : 1; uint32_t urem : 1; uint32_t reserved_21_31 : 11; #endif } s; struct cvmx_pcieep_cfg066_s cn52xx; struct cvmx_pcieep_cfg066_s cn52xxp1; struct cvmx_pcieep_cfg066_s cn56xx; struct cvmx_pcieep_cfg066_s cn56xxp1; } cvmx_pcieep_cfg066_t; /** * cvmx_pcieep_cfg067 * * PCIE_CFG067 = Sixty-eighth 32-bits of PCIE type 0 config space * (Uncorrectable Error Severity Register) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg067_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_21_31 : 11; uint32_t ures : 1; /**< Unsupported Request Error Severity */ uint32_t ecrces : 1; /**< ECRC Error Severity */ uint32_t mtlps : 1; /**< Malformed TLP Severity */ uint32_t ros : 1; /**< Receiver Overflow Severity */ uint32_t ucs : 1; /**< Unexpected Completion Severity */ uint32_t cas : 1; /**< Completer Abort Severity */ uint32_t cts : 1; /**< Completion Timeout Severity */ uint32_t fcpes : 1; /**< Flow Control Protocol Error Severity */ uint32_t ptlps : 1; /**< Poisoned TLP Severity */ uint32_t reserved_6_11 : 6; uint32_t sdes : 1; /**< Surprise Down Error Severity (not supported) */ uint32_t dlpes : 1; /**< Data Link Protocol Error Severity */ uint32_t reserved_0_3 : 4; #else uint32_t reserved_0_3 : 4; uint32_t dlpes : 1; uint32_t sdes : 1; uint32_t reserved_6_11 : 6; uint32_t ptlps : 1; uint32_t fcpes : 1; uint32_t cts : 1; uint32_t cas : 1; uint32_t ucs : 1; uint32_t ros : 1; uint32_t mtlps : 1; uint32_t ecrces : 1; uint32_t ures : 1; uint32_t reserved_21_31 : 11; #endif } s; struct cvmx_pcieep_cfg067_s cn52xx; struct cvmx_pcieep_cfg067_s cn52xxp1; struct cvmx_pcieep_cfg067_s cn56xx; struct cvmx_pcieep_cfg067_s cn56xxp1; } cvmx_pcieep_cfg067_t; /** * cvmx_pcieep_cfg068 * * PCIE_CFG068 = Sixty-ninth 32-bits of PCIE type 0 config 
space * (Correctable Error Status Register) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg068_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_14_31 : 18; uint32_t anfes : 1; /**< Advisory Non-Fatal Error Status */ uint32_t rtts : 1; /**< Replay Timer Timeout Status */ uint32_t reserved_9_11 : 3; uint32_t rnrs : 1; /**< REPLAY_NUM Rollover Status */ uint32_t bdllps : 1; /**< Bad DLLP Status */ uint32_t btlps : 1; /**< Bad TLP Status */ uint32_t reserved_1_5 : 5; uint32_t res : 1; /**< Receiver Error Status */ #else uint32_t res : 1; uint32_t reserved_1_5 : 5; uint32_t btlps : 1; uint32_t bdllps : 1; uint32_t rnrs : 1; uint32_t reserved_9_11 : 3; uint32_t rtts : 1; uint32_t anfes : 1; uint32_t reserved_14_31 : 18; #endif } s; struct cvmx_pcieep_cfg068_s cn52xx; struct cvmx_pcieep_cfg068_s cn52xxp1; struct cvmx_pcieep_cfg068_s cn56xx; struct cvmx_pcieep_cfg068_s cn56xxp1; } cvmx_pcieep_cfg068_t; /** * cvmx_pcieep_cfg069 * * PCIE_CFG069 = Seventieth 32-bits of PCIE type 0 config space * (Correctable Error Mask Register) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg069_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_14_31 : 18; uint32_t anfem : 1; /**< Advisory Non-Fatal Error Mask */ uint32_t rttm : 1; /**< Replay Timer Timeout Mask */ uint32_t reserved_9_11 : 3; uint32_t rnrm : 1; /**< REPLAY_NUM Rollover Mask */ uint32_t bdllpm : 1; /**< Bad DLLP Mask */ uint32_t btlpm : 1; /**< Bad TLP Mask */ uint32_t reserved_1_5 : 5; uint32_t rem : 1; /**< Receiver Error Mask */ #else uint32_t rem : 1; uint32_t reserved_1_5 : 5; uint32_t btlpm : 1; uint32_t bdllpm : 1; uint32_t rnrm : 1; uint32_t reserved_9_11 : 3; uint32_t rttm : 1; uint32_t anfem : 1; uint32_t reserved_14_31 : 18; #endif } s; struct cvmx_pcieep_cfg069_s cn52xx; struct cvmx_pcieep_cfg069_s cn52xxp1; struct cvmx_pcieep_cfg069_s cn56xx; struct cvmx_pcieep_cfg069_s cn56xxp1; } cvmx_pcieep_cfg069_t; /** * cvmx_pcieep_cfg070 * * PCIE_CFG070 = Seventy-first 32-bits of PCIE type 0 config space * (Advanced Error Capabilities and Control Register) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg070_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_9_31 : 23; uint32_t ce : 1; /**< ECRC Check Enable */ uint32_t cc : 1; /**< ECRC Check Capable */ uint32_t ge : 1; /**< ECRC Generation Enable */ uint32_t gc : 1; /**< ECRC Generation Capability */ uint32_t fep : 5; /**< First Error Pointer */ #else uint32_t fep : 5; uint32_t gc : 1; uint32_t ge : 1; uint32_t cc : 1; uint32_t ce : 1; uint32_t reserved_9_31 : 23; #endif } s; struct cvmx_pcieep_cfg070_s cn52xx; struct cvmx_pcieep_cfg070_s cn52xxp1; struct cvmx_pcieep_cfg070_s cn56xx; struct cvmx_pcieep_cfg070_s cn56xxp1; } cvmx_pcieep_cfg070_t; /** * cvmx_pcieep_cfg071 * * PCIE_CFG071 = Seventy-second 32-bits of PCIE type 0 config space * (Header Log Register 1) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg071_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t dword1 : 32; /**< Header Log Register (first DWORD) */ #else uint32_t dword1 : 32; #endif } s; struct cvmx_pcieep_cfg071_s cn52xx; struct cvmx_pcieep_cfg071_s cn52xxp1; struct cvmx_pcieep_cfg071_s cn56xx; struct cvmx_pcieep_cfg071_s cn56xxp1; } cvmx_pcieep_cfg071_t; /** * cvmx_pcieep_cfg072 * * PCIE_CFG072 = Seventy-third 32-bits of PCIE type 0 config space * (Header Log Register 2) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg072_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t dword2 : 32; /**< Header Log Register (second DWORD) */ #else uint32_t dword2 : 32; #endif } s; struct
cvmx_pcieep_cfg072_s cn52xx; struct cvmx_pcieep_cfg072_s cn52xxp1; struct cvmx_pcieep_cfg072_s cn56xx; struct cvmx_pcieep_cfg072_s cn56xxp1; } cvmx_pcieep_cfg072_t; /** * cvmx_pcieep_cfg073 * * PCIE_CFG073 = Seventy-fourth 32-bits of PCIE type 0 config space * (Header Log Register 3) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg073_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t dword3 : 32; /**< Header Log Register (third DWORD) */ #else uint32_t dword3 : 32; #endif } s; struct cvmx_pcieep_cfg073_s cn52xx; struct cvmx_pcieep_cfg073_s cn52xxp1; struct cvmx_pcieep_cfg073_s cn56xx; struct cvmx_pcieep_cfg073_s cn56xxp1; } cvmx_pcieep_cfg073_t; /** * cvmx_pcieep_cfg074 * * PCIE_CFG074 = Seventy-fifth 32-bits of PCIE type 0 config space * (Header Log Register 4) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg074_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t dword4 : 32; /**< Header Log Register (fourth DWORD) */ #else uint32_t dword4 : 32; #endif } s; struct cvmx_pcieep_cfg074_s cn52xx; struct cvmx_pcieep_cfg074_s cn52xxp1; struct cvmx_pcieep_cfg074_s cn56xx; struct cvmx_pcieep_cfg074_s cn56xxp1; } cvmx_pcieep_cfg074_t; /** * cvmx_pcieep_cfg448 * * PCIE_CFG448 = Four hundred forty-ninth 32-bits of PCIE type 0 config space * (Ack Latency Timer and Replay Timer Register) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg448_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t rtl : 16; /**< Replay Time Limit The replay timer expires when it reaches this limit. The PCI Express bus initiates a replay upon reception of a Nak or when the replay timer expires. The default is then updated based on the Negotiated Link Width and Max_Payload_Size. */ uint32_t rtltl : 16; /**< Round Trip Latency Time Limit The Ack/Nak latency timer expires when it reaches this limit. The default is then updated based on the Negotiated Link Width and Max_Payload_Size. */ #else uint32_t rtltl : 16; uint32_t rtl : 16; #endif } s; struct cvmx_pcieep_cfg448_s cn52xx; struct cvmx_pcieep_cfg448_s cn52xxp1; struct cvmx_pcieep_cfg448_s cn56xx; struct cvmx_pcieep_cfg448_s cn56xxp1; } cvmx_pcieep_cfg448_t; /** * cvmx_pcieep_cfg449 * * PCIE_CFG449 = Four hundred fiftieth 32-bits of PCIE type 0 config space * (Other Message Register) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg449_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t omr : 32; /**< Other Message Register This register can be used for either of the following purposes: o To send a specific PCI Express Message, the application writes the payload of the Message into this register, then sets bit 0 of the Port Link Control Register to send the Message. o To store a corruption pattern for corrupting the LCRC on all TLPs, the application places a 32-bit corruption pattern into this register and enables this function by setting bit 25 of the Port Link Control Register. When enabled, the transmit LCRC result is XOR'd with this pattern before inserting it into the packet. 
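An illustrative message-send sequence (a sketch only, assuming the SDK's cvmx_pcie_cfgx_read/cvmx_pcie_cfgx_write helpers from cvmx-pcie.h, with byte offsets derived from the word indices: CFG449 => 449*4 = 0x704, CFG452 => 452*4 = 0x710):
    cvmx_pcie_cfgx_write(pcie_port, 0x704, payload);   load the Message payload
    cvmx_pcieep_cfg452_t cfg452;
    cfg452.u32 = cvmx_pcie_cfgx_read(pcie_port, 0x710);
    cfg452.s.omr = 1;                                  bit 0 of the Port Link Control Register
    cvmx_pcie_cfgx_write(pcie_port, 0x710, cfg452.u32);
The LCRC-corruption variant of this register's use is sketched under PCIE_CFG452 below.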
*/ #else uint32_t omr : 32; #endif } s; struct cvmx_pcieep_cfg449_s cn52xx; struct cvmx_pcieep_cfg449_s cn52xxp1; struct cvmx_pcieep_cfg449_s cn56xx; struct cvmx_pcieep_cfg449_s cn56xxp1; } cvmx_pcieep_cfg449_t; /** * cvmx_pcieep_cfg450 * * PCIE_CFG450 = Four hundred fifty-first 32-bits of PCIE type 0 config space * (Port Force Link Register) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg450_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t lpec : 8; /**< Low Power Entrance Count The Power Management state will wait for this many clock cycles for the associated completion of a CfgWr to the PCIE_CFG017 register Power State (PS) field to go low-power. This register is intended for applications that do not let the PCI Express bus handle a completion for a configuration request to the Power Management Control and Status (PCIE_CFG017) register. */ uint32_t reserved_22_23 : 2; uint32_t link_state : 6; /**< Link State The Link state that the PCI Express Bus will be forced to when bit 15 (Force Link) is set. State encoding: o DETECT_QUIET 00h o DETECT_ACT 01h o POLL_ACTIVE 02h o POLL_COMPLIANCE 03h o POLL_CONFIG 04h o PRE_DETECT_QUIET 05h o DETECT_WAIT 06h o CFG_LINKWD_START 07h o CFG_LINKWD_ACEPT 08h o CFG_LANENUM_WAIT 09h o CFG_LANENUM_ACEPT 0Ah o CFG_COMPLETE 0Bh o CFG_IDLE 0Ch o RCVRY_LOCK 0Dh o RCVRY_SPEED 0Eh o RCVRY_RCVRCFG 0Fh o RCVRY_IDLE 10h o L0 11h o L0S 12h o L123_SEND_EIDLE 13h o L1_IDLE 14h o L2_IDLE 15h o L2_WAKE 16h o DISABLED_ENTRY 17h o DISABLED_IDLE 18h o DISABLED 19h o LPBK_ENTRY 1Ah o LPBK_ACTIVE 1Bh o LPBK_EXIT 1Ch o LPBK_EXIT_TIMEOUT 1Dh o HOT_RESET_ENTRY 1Eh o HOT_RESET 1Fh */ uint32_t force_link : 1; /**< Force Link Forces the Link to the state specified by the Link State field. The Force Link pulse will trigger Link re-negotiation. * As the Force Link is a pulse, writing a 1 to it does trigger the forced link state event, even though reading it always returns a 0. */ uint32_t reserved_8_14 : 7; uint32_t link_num : 8; /**< Link Number Not used for Endpoint */ #else uint32_t link_num : 8; uint32_t reserved_8_14 : 7; uint32_t force_link : 1; uint32_t link_state : 6; uint32_t reserved_22_23 : 2; uint32_t lpec : 8; #endif } s; struct cvmx_pcieep_cfg450_s cn52xx; struct cvmx_pcieep_cfg450_s cn52xxp1; struct cvmx_pcieep_cfg450_s cn56xx; struct cvmx_pcieep_cfg450_s cn56xxp1; } cvmx_pcieep_cfg450_t; /** * cvmx_pcieep_cfg451 * * PCIE_CFG451 = Four hundred fifty-second 32-bits of PCIE type 0 config space * (Ack Frequency Register) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg451_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_30_31 : 2; uint32_t l1el : 3; /**< L1 Entrance Latency Values correspond to: o 000: 1 ms o 001: 2 ms o 010: 4 ms o 011: 8 ms o 100: 16 ms o 101: 32 ms o 110 or 111: 64 ms */ uint32_t l0el : 3; /**< L0s Entrance Latency Values correspond to: o 000: 1 ms o 001: 2 ms o 010: 3 ms o 011: 4 ms o 100: 5 ms o 101: 6 ms o 110 or 111: 7 ms */ uint32_t n_fts_cc : 8; /**< N_FTS when common clock is used. The number of Fast Training Sequence ordered sets to be transmitted when transitioning from L0s to L0. The maximum number of FTS ordered-sets that a component can request is 255. Note: A value of zero is not supported; a value of zero can cause the LTSSM to go into the recovery state when exiting from L0s. */ uint32_t n_fts : 8; /**< N_FTS The number of Fast Training Sequence ordered sets to be transmitted when transitioning from L0s to L0. The maximum number of FTS ordered-sets that a component can request is 255.
Note: A value of zero is not supported; a value of zero can cause the LTSSM to go into the recovery state when exiting from L0s. */ uint32_t ack_freq : 8; /**< Ack Frequency The number of pending Acks specified here (up to 255) before sending an Ack. */ #else uint32_t ack_freq : 8; uint32_t n_fts : 8; uint32_t n_fts_cc : 8; uint32_t l0el : 3; uint32_t l1el : 3; uint32_t reserved_30_31 : 2; #endif } s; struct cvmx_pcieep_cfg451_s cn52xx; struct cvmx_pcieep_cfg451_s cn52xxp1; struct cvmx_pcieep_cfg451_s cn56xx; struct cvmx_pcieep_cfg451_s cn56xxp1; } cvmx_pcieep_cfg451_t; /** * cvmx_pcieep_cfg452 * * PCIE_CFG452 = Four hundred fifty-third 32-bits of PCIE type 0 config space * (Port Link Control Register) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg452_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_26_31 : 6; uint32_t eccrc : 1; /**< Enable Corrupted CRC Causes corrupt LCRC for TLPs when set, using the pattern contained in the Other Message register. This is a test feature, not to be used in normal operation. */ uint32_t reserved_22_24 : 3; uint32_t lme : 6; /**< Link Mode Enable o 000001: x1 o 000011: x2 o 000111: x4 o 001111: x8 o 011111: x16 (not supported) o 111111: x32 (not supported) This field indicates the MAXIMUM number of lanes supported by the PCIe port. It is set to 0xF or 0x7 depending on the value of the QLM_CFG bits (0xF when QLM_CFG == 0 otherwise 0x7). The value can be set less than 0xF or 0x7 to limit the number of lanes the PCIe will attempt to use. If the value of 0xF or 0x7 set by the HW is not desired, this field can be programmed to a smaller value (e.g., via EEPROM load). See also MLW. (Note: The value of this field does NOT indicate the number of lanes in use by the PCIe. LME sets the max number of lanes in the PCIe core that COULD be used. As per the PCIe specs, the PCIe core can negotiate a smaller link width, so all of x8, x4, x2, and x1 are supported when LME=0xF, for example.) */ uint32_t reserved_8_15 : 8; uint32_t flm : 1; /**< Fast Link Mode Sets all internal timers to fast mode for simulation purposes. If during an EEPROM load, the first word loaded is 0xffffffff, then the EEPROM load will be terminated and this bit will be set. */ uint32_t reserved_6_6 : 1; uint32_t dllle : 1; /**< DLL Link Enable Enables Link initialization. If DLL Link Enable = 0, the PCI Express bus does not transmit InitFC DLLPs and does not establish a Link. */ uint32_t reserved_4_4 : 1; uint32_t ra : 1; /**< Reset Assert Triggers a recovery and forces the LTSSM to the Hot Reset state (downstream port only). */ uint32_t le : 1; /**< Loopback Enable Turns on loopback. */ uint32_t sd : 1; /**< Scramble Disable Turns off data scrambling. */ uint32_t omr : 1; /**< Other Message Request When software writes a `1' to this bit, the PCI Express bus transmits the Message contained in the Other Message register.
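For the LCRC-corruption test mode described under ECCRC above, an analogous sketch (same assumed SDK helpers and implied offsets, CFG449 => 0x704, CFG452 => 0x710) would instead load the corruption pattern into PCIE_CFG449 and set ECCRC, bit 25:
    cvmx_pcie_cfgx_write(pcie_port, 0x704, lcrc_pattern);
    cvmx_pcieep_cfg452_t cfg452;
    cfg452.u32 = cvmx_pcie_cfgx_read(pcie_port, 0x710);
    cfg452.s.eccrc = 1;
    cvmx_pcie_cfgx_write(pcie_port, 0x710, cfg452.u32);
Test use only, per the ECCRC description.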
*/ #else uint32_t omr : 1; uint32_t sd : 1; uint32_t le : 1; uint32_t ra : 1; uint32_t reserved_4_4 : 1; uint32_t dllle : 1; uint32_t reserved_6_6 : 1; uint32_t flm : 1; uint32_t reserved_8_15 : 8; uint32_t lme : 6; uint32_t reserved_22_24 : 3; uint32_t eccrc : 1; uint32_t reserved_26_31 : 6; #endif } s; struct cvmx_pcieep_cfg452_s cn52xx; struct cvmx_pcieep_cfg452_s cn52xxp1; struct cvmx_pcieep_cfg452_s cn56xx; struct cvmx_pcieep_cfg452_s cn56xxp1; } cvmx_pcieep_cfg452_t; /** * cvmx_pcieep_cfg453 * * PCIE_CFG453 = Four hundred fifty-fourth 32-bits of PCIE type 0 config space * (Lane Skew Register) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg453_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t dlld : 1; /**< Disable Lane-to-Lane Deskew Disables the internal Lane-to-Lane deskew logic. */ uint32_t reserved_26_30 : 5; uint32_t ack_nak : 1; /**< Ack/Nak Disable Prevents the PCI Express bus from sending Ack and Nak DLLPs. */ uint32_t fcd : 1; /**< Flow Control Disable Prevents the PCI Express bus from sending FC DLLPs. */ uint32_t ilst : 24; /**< Insert Lane Skew for Transmit Causes skew between lanes for test purposes. There are three bits per Lane. The value is in units of one symbol time. For example, the value 010b for a Lane forces a skew of two symbol times for that Lane. The maximum skew value for any Lane is 5 symbol times. */ #else uint32_t ilst : 24; uint32_t fcd : 1; uint32_t ack_nak : 1; uint32_t reserved_26_30 : 5; uint32_t dlld : 1; #endif } s; struct cvmx_pcieep_cfg453_s cn52xx; struct cvmx_pcieep_cfg453_s cn52xxp1; struct cvmx_pcieep_cfg453_s cn56xx; struct cvmx_pcieep_cfg453_s cn56xxp1; } cvmx_pcieep_cfg453_t; /** * cvmx_pcieep_cfg454 * * PCIE_CFG454 = Four hundred fifty-fifth 32-bits of PCIE type 0 config space * (Symbol Number Register) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg454_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_29_31 : 3; uint32_t tmfcwt : 5; /**< Timer Modifier for Flow Control Watchdog Timer Increases the timer value for the Flow Control watchdog timer, in increments of 16 clock cycles. */ uint32_t tmanlt : 5; /**< Timer Modifier for Ack/Nak Latency Timer Increases the timer value for the Ack/Nak latency timer, in increments of 64 clock cycles. */ uint32_t tmrt : 5; /**< Timer Modifier for Replay Timer Increases the timer value for the replay timer, in increments of 64 clock cycles. */ uint32_t reserved_11_13 : 3; uint32_t nskps : 3; /**< Number of SKP Symbols */ uint32_t reserved_4_7 : 4; uint32_t ntss : 4; /**< Number of TS Symbols Sets the number of TS identifier symbols that are sent in TS1 and TS2 ordered sets. */ #else uint32_t ntss : 4; uint32_t reserved_4_7 : 4; uint32_t nskps : 3; uint32_t reserved_11_13 : 3; uint32_t tmrt : 5; uint32_t tmanlt : 5; uint32_t tmfcwt : 5; uint32_t reserved_29_31 : 3; #endif } s; struct cvmx_pcieep_cfg454_s cn52xx; struct cvmx_pcieep_cfg454_s cn52xxp1; struct cvmx_pcieep_cfg454_s cn56xx; struct cvmx_pcieep_cfg454_s cn56xxp1; } cvmx_pcieep_cfg454_t; /** * cvmx_pcieep_cfg455 * * PCIE_CFG455 = Four hundred fifty-sixth 32-bits of PCIE type 0 config space * (Symbol Timer Register/Filter Mask Register 1) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg455_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t m_cfg0_filt : 1; /**< Mask filtering of received Configuration Requests (RC mode only) */ uint32_t m_io_filt : 1; /**< Mask filtering of received I/O Requests (RC mode only) */ uint32_t msg_ctrl : 1; /**< Message Control The application must not change this field. 
*/ uint32_t m_cpl_ecrc_filt : 1; /**< Mask ECRC error filtering for Completions */ uint32_t m_ecrc_filt : 1; /**< Mask ECRC error filtering */ uint32_t m_cpl_len_err : 1; /**< Mask Length mismatch error for received Completions */ uint32_t m_cpl_attr_err : 1; /**< Mask Attributes mismatch error for received Completions */ uint32_t m_cpl_tc_err : 1; /**< Mask Traffic Class mismatch error for received Completions */ uint32_t m_cpl_fun_err : 1; /**< Mask function mismatch error for received Completions */ uint32_t m_cpl_rid_err : 1; /**< Mask Requester ID mismatch error for received Completions */ uint32_t m_cpl_tag_err : 1; /**< Mask Tag error rules for received Completions */ uint32_t m_lk_filt : 1; /**< Mask Locked Request filtering */ uint32_t m_cfg1_filt : 1; /**< Mask Type 1 Configuration Request filtering */ uint32_t m_bar_match : 1; /**< Mask BAR match filtering */ uint32_t m_pois_filt : 1; /**< Mask poisoned TLP filtering */ uint32_t m_fun : 1; /**< Mask function */ uint32_t dfcwt : 1; /**< Disable FC Watchdog Timer */ uint32_t reserved_11_14 : 4; uint32_t skpiv : 11; /**< SKP Interval Value */ #else uint32_t skpiv : 11; uint32_t reserved_11_14 : 4; uint32_t dfcwt : 1; uint32_t m_fun : 1; uint32_t m_pois_filt : 1; uint32_t m_bar_match : 1; uint32_t m_cfg1_filt : 1; uint32_t m_lk_filt : 1; uint32_t m_cpl_tag_err : 1; uint32_t m_cpl_rid_err : 1; uint32_t m_cpl_fun_err : 1; uint32_t m_cpl_tc_err : 1; uint32_t m_cpl_attr_err : 1; uint32_t m_cpl_len_err : 1; uint32_t m_ecrc_filt : 1; uint32_t m_cpl_ecrc_filt : 1; uint32_t msg_ctrl : 1; uint32_t m_io_filt : 1; uint32_t m_cfg0_filt : 1; #endif } s; struct cvmx_pcieep_cfg455_s cn52xx; struct cvmx_pcieep_cfg455_s cn52xxp1; struct cvmx_pcieep_cfg455_s cn56xx; struct cvmx_pcieep_cfg455_s cn56xxp1; } cvmx_pcieep_cfg455_t; /** * cvmx_pcieep_cfg456 * * PCIE_CFG456 = Four hundred fifty-seventh 32-bits of PCIE type 0 config space * (Filter Mask Register 2) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg456_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_2_31 : 30; uint32_t m_vend1_drp : 1; /**< Mask Vendor MSG Type 1 dropped silently */ uint32_t m_vend0_drp : 1; /**< Mask Vendor MSG Type 0 dropped with UR error reporting. 
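(Illustrative tie-in: PCIE_CFG030[UR_D] notes that a received vendor-defined message normally raises an Unsupported Request; this mask bit changes how the receive filter disposes of Vendor Type 0 messages. A sketch for setting it, assuming the SDK's cvmx_pcie_cfgx_read/cvmx_pcie_cfgx_write helpers and the implied offset, word 456 => 0x720; the precise filter-rule semantics are defined by the underlying core, not by this header:
    cvmx_pcieep_cfg456_t cfg456;
    cfg456.u32 = cvmx_pcie_cfgx_read(pcie_port, 0x720);
    cfg456.s.m_vend0_drp = 1;
    cvmx_pcie_cfgx_write(pcie_port, 0x720, cfg456.u32);)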
*/ #else uint32_t m_vend0_drp : 1; uint32_t m_vend1_drp : 1; uint32_t reserved_2_31 : 30; #endif } s; struct cvmx_pcieep_cfg456_s cn52xx; struct cvmx_pcieep_cfg456_s cn52xxp1; struct cvmx_pcieep_cfg456_s cn56xx; struct cvmx_pcieep_cfg456_s cn56xxp1; } cvmx_pcieep_cfg456_t; /** * cvmx_pcieep_cfg458 * * PCIE_CFG458 = Four hundred fifty-ninth 32-bits of PCIE type 0 config space * (Debug Register 0) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg458_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t dbg_info_l32 : 32; /**< Debug Info Lower 32 Bits */ #else uint32_t dbg_info_l32 : 32; #endif } s; struct cvmx_pcieep_cfg458_s cn52xx; struct cvmx_pcieep_cfg458_s cn52xxp1; struct cvmx_pcieep_cfg458_s cn56xx; struct cvmx_pcieep_cfg458_s cn56xxp1; } cvmx_pcieep_cfg458_t; /** * cvmx_pcieep_cfg459 * * PCIE_CFG459 = Four hundred sixtieth 32-bits of PCIE type 0 config space * (Debug Register 1) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg459_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t dbg_info_u32 : 32; /**< Debug Info Upper 32 Bits */ #else uint32_t dbg_info_u32 : 32; #endif } s; struct cvmx_pcieep_cfg459_s cn52xx; struct cvmx_pcieep_cfg459_s cn52xxp1; struct cvmx_pcieep_cfg459_s cn56xx; struct cvmx_pcieep_cfg459_s cn56xxp1; } cvmx_pcieep_cfg459_t; /** * cvmx_pcieep_cfg460 * * PCIE_CFG460 = Four hundred sixty-first 32-bits of PCIE type 0 config space * (Transmit Posted FC Credit Status) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg460_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_20_31 : 12; uint32_t tphfcc : 8; /**< Transmit Posted Header FC Credits The Posted Header credits advertised by the receiver at the other end of the Link, updated with each UpdateFC DLLP. */ uint32_t tpdfcc : 12; /**< Transmit Posted Data FC Credits The Posted Data credits advertised by the receiver at the other end of the Link, updated with each UpdateFC DLLP. */ #else uint32_t tpdfcc : 12; uint32_t tphfcc : 8; uint32_t reserved_20_31 : 12; #endif } s; struct cvmx_pcieep_cfg460_s cn52xx; struct cvmx_pcieep_cfg460_s cn52xxp1; struct cvmx_pcieep_cfg460_s cn56xx; struct cvmx_pcieep_cfg460_s cn56xxp1; } cvmx_pcieep_cfg460_t; /** * cvmx_pcieep_cfg461 * * PCIE_CFG461 = Four hundred sixty-second 32-bits of PCIE type 0 config space * (Transmit Non-Posted FC Credit Status) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg461_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_20_31 : 12; uint32_t tchfcc : 8; /**< Transmit Non-Posted Header FC Credits The Non-Posted Header credits advertised by the receiver at the other end of the Link, updated with each UpdateFC DLLP. */ uint32_t tcdfcc : 12; /**< Transmit Non-Posted Data FC Credits The Non-Posted Data credits advertised by the receiver at the other end of the Link, updated with each UpdateFC DLLP. */ #else uint32_t tcdfcc : 12; uint32_t tchfcc : 8; uint32_t reserved_20_31 : 12; #endif } s; struct cvmx_pcieep_cfg461_s cn52xx; struct cvmx_pcieep_cfg461_s cn52xxp1; struct cvmx_pcieep_cfg461_s cn56xx; struct cvmx_pcieep_cfg461_s cn56xxp1; } cvmx_pcieep_cfg461_t; /** * cvmx_pcieep_cfg462 * * PCIE_CFG462 = Four hundred sixty-third 32-bits of PCIE type 0 config space * (Transmit Completion FC Credit Status ) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg462_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_20_31 : 12; uint32_t tchfcc : 8; /**< Transmit Completion Header FC Credits The Completion Header credits advertised by the receiver at the other end of the Link, updated with each UpdateFC DLLP. 
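(Illustrative credit snapshot, a sketch assuming the SDK's cvmx_pcie_cfgx_read helper; CFG460/461/462 sit at the implied byte offsets 0x730/0x734/0x738:
    cvmx_pcieep_cfg462_t cpl;
    cpl.u32 = cvmx_pcie_cfgx_read(pcie_port, 0x738);
    hdr_credits = cpl.s.tchfcc; data_credits = cpl.s.tcdfcc;
Useful when debugging flow-control stalls against the advertised UpdateFC values.)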
*/ uint32_t tcdfcc : 12; /**< Transmit Completion Data FC Credits The Completion Data credits advertised by the receiver at the other end of the Link, updated with each UpdateFC DLLP. */ #else uint32_t tcdfcc : 12; uint32_t tchfcc : 8; uint32_t reserved_20_31 : 12; #endif } s; struct cvmx_pcieep_cfg462_s cn52xx; struct cvmx_pcieep_cfg462_s cn52xxp1; struct cvmx_pcieep_cfg462_s cn56xx; struct cvmx_pcieep_cfg462_s cn56xxp1; } cvmx_pcieep_cfg462_t; /** * cvmx_pcieep_cfg463 * * PCIE_CFG463 = Four hundred sixty-fourth 32-bits of PCIE type 0 config space * (Queue Status) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg463_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_3_31 : 29; uint32_t rqne : 1; /**< Received Queue Not Empty Indicates there is data in one or more of the receive buffers. */ uint32_t trbne : 1; /**< Transmit Retry Buffer Not Empty Indicates that there is data in the transmit retry buffer. */ uint32_t rtlpfccnr : 1; /**< Received TLP FC Credits Not Returned Indicates that the PCI Express bus has sent a TLP but has not yet received an UpdateFC DLLP indicating that the credits for that TLP have been restored by the receiver at the other end of the Link. */ #else uint32_t rtlpfccnr : 1; uint32_t trbne : 1; uint32_t rqne : 1; uint32_t reserved_3_31 : 29; #endif } s; struct cvmx_pcieep_cfg463_s cn52xx; struct cvmx_pcieep_cfg463_s cn52xxp1; struct cvmx_pcieep_cfg463_s cn56xx; struct cvmx_pcieep_cfg463_s cn56xxp1; } cvmx_pcieep_cfg463_t; /** * cvmx_pcieep_cfg464 * * PCIE_CFG464 = Four hundred sixty-fifth 32-bits of PCIE type 0 config space * (VC Transmit Arbitration Register 1) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg464_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t wrr_vc3 : 8; /**< WRR Weight for VC3 */ uint32_t wrr_vc2 : 8; /**< WRR Weight for VC2 */ uint32_t wrr_vc1 : 8; /**< WRR Weight for VC1 */ uint32_t wrr_vc0 : 8; /**< WRR Weight for VC0 */ #else uint32_t wrr_vc0 : 8; uint32_t wrr_vc1 : 8; uint32_t wrr_vc2 : 8; uint32_t wrr_vc3 : 8; #endif } s; struct cvmx_pcieep_cfg464_s cn52xx; struct cvmx_pcieep_cfg464_s cn52xxp1; struct cvmx_pcieep_cfg464_s cn56xx; struct cvmx_pcieep_cfg464_s cn56xxp1; } cvmx_pcieep_cfg464_t; /** * cvmx_pcieep_cfg465 * * PCIE_CFG465 = Four hundred sixty-sixth 32-bits of PCIE type 0 config space * (VC Transmit Arbitration Register 2) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg465_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t wrr_vc7 : 8; /**< WRR Weight for VC7 */ uint32_t wrr_vc6 : 8; /**< WRR Weight for VC6 */ uint32_t wrr_vc5 : 8; /**< WRR Weight for VC5 */ uint32_t wrr_vc4 : 8; /**< WRR Weight for VC4 */ #else uint32_t wrr_vc4 : 8; uint32_t wrr_vc5 : 8; uint32_t wrr_vc6 : 8; uint32_t wrr_vc7 : 8; #endif } s; struct cvmx_pcieep_cfg465_s cn52xx; struct cvmx_pcieep_cfg465_s cn52xxp1; struct cvmx_pcieep_cfg465_s cn56xx; struct cvmx_pcieep_cfg465_s cn56xxp1; } cvmx_pcieep_cfg465_t; /** * cvmx_pcieep_cfg466 * * PCIE_CFG466 = Four hundred sixty-seventh 32-bits of PCIE type 0 config space * (VC0 Posted Receive Queue Control) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg466_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t rx_queue_order : 1; /**< VC Ordering for Receive Queues Determines the VC ordering rule for the receive queues, used only in the segmented-buffer configuration, writable through the DBI: o 1: Strict ordering, higher numbered VCs have higher priority o 0: Round robin However, the application must not change this field. 
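(Referring back to PCIE_CFG463, Queue Status, above: an illustrative quiesce check, sketch only, assuming the SDK's cvmx_pcie_cfgx_read helper and the implied offset, word 463 => 0x73c, is to poll until the transmit retry buffer drains:
    cvmx_pcieep_cfg463_t q;
    do { q.u32 = cvmx_pcie_cfgx_read(pcie_port, 0x73c); } while (q.s.trbne);)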
*/ uint32_t type_ordering : 1; /**< TLP Type Ordering for VC0 Determines the TLP type ordering rule for VC0 receive queues, used only in the segmented-buffer configuration, writable through the DBI: o 1: Ordering of received TLPs follows the rules in PCI Express Base Specification, Revision 1.1 o 0: Strict ordering for received TLPs: Posted, then Completion, then Non-Posted However, the application must not change this field. */ uint32_t reserved_24_29 : 6; uint32_t queue_mode : 3; /**< VC0 Posted TLP Queue Mode The operating mode of the Posted receive queue for VC0, used only in the segmented-buffer configuration, writable through the DBI. However, the application must not change this field. Only one bit can be set at a time: o Bit 23: Bypass o Bit 22: Cut-through o Bit 21: Store-and-forward */ uint32_t reserved_20_20 : 1; uint32_t header_credits : 8; /**< VC0 Posted Header Credits The number of initial Posted header credits for VC0, used for all receive queue buffer configurations. This field is writable through the DBI. However, the application must not change this field. */ uint32_t data_credits : 12; /**< VC0 Posted Data Credits The number of initial Posted data credits for VC0, used for all receive queue buffer configurations. This field is writable through the DBI. However, the application must not change this field. */ #else uint32_t data_credits : 12; uint32_t header_credits : 8; uint32_t reserved_20_20 : 1; uint32_t queue_mode : 3; uint32_t reserved_24_29 : 6; uint32_t type_ordering : 1; uint32_t rx_queue_order : 1; #endif } s; struct cvmx_pcieep_cfg466_s cn52xx; struct cvmx_pcieep_cfg466_s cn52xxp1; struct cvmx_pcieep_cfg466_s cn56xx; struct cvmx_pcieep_cfg466_s cn56xxp1; } cvmx_pcieep_cfg466_t; /** * cvmx_pcieep_cfg467 * * PCIE_CFG467 = Four hundred sixty-eighth 32-bits of PCIE type 0 config space * (VC0 Non-Posted Receive Queue Control) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg467_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_24_31 : 8; uint32_t queue_mode : 3; /**< VC0 Non-Posted TLP Queue Mode The operating mode of the Non-Posted receive queue for VC0, used only in the segmented-buffer configuration, writable through the DBI. Only one bit can be set at a time: o Bit 23: Bypass o Bit 22: Cut-through o Bit 21: Store-and-forward However, the application must not change this field. */ uint32_t reserved_20_20 : 1; uint32_t header_credits : 8; /**< VC0 Non-Posted Header Credits The number of initial Non-Posted header credits for VC0, used for all receive queue buffer configurations. This field is writable through the DBI. However, the application must not change this field. */ uint32_t data_credits : 12; /**< VC0 Non-Posted Data Credits The number of initial Non-Posted data credits for VC0, used for all receive queue buffer configurations. This field is writable through the DBI. However, the application must not change this field. 
*/ #else uint32_t data_credits : 12; uint32_t header_credits : 8; uint32_t reserved_20_20 : 1; uint32_t queue_mode : 3; uint32_t reserved_24_31 : 8; #endif } s; struct cvmx_pcieep_cfg467_s cn52xx; struct cvmx_pcieep_cfg467_s cn52xxp1; struct cvmx_pcieep_cfg467_s cn56xx; struct cvmx_pcieep_cfg467_s cn56xxp1; } cvmx_pcieep_cfg467_t; /** * cvmx_pcieep_cfg468 * * PCIE_CFG468 = Four hundred sixty-ninth 32-bits of PCIE type 0 config space * (VC0 Completion Receive Queue Control) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg468_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_24_31 : 8; uint32_t queue_mode : 3; /**< VC0 Completion TLP Queue Mode The operating mode of the Completion receive queue for VC0, used only in the segmented-buffer configuration, writable through the DBI. Only one bit can be set at a time: o Bit 23: Bypass o Bit 22: Cut-through o Bit 21: Store-and-forward However, the application must not change this field. */ uint32_t reserved_20_20 : 1; uint32_t header_credits : 8; /**< VC0 Completion Header Credits The number of initial Completion header credits for VC0, used for all receive queue buffer configurations. This field is writable through the DBI. However, the application must not change this field. */ uint32_t data_credits : 12; /**< VC0 Completion Data Credits The number of initial Completion data credits for VC0, used for all receive queue buffer configurations. This field is writable through the DBI. However, the application must not change this field. */ #else uint32_t data_credits : 12; uint32_t header_credits : 8; uint32_t reserved_20_20 : 1; uint32_t queue_mode : 3; uint32_t reserved_24_31 : 8; #endif } s; struct cvmx_pcieep_cfg468_s cn52xx; struct cvmx_pcieep_cfg468_s cn52xxp1; struct cvmx_pcieep_cfg468_s cn56xx; struct cvmx_pcieep_cfg468_s cn56xxp1; } cvmx_pcieep_cfg468_t; /** * cvmx_pcieep_cfg490 * * PCIE_CFG490 = Four hundred ninety-first 32-bits of PCIE type 0 config space * (VC0 Posted Buffer Depth) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg490_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_26_31 : 6; uint32_t header_depth : 10; /**< VC0 Posted Header Queue Depth Sets the number of entries in the Posted header queue for VC0 when using the segmented-buffer configuration, writable through the DBI. However, the application must not change this field. */ uint32_t reserved_14_15 : 2; uint32_t data_depth : 14; /**< VC0 Posted Data Queue Depth Sets the number of entries in the Posted data queue for VC0 when using the segmented-buffer configuration, writable through the DBI. However, the application must not change this field. */ #else uint32_t data_depth : 14; uint32_t reserved_14_15 : 2; uint32_t header_depth : 10; uint32_t reserved_26_31 : 6; #endif } s; struct cvmx_pcieep_cfg490_s cn52xx; struct cvmx_pcieep_cfg490_s cn52xxp1; struct cvmx_pcieep_cfg490_s cn56xx; struct cvmx_pcieep_cfg490_s cn56xxp1; } cvmx_pcieep_cfg490_t; /** * cvmx_pcieep_cfg491 * * PCIE_CFG491 = Four hundred ninety-second 32-bits of PCIE type 0 config space * (VC0 Non-Posted Buffer Depth) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg491_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_26_31 : 6; uint32_t header_depth : 10; /**< VC0 Non-Posted Header Queue Depth Sets the number of entries in the Non-Posted header queue for VC0 when using the segmented-buffer configuration, writable through the DBI. However, the application must not change this field. 
*/ uint32_t reserved_14_15 : 2; uint32_t data_depth : 14; /**< VC0 Non-Posted Data Queue Depth Sets the number of entries in the Non-Posted data queue for VC0 when using the segmented-buffer configuration, writable through the DBI. However, the application must not change this field. */ #else uint32_t data_depth : 14; uint32_t reserved_14_15 : 2; uint32_t header_depth : 10; uint32_t reserved_26_31 : 6; #endif } s; struct cvmx_pcieep_cfg491_s cn52xx; struct cvmx_pcieep_cfg491_s cn52xxp1; struct cvmx_pcieep_cfg491_s cn56xx; struct cvmx_pcieep_cfg491_s cn56xxp1; } cvmx_pcieep_cfg491_t; /** * cvmx_pcieep_cfg492 * * PCIE_CFG492 = Four hundred ninety-third 32-bits of PCIE type 0 config space * (VC0 Completion Buffer Depth) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg492_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_26_31 : 6; uint32_t header_depth : 10; /**< VC0 Completion Header Queue Depth Sets the number of entries in the Completion header queue for VC0 when using the segmented-buffer configuration, writable through the DBI. However, the application must not change this field. */ uint32_t reserved_14_15 : 2; uint32_t data_depth : 14; /**< VC0 Completion Data Queue Depth Sets the number of entries in the Completion data queue for VC0 when using the segmented-buffer configuration, writable through the DBI. However, the application must not change this field. */ #else uint32_t data_depth : 14; uint32_t reserved_14_15 : 2; uint32_t header_depth : 10; uint32_t reserved_26_31 : 6; #endif } s; struct cvmx_pcieep_cfg492_s cn52xx; struct cvmx_pcieep_cfg492_s cn52xxp1; struct cvmx_pcieep_cfg492_s cn56xx; struct cvmx_pcieep_cfg492_s cn56xxp1; } cvmx_pcieep_cfg492_t; /** * cvmx_pcieep_cfg516 * * PCIE_CFG516 = Five hundred seventeenth 32-bits of PCIE type 0 config space * (PHY Status Register) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg516_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t phy_stat : 32; /**< PHY Status */ #else uint32_t phy_stat : 32; #endif } s; struct cvmx_pcieep_cfg516_s cn52xx; struct cvmx_pcieep_cfg516_s cn52xxp1; struct cvmx_pcieep_cfg516_s cn56xx; struct cvmx_pcieep_cfg516_s cn56xxp1; } cvmx_pcieep_cfg516_t; /** * cvmx_pcieep_cfg517 * * PCIE_CFG517 = Five hundred eighteenth 32-bits of PCIE type 0 config space * (PHY Control Register) */ typedef union { uint32_t u32; struct cvmx_pcieep_cfg517_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t phy_ctrl : 32; /**< PHY Control */ #else uint32_t phy_ctrl : 32; #endif } s; struct cvmx_pcieep_cfg517_s cn52xx; struct cvmx_pcieep_cfg517_s cn52xxp1; struct cvmx_pcieep_cfg517_s cn56xx; struct cvmx_pcieep_cfg517_s cn56xxp1; } cvmx_pcieep_cfg517_t; /** * cvmx_pcierc#_cfg000 * * PCIE_CFG000 = First 32-bits of PCIE type 1 config space (Device ID and Vendor ID Register) * */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg000_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t devid : 16; /**< Device ID, writable through the DBI However, the application must not change this field. */ uint32_t vendid : 16; /**< Vendor ID, writable through the DBI However, the application must not change this field. 
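(Illustrative RC-side read, a sketch that assumes the OCTEON SDK's cvmx_pcie_cfgx_read helper and the CVMX_PCIERCX_CFG000(port) address macro from the companion address definitions:
    cvmx_pciercx_cfg000_t id;
    id.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG000(pcie_port));
then id.s.devid and id.s.vendid hold the Device and Vendor IDs for port pcie_port.)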
*/ #else uint32_t vendid : 16; uint32_t devid : 16; #endif } s; struct cvmx_pciercx_cfg000_s cn52xx; struct cvmx_pciercx_cfg000_s cn52xxp1; struct cvmx_pciercx_cfg000_s cn56xx; struct cvmx_pciercx_cfg000_s cn56xxp1; } cvmx_pciercx_cfg000_t; /** * cvmx_pcierc#_cfg001 * * PCIE_CFG001 = Second 32-bits of PCIE type 1 config space (Command/Status Register) * */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg001_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t dpe : 1; /**< Detected Parity Error */ uint32_t sse : 1; /**< Signaled System Error */ uint32_t rma : 1; /**< Received Master Abort */ uint32_t rta : 1; /**< Received Target Abort */ uint32_t sta : 1; /**< Signaled Target Abort */ uint32_t devt : 2; /**< DEVSEL Timing Not applicable for PCI Express. Hardwired to 0. */ uint32_t mdpe : 1; /**< Master Data Parity Error */ uint32_t fbb : 1; /**< Fast Back-to-Back Capable Not applicable for PCI Express. Hardwired to 0. */ uint32_t reserved_22_22 : 1; uint32_t m66 : 1; /**< 66 MHz Capable Not applicable for PCI Express. Hardwired to 0. */ uint32_t cl : 1; /**< Capabilities List Indicates presence of an extended capability item. Hardwired to 1. */ uint32_t i_stat : 1; /**< INTx Status */ uint32_t reserved_11_18 : 8; uint32_t i_dis : 1; /**< INTx Assertion Disable */ uint32_t fbbe : 1; /**< Fast Back-to-Back Enable Not applicable for PCI Express. Must be hardwired to 0. */ uint32_t see : 1; /**< SERR# Enable */ uint32_t ids_wcc : 1; /**< IDSEL Stepping/Wait Cycle Control Not applicable for PCI Express. Must be hardwired to 0 */ uint32_t per : 1; /**< Parity Error Response */ uint32_t vps : 1; /**< VGA Palette Snoop Not applicable for PCI Express. Must be hardwired to 0. */ uint32_t mwice : 1; /**< Memory Write and Invalidate Not applicable for PCI Express. Must be hardwired to 0. */ uint32_t scse : 1; /**< Special Cycle Enable Not applicable for PCI Express. Must be hardwired to 0. */ uint32_t me : 1; /**< Bus Master Enable */ uint32_t msae : 1; /**< Memory Space Enable */ uint32_t isae : 1; /**< I/O Space Enable */ #else uint32_t isae : 1; uint32_t msae : 1; uint32_t me : 1; uint32_t scse : 1; uint32_t mwice : 1; uint32_t vps : 1; uint32_t per : 1; uint32_t ids_wcc : 1; uint32_t see : 1; uint32_t fbbe : 1; uint32_t i_dis : 1; uint32_t reserved_11_18 : 8; uint32_t i_stat : 1; uint32_t cl : 1; uint32_t m66 : 1; uint32_t reserved_22_22 : 1; uint32_t fbb : 1; uint32_t mdpe : 1; uint32_t devt : 2; uint32_t sta : 1; uint32_t rta : 1; uint32_t rma : 1; uint32_t sse : 1; uint32_t dpe : 1; #endif } s; struct cvmx_pciercx_cfg001_s cn52xx; struct cvmx_pciercx_cfg001_s cn52xxp1; struct cvmx_pciercx_cfg001_s cn56xx; struct cvmx_pciercx_cfg001_s cn56xxp1; } cvmx_pciercx_cfg001_t; /** * cvmx_pcierc#_cfg002 * * PCIE_CFG002 = Third 32-bits of PCIE type 1 config space (Revision ID/Class Code Register) * */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg002_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t bcc : 8; /**< Base Class Code, writable through the DBI However, the application must not change this field. */ uint32_t sc : 8; /**< Subclass Code, writable through the DBI However, the application must not change this field. */ uint32_t pi : 8; /**< Programming Interface, writable through the DBI However, the application must not change this field. */ uint32_t rid : 8; /**< Revision ID, writable through the DBI However, the application must not change this field. 
*/ #else uint32_t rid : 8; uint32_t pi : 8; uint32_t sc : 8; uint32_t bcc : 8; #endif } s; struct cvmx_pciercx_cfg002_s cn52xx; struct cvmx_pciercx_cfg002_s cn52xxp1; struct cvmx_pciercx_cfg002_s cn56xx; struct cvmx_pciercx_cfg002_s cn56xxp1; } cvmx_pciercx_cfg002_t; /** * cvmx_pcierc#_cfg003 * * PCIE_CFG003 = Fourth 32-bits of PCIE type 1 config space (Cache Line Size/Master Latency Timer/Header Type Register/BIST Register) * */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg003_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t bist : 8; /**< The BIST register functions are not supported. All 8 bits of the BIST register are hardwired to 0. */ uint32_t mfd : 1; /**< Multi Function Device The Multi Function Device bit is writable through the DBI. However, this is a single function device. Therefore, the application must not write a 1 to this bit. */ uint32_t chf : 7; /**< Configuration Header Format Hardwired to 1. */ uint32_t lt : 8; /**< Master Latency Timer Not applicable for PCI Express, hardwired to 0. */ uint32_t cls : 8; /**< Cache Line Size The Cache Line Size register is RW for legacy compatibility purposes and is not applicable to PCI Express device functionality. */ #else uint32_t cls : 8; uint32_t lt : 8; uint32_t chf : 7; uint32_t mfd : 1; uint32_t bist : 8; #endif } s; struct cvmx_pciercx_cfg003_s cn52xx; struct cvmx_pciercx_cfg003_s cn52xxp1; struct cvmx_pciercx_cfg003_s cn56xx; struct cvmx_pciercx_cfg003_s cn56xxp1; } cvmx_pciercx_cfg003_t; /** * cvmx_pcierc#_cfg004 * * PCIE_CFG004 = Fifth 32-bits of PCIE type 1 config space (Base Address Register 0 - Low) * */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg004_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_0_31 : 32; #else uint32_t reserved_0_31 : 32; #endif } s; struct cvmx_pciercx_cfg004_s cn52xx; struct cvmx_pciercx_cfg004_s cn52xxp1; struct cvmx_pciercx_cfg004_s cn56xx; struct cvmx_pciercx_cfg004_s cn56xxp1; } cvmx_pciercx_cfg004_t; /** * cvmx_pcierc#_cfg005 * * PCIE_CFG005 = Sixth 32-bits of PCIE type 1 config space (Base Address Register 0 - High) * */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg005_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_0_31 : 32; #else uint32_t reserved_0_31 : 32; #endif } s; struct cvmx_pciercx_cfg005_s cn52xx; struct cvmx_pciercx_cfg005_s cn52xxp1; struct cvmx_pciercx_cfg005_s cn56xx; struct cvmx_pciercx_cfg005_s cn56xxp1; } cvmx_pciercx_cfg005_t; /** * cvmx_pcierc#_cfg006 * * PCIE_CFG006 = Seventh 32-bits of PCIE type 1 config space (Bus Number Registers) * */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg006_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t slt : 8; /**< Secondary Latency Timer Not applicable to PCI Express, hardwired to 0x00. 
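A sketch of the usual read-modify-write on this register during enumeration, with the same assumed cvmx_pcie_cfgx_read()/cvmx_pcie_cfgx_write() helpers as the PCIE_CFG000 sketch:
    cvmx_pciercx_cfg006_t bus;
    bus.u32 = cvmx_pcie_cfgx_read(port, CVMX_PCIERCX_CFG006(port));
    bus.s.pbnum   = 0;  // primary bus: the RC itself
    bus.s.sbnum   = 1;  // first bus below the RC
    bus.s.subbnum = 1;  // highest bus below the RC
    cvmx_pcie_cfgx_write(port, CVMX_PCIERCX_CFG006(port), bus.u32);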
*/ uint32_t subbnum : 8; /**< Subordinate Bus Number */ uint32_t sbnum : 8; /**< Secondary Bus Number */ uint32_t pbnum : 8; /**< Primary Bus Number */ #else uint32_t pbnum : 8; uint32_t sbnum : 8; uint32_t subbnum : 8; uint32_t slt : 8; #endif } s; struct cvmx_pciercx_cfg006_s cn52xx; struct cvmx_pciercx_cfg006_s cn52xxp1; struct cvmx_pciercx_cfg006_s cn56xx; struct cvmx_pciercx_cfg006_s cn56xxp1; } cvmx_pciercx_cfg006_t; /** * cvmx_pcierc#_cfg007 * * PCIE_CFG007 = Eighth 32-bits of PCIE type 1 config space (IO Base and IO Limit/Secondary Status Register) * */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg007_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t dpe : 1; /**< Detected Parity Error */ uint32_t sse : 1; /**< Signaled System Error */ uint32_t rma : 1; /**< Received Master Abort */ uint32_t rta : 1; /**< Received Target Abort */ uint32_t sta : 1; /**< Signaled Target Abort */ uint32_t devt : 2; /**< DEVSEL Timing Not applicable for PCI Express. Hardwired to 0. */ uint32_t mdpe : 1; /**< Master Data Parity Error */ uint32_t fbb : 1; /**< Fast Back-to-Back Capable Not applicable for PCI Express. Hardwired to 0. */ uint32_t reserved_22_22 : 1; uint32_t m66 : 1; /**< 66 MHz Capable Not applicable for PCI Express. Hardwired to 0. */ uint32_t reserved_16_20 : 5; uint32_t lio_limi : 4; /**< I/O Space Limit */ uint32_t reserved_9_11 : 3; uint32_t io32b : 1; /**< 32-Bit I/O Space */ uint32_t lio_base : 4; /**< I/O Space Base */ uint32_t reserved_1_3 : 3; uint32_t io32a : 1; /**< 32-Bit I/O Space o 0 = 16-bit I/O addressing o 1 = 32-bit I/O addressing This bit is writable through the DBI. When the application writes to this bit through the DBI, the same value is written to bit 8 of this register. */ #else uint32_t io32a : 1; uint32_t reserved_1_3 : 3; uint32_t lio_base : 4; uint32_t io32b : 1; uint32_t reserved_9_11 : 3; uint32_t lio_limi : 4; uint32_t reserved_16_20 : 5; uint32_t m66 : 1; uint32_t reserved_22_22 : 1; uint32_t fbb : 1; uint32_t mdpe : 1; uint32_t devt : 2; uint32_t sta : 1; uint32_t rta : 1; uint32_t rma : 1; uint32_t sse : 1; uint32_t dpe : 1; #endif } s; struct cvmx_pciercx_cfg007_s cn52xx; struct cvmx_pciercx_cfg007_s cn52xxp1; struct cvmx_pciercx_cfg007_s cn56xx; struct cvmx_pciercx_cfg007_s cn56xxp1; } cvmx_pciercx_cfg007_t; /** * cvmx_pcierc#_cfg008 * * PCIE_CFG008 = Ninth 32-bits of PCIE type 1 config space (Memory Base and Memory Limit Register) * */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg008_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t ml_addr : 12; /**< Memory Limit Address */ uint32_t reserved_16_19 : 4; uint32_t mb_addr : 12; /**< Memory Base Address */ uint32_t reserved_0_3 : 4; #else uint32_t reserved_0_3 : 4; uint32_t mb_addr : 12; uint32_t reserved_16_19 : 4; uint32_t ml_addr : 12; #endif } s; struct cvmx_pciercx_cfg008_s cn52xx; struct cvmx_pciercx_cfg008_s cn52xxp1; struct cvmx_pciercx_cfg008_s cn56xx; struct cvmx_pciercx_cfg008_s cn56xxp1; } cvmx_pciercx_cfg008_t; /** * cvmx_pcierc#_cfg009 * * PCIE_CFG009 = Tenth 32-bits of PCIE type 1 config space (Prefetchable Memory Base and Limit Register) * */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg009_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t lmem_limit : 12; /**< Upper 12 bits of 32-bit Prefetchable Memory End Address */ uint32_t reserved_17_19 : 3; uint32_t mem64b : 1; /**< 64-Bit Memory Addressing o 0 = 32-bit memory addressing o 1 = 64-bit memory addressing */ uint32_t lmem_base : 12; /**< Upper 12 bits of 32-bit Prefetchable Memory Start Address */ uint32_t reserved_1_3 : 3; 
uint32_t mem64a : 1; /**< 64-Bit Memory Addressing o 0 = 32-bit memory addressing o 1 = 64-bit memory addressing This bit is writable through the DBI. When the application writes to this bit through the DBI, the same value is written to bit 16 of this register. */ #else uint32_t mem64a : 1; uint32_t reserved_1_3 : 3; uint32_t lmem_base : 12; uint32_t mem64b : 1; uint32_t reserved_17_19 : 3; uint32_t lmem_limit : 12; #endif } s; struct cvmx_pciercx_cfg009_s cn52xx; struct cvmx_pciercx_cfg009_s cn52xxp1; struct cvmx_pciercx_cfg009_s cn56xx; struct cvmx_pciercx_cfg009_s cn56xxp1; } cvmx_pciercx_cfg009_t; /** * cvmx_pcierc#_cfg010 * * PCIE_CFG010 = Eleventh 32-bits of PCIE type 1 config space (Prefetchable Base Upper 32 Bits Register) * */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg010_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t umem_base : 32; /**< Upper 32 Bits of Base Address of Prefetchable Memory Space Used only when 64-bit prefetchable memory addressing is enabled. */ #else uint32_t umem_base : 32; #endif } s; struct cvmx_pciercx_cfg010_s cn52xx; struct cvmx_pciercx_cfg010_s cn52xxp1; struct cvmx_pciercx_cfg010_s cn56xx; struct cvmx_pciercx_cfg010_s cn56xxp1; } cvmx_pciercx_cfg010_t; /** * cvmx_pcierc#_cfg011 * * PCIE_CFG011 = Twelfth 32-bits of PCIE type 1 config space (Prefetchable Limit Upper 32 Bits Register) * */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg011_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t umem_limit : 32; /**< Upper 32 Bits of Limit Address of Prefetchable Memory Space Used only when 64-bit prefetchable memory addressing is enabled. */ #else uint32_t umem_limit : 32; #endif } s; struct cvmx_pciercx_cfg011_s cn52xx; struct cvmx_pciercx_cfg011_s cn52xxp1; struct cvmx_pciercx_cfg011_s cn56xx; struct cvmx_pciercx_cfg011_s cn56xxp1; } cvmx_pciercx_cfg011_t; /** * cvmx_pcierc#_cfg012 * * PCIE_CFG012 = Thirteenth 32-bits of PCIE type 1 config space (IO Base and Limit Upper 16 Bits Register) * */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg012_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t uio_limit : 16; /**< Upper 16 Bits of I/O Limit (if 32-bit I/O decoding is supported for devices on the secondary side) */ uint32_t uio_base : 16; /**< Upper 16 Bits of I/O Base (if 32-bit I/O decoding is supported for devices on the secondary side) */ #else uint32_t uio_base : 16; uint32_t uio_limit : 16; #endif } s; struct cvmx_pciercx_cfg012_s cn52xx; struct cvmx_pciercx_cfg012_s cn52xxp1; struct cvmx_pciercx_cfg012_s cn56xx; struct cvmx_pciercx_cfg012_s cn56xxp1; } cvmx_pciercx_cfg012_t; /** * cvmx_pcierc#_cfg013 * * PCIE_CFG013 = Fourteenth 32-bits of PCIE type 1 config space (Capability Pointer Register) * */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg013_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_8_31 : 24; uint32_t cp : 8; /**< First Capability Pointer. Points to Power Management Capability structure by default, writable through the DBI However, the application must not change this field. 
*/ #else uint32_t cp : 8; uint32_t reserved_8_31 : 24; #endif } s; struct cvmx_pciercx_cfg013_s cn52xx; struct cvmx_pciercx_cfg013_s cn52xxp1; struct cvmx_pciercx_cfg013_s cn56xx; struct cvmx_pciercx_cfg013_s cn56xxp1; } cvmx_pciercx_cfg013_t; /** * cvmx_pcierc#_cfg014 * * PCIE_CFG014 = Fifteenth 32-bits of PCIE type 1 config space (Expansion ROM Base Address Register) * */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg014_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_0_31 : 32; #else uint32_t reserved_0_31 : 32; #endif } s; struct cvmx_pciercx_cfg014_s cn52xx; struct cvmx_pciercx_cfg014_s cn52xxp1; struct cvmx_pciercx_cfg014_s cn56xx; struct cvmx_pciercx_cfg014_s cn56xxp1; } cvmx_pciercx_cfg014_t; /** * cvmx_pcierc#_cfg015 * * PCIE_CFG015 = Sixteenth 32-bits of PCIE type 1 config space (Interrupt Line Register/Interrupt Pin/Bridge Control Register) * */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg015_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_28_31 : 4; uint32_t dtsees : 1; /**< Discard Timer SERR Enable Status Not applicable to PCI Express, hardwired to 0. */ uint32_t dts : 1; /**< Discard Timer Status Not applicable to PCI Express, hardwired to 0. */ uint32_t sdt : 1; /**< Secondary Discard Timer Not applicable to PCI Express, hardwired to 0. */ uint32_t pdt : 1; /**< Primary Discard Timer Not applicable to PCI Express, hardwired to 0. */ uint32_t fbbe : 1; /**< Fast Back-to-Back Transactions Enable Not applicable to PCI Express, hardwired to 0. */ uint32_t sbrst : 1; /**< Secondary Bus Reset Hot reset. Causes TS1s with the hot reset bit to be sent to the link partner. When set, SW should wait 2ms before clearing. The link partner normally responds by sending TS1s with the hot reset bit set, which will cause a link down event - refer to "PCIe Link-Down Reset in RC Mode" section. */ uint32_t mam : 1; /**< Master Abort Mode Not applicable to PCI Express, hardwired to 0. */ uint32_t vga16d : 1; /**< VGA 16-Bit Decode */ uint32_t vgae : 1; /**< VGA Enable */ uint32_t isae : 1; /**< ISA Enable */ uint32_t see : 1; /**< SERR Enable */ uint32_t pere : 1; /**< Parity Error Response Enable */ uint32_t inta : 8; /**< Interrupt Pin Identifies the legacy interrupt Message that the device (or device function) uses. The Interrupt Pin register is writable through the DBI. In a single-function configuration, only INTA is used. Therefore, the application must not change this field. 
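A sketch of the SBRST hot-reset pulse described above (assumed helpers as in the PCIE_CFG000 sketch, plus an assumed cvmx_wait_usec() delay helper for the recommended 2 ms hold):
    cvmx_pciercx_cfg015_t br;
    br.u32 = cvmx_pcie_cfgx_read(port, CVMX_PCIERCX_CFG015(port));
    br.s.sbrst = 1;  // start sending TS1s with the hot reset bit
    cvmx_pcie_cfgx_write(port, CVMX_PCIERCX_CFG015(port), br.u32);
    cvmx_wait_usec(2000);  // wait 2 ms before clearing, per the note above
    br.s.sbrst = 0;
    cvmx_pcie_cfgx_write(port, CVMX_PCIERCX_CFG015(port), br.u32);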
*/ uint32_t il : 8; /**< Interrupt Line */ #else uint32_t il : 8; uint32_t inta : 8; uint32_t pere : 1; uint32_t see : 1; uint32_t isae : 1; uint32_t vgae : 1; uint32_t vga16d : 1; uint32_t mam : 1; uint32_t sbrst : 1; uint32_t fbbe : 1; uint32_t pdt : 1; uint32_t sdt : 1; uint32_t dts : 1; uint32_t dtsees : 1; uint32_t reserved_28_31 : 4; #endif } s; struct cvmx_pciercx_cfg015_s cn52xx; struct cvmx_pciercx_cfg015_s cn52xxp1; struct cvmx_pciercx_cfg015_s cn56xx; struct cvmx_pciercx_cfg015_s cn56xxp1; } cvmx_pciercx_cfg015_t; /** * cvmx_pcierc#_cfg016 * * PCIE_CFG016 = Seventeenth 32-bits of PCIE type 1 config space * (Power Management Capability ID/ * Power Management Next Item Pointer/ * Power Management Capabilities Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg016_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t pmes : 5; /**< PME_Support A value of 0 for any bit indicates that the device (or function) is not capable of generating PME Messages while in that power state: o Bit 11: If set, PME Messages can be generated from D0 o Bit 12: If set, PME Messages can be generated from D1 o Bit 13: If set, PME Messages can be generated from D2 o Bit 14: If set, PME Messages can be generated from D3hot o Bit 15: If set, PME Messages can be generated from D3cold The PME_Support field is writable through the DBI. However, the application must not change this field. */ uint32_t d2s : 1; /**< D2 Support, writable through the DBI However, the application must not change this field. */ uint32_t d1s : 1; /**< D1 Support, writable through the DBI However, the application must not change this field. */ uint32_t auxc : 3; /**< AUX Current, writable through the DBI However, the application must not change this field. */ uint32_t dsi : 1; /**< Device Specific Initialization (DSI), writable through the DBI However, the application must not change this field. */ uint32_t reserved_20_20 : 1; uint32_t pme_clock : 1; /**< PME Clock, hardwired to 0 */ uint32_t pmsv : 3; /**< Power Management Specification Version, writable through the DBI However, the application must not change this field. */ uint32_t ncp : 8; /**< Next Capability Pointer Points to the MSI capabilities by default, writable through the DBI. */ uint32_t pmcid : 8; /**< Power Management Capability ID */ #else uint32_t pmcid : 8; uint32_t ncp : 8; uint32_t pmsv : 3; uint32_t pme_clock : 1; uint32_t reserved_20_20 : 1; uint32_t dsi : 1; uint32_t auxc : 3; uint32_t d1s : 1; uint32_t d2s : 1; uint32_t pmes : 5; #endif } s; struct cvmx_pciercx_cfg016_s cn52xx; struct cvmx_pciercx_cfg016_s cn52xxp1; struct cvmx_pciercx_cfg016_s cn56xx; struct cvmx_pciercx_cfg016_s cn56xxp1; } cvmx_pciercx_cfg016_t; /** * cvmx_pcierc#_cfg017 * * PCIE_CFG017 = Eighteenth 32-bits of PCIE type 1 config space (Power Management Control and Status Register) * */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg017_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t pmdia : 8; /**< Data register for additional information (not supported) */ uint32_t bpccee : 1; /**< Bus Power/Clock Control Enable, hardwired to 0 */ uint32_t bd3h : 1; /**< B2/B3 Support, hardwired to 0 */ uint32_t reserved_16_21 : 6; uint32_t pmess : 1; /**< PME Status Indicates if a previously enabled PME event occurred or not. */ uint32_t pmedsia : 2; /**< Data Scale (not supported) */ uint32_t pmds : 4; /**< Data Select (not supported) */ uint32_t pmeens : 1; /**< PME Enable A value of 1 indicates that the device is enabled to generate PME. 
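A sketch of servicing a PME event through this register; PME Status is write-1-to-clear in the PCI PM specification, so writing back the read value with PMESS set clears it (assumed helpers as in the PCIE_CFG000 sketch):
    cvmx_pciercx_cfg017_t pm;
    pm.u32 = cvmx_pcie_cfgx_read(port, CVMX_PCIERCX_CFG017(port));
    if (pm.s.pmess)  // a previously enabled PME event occurred
        cvmx_pcie_cfgx_write(port, CVMX_PCIERCX_CFG017(port), pm.u32);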
*/ uint32_t reserved_4_7 : 4; uint32_t nsr : 1; /**< No Soft Reset, writable through the DBI However, the application must not change this field. */ uint32_t reserved_2_2 : 1; uint32_t ps : 2; /**< Power State Controls the device power state: o 00b: D0 o 01b: D1 o 10b: D2 o 11b: D3 The written value is ignored if the specific state is not supported. */ #else uint32_t ps : 2; uint32_t reserved_2_2 : 1; uint32_t nsr : 1; uint32_t reserved_4_7 : 4; uint32_t pmeens : 1; uint32_t pmds : 4; uint32_t pmedsia : 2; uint32_t pmess : 1; uint32_t reserved_16_21 : 6; uint32_t bd3h : 1; uint32_t bpccee : 1; uint32_t pmdia : 8; #endif } s; struct cvmx_pciercx_cfg017_s cn52xx; struct cvmx_pciercx_cfg017_s cn52xxp1; struct cvmx_pciercx_cfg017_s cn56xx; struct cvmx_pciercx_cfg017_s cn56xxp1; } cvmx_pciercx_cfg017_t; /** * cvmx_pcierc#_cfg020 * * PCIE_CFG020 = Twenty-first 32-bits of PCIE type 1 config space * (MSI Capability ID/ * MSI Next Item Pointer/ * MSI Control Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg020_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_24_31 : 8; uint32_t m64 : 1; /**< 64-bit Address Capable, writable through the DBI However, the application must not change this field. */ uint32_t mme : 3; /**< Multiple Message Enabled Indicates that multiple Message mode is enabled by system software. The number of Messages enabled must be less than or equal to the Multiple Message Capable value. */ uint32_t mmc : 3; /**< Multiple Message Capable, writable through the DBI However, the application must not change this field. */ uint32_t msien : 1; /**< MSI Enabled When set, INTx must be disabled. This bit must never be set, as internal-MSI is not supported in RC mode. (Note that this has no effect on external MSI, which will be commonly used in RC mode.) */ uint32_t ncp : 8; /**< Next Capability Pointer Points to PCI Express Capabilities by default, writable through the DBI. However, the application must not change this field. 
uint32_t msicid : 8; /**< MSI Capability ID */ #else uint32_t msicid : 8; uint32_t ncp : 8; uint32_t msien : 1; uint32_t mmc : 3; uint32_t mme : 3; uint32_t m64 : 1; uint32_t reserved_24_31 : 8; #endif } s; struct cvmx_pciercx_cfg020_s cn52xx; struct cvmx_pciercx_cfg020_s cn52xxp1; struct cvmx_pciercx_cfg020_s cn56xx; struct cvmx_pciercx_cfg020_s cn56xxp1; } cvmx_pciercx_cfg020_t; /** * cvmx_pcierc#_cfg021 * * PCIE_CFG021 = Twenty-second 32-bits of PCIE type 1 config space (MSI Lower 32 Bits Address Register) * */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg021_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t lmsi : 30; /**< Lower 32-bit Address */ uint32_t reserved_0_1 : 2; #else uint32_t reserved_0_1 : 2; uint32_t lmsi : 30; #endif } s; struct cvmx_pciercx_cfg021_s cn52xx; struct cvmx_pciercx_cfg021_s cn52xxp1; struct cvmx_pciercx_cfg021_s cn56xx; struct cvmx_pciercx_cfg021_s cn56xxp1; } cvmx_pciercx_cfg021_t; /** * cvmx_pcierc#_cfg022 * * PCIE_CFG022 = Twenty-third 32-bits of PCIE type 1 config space (MSI Upper 32 bits Address Register) * */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg022_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t umsi : 32; /**< Upper 32-bit Address */ #else uint32_t umsi : 32; #endif } s; struct cvmx_pciercx_cfg022_s cn52xx; struct cvmx_pciercx_cfg022_s cn52xxp1; struct cvmx_pciercx_cfg022_s cn56xx; struct cvmx_pciercx_cfg022_s cn56xxp1; } cvmx_pciercx_cfg022_t; /** * cvmx_pcierc#_cfg023 * * PCIE_CFG023 = Twenty-fourth 32-bits of PCIE type 1 config space (MSI Data Register) * */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg023_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_16_31 : 16; uint32_t msimd : 16; /**< MSI Data Pattern assigned by system software, bits [4:0] are OR'd with MSI_VECTOR to generate 32 MSI Messages per function. */ #else uint32_t msimd : 16; uint32_t reserved_16_31 : 16; #endif } s; struct cvmx_pciercx_cfg023_s cn52xx; struct cvmx_pciercx_cfg023_s cn52xxp1; struct cvmx_pciercx_cfg023_s cn56xx; struct cvmx_pciercx_cfg023_s cn56xxp1; } cvmx_pciercx_cfg023_t; /** * cvmx_pcierc#_cfg028 * * PCIE_CFG028 = Twenty-ninth 32-bits of PCIE type 1 config space * (PCI Express Capabilities List Register/ * PCI Express Capabilities Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg028_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_30_31 : 2; uint32_t imn : 5; /**< Interrupt Message Number Updated by hardware, writable through the DBI. However, the application must not change this field. */ uint32_t si : 1; /**< Slot Implemented This bit is writable through the DBI. However, it must be 0 for an Endpoint device. Therefore, the application must not write a 1 to this bit. */ uint32_t dpt : 4; /**< Device Port Type */ uint32_t pciecv : 4; /**< PCI Express Capability Version */ uint32_t ncp : 8; /**< Next Capability Pointer, writable through the DBI. However, the application must not change this field.
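The NCP fields chain the standard capabilities together; a sketch of walking the chain from the first pointer in PCIE_CFG013, assuming cvmx_pcie_cfgx_read() takes a byte offset into config space:
    uint32_t ptr = first_cap;  // e.g. the CP field read from PCIE_CFG013
    while (ptr) {
        uint32_t hdr = cvmx_pcie_cfgx_read(port, ptr);
        printf("capability 0x%02x at offset 0x%02x\n", hdr & 0xff, ptr);
        ptr = (hdr >> 8) & 0xff;  // next capability pointer
    }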
*/ uint32_t pcieid : 8; /**< PCIE Capability ID */ #else uint32_t pcieid : 8; uint32_t ncp : 8; uint32_t pciecv : 4; uint32_t dpt : 4; uint32_t si : 1; uint32_t imn : 5; uint32_t reserved_30_31 : 2; #endif } s; struct cvmx_pciercx_cfg028_s cn52xx; struct cvmx_pciercx_cfg028_s cn52xxp1; struct cvmx_pciercx_cfg028_s cn56xx; struct cvmx_pciercx_cfg028_s cn56xxp1; } cvmx_pciercx_cfg028_t; /** * cvmx_pcierc#_cfg029 * * PCIE_CFG029 = Thirtieth 32-bits of PCIE type 1 config space (Device Capabilities Register) * */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg029_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_28_31 : 4; uint32_t cspls : 2; /**< Captured Slot Power Limit Scale Not applicable for RC port, upstream port only. */ uint32_t csplv : 8; /**< Captured Slot Power Limit Value Not applicable for RC port, upstream port only. */ uint32_t reserved_16_17 : 2; uint32_t rber : 1; /**< Role-Based Error Reporting, writable through the DBI However, the application must not change this field. */ uint32_t reserved_12_14 : 3; uint32_t el1al : 3; /**< Endpoint L1 Acceptable Latency, writable through the DBI Must be 0x0 for non-endpoint devices. */ uint32_t el0al : 3; /**< Endpoint L0s Acceptable Latency, writable through the DBI Must be 0x0 for non-endpoint devices. */ uint32_t etfs : 1; /**< Extended Tag Field Supported This bit is writable through the DBI. However, the application must not write a 1 to this bit. */ uint32_t pfs : 2; /**< Phantom Function Supported This field is writable through the DBI. However, Phantom Function is not supported. Therefore, the application must not write any value other than 0x0 to this field. */ uint32_t mpss : 3; /**< Max_Payload_Size Supported, writable through the DBI However, the application must not change this field. */ #else uint32_t mpss : 3; uint32_t pfs : 2; uint32_t etfs : 1; uint32_t el0al : 3; uint32_t el1al : 3; uint32_t reserved_12_14 : 3; uint32_t rber : 1; uint32_t reserved_16_17 : 2; uint32_t csplv : 8; uint32_t cspls : 2; uint32_t reserved_28_31 : 4; #endif } s; struct cvmx_pciercx_cfg029_s cn52xx; struct cvmx_pciercx_cfg029_s cn52xxp1; struct cvmx_pciercx_cfg029_s cn56xx; struct cvmx_pciercx_cfg029_s cn56xxp1; } cvmx_pciercx_cfg029_t; /** * cvmx_pcierc#_cfg030 * * PCIE_CFG030 = Thirty-first 32-bits of PCIE type 1 config space * (Device Control Register/Device Status Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg030_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_22_31 : 10; uint32_t tp : 1; /**< Transaction Pending Set to 1 when Non-Posted Requests are not yet completed and cleared when they are completed. */ uint32_t ap_d : 1; /**< Aux Power Detected Set to 1 if Aux power detected. */ uint32_t ur_d : 1; /**< Unsupported Request Detected Errors are logged in this register regardless of whether error reporting is enabled in the Device Control register. UR_D occurs when we receive something we don't support. Unsupported requests are Nonfatal errors, so UR_D should cause NFE_D. Receiving a vendor defined message should cause an unsupported request. */ uint32_t fe_d : 1; /**< Fatal Error Detected Errors are logged in this register regardless of whether error reporting is enabled in the Device Control register. FE_D is set if we receive any of the errors in PCIE_CFG066 that have a severity set to Fatal. Malformed TLP's generally fit into this category.
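A sketch of polling the detected-error bits of this register; they are RW1C in the PCIe specification, so writing back the read value clears whatever was set (assumed helpers as in the PCIE_CFG000 sketch):
    cvmx_pciercx_cfg030_t dev;
    dev.u32 = cvmx_pcie_cfgx_read(port, CVMX_PCIERCX_CFG030(port));
    if (dev.s.fe_d || dev.s.nfe_d || dev.s.ce_d || dev.s.ur_d) {
        printf("PCIe errors: FE=%d NFE=%d CE=%d UR=%d\n",
               dev.s.fe_d, dev.s.nfe_d, dev.s.ce_d, dev.s.ur_d);
        cvmx_pcie_cfgx_write(port, CVMX_PCIERCX_CFG030(port), dev.u32);
    }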
uint32_t nfe_d : 1; /**< Non-Fatal Error Detected Errors are logged in this register regardless of whether error reporting is enabled in the Device Control register. NFE_D is set if we receive any of the errors in PCIE_CFG066 that have a severity set to Nonfatal and do NOT meet the Advisory Nonfatal criteria (PCIe 1.1 spec, Section 6.2.3.2.4), which most poisoned TLP's should be. */ uint32_t ce_d : 1; /**< Correctable Error Detected Errors are logged in this register regardless of whether error reporting is enabled in the Device Control register. CE_D is set if we receive any of the errors in PCIE_CFG068, for example a Replay Timer Timeout. Also, it can be set if we get any of the errors in PCIE_CFG066 that have a severity set to Nonfatal and meet the Advisory Nonfatal criteria (PCIe 1.1 spec, Section 6.2.3.2.4), which most ECRC errors should be. */ uint32_t reserved_15_15 : 1; uint32_t mrrs : 3; /**< Max Read Request Size 0 = 128B 1 = 256B 2 = 512B 3 = 1024B 4 = 2048B 5 = 4096B Note: NPEI_CTL_STATUS2[MRRS] also must be set properly. NPEI_CTL_STATUS2[MRRS] must not exceed the desired max read request size. */ uint32_t ns_en : 1; /**< Enable No Snoop */ uint32_t ap_en : 1; /**< AUX Power PM Enable */ uint32_t pf_en : 1; /**< Phantom Function Enable This bit should never be set - OCTEON requests never use phantom functions. */ uint32_t etf_en : 1; /**< Extended Tag Field Enable This bit should never be set - OCTEON requests never use extended tags. */ uint32_t mps : 3; /**< Max Payload Size Legal values: 0 = 128B 1 = 256B Larger sizes not supported. Note: Both PCI Express Ports must be set to the same value for Peer-to-Peer to function properly. Note: NPEI_CTL_STATUS2[MPS] must also be set to the same value for proper functionality. */ uint32_t ro_en : 1; /**< Enable Relaxed Ordering */ uint32_t ur_en : 1; /**< Unsupported Request Reporting Enable */ uint32_t fe_en : 1; /**< Fatal Error Reporting Enable */ uint32_t nfe_en : 1; /**< Non-Fatal Error Reporting Enable */ uint32_t ce_en : 1; /**< Correctable Error Reporting Enable */ #else uint32_t ce_en : 1; uint32_t nfe_en : 1; uint32_t fe_en : 1; uint32_t ur_en : 1; uint32_t ro_en : 1; uint32_t mps : 3; uint32_t etf_en : 1; uint32_t pf_en : 1; uint32_t ap_en : 1; uint32_t ns_en : 1; uint32_t mrrs : 3; uint32_t reserved_15_15 : 1; uint32_t ce_d : 1; uint32_t nfe_d : 1; uint32_t fe_d : 1; uint32_t ur_d : 1; uint32_t ap_d : 1; uint32_t tp : 1; uint32_t reserved_22_31 : 10; #endif } s; struct cvmx_pciercx_cfg030_s cn52xx; struct cvmx_pciercx_cfg030_s cn52xxp1; struct cvmx_pciercx_cfg030_s cn56xx; struct cvmx_pciercx_cfg030_s cn56xxp1; } cvmx_pciercx_cfg030_t; /** * cvmx_pcierc#_cfg031 * * PCIE_CFG031 = Thirty-second 32-bits of PCIE type 1 config space * (Link Capabilities Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg031_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t pnum : 8; /**< Port Number, writable through the DBI However, the application must not change this field. */ uint32_t reserved_22_23 : 2; uint32_t lbnc : 1; /**< Link Bandwidth Notification Capability */ uint32_t dllarc : 1; /**< Data Link Layer Active Reporting Capable Set to 1 for Root Complex devices and 0 for Endpoint devices. */ uint32_t sderc : 1; /**< Surprise Down Error Reporting Capable Not supported, hardwired to 0x0. */ uint32_t cpm : 1; /**< Clock Power Management The default value is the value you specify during hardware configuration, writable through the DBI. However, the application must not change this field.
uint32_t l1el : 3; /**< L1 Exit Latency The default value is the value you specify during hardware configuration, writable through the DBI. However, the application must not change this field. */ uint32_t l0el : 3; /**< L0s Exit Latency The default value is the value you specify during hardware configuration, writable through the DBI. However, the application must not change this field. */ uint32_t aslpms : 2; /**< Active State Link PM Support The default value is the value you specify during hardware configuration, writable through the DBI. However, the application must not change this field. */ uint32_t mlw : 6; /**< Maximum Link Width The default value is the value you specify during hardware configuration (x1, x4, x8, or x16), writable through the DBI. SW needs to set this to 0x8 or 0x4 depending on the maximum number of lanes (0x8 when QLM_CFG == 0, otherwise 0x4). */ uint32_t mls : 4; /**< Maximum Link Speed Default value is 0x1 for 2.5 Gbps Link. This field is writable through the DBI. However, 0x1 is the only supported value. Therefore, the application must not write any value other than 0x1 to this field. */ #else uint32_t mls : 4; uint32_t mlw : 6; uint32_t aslpms : 2; uint32_t l0el : 3; uint32_t l1el : 3; uint32_t cpm : 1; uint32_t sderc : 1; uint32_t dllarc : 1; uint32_t lbnc : 1; uint32_t reserved_22_23 : 2; uint32_t pnum : 8; #endif } s; struct cvmx_pciercx_cfg031_s cn52xx; struct cvmx_pciercx_cfg031_s cn52xxp1; struct cvmx_pciercx_cfg031_s cn56xx; struct cvmx_pciercx_cfg031_s cn56xxp1; } cvmx_pciercx_cfg031_t; /** * cvmx_pcierc#_cfg032 * * PCIE_CFG032 = Thirty-third 32-bits of PCIE type 1 config space * (Link Control Register/Link Status Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg032_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t lab : 1; /**< Link Autonomous Bandwidth Status */ uint32_t lbm : 1; /**< Link Bandwidth Management Status */ uint32_t dlla : 1; /**< Data Link Layer Active */ uint32_t scc : 1; /**< Slot Clock Configuration Indicates that the component uses the same physical reference clock that the platform provides on the connector. The default value is the value you select during hardware configuration, writable through the DBI. However, the application must not change this field. */ uint32_t lt : 1; /**< Link Training */ uint32_t reserved_26_26 : 1; uint32_t nlw : 6; /**< Negotiated Link Width Set automatically by hardware after Link initialization. */ uint32_t ls : 4; /**< Link Speed The negotiated Link speed: 2.5 Gbps */ uint32_t reserved_12_15 : 4; uint32_t lab_int_enb : 1; /**< Link Autonomous Bandwidth Interrupt Enable This interrupt is for Gen2 and is not supported. This bit should always be written to zero. */ uint32_t lbm_int_enb : 1; /**< Link Bandwidth Management Interrupt Enable This interrupt is for Gen2 and is not supported. This bit should always be written to zero. */ uint32_t hawd : 1; /**< Hardware Autonomous Width Disable (Not Supported) */ uint32_t ecpm : 1; /**< Enable Clock Power Management Hardwired to 0 if Clock Power Management is disabled in the Link Capabilities register. */ uint32_t es : 1; /**< Extended Synch */ uint32_t ccc : 1; /**< Common Clock Configuration */ uint32_t rl : 1; /**< Retrain Link */ uint32_t ld : 1; /**< Link Disable */ uint32_t rcb : 1; /**< Read Completion Boundary (RCB), writable through the DBI However, the application must not change this field because an RCB of 64 bytes is not supported.
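A sketch of checking link status through this register after the link is enabled: poll LT until training completes, then read the negotiated width (add a timeout in real code; assumed helpers as in the PCIE_CFG000 sketch):
    cvmx_pciercx_cfg032_t lnk;
    do {
        lnk.u32 = cvmx_pcie_cfgx_read(port, CVMX_PCIERCX_CFG032(port));
    } while (lnk.s.lt);  // LT clears when link training completes
    printf("link up: x%d, speed code %d\n", lnk.s.nlw, lnk.s.ls);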
*/ uint32_t reserved_2_2 : 1; uint32_t aslpc : 2; /**< Active State Link PM Control */ #else uint32_t aslpc : 2; uint32_t reserved_2_2 : 1; uint32_t rcb : 1; uint32_t ld : 1; uint32_t rl : 1; uint32_t ccc : 1; uint32_t es : 1; uint32_t ecpm : 1; uint32_t hawd : 1; uint32_t lbm_int_enb : 1; uint32_t lab_int_enb : 1; uint32_t reserved_12_15 : 4; uint32_t ls : 4; uint32_t nlw : 6; uint32_t reserved_26_26 : 1; uint32_t lt : 1; uint32_t scc : 1; uint32_t dlla : 1; uint32_t lbm : 1; uint32_t lab : 1; #endif } s; struct cvmx_pciercx_cfg032_s cn52xx; struct cvmx_pciercx_cfg032_s cn52xxp1; struct cvmx_pciercx_cfg032_s cn56xx; struct cvmx_pciercx_cfg032_s cn56xxp1; } cvmx_pciercx_cfg032_t; /** * cvmx_pcierc#_cfg033 * * PCIE_CFG033 = Thirty-fourth 32-bits of PCIE type 1 config space * (Slot Capabilities Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg033_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t ps_num : 13; /**< Physical Slot Number, writable through the DBI However, the application must not change this field. */ uint32_t nccs : 1; /**< No Command Complete Support, writable through the DBI However, the application must not change this field. */ uint32_t emip : 1; /**< Electromechanical Interlock Present, writable through the DBI However, the application must not change this field. */ uint32_t sp_ls : 2; /**< Slot Power Limit Scale, writable through the DBI. */ uint32_t sp_lv : 8; /**< Slot Power Limit Value, writable through the DBI. */ uint32_t hp_c : 1; /**< Hot-Plug Capable, writable through the DBI However, the application must not change this field. */ uint32_t hp_s : 1; /**< Hot-Plug Surprise, writable through the DBI However, the application must not change this field. */ uint32_t pip : 1; /**< Power Indicator Present, writable through the DBI However, the application must not change this field. */ uint32_t aip : 1; /**< Attention Indicator Present, writable through the DBI However, the application must not change this field. */ uint32_t mrlsp : 1; /**< MRL Sensor Present, writable through the DBI However, the application must not change this field. */ uint32_t pcp : 1; /**< Power Controller Present, writable through the DBI However, the application must not change this field. */ uint32_t abp : 1; /**< Attention Button Present, writable through the DBI However, the application must not change this field. 
*/ #else uint32_t abp : 1; uint32_t pcp : 1; uint32_t mrlsp : 1; uint32_t aip : 1; uint32_t pip : 1; uint32_t hp_s : 1; uint32_t hp_c : 1; uint32_t sp_lv : 8; uint32_t sp_ls : 2; uint32_t emip : 1; uint32_t nccs : 1; uint32_t ps_num : 13; #endif } s; struct cvmx_pciercx_cfg033_s cn52xx; struct cvmx_pciercx_cfg033_s cn52xxp1; struct cvmx_pciercx_cfg033_s cn56xx; struct cvmx_pciercx_cfg033_s cn56xxp1; } cvmx_pciercx_cfg033_t; /** * cvmx_pcierc#_cfg034 * * PCIE_CFG034 = Thirty-fifth 32-bits of PCIE type 1 config space * (Slot Control Register/Slot Status Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg034_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_25_31 : 7; uint32_t dlls_c : 1; /**< Data Link Layer State Changed */ uint32_t emis : 1; /**< Electromechanical Interlock Status */ uint32_t pds : 1; /**< Presence Detect State */ uint32_t mrlss : 1; /**< MRL Sensor State */ uint32_t ccint_d : 1; /**< Command Completed */ uint32_t pd_c : 1; /**< Presence Detect Changed */ uint32_t mrls_c : 1; /**< MRL Sensor Changed */ uint32_t pf_d : 1; /**< Power Fault Detected */ uint32_t abp_d : 1; /**< Attention Button Pressed */ uint32_t reserved_13_15 : 3; uint32_t dlls_en : 1; /**< Data Link Layer State Changed Enable */ uint32_t emic : 1; /**< Electromechanical Interlock Control */ uint32_t pcc : 1; /**< Power Controller Control */ uint32_t pic : 2; /**< Power Indicator Control */ uint32_t aic : 2; /**< Attention Indicator Control */ uint32_t hpint_en : 1; /**< Hot-Plug Interrupt Enable */ uint32_t ccint_en : 1; /**< Command Completed Interrupt Enable */ uint32_t pd_en : 1; /**< Presence Detect Changed Enable */ uint32_t mrls_en : 1; /**< MRL Sensor Changed Enable */ uint32_t pf_en : 1; /**< Power Fault Detected Enable */ uint32_t abp_en : 1; /**< Attention Button Pressed Enable */ #else uint32_t abp_en : 1; uint32_t pf_en : 1; uint32_t mrls_en : 1; uint32_t pd_en : 1; uint32_t ccint_en : 1; uint32_t hpint_en : 1; uint32_t aic : 2; uint32_t pic : 2; uint32_t pcc : 1; uint32_t emic : 1; uint32_t dlls_en : 1; uint32_t reserved_13_15 : 3; uint32_t abp_d : 1; uint32_t pf_d : 1; uint32_t mrls_c : 1; uint32_t pd_c : 1; uint32_t ccint_d : 1; uint32_t mrlss : 1; uint32_t pds : 1; uint32_t emis : 1; uint32_t dlls_c : 1; uint32_t reserved_25_31 : 7; #endif } s; struct cvmx_pciercx_cfg034_s cn52xx; struct cvmx_pciercx_cfg034_s cn52xxp1; struct cvmx_pciercx_cfg034_s cn56xx; struct cvmx_pciercx_cfg034_s cn56xxp1; } cvmx_pciercx_cfg034_t; /** * cvmx_pcierc#_cfg035 * * PCIE_CFG035 = Thirty-sixth 32-bits of PCIE type 1 config space * (Root Control Register/Root Capabilities Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg035_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_17_31 : 15; uint32_t crssv : 1; /**< CRS Software Visibility Not supported, hardwired to 0x0. */ uint32_t reserved_5_15 : 11; uint32_t crssve : 1; /**< CRS Software Visibility Enable Not supported, hardwired to 0x0. 
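A sketch of enabling the PME-interrupt and system-error reporting bits below via read-modify-write (assumed helpers as in the PCIE_CFG000 sketch):
    cvmx_pciercx_cfg035_t rc;
    rc.u32 = cvmx_pcie_cfgx_read(port, CVMX_PCIERCX_CFG035(port));
    rc.s.pmeie  = 1;  // PME interrupt
    rc.s.secee  = 1;  // system error on correctable error
    rc.s.senfee = 1;  // system error on non-fatal error
    rc.s.sefee  = 1;  // system error on fatal error
    cvmx_pcie_cfgx_write(port, CVMX_PCIERCX_CFG035(port), rc.u32);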
uint32_t pmeie : 1; /**< PME Interrupt Enable */ uint32_t sefee : 1; /**< System Error on Fatal Error Enable */ uint32_t senfee : 1; /**< System Error on Non-fatal Error Enable */ uint32_t secee : 1; /**< System Error on Correctable Error Enable */ #else uint32_t secee : 1; uint32_t senfee : 1; uint32_t sefee : 1; uint32_t pmeie : 1; uint32_t crssve : 1; uint32_t reserved_5_15 : 11; uint32_t crssv : 1; uint32_t reserved_17_31 : 15; #endif } s; struct cvmx_pciercx_cfg035_s cn52xx; struct cvmx_pciercx_cfg035_s cn52xxp1; struct cvmx_pciercx_cfg035_s cn56xx; struct cvmx_pciercx_cfg035_s cn56xxp1; } cvmx_pciercx_cfg035_t; /** * cvmx_pcierc#_cfg036 * * PCIE_CFG036 = Thirty-seventh 32-bits of PCIE type 1 config space * (Root Status Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg036_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_18_31 : 14; uint32_t pme_pend : 1; /**< PME Pending */ uint32_t pme_stat : 1; /**< PME Status */ uint32_t pme_rid : 16; /**< PME Requester ID */ #else uint32_t pme_rid : 16; uint32_t pme_stat : 1; uint32_t pme_pend : 1; uint32_t reserved_18_31 : 14; #endif } s; struct cvmx_pciercx_cfg036_s cn52xx; struct cvmx_pciercx_cfg036_s cn52xxp1; struct cvmx_pciercx_cfg036_s cn56xx; struct cvmx_pciercx_cfg036_s cn56xxp1; } cvmx_pciercx_cfg036_t; /** * cvmx_pcierc#_cfg037 * * PCIE_CFG037 = Thirty-eighth 32-bits of PCIE type 1 config space * (Device Capabilities 2 Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg037_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_5_31 : 27; uint32_t ctds : 1; /**< Completion Timeout Disable Supported */ uint32_t ctrs : 4; /**< Completion Timeout Ranges Supported Value of 0 indicates that Completion Timeout Programming is not supported. Completion timeout is 16.7ms. */ #else uint32_t ctrs : 4; uint32_t ctds : 1; uint32_t reserved_5_31 : 27; #endif } s; struct cvmx_pciercx_cfg037_s cn52xx; struct cvmx_pciercx_cfg037_s cn52xxp1; struct cvmx_pciercx_cfg037_s cn56xx; struct cvmx_pciercx_cfg037_s cn56xxp1; } cvmx_pciercx_cfg037_t; /** * cvmx_pcierc#_cfg038 * * PCIE_CFG038 = Thirty-ninth 32-bits of PCIE type 1 config space * (Device Control 2 Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg038_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_5_31 : 27; uint32_t ctd : 1; /**< Completion Timeout Disable */ uint32_t ctv : 4; /**< Completion Timeout Value Completion Timeout Programming is not supported. Completion timeout is 16.7ms.
*/ #else uint32_t ctv : 4; uint32_t ctd : 1; uint32_t reserved_5_31 : 27; #endif } s; struct cvmx_pciercx_cfg038_s cn52xx; struct cvmx_pciercx_cfg038_s cn52xxp1; struct cvmx_pciercx_cfg038_s cn56xx; struct cvmx_pciercx_cfg038_s cn56xxp1; } cvmx_pciercx_cfg038_t; /** * cvmx_pcierc#_cfg039 * * PCIE_CFG039 = Fortieth 32-bits of PCIE type 1 config space * (Link Capabilities 2 Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg039_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_0_31 : 32; #else uint32_t reserved_0_31 : 32; #endif } s; struct cvmx_pciercx_cfg039_s cn52xx; struct cvmx_pciercx_cfg039_s cn52xxp1; struct cvmx_pciercx_cfg039_s cn56xx; struct cvmx_pciercx_cfg039_s cn56xxp1; } cvmx_pciercx_cfg039_t; /** * cvmx_pcierc#_cfg040 * * PCIE_CFG040 = Forty-first 32-bits of PCIE type 1 config space * (Link Control 2 Register/Link Status 2 Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg040_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_0_31 : 32; #else uint32_t reserved_0_31 : 32; #endif } s; struct cvmx_pciercx_cfg040_s cn52xx; struct cvmx_pciercx_cfg040_s cn52xxp1; struct cvmx_pciercx_cfg040_s cn56xx; struct cvmx_pciercx_cfg040_s cn56xxp1; } cvmx_pciercx_cfg040_t; /** * cvmx_pcierc#_cfg041 * * PCIE_CFG041 = Forty-second 32-bits of PCIE type 1 config space * (Slot Capabilities 2 Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg041_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_0_31 : 32; #else uint32_t reserved_0_31 : 32; #endif } s; struct cvmx_pciercx_cfg041_s cn52xx; struct cvmx_pciercx_cfg041_s cn52xxp1; struct cvmx_pciercx_cfg041_s cn56xx; struct cvmx_pciercx_cfg041_s cn56xxp1; } cvmx_pciercx_cfg041_t; /** * cvmx_pcierc#_cfg042 * * PCIE_CFG042 = Forty-third 32-bits of PCIE type 1 config space * (Slot Control 2 Register/Slot Status 2 Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg042_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_0_31 : 32; #else uint32_t reserved_0_31 : 32; #endif } s; struct cvmx_pciercx_cfg042_s cn52xx; struct cvmx_pciercx_cfg042_s cn52xxp1; struct cvmx_pciercx_cfg042_s cn56xx; struct cvmx_pciercx_cfg042_s cn56xxp1; } cvmx_pciercx_cfg042_t; /** * cvmx_pcierc#_cfg064 * * PCIE_CFG064 = Sixty-fifth 32-bits of PCIE type 1 config space * (PCI Express Enhanced Capability Header) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg064_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t nco : 12; /**< Next Capability Offset */ uint32_t cv : 4; /**< Capability Version */ uint32_t pcieec : 16; /**< PCI Express Extended Capability */ #else uint32_t pcieec : 16; uint32_t cv : 4; uint32_t nco : 12; #endif } s; struct cvmx_pciercx_cfg064_s cn52xx; struct cvmx_pciercx_cfg064_s cn52xxp1; struct cvmx_pciercx_cfg064_s cn56xx; struct cvmx_pciercx_cfg064_s cn56xxp1; } cvmx_pciercx_cfg064_t; /** * cvmx_pcierc#_cfg065 * * PCIE_CFG065 = Sixty-sixth 32-bits of PCIE type 1 config space * (Uncorrectable Error Status Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg065_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_21_31 : 11; uint32_t ures : 1; /**< Unsupported Request Error Status */ uint32_t ecrces : 1; /**< ECRC Error Status */ uint32_t mtlps : 1; /**< Malformed TLP Status */ uint32_t ros : 1; /**< Receiver Overflow Status */ uint32_t ucs : 1; /**< Unexpected Completion Status */ uint32_t cas : 1; /**< Completer Abort Status */ uint32_t cts : 1; /**< Completion Timeout Status */ uint32_t fcpes : 1; /**< Flow Control Protocol Error Status */ uint32_t ptlps : 1; /**<
Poisoned TLP Status */ uint32_t reserved_6_11 : 6; uint32_t sdes : 1; /**< Surprise Down Error Status (not supported) */ uint32_t dlpes : 1; /**< Data Link Protocol Error Status */ uint32_t reserved_0_3 : 4; #else uint32_t reserved_0_3 : 4; uint32_t dlpes : 1; uint32_t sdes : 1; uint32_t reserved_6_11 : 6; uint32_t ptlps : 1; uint32_t fcpes : 1; uint32_t cts : 1; uint32_t cas : 1; uint32_t ucs : 1; uint32_t ros : 1; uint32_t mtlps : 1; uint32_t ecrces : 1; uint32_t ures : 1; uint32_t reserved_21_31 : 11; #endif } s; struct cvmx_pciercx_cfg065_s cn52xx; struct cvmx_pciercx_cfg065_s cn52xxp1; struct cvmx_pciercx_cfg065_s cn56xx; struct cvmx_pciercx_cfg065_s cn56xxp1; } cvmx_pciercx_cfg065_t; /** * cvmx_pcierc#_cfg066 * * PCIE_CFG066 = Sixty-seventh 32-bits of PCIE type 1 config space * (Uncorrectable Error Mask Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg066_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_21_31 : 11; uint32_t urem : 1; /**< Unsupported Request Error Mask */ uint32_t ecrcem : 1; /**< ECRC Error Mask */ uint32_t mtlpm : 1; /**< Malformed TLP Mask */ uint32_t rom : 1; /**< Receiver Overflow Mask */ uint32_t ucm : 1; /**< Unexpected Completion Mask */ uint32_t cam : 1; /**< Completer Abort Mask */ uint32_t ctm : 1; /**< Completion Timeout Mask */ uint32_t fcpem : 1; /**< Flow Control Protocol Error Mask */ uint32_t ptlpm : 1; /**< Poisoned TLP Mask */ uint32_t reserved_6_11 : 6; uint32_t sdem : 1; /**< Surprise Down Error Mask (not supported) */ uint32_t dlpem : 1; /**< Data Link Protocol Error Mask */ uint32_t reserved_0_3 : 4; #else uint32_t reserved_0_3 : 4; uint32_t dlpem : 1; uint32_t sdem : 1; uint32_t reserved_6_11 : 6; uint32_t ptlpm : 1; uint32_t fcpem : 1; uint32_t ctm : 1; uint32_t cam : 1; uint32_t ucm : 1; uint32_t rom : 1; uint32_t mtlpm : 1; uint32_t ecrcem : 1; uint32_t urem : 1; uint32_t reserved_21_31 : 11; #endif } s; struct cvmx_pciercx_cfg066_s cn52xx; struct cvmx_pciercx_cfg066_s cn52xxp1; struct cvmx_pciercx_cfg066_s cn56xx; struct cvmx_pciercx_cfg066_s cn56xxp1; } cvmx_pciercx_cfg066_t; /** * cvmx_pcierc#_cfg067 * * PCIE_CFG067 = Sixty-eighth 32-bits of PCIE type 1 config space * (Uncorrectable Error Severity Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg067_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_21_31 : 11; uint32_t ures : 1; /**< Unsupported Request Error Severity */ uint32_t ecrces : 1; /**< ECRC Error Severity */ uint32_t mtlps : 1; /**< Malformed TLP Severity */ uint32_t ros : 1; /**< Receiver Overflow Severity */ uint32_t ucs : 1; /**< Unexpected Completion Severity */ uint32_t cas : 1; /**< Completer Abort Severity */ uint32_t cts : 1; /**< Completion Timeout Severity */ uint32_t fcpes : 1; /**< Flow Control Protocol Error Severity */ uint32_t ptlps : 1; /**< Poisoned TLP Severity */ uint32_t reserved_6_11 : 6; uint32_t sdes : 1; /**< Surprise Down Error Severity (not supported) */ uint32_t dlpes : 1; /**< Data Link Protocol Error Severity */ uint32_t reserved_0_3 : 4; #else uint32_t reserved_0_3 : 4; uint32_t dlpes : 1; uint32_t sdes : 1; uint32_t reserved_6_11 : 6; uint32_t ptlps : 1; uint32_t fcpes : 1; uint32_t cts : 1; uint32_t cas : 1; uint32_t ucs : 1; uint32_t ros : 1; uint32_t mtlps : 1; uint32_t ecrces : 1; uint32_t ures : 1; uint32_t reserved_21_31 : 11; #endif } s; struct cvmx_pciercx_cfg067_s cn52xx; struct cvmx_pciercx_cfg067_s cn52xxp1; struct cvmx_pciercx_cfg067_s cn56xx; struct cvmx_pciercx_cfg067_s cn56xxp1; } cvmx_pciercx_cfg067_t; /** * cvmx_pcierc#_cfg068 * * 
PCIE_CFG068 = Sixty-ninth 32-bits of PCIE type 1 config space * (Correctable Error Status Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg068_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_14_31 : 18; uint32_t anfes : 1; /**< Advisory Non-Fatal Error Status */ uint32_t rtts : 1; /**< Replay Timer Timeout Status */ uint32_t reserved_9_11 : 3; uint32_t rnrs : 1; /**< REPLAY_NUM Rollover Status */ uint32_t bdllps : 1; /**< Bad DLLP Status */ uint32_t btlps : 1; /**< Bad TLP Status */ uint32_t reserved_1_5 : 5; uint32_t res : 1; /**< Receiver Error Status */ #else uint32_t res : 1; uint32_t reserved_1_5 : 5; uint32_t btlps : 1; uint32_t bdllps : 1; uint32_t rnrs : 1; uint32_t reserved_9_11 : 3; uint32_t rtts : 1; uint32_t anfes : 1; uint32_t reserved_14_31 : 18; #endif } s; struct cvmx_pciercx_cfg068_s cn52xx; struct cvmx_pciercx_cfg068_s cn52xxp1; struct cvmx_pciercx_cfg068_s cn56xx; struct cvmx_pciercx_cfg068_s cn56xxp1; } cvmx_pciercx_cfg068_t; /** * cvmx_pcierc#_cfg069 * * PCIE_CFG069 = Seventieth 32-bits of PCIE type 1 config space * (Correctable Error Mask Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg069_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_14_31 : 18; uint32_t anfem : 1; /**< Advisory Non-Fatal Error Mask */ uint32_t rttm : 1; /**< Replay Timer Timeout Mask */ uint32_t reserved_9_11 : 3; uint32_t rnrm : 1; /**< REPLAY_NUM Rollover Mask */ uint32_t bdllpm : 1; /**< Bad DLLP Mask */ uint32_t btlpm : 1; /**< Bad TLP Mask */ uint32_t reserved_1_5 : 5; uint32_t rem : 1; /**< Receiver Error Mask */ #else uint32_t rem : 1; uint32_t reserved_1_5 : 5; uint32_t btlpm : 1; uint32_t bdllpm : 1; uint32_t rnrm : 1; uint32_t reserved_9_11 : 3; uint32_t rttm : 1; uint32_t anfem : 1; uint32_t reserved_14_31 : 18; #endif } s; struct cvmx_pciercx_cfg069_s cn52xx; struct cvmx_pciercx_cfg069_s cn52xxp1; struct cvmx_pciercx_cfg069_s cn56xx; struct cvmx_pciercx_cfg069_s cn56xxp1; } cvmx_pciercx_cfg069_t; /** * cvmx_pcierc#_cfg070 * * PCIE_CFG070 = Seventy-first 32-bits of PCIE type 1 config space * (Advanced Capabilities and Control Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg070_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_9_31 : 23; uint32_t ce : 1; /**< ECRC Check Enable */ uint32_t cc : 1; /**< ECRC Check Capable */ uint32_t ge : 1; /**< ECRC Generation Enable */ uint32_t gc : 1; /**< ECRC Generation Capability */ uint32_t fep : 5; /**< First Error Pointer */ #else uint32_t fep : 5; uint32_t gc : 1; uint32_t ge : 1; uint32_t cc : 1; uint32_t ce : 1; uint32_t reserved_9_31 : 23; #endif } s; struct cvmx_pciercx_cfg070_s cn52xx; struct cvmx_pciercx_cfg070_s cn52xxp1; struct cvmx_pciercx_cfg070_s cn56xx; struct cvmx_pciercx_cfg070_s cn56xxp1; } cvmx_pciercx_cfg070_t; /** * cvmx_pcierc#_cfg071 * * PCIE_CFG071 = Seventy-second 32-bits of PCIE type 1 config space * (Header Log Register 1) * * The Header Log registers collect the header for the TLP corresponding to a detected error. 
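A sketch of dumping the logged header after an uncorrectable error: the four Header Log dwords (PCIE_CFG071..PCIE_CFG074) sit at consecutive offsets, assuming here that the address macros are plain byte offsets (helpers as in the PCIE_CFG000 sketch):
    int i;
    for (i = 0; i < 4; i++) {
        uint32_t dw = cvmx_pcie_cfgx_read(port, CVMX_PCIERCX_CFG071(port) + 4 * i);
        printf("header log dword%d: 0x%08x\n", i + 1, dw);
    }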
*/ typedef union { uint32_t u32; struct cvmx_pciercx_cfg071_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t dword1 : 32; /**< Header Log Register (first DWORD) */ #else uint32_t dword1 : 32; #endif } s; struct cvmx_pciercx_cfg071_s cn52xx; struct cvmx_pciercx_cfg071_s cn52xxp1; struct cvmx_pciercx_cfg071_s cn56xx; struct cvmx_pciercx_cfg071_s cn56xxp1; } cvmx_pciercx_cfg071_t; /** * cvmx_pcierc#_cfg072 * * PCIE_CFG072 = Seventy-third 32-bits of PCIE type 1 config space * (Header Log Register 2) * * The Header Log registers collect the header for the TLP corresponding to a detected error. */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg072_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t dword2 : 32; /**< Header Log Register (second DWORD) */ #else uint32_t dword2 : 32; #endif } s; struct cvmx_pciercx_cfg072_s cn52xx; struct cvmx_pciercx_cfg072_s cn52xxp1; struct cvmx_pciercx_cfg072_s cn56xx; struct cvmx_pciercx_cfg072_s cn56xxp1; } cvmx_pciercx_cfg072_t; /** * cvmx_pcierc#_cfg073 * * PCIE_CFG073 = Seventy-fourth 32-bits of PCIE type 1 config space * (Header Log Register 3) * * The Header Log registers collect the header for the TLP corresponding to a detected error. */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg073_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t dword3 : 32; /**< Header Log Register (third DWORD) */ #else uint32_t dword3 : 32; #endif } s; struct cvmx_pciercx_cfg073_s cn52xx; struct cvmx_pciercx_cfg073_s cn52xxp1; struct cvmx_pciercx_cfg073_s cn56xx; struct cvmx_pciercx_cfg073_s cn56xxp1; } cvmx_pciercx_cfg073_t; /** * cvmx_pcierc#_cfg074 * * PCIE_CFG074 = Seventy-fifth 32-bits of PCIE type 1 config space * (Header Log Register 4) * * The Header Log registers collect the header for the TLP corresponding to a detected error. 
*/ typedef union { uint32_t u32; struct cvmx_pciercx_cfg074_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t dword4 : 32; /**< Header Log Register (fourth DWORD) */ #else uint32_t dword4 : 32; #endif } s; struct cvmx_pciercx_cfg074_s cn52xx; struct cvmx_pciercx_cfg074_s cn52xxp1; struct cvmx_pciercx_cfg074_s cn56xx; struct cvmx_pciercx_cfg074_s cn56xxp1; } cvmx_pciercx_cfg074_t; /** * cvmx_pcierc#_cfg075 * * PCIE_CFG075 = Seventy-sixth 32-bits of PCIE type 1 config space * (Root Error Command Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg075_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_3_31 : 29; uint32_t fere : 1; /**< Fatal Error Reporting Enable */ uint32_t nfere : 1; /**< Non-Fatal Error Reporting Enable */ uint32_t cere : 1; /**< Correctable Error Reporting Enable */ #else uint32_t cere : 1; uint32_t nfere : 1; uint32_t fere : 1; uint32_t reserved_3_31 : 29; #endif } s; struct cvmx_pciercx_cfg075_s cn52xx; struct cvmx_pciercx_cfg075_s cn52xxp1; struct cvmx_pciercx_cfg075_s cn56xx; struct cvmx_pciercx_cfg075_s cn56xxp1; } cvmx_pciercx_cfg075_t; /** * cvmx_pcierc#_cfg076 * * PCIE_CFG076 = Seventy-seventh 32-bits of PCIE type 1 config space * (Root Error Status Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg076_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t aeimn : 5; /**< Advanced Error Interrupt Message Number, writable through the DBI */ uint32_t reserved_7_26 : 20; uint32_t femr : 1; /**< Fatal Error Messages Received */ uint32_t nfemr : 1; /**< Non-Fatal Error Messages Received */ uint32_t fuf : 1; /**< First Uncorrectable Fatal */ uint32_t multi_efnfr : 1; /**< Multiple ERR_FATAL/NONFATAL Received */ uint32_t efnfr : 1; /**< ERR_FATAL/NONFATAL Received */ uint32_t multi_ecr : 1; /**< Multiple ERR_COR Received */ uint32_t ecr : 1; /**< ERR_COR Received */ #else uint32_t ecr : 1; uint32_t multi_ecr : 1; uint32_t efnfr : 1; uint32_t multi_efnfr : 1; uint32_t fuf : 1; uint32_t nfemr : 1; uint32_t femr : 1; uint32_t reserved_7_26 : 20; uint32_t aeimn : 5; #endif } s; struct cvmx_pciercx_cfg076_s cn52xx; struct cvmx_pciercx_cfg076_s cn52xxp1; struct cvmx_pciercx_cfg076_s cn56xx; struct cvmx_pciercx_cfg076_s cn56xxp1; } cvmx_pciercx_cfg076_t; /** * cvmx_pcierc#_cfg077 * * PCIE_CFG077 = Seventy-eighth 32-bits of PCIE type 1 config space * (Error Source Identification Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg077_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t efnfsi : 16; /**< ERR_FATAL/NONFATAL Source Identification */ uint32_t ecsi : 16; /**< ERR_COR Source Identification */ #else uint32_t ecsi : 16; uint32_t efnfsi : 16; #endif } s; struct cvmx_pciercx_cfg077_s cn52xx; struct cvmx_pciercx_cfg077_s cn52xxp1; struct cvmx_pciercx_cfg077_s cn56xx; struct cvmx_pciercx_cfg077_s cn56xxp1; } cvmx_pciercx_cfg077_t; /** * cvmx_pcierc#_cfg448 * * PCIE_CFG448 = Four hundred forty-ninth 32-bits of PCIE type 1 config space * (Ack Latency Timer and Replay Timer Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg448_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t rtl : 16; /**< Replay Time Limit The replay timer expires when it reaches this limit. The PCI Express bus initiates a replay upon reception of a Nak or when the replay timer expires. The default is then updated based on the Negotiated Link Width and Max_Payload_Size. */ uint32_t rtltl : 16; /**< Round Trip Latency Time Limit The Ack/Nak latency timer expires when it reaches this limit. 
The default is then updated based on the Negotiated Link Width and Max_Payload_Size. */ #else uint32_t rtltl : 16; uint32_t rtl : 16; #endif } s; struct cvmx_pciercx_cfg448_s cn52xx; struct cvmx_pciercx_cfg448_s cn52xxp1; struct cvmx_pciercx_cfg448_s cn56xx; struct cvmx_pciercx_cfg448_s cn56xxp1; } cvmx_pciercx_cfg448_t; /** * cvmx_pcierc#_cfg449 * * PCIE_CFG449 = Four hundred fiftieth 32-bits of PCIE type 1 config space * (Other Message Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg449_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t omr : 32; /**< Other Message Register This register can be used for either of the following purposes: o To send a specific PCI Express Message, the application writes the payload of the Message into this register, then sets bit 0 of the Port Link Control Register to send the Message. o To store a corruption pattern for corrupting the LCRC on all TLPs, the application places a 32-bit corruption pattern into this register and enables this function by setting bit 25 of the Port Link Control Register. When enabled, the transmit LCRC result is XOR'd with this pattern before inserting it into the packet. */ #else uint32_t omr : 32; #endif } s; struct cvmx_pciercx_cfg449_s cn52xx; struct cvmx_pciercx_cfg449_s cn52xxp1; struct cvmx_pciercx_cfg449_s cn56xx; struct cvmx_pciercx_cfg449_s cn56xxp1; } cvmx_pciercx_cfg449_t; /** * cvmx_pcierc#_cfg450 * * PCIE_CFG450 = Four hundred fifty-first 32-bits of PCIE type 1 config space * (Port Force Link Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg450_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t lpec : 8; /**< Low Power Entrance Count The Power Management state machine waits this many clock cycles for the completion of a CfgWr to the PCIE_CFG017 Power State (PS) field before going low-power. This register is intended for applications that do not let the PCI Express bus handle a completion for a configuration request to the Power Management Control and Status (PCIE_CFG017) register. */ uint32_t reserved_22_23 : 2; uint32_t link_state : 6; /**< Link State The Link state that the PCI Express Bus will be forced to when bit 15 (Force Link) is set. State encoding: o DETECT_QUIET 00h o DETECT_ACT 01h o POLL_ACTIVE 02h o POLL_COMPLIANCE 03h o POLL_CONFIG 04h o PRE_DETECT_QUIET 05h o DETECT_WAIT 06h o CFG_LINKWD_START 07h o CFG_LINKWD_ACEPT 08h o CFG_LANENUM_WAIT 09h o CFG_LANENUM_ACEPT 0Ah o CFG_COMPLETE 0Bh o CFG_IDLE 0Ch o RCVRY_LOCK 0Dh o RCVRY_SPEED 0Eh o RCVRY_RCVRCFG 0Fh o RCVRY_IDLE 10h o L0 11h o L0S 12h o L123_SEND_EIDLE 13h o L1_IDLE 14h o L2_IDLE 15h o L2_WAKE 16h o DISABLED_ENTRY 17h o DISABLED_IDLE 18h o DISABLED 19h o LPBK_ENTRY 1Ah o LPBK_ACTIVE 1Bh o LPBK_EXIT 1Ch o LPBK_EXIT_TIMEOUT 1Dh o HOT_RESET_ENTRY 1Eh o HOT_RESET 1Fh */ uint32_t force_link : 1; /**< Force Link Forces the Link to the state specified by the Link State field. The Force Link pulse triggers Link re-negotiation. Because Force Link is a pulse, writing a 1 to it triggers the forced link-state event, even though reading it always returns a 0.
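A sketch of the pulse just described (assumed helpers as in the PCIE_CFG000 sketch); FORCE_LINK always reads back 0, but the write still takes effect:
    cvmx_pciercx_cfg450_t fl;
    fl.u32 = cvmx_pcie_cfgx_read(port, CVMX_PCIERCX_CFG450(port));
    fl.s.link_state = 0x11;  // L0, per the encoding list above
    fl.s.force_link = 1;     // self-clearing pulse
    cvmx_pcie_cfgx_write(port, CVMX_PCIERCX_CFG450(port), fl.u32);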
*/ uint32_t reserved_8_14 : 7; uint32_t link_num : 8; /**< Link Number */ #else uint32_t link_num : 8; uint32_t reserved_8_14 : 7; uint32_t force_link : 1; uint32_t link_state : 6; uint32_t reserved_22_23 : 2; uint32_t lpec : 8; #endif } s; struct cvmx_pciercx_cfg450_s cn52xx; struct cvmx_pciercx_cfg450_s cn52xxp1; struct cvmx_pciercx_cfg450_s cn56xx; struct cvmx_pciercx_cfg450_s cn56xxp1; } cvmx_pciercx_cfg450_t; /** * cvmx_pcierc#_cfg451 * * PCIE_CFG451 = Four hundred fifty-second 32-bits of PCIE type 1 config space * (Ack Frequency Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg451_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_30_31 : 2; uint32_t l1el : 3; /**< L1 Entrance Latency Values correspond to: o 000: 1 ms o 001: 2 ms o 010: 4 ms o 011: 8 ms o 100: 16 ms o 101: 32 ms o 110 or 111: 64 ms */ uint32_t l0el : 3; /**< L0s Entrance Latency Values correspond to: o 000: 1 ms o 001: 2 ms o 010: 3 ms o 011: 4 ms o 100: 5 ms o 101: 6 ms o 110 or 111: 7 ms */ uint32_t n_fts_cc : 8; /**< N_FTS when common clock is used. The number of Fast Training Sequence ordered sets to be transmitted when transitioning from L0s to L0. The maximum number of FTS ordered-sets that a component can request is 255. Note: The core does not support a value of zero; a value of zero can cause the LTSSM to go into the recovery state when exiting from L0s. */ uint32_t n_fts : 8; /**< N_FTS The number of Fast Training Sequence ordered sets to be transmitted when transitioning from L0s to L0. The maximum number of FTS ordered-sets that a component can request is 255. Note: The core does not support a value of zero; a value of zero can cause the LTSSM to go into the recovery state when exiting from L0s. */ uint32_t ack_freq : 8; /**< Ack Frequency The number of pending Ack's specified here (up to 255) before sending an Ack. */ #else uint32_t ack_freq : 8; uint32_t n_fts : 8; uint32_t n_fts_cc : 8; uint32_t l0el : 3; uint32_t l1el : 3; uint32_t reserved_30_31 : 2; #endif } s; struct cvmx_pciercx_cfg451_s cn52xx; struct cvmx_pciercx_cfg451_s cn52xxp1; struct cvmx_pciercx_cfg451_s cn56xx; struct cvmx_pciercx_cfg451_s cn56xxp1; } cvmx_pciercx_cfg451_t; /** * cvmx_pcierc#_cfg452 * * PCIE_CFG452 = Four hundred fifty-third 32-bits of PCIE type 1 config space * (Port Link Control Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg452_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_26_31 : 6; uint32_t eccrc : 1; /**< Enable Corrupted CRC Causes corrupt LCRC for TLPs when set, using the pattern contained in the Other Message register. This is a test feature, not to be used in normal operation. */ uint32_t reserved_22_24 : 3; uint32_t lme : 6; /**< Link Mode Enable o 000001: x1 o 000011: x2 o 000111: x4 o 001111: x8 o 011111: x16 (not supported) o 111111: x32 (not supported) This field indicates the MAXIMUM number of lanes supported by the PCIe port. It is normally set to 0xF or 0x7 depending on the value of the QLM_CFG bits (0xF when QLM_CFG == 0 otherwise 0x7). The value can be set less than 0xF or 0x7 to limit the number of lanes the PCIe will attempt to use. The programming of this field needs to be done by SW BEFORE enabling the link. See also MLW. (Note: The value of this field does NOT indicate the number of lanes in use by the PCIe. LME sets the max number of lanes in the PCIe core that COULD be used. As per the PCIe specs, the PCIe core can negotiate a smaller link width, so all of x8, x4, x2, and x1 are supported when LME=0xF, for example.) 
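A sketch of capping the maximum link width before the link is enabled, as the note above requires; 0x7 here is an example for a x4 wiring, not a probe of QLM_CFG (assumed helpers as in the PCIE_CFG000 sketch):
    cvmx_pciercx_cfg452_t plc;
    plc.u32 = cvmx_pcie_cfgx_read(port, CVMX_PCIERCX_CFG452(port));
    plc.s.lme = 0x7;  // allow up to x4; use 0xF to allow up to x8
    cvmx_pcie_cfgx_write(port, CVMX_PCIERCX_CFG452(port), plc.u32);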
*/ uint32_t reserved_8_15 : 8; uint32_t flm : 1; /**< Fast Link Mode Sets all internal timers to fast mode for simulation purposes. */ uint32_t reserved_6_6 : 1; uint32_t dllle : 1; /**< DLL Link Enable Enables Link initialization. If DLL Link Enable = 0, the PCI Express bus does not transmit InitFC DLLPs and does not establish a Link. */ uint32_t reserved_4_4 : 1; uint32_t ra : 1; /**< Reset Assert Triggers a recovery and forces the LTSSM to the Hot Reset state (downstream port only). */ uint32_t le : 1; /**< Loopback Enable Turns on loopback. */ uint32_t sd : 1; /**< Scramble Disable Turns off data scrambling. */ uint32_t omr : 1; /**< Other Message Request When software writes a `1' to this bit, the PCI Express bus transmits the Message contained in the Other Message register. */ #else uint32_t omr : 1; uint32_t sd : 1; uint32_t le : 1; uint32_t ra : 1; uint32_t reserved_4_4 : 1; uint32_t dllle : 1; uint32_t reserved_6_6 : 1; uint32_t flm : 1; uint32_t reserved_8_15 : 8; uint32_t lme : 6; uint32_t reserved_22_24 : 3; uint32_t eccrc : 1; uint32_t reserved_26_31 : 6; #endif } s; struct cvmx_pciercx_cfg452_s cn52xx; struct cvmx_pciercx_cfg452_s cn52xxp1; struct cvmx_pciercx_cfg452_s cn56xx; struct cvmx_pciercx_cfg452_s cn56xxp1; } cvmx_pciercx_cfg452_t; /** * cvmx_pcierc#_cfg453 * * PCIE_CFG453 = Four hundred fifty-fourth 32-bits of PCIE type 1 config space * (Lane Skew Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg453_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t dlld : 1; /**< Disable Lane-to-Lane Deskew Disables the internal Lane-to-Lane deskew logic. */ uint32_t reserved_26_30 : 5; uint32_t ack_nak : 1; /**< Ack/Nak Disable Prevents the PCI Express bus from sending Ack and Nak DLLPs. */ uint32_t fcd : 1; /**< Flow Control Disable Prevents the PCI Express bus from sending FC DLLPs. */ uint32_t ilst : 24; /**< Insert Lane Skew for Transmit (not supported for x16) Causes skew between lanes for test purposes. There are three bits per Lane. The value is in units of one symbol time. For example, the value 010b for a Lane forces a skew of two symbol times for that Lane. The maximum skew value for any Lane is 5 symbol times. */ #else uint32_t ilst : 24; uint32_t fcd : 1; uint32_t ack_nak : 1; uint32_t reserved_26_30 : 5; uint32_t dlld : 1; #endif } s; struct cvmx_pciercx_cfg453_s cn52xx; struct cvmx_pciercx_cfg453_s cn52xxp1; struct cvmx_pciercx_cfg453_s cn56xx; struct cvmx_pciercx_cfg453_s cn56xxp1; } cvmx_pciercx_cfg453_t; /** * cvmx_pcierc#_cfg454 * * PCIE_CFG454 = Four hundred fifty-fifth 32-bits of PCIE type 1 config space * (Symbol Number Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg454_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_29_31 : 3; uint32_t tmfcwt : 5; /**< Timer Modifier for Flow Control Watchdog Timer Increases the timer value for the Flow Control watchdog timer, in increments of 16 clock cycles. */ uint32_t tmanlt : 5; /**< Timer Modifier for Ack/Nak Latency Timer Increases the timer value for the Ack/Nak latency timer, in increments of 64 clock cycles. */ uint32_t tmrt : 5; /**< Timer Modifier for Replay Timer Increases the timer value for the replay timer, in increments of 64 clock cycles. */ uint32_t reserved_11_13 : 3; uint32_t nskps : 3; /**< Number of SKP Symbols */ uint32_t reserved_4_7 : 4; uint32_t ntss : 4; /**< Number of TS Symbols Sets the number of TS identifier symbols that are sent in TS1 and TS2 ordered sets. 
*/ #else uint32_t ntss : 4; uint32_t reserved_4_7 : 4; uint32_t nskps : 3; uint32_t reserved_11_13 : 3; uint32_t tmrt : 5; uint32_t tmanlt : 5; uint32_t tmfcwt : 5; uint32_t reserved_29_31 : 3; #endif } s; struct cvmx_pciercx_cfg454_s cn52xx; struct cvmx_pciercx_cfg454_s cn52xxp1; struct cvmx_pciercx_cfg454_s cn56xx; struct cvmx_pciercx_cfg454_s cn56xxp1; } cvmx_pciercx_cfg454_t; /** * cvmx_pcierc#_cfg455 * * PCIE_CFG455 = Four hundred fifty-sixth 32-bits of PCIE type 1 config space * (Symbol Timer Register/Filter Mask Register 1) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg455_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t m_cfg0_filt : 1; /**< Mask filtering of received Configuration Requests (RC mode only) */ uint32_t m_io_filt : 1; /**< Mask filtering of received I/O Requests (RC mode only) */ uint32_t msg_ctrl : 1; /**< Message Control The application must not change this field. */ uint32_t m_cpl_ecrc_filt : 1; /**< Mask ECRC error filtering for Completions */ uint32_t m_ecrc_filt : 1; /**< Mask ECRC error filtering */ uint32_t m_cpl_len_err : 1; /**< Mask Length mismatch error for received Completions */ uint32_t m_cpl_attr_err : 1; /**< Mask Attributes mismatch error for received Completions */ uint32_t m_cpl_tc_err : 1; /**< Mask Traffic Class mismatch error for received Completions */ uint32_t m_cpl_fun_err : 1; /**< Mask function mismatch error for received Completions */ uint32_t m_cpl_rid_err : 1; /**< Mask Requester ID mismatch error for received Completions */ uint32_t m_cpl_tag_err : 1; /**< Mask Tag error rules for received Completions */ uint32_t m_lk_filt : 1; /**< Mask Locked Request filtering */ uint32_t m_cfg1_filt : 1; /**< Mask Type 1 Configuration Request filtering */ uint32_t m_bar_match : 1; /**< Mask BAR match filtering */ uint32_t m_pois_filt : 1; /**< Mask poisoned TLP filtering */ uint32_t m_fun : 1; /**< Mask function */ uint32_t dfcwt : 1; /**< Disable FC Watchdog Timer */ uint32_t reserved_11_14 : 4; uint32_t skpiv : 11; /**< SKP Interval Value */ #else uint32_t skpiv : 11; uint32_t reserved_11_14 : 4; uint32_t dfcwt : 1; uint32_t m_fun : 1; uint32_t m_pois_filt : 1; uint32_t m_bar_match : 1; uint32_t m_cfg1_filt : 1; uint32_t m_lk_filt : 1; uint32_t m_cpl_tag_err : 1; uint32_t m_cpl_rid_err : 1; uint32_t m_cpl_fun_err : 1; uint32_t m_cpl_tc_err : 1; uint32_t m_cpl_attr_err : 1; uint32_t m_cpl_len_err : 1; uint32_t m_ecrc_filt : 1; uint32_t m_cpl_ecrc_filt : 1; uint32_t msg_ctrl : 1; uint32_t m_io_filt : 1; uint32_t m_cfg0_filt : 1; #endif } s; struct cvmx_pciercx_cfg455_s cn52xx; struct cvmx_pciercx_cfg455_s cn52xxp1; struct cvmx_pciercx_cfg455_s cn56xx; struct cvmx_pciercx_cfg455_s cn56xxp1; } cvmx_pciercx_cfg455_t; /** * cvmx_pcierc#_cfg456 * * PCIE_CFG456 = Four hundred fifty-seventh 32-bits of PCIE type 1 config space * (Filter Mask Register 2) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg456_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_2_31 : 30; uint32_t m_vend1_drp : 1; /**< Mask Vendor MSG Type 1 dropped silently */ uint32_t m_vend0_drp : 1; /**< Mask Vendor MSG Type 0 dropped with UR error reporting. 
*/ #else uint32_t m_vend0_drp : 1; uint32_t m_vend1_drp : 1; uint32_t reserved_2_31 : 30; #endif } s; struct cvmx_pciercx_cfg456_s cn52xx; struct cvmx_pciercx_cfg456_s cn52xxp1; struct cvmx_pciercx_cfg456_s cn56xx; struct cvmx_pciercx_cfg456_s cn56xxp1; } cvmx_pciercx_cfg456_t; /** * cvmx_pcierc#_cfg458 * * PCIE_CFG458 = Four hundred fifty-ninth 32-bits of PCIE type 1 config space * (Debug Register 0) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg458_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t dbg_info_l32 : 32; /**< The value on cxpl_debug_info[31:0]. */ #else uint32_t dbg_info_l32 : 32; #endif } s; struct cvmx_pciercx_cfg458_s cn52xx; struct cvmx_pciercx_cfg458_s cn52xxp1; struct cvmx_pciercx_cfg458_s cn56xx; struct cvmx_pciercx_cfg458_s cn56xxp1; } cvmx_pciercx_cfg458_t; /** * cvmx_pcierc#_cfg459 * * PCIE_CFG459 = Four hundred sixtieth 32-bits of PCIE type 1 config space * (Debug Register 1) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg459_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t dbg_info_u32 : 32; /**< The value on cxpl_debug_info[63:32]. */ #else uint32_t dbg_info_u32 : 32; #endif } s; struct cvmx_pciercx_cfg459_s cn52xx; struct cvmx_pciercx_cfg459_s cn52xxp1; struct cvmx_pciercx_cfg459_s cn56xx; struct cvmx_pciercx_cfg459_s cn56xxp1; } cvmx_pciercx_cfg459_t; /** * cvmx_pcierc#_cfg460 * * PCIE_CFG460 = Four hundred sixty-first 32-bits of PCIE type 1 config space * (Transmit Posted FC Credit Status) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg460_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_20_31 : 12; uint32_t tphfcc : 8; /**< Transmit Posted Header FC Credits The Posted Header credits advertised by the receiver at the other end of the Link, updated with each UpdateFC DLLP. */ uint32_t tpdfcc : 12; /**< Transmit Posted Data FC Credits The Posted Data credits advertised by the receiver at the other end of the Link, updated with each UpdateFC DLLP. */ #else uint32_t tpdfcc : 12; uint32_t tphfcc : 8; uint32_t reserved_20_31 : 12; #endif } s; struct cvmx_pciercx_cfg460_s cn52xx; struct cvmx_pciercx_cfg460_s cn52xxp1; struct cvmx_pciercx_cfg460_s cn56xx; struct cvmx_pciercx_cfg460_s cn56xxp1; } cvmx_pciercx_cfg460_t; /** * cvmx_pcierc#_cfg461 * * PCIE_CFG461 = Four hundred sixty-second 32-bits of PCIE type 1 config space * (Transmit Non-Posted FC Credit Status) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg461_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_20_31 : 12; uint32_t tchfcc : 8; /**< Transmit Non-Posted Header FC Credits The Non-Posted Header credits advertised by the receiver at the other end of the Link, updated with each UpdateFC DLLP. */ uint32_t tcdfcc : 12; /**< Transmit Non-Posted Data FC Credits The Non-Posted Data credits advertised by the receiver at the other end of the Link, updated with each UpdateFC DLLP. 
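For debug visibility, a sketch that snapshots the advertised credits (assumes the cvmx_pcie_cfgx_read accessor and a CVMX_PCIERCX_CFG461 address macro; illustrative only):
  cvmx_pciercx_cfg461_t c461;
  c461.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG461(pcie_port));
  // c461.s.tchfcc: Non-Posted header credits, c461.s.tcdfcc: Non-Posted data credits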
*/ #else uint32_t tcdfcc : 12; uint32_t tchfcc : 8; uint32_t reserved_20_31 : 12; #endif } s; struct cvmx_pciercx_cfg461_s cn52xx; struct cvmx_pciercx_cfg461_s cn52xxp1; struct cvmx_pciercx_cfg461_s cn56xx; struct cvmx_pciercx_cfg461_s cn56xxp1; } cvmx_pciercx_cfg461_t; /** * cvmx_pcierc#_cfg462 * * PCIE_CFG462 = Four hundred sixty-third 32-bits of PCIE type 1 config space * (Transmit Completion FC Credit Status) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg462_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_20_31 : 12; uint32_t tchfcc : 8; /**< Transmit Completion Header FC Credits The Completion Header credits advertised by the receiver at the other end of the Link, updated with each UpdateFC DLLP. */ uint32_t tcdfcc : 12; /**< Transmit Completion Data FC Credits The Completion Data credits advertised by the receiver at the other end of the Link, updated with each UpdateFC DLLP. */ #else uint32_t tcdfcc : 12; uint32_t tchfcc : 8; uint32_t reserved_20_31 : 12; #endif } s; struct cvmx_pciercx_cfg462_s cn52xx; struct cvmx_pciercx_cfg462_s cn52xxp1; struct cvmx_pciercx_cfg462_s cn56xx; struct cvmx_pciercx_cfg462_s cn56xxp1; } cvmx_pciercx_cfg462_t; /** * cvmx_pcierc#_cfg463 * * PCIE_CFG463 = Four hundred sixty-fourth 32-bits of PCIE type 1 config space * (Queue Status) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg463_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_3_31 : 29; uint32_t rqne : 1; /**< Received Queue Not Empty Indicates there is data in one or more of the receive buffers. */ uint32_t trbne : 1; /**< Transmit Retry Buffer Not Empty Indicates that there is data in the transmit retry buffer. */ uint32_t rtlpfccnr : 1; /**< Received TLP FC Credits Not Returned Indicates that the PCI Express bus has sent a TLP but has not yet received an UpdateFC DLLP indicating that the credits for that TLP have been restored by the receiver at the other end of the Link.
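An illustrative quiesce check before, say, forcing a link retrain (same assumed accessors; the polling policy and any timeout are application choices):
  cvmx_pciercx_cfg463_t c463;
  do {
    c463.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG463(pcie_port));
  } while (c463.s.trbne || c463.s.rtlpfccnr);  // wait for retry-buffer drain and credit return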
*/ #else uint32_t rtlpfccnr : 1; uint32_t trbne : 1; uint32_t rqne : 1; uint32_t reserved_3_31 : 29; #endif } s; struct cvmx_pciercx_cfg463_s cn52xx; struct cvmx_pciercx_cfg463_s cn52xxp1; struct cvmx_pciercx_cfg463_s cn56xx; struct cvmx_pciercx_cfg463_s cn56xxp1; } cvmx_pciercx_cfg463_t; /** * cvmx_pcierc#_cfg464 * * PCIE_CFG464 = Four hundred sixty-fifth 32-bits of PCIE type 1 config space * (VC Transmit Arbitration Register 1) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg464_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t wrr_vc3 : 8; /**< WRR Weight for VC3 */ uint32_t wrr_vc2 : 8; /**< WRR Weight for VC2 */ uint32_t wrr_vc1 : 8; /**< WRR Weight for VC1 */ uint32_t wrr_vc0 : 8; /**< WRR Weight for VC0 */ #else uint32_t wrr_vc0 : 8; uint32_t wrr_vc1 : 8; uint32_t wrr_vc2 : 8; uint32_t wrr_vc3 : 8; #endif } s; struct cvmx_pciercx_cfg464_s cn52xx; struct cvmx_pciercx_cfg464_s cn52xxp1; struct cvmx_pciercx_cfg464_s cn56xx; struct cvmx_pciercx_cfg464_s cn56xxp1; } cvmx_pciercx_cfg464_t; /** * cvmx_pcierc#_cfg465 * * PCIE_CFG465 = Four hundred sixty-sixth 32-bits of config space * (VC Transmit Arbitration Register 2) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg465_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t wrr_vc7 : 8; /**< WRR Weight for VC7 */ uint32_t wrr_vc6 : 8; /**< WRR Weight for VC6 */ uint32_t wrr_vc5 : 8; /**< WRR Weight for VC5 */ uint32_t wrr_vc4 : 8; /**< WRR Weight for VC4 */ #else uint32_t wrr_vc4 : 8; uint32_t wrr_vc5 : 8; uint32_t wrr_vc6 : 8; uint32_t wrr_vc7 : 8; #endif } s; struct cvmx_pciercx_cfg465_s cn52xx; struct cvmx_pciercx_cfg465_s cn52xxp1; struct cvmx_pciercx_cfg465_s cn56xx; struct cvmx_pciercx_cfg465_s cn56xxp1; } cvmx_pciercx_cfg465_t; /** * cvmx_pcierc#_cfg466 * * PCIE_CFG466 = Four hundred sixty-seventh 32-bits of PCIE type 1 config space * (VC0 Posted Receive Queue Control) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg466_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t rx_queue_order : 1; /**< VC Ordering for Receive Queues Determines the VC ordering rule for the receive queues, used only in the segmented-buffer configuration, writable through the DBI: o 1: Strict ordering, higher numbered VCs have higher priority o 0: Round robin However, the application must not change this field. */ uint32_t type_ordering : 1; /**< TLP Type Ordering for VC0 Determines the TLP type ordering rule for VC0 receive queues, used only in the segmented-buffer configuration, writable through the DBI: o 1: Ordering of received TLPs follows the rules in PCI Express Base Specification, Revision 1.1 o 0: Strict ordering for received TLPs: Posted, then Completion, then Non-Posted However, the application must not change this field. */ uint32_t reserved_24_29 : 6; uint32_t queue_mode : 3; /**< VC0 Posted TLP Queue Mode The operating mode of the Posted receive queue for VC0, used only in the segmented-buffer configuration, writable through the DBI. However, the application must not change this field. Only one bit can be set at a time: o Bit 23: Bypass o Bit 22: Cut-through o Bit 21: Store-and-forward */ uint32_t reserved_20_20 : 1; uint32_t header_credits : 8; /**< VC0 Posted Header Credits The number of initial Posted header credits for VC0, used for all receive queue buffer configurations. This field is writable through the DBI. However, the application must not change this field. */ uint32_t data_credits : 12; /**< VC0 Posted Data Credits The number of initial Posted data credits for VC0, used for all receive queue buffer configurations. 
This field is writable through the DBI. However, the application must not change this field. */ #else uint32_t data_credits : 12; uint32_t header_credits : 8; uint32_t reserved_20_20 : 1; uint32_t queue_mode : 3; uint32_t reserved_24_29 : 6; uint32_t type_ordering : 1; uint32_t rx_queue_order : 1; #endif } s; struct cvmx_pciercx_cfg466_s cn52xx; struct cvmx_pciercx_cfg466_s cn52xxp1; struct cvmx_pciercx_cfg466_s cn56xx; struct cvmx_pciercx_cfg466_s cn56xxp1; } cvmx_pciercx_cfg466_t; /** * cvmx_pcierc#_cfg467 * * PCIE_CFG467 = Four hundred sixty-eighth 32-bits of PCIE type 1 config space * (VC0 Non-Posted Receive Queue Control) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg467_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_24_31 : 8; uint32_t queue_mode : 3; /**< VC0 Non-Posted TLP Queue Mode The operating mode of the Non-Posted receive queue for VC0, used only in the segmented-buffer configuration, writable through the DBI. Only one bit can be set at a time: o Bit 23: Bypass o Bit 22: Cut-through o Bit 21: Store-and-forward However, the application must not change this field. */ uint32_t reserved_20_20 : 1; uint32_t header_credits : 8; /**< VC0 Non-Posted Header Credits The number of initial Non-Posted header credits for VC0, used for all receive queue buffer configurations. This field is writable through the DBI. However, the application must not change this field. */ uint32_t data_credits : 12; /**< VC0 Non-Posted Data Credits The number of initial Non-Posted data credits for VC0, used for all receive queue buffer configurations. This field is writable through the DBI. However, the application must not change this field. */ #else uint32_t data_credits : 12; uint32_t header_credits : 8; uint32_t reserved_20_20 : 1; uint32_t queue_mode : 3; uint32_t reserved_24_31 : 8; #endif } s; struct cvmx_pciercx_cfg467_s cn52xx; struct cvmx_pciercx_cfg467_s cn52xxp1; struct cvmx_pciercx_cfg467_s cn56xx; struct cvmx_pciercx_cfg467_s cn56xxp1; } cvmx_pciercx_cfg467_t; /** * cvmx_pcierc#_cfg468 * * PCIE_CFG468 = Four hundred sixty-ninth 32-bits of PCIE type 1 config space * (VC0 Completion Receive Queue Control) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg468_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_24_31 : 8; uint32_t queue_mode : 3; /**< VC0 Completion TLP Queue Mode The operating mode of the Completion receive queue for VC0, used only in the segmented-buffer configuration, writable through the DBI. Only one bit can be set at a time: o Bit 23: Bypass o Bit 22: Cut-through o Bit 21: Store-and-forward However, the application must not change this field. */ uint32_t reserved_20_20 : 1; uint32_t header_credits : 8; /**< VC0 Completion Header Credits The number of initial Completion header credits for VC0, used for all receive queue buffer configurations. This field is writable through the DBI. However, the application must not change this field. */ uint32_t data_credits : 12; /**< VC0 Completion Data Credits The number of initial Completion data credits for VC0, used for all receive queue buffer configurations. This field is writable through the DBI. However, the application must not change this field. 
*/ #else uint32_t data_credits : 12; uint32_t header_credits : 8; uint32_t reserved_20_20 : 1; uint32_t queue_mode : 3; uint32_t reserved_24_31 : 8; #endif } s; struct cvmx_pciercx_cfg468_s cn52xx; struct cvmx_pciercx_cfg468_s cn52xxp1; struct cvmx_pciercx_cfg468_s cn56xx; struct cvmx_pciercx_cfg468_s cn56xxp1; } cvmx_pciercx_cfg468_t; /** * cvmx_pcierc#_cfg490 * * PCIE_CFG490 = Four hundred ninety-first 32-bits of PCIE type 1 config space * (VC0 Posted Buffer Depth) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg490_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_26_31 : 6; uint32_t header_depth : 10; /**< VC0 Posted Header Queue Depth Sets the number of entries in the Posted header queue for VC0 when using the segmented-buffer configuration, writable through the DBI. However, the application must not change this field. */ uint32_t reserved_14_15 : 2; uint32_t data_depth : 14; /**< VC0 Posted Data Queue Depth Sets the number of entries in the Posted data queue for VC0 when using the segmented-buffer configuration, writable through the DBI. However, the application must not change this field. */ #else uint32_t data_depth : 14; uint32_t reserved_14_15 : 2; uint32_t header_depth : 10; uint32_t reserved_26_31 : 6; #endif } s; struct cvmx_pciercx_cfg490_s cn52xx; struct cvmx_pciercx_cfg490_s cn52xxp1; struct cvmx_pciercx_cfg490_s cn56xx; struct cvmx_pciercx_cfg490_s cn56xxp1; } cvmx_pciercx_cfg490_t; /** * cvmx_pcierc#_cfg491 * * PCIE_CFG491 = Four hundred ninety-second 32-bits of PCIE type 1 config space * (VC0 Non-Posted Buffer Depth) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg491_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_26_31 : 6; uint32_t header_depth : 10; /**< VC0 Non-Posted Header Queue Depth Sets the number of entries in the Non-Posted header queue for VC0 when using the segmented-buffer configuration, writable through the DBI. However, the application must not change this field. */ uint32_t reserved_14_15 : 2; uint32_t data_depth : 14; /**< VC0 Non-Posted Data Queue Depth Sets the number of entries in the Non-Posted data queue for VC0 when using the segmented-buffer configuration, writable through the DBI. However, the application must not change this field. */ #else uint32_t data_depth : 14; uint32_t reserved_14_15 : 2; uint32_t header_depth : 10; uint32_t reserved_26_31 : 6; #endif } s; struct cvmx_pciercx_cfg491_s cn52xx; struct cvmx_pciercx_cfg491_s cn52xxp1; struct cvmx_pciercx_cfg491_s cn56xx; struct cvmx_pciercx_cfg491_s cn56xxp1; } cvmx_pciercx_cfg491_t; /** * cvmx_pcierc#_cfg492 * * PCIE_CFG492 = Four hundred ninety-third 32-bits of PCIE type 1 config space * (VC0 Completion Buffer Depth) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg492_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_26_31 : 6; uint32_t header_depth : 10; /**< VC0 Completion Header Queue Depth Sets the number of entries in the Completion header queue for VC0 when using the segmented-buffer configuration, writable through the DBI. However, the application must not change this field. */ uint32_t reserved_14_15 : 2; uint32_t data_depth : 14; /**< VC0 Completion Data Queue Depth Sets the number of entries in the Completion data queue for VC0 when using the segmented-buffer configuration, writable through the DBI. However, the application must not change this field. 
*/ #else uint32_t data_depth : 14; uint32_t reserved_14_15 : 2; uint32_t header_depth : 10; uint32_t reserved_26_31 : 6; #endif } s; struct cvmx_pciercx_cfg492_s cn52xx; struct cvmx_pciercx_cfg492_s cn52xxp1; struct cvmx_pciercx_cfg492_s cn56xx; struct cvmx_pciercx_cfg492_s cn56xxp1; } cvmx_pciercx_cfg492_t; /** * cvmx_pcierc#_cfg516 * * PCIE_CFG516 = Five hundred seventeenth 32-bits of PCIE type 1 config space * (PHY Status Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg516_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t phy_stat : 32; /**< PHY Status */ #else uint32_t phy_stat : 32; #endif } s; struct cvmx_pciercx_cfg516_s cn52xx; struct cvmx_pciercx_cfg516_s cn52xxp1; struct cvmx_pciercx_cfg516_s cn56xx; struct cvmx_pciercx_cfg516_s cn56xxp1; } cvmx_pciercx_cfg516_t; /** * cvmx_pcierc#_cfg517 * * PCIE_CFG517 = Five hundred eighteenth 32-bits of PCIE type 1 config space * (PHY Control Register) */ typedef union { uint32_t u32; struct cvmx_pciercx_cfg517_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t phy_ctrl : 32; /**< PHY Control */ #else uint32_t phy_ctrl : 32; #endif } s; struct cvmx_pciercx_cfg517_s cn52xx; struct cvmx_pciercx_cfg517_s cn52xxp1; struct cvmx_pciercx_cfg517_s cn56xx; struct cvmx_pciercx_cfg517_s cn56xxp1; } cvmx_pciercx_cfg517_t; /** * cvmx_pcm#_dma_cfg */ typedef union { uint64_t u64; struct cvmx_pcmx_dma_cfg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t rdpend : 1; /**< If 0, no L2C read responses pending 1, L2C read responses are outstanding NOTE: When restarting after stopping a running TDM engine, software must wait for RDPEND to read 0 before writing PCMn_TDM_CFG[ENABLE] to a 1 */ uint64_t reserved_54_62 : 9; uint64_t rxslots : 10; /**< Number of 8-bit slots to receive per frame (number of slots in a receive superframe) */ uint64_t reserved_42_43 : 2; uint64_t txslots : 10; /**< Number of 8-bit slots to transmit per frame (number of slots in a transmit superframe) */ uint64_t reserved_30_31 : 2; uint64_t rxst : 10; /**< Number of frame writes for interrupt */ uint64_t reserved_19_19 : 1; uint64_t useldt : 1; /**< If 0, use LDI command to read from L2C 1, use LDT command to read from L2C */ uint64_t txrd : 10; /**< Number of frame reads for interrupt */ uint64_t fetchsiz : 4; /**< FETCHSIZ+1 timeslots are read when threshold is reached. */ uint64_t thresh : 4; /**< If number of bytes remaining in the DMA fifo is <= THRESH, initiate a fetch of timeslot data from the transmit memory region. 
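For example (a hedged reading of the sizing NOTE that follows): with TXSLOTS = 24 and FETCHSIZ = 7, THRESH can be at most 8, since THRESH + min(FETCHSIZ + 1, TXSLOTS) = 8 + min(8, 24) = 16 <= 16.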
NOTE: there are only 16B of buffer for each engine so the settings for FETCHSIZ and THRESH must be such that the buffer will not be overrun: THRESH + min(FETCHSIZ + 1,TXSLOTS) MUST BE <= 16 */ #else uint64_t thresh : 4; uint64_t fetchsiz : 4; uint64_t txrd : 10; uint64_t useldt : 1; uint64_t reserved_19_19 : 1; uint64_t rxst : 10; uint64_t reserved_30_31 : 2; uint64_t txslots : 10; uint64_t reserved_42_43 : 2; uint64_t rxslots : 10; uint64_t reserved_54_62 : 9; uint64_t rdpend : 1; #endif } s; struct cvmx_pcmx_dma_cfg_s cn30xx; struct cvmx_pcmx_dma_cfg_s cn31xx; struct cvmx_pcmx_dma_cfg_s cn50xx; } cvmx_pcmx_dma_cfg_t; /** * cvmx_pcm#_int_ena */ typedef union { uint64_t u64; struct cvmx_pcmx_int_ena_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t rxovf : 1; /**< Enable interrupt if RX byte overflows */ uint64_t txempty : 1; /**< Enable interrupt on TX byte empty */ uint64_t txrd : 1; /**< Enable DMA engine frame read interrupts */ uint64_t txwrap : 1; /**< Enable TX region wrap interrupts */ uint64_t rxst : 1; /**< Enable DMA engine frame store interrupts */ uint64_t rxwrap : 1; /**< Enable RX region wrap interrupts */ uint64_t fsyncextra : 1; /**< Enable FSYNC extra interrupts NOTE: FSYNCEXTRA errors are defined as an FSYNC found in the "wrong" spot of a frame given the programming of PCMn_CLK_CFG[NUMSLOTS] and PCMn_CLK_CFG[EXTRABIT]. */ uint64_t fsyncmissed : 1; /**< Enable FSYNC missed interrupts NOTE: FSYNCMISSED errors are defined as an FSYNC missing from the correct spot in a frame given the programming of PCMn_CLK_CFG[NUMSLOTS] and PCMn_CLK_CFG[EXTRABIT]. */ #else uint64_t fsyncmissed : 1; uint64_t fsyncextra : 1; uint64_t rxwrap : 1; uint64_t rxst : 1; uint64_t txwrap : 1; uint64_t txrd : 1; uint64_t txempty : 1; uint64_t rxovf : 1; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_pcmx_int_ena_s cn30xx; struct cvmx_pcmx_int_ena_s cn31xx; struct cvmx_pcmx_int_ena_s cn50xx; } cvmx_pcmx_int_ena_t; /** * cvmx_pcm#_int_sum */ typedef union { uint64_t u64; struct cvmx_pcmx_int_sum_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t rxovf : 1; /**< RX byte overflowed */ uint64_t txempty : 1; /**< TX byte was empty when sampled */ uint64_t txrd : 1; /**< DMA engine frame read interrupt occurred */ uint64_t txwrap : 1; /**< TX region wrap interrupt occurred */ uint64_t rxst : 1; /**< DMA engine frame store interrupt occurred */ uint64_t rxwrap : 1; /**< RX region wrap interrupt occurred */ uint64_t fsyncextra : 1; /**< FSYNC extra interrupt occurred */ uint64_t fsyncmissed : 1; /**< FSYNC missed interrupt occurred */ #else uint64_t fsyncmissed : 1; uint64_t fsyncextra : 1; uint64_t rxwrap : 1; uint64_t rxst : 1; uint64_t txwrap : 1; uint64_t txrd : 1; uint64_t txempty : 1; uint64_t rxovf : 1; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_pcmx_int_sum_s cn30xx; struct cvmx_pcmx_int_sum_s cn31xx; struct cvmx_pcmx_int_sum_s cn50xx; } cvmx_pcmx_int_sum_t; /** * cvmx_pcm#_rxaddr */ typedef union { uint64_t u64; struct cvmx_pcmx_rxaddr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t addr : 36; /**< Address of the next write to the receive memory region */ #else uint64_t addr : 36; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_pcmx_rxaddr_s cn30xx; struct cvmx_pcmx_rxaddr_s cn31xx; struct cvmx_pcmx_rxaddr_s cn50xx; } cvmx_pcmx_rxaddr_t; /** * cvmx_pcm#_rxcnt */ typedef union { uint64_t u64; struct cvmx_pcmx_rxcnt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t cnt : 16; /**< 
Number of superframes in receive memory region */ #else uint64_t cnt : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_pcmx_rxcnt_s cn30xx; struct cvmx_pcmx_rxcnt_s cn31xx; struct cvmx_pcmx_rxcnt_s cn50xx; } cvmx_pcmx_rxcnt_t; /** * cvmx_pcm#_rxmsk0 */ typedef union { uint64_t u64; struct cvmx_pcmx_rxmsk0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t mask : 64; /**< Receive mask bits for slots 63 to 0 (1 means receive, 0 means don't receive) */ #else uint64_t mask : 64; #endif } s; struct cvmx_pcmx_rxmsk0_s cn30xx; struct cvmx_pcmx_rxmsk0_s cn31xx; struct cvmx_pcmx_rxmsk0_s cn50xx; } cvmx_pcmx_rxmsk0_t; /** * cvmx_pcm#_rxmsk1 */ typedef union { uint64_t u64; struct cvmx_pcmx_rxmsk1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t mask : 64; /**< Receive mask bits for slots 127 to 64 (1 means receive, 0 means don't receive) */ #else uint64_t mask : 64; #endif } s; struct cvmx_pcmx_rxmsk1_s cn30xx; struct cvmx_pcmx_rxmsk1_s cn31xx; struct cvmx_pcmx_rxmsk1_s cn50xx; } cvmx_pcmx_rxmsk1_t; /** * cvmx_pcm#_rxmsk2 */ typedef union { uint64_t u64; struct cvmx_pcmx_rxmsk2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t mask : 64; /**< Receive mask bits for slots 191 to 128 (1 means receive, 0 means don't receive) */ #else uint64_t mask : 64; #endif } s; struct cvmx_pcmx_rxmsk2_s cn30xx; struct cvmx_pcmx_rxmsk2_s cn31xx; struct cvmx_pcmx_rxmsk2_s cn50xx; } cvmx_pcmx_rxmsk2_t; /** * cvmx_pcm#_rxmsk3 */ typedef union { uint64_t u64; struct cvmx_pcmx_rxmsk3_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t mask : 64; /**< Receive mask bits for slots 255 to 192 (1 means receive, 0 means don't receive) */ #else uint64_t mask : 64; #endif } s; struct cvmx_pcmx_rxmsk3_s cn30xx; struct cvmx_pcmx_rxmsk3_s cn31xx; struct cvmx_pcmx_rxmsk3_s cn50xx; } cvmx_pcmx_rxmsk3_t; /** * cvmx_pcm#_rxmsk4 */ typedef union { uint64_t u64; struct cvmx_pcmx_rxmsk4_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t mask : 64; /**< Receive mask bits for slots 319 to 256 (1 means receive, 0 means don't receive) */ #else uint64_t mask : 64; #endif } s; struct cvmx_pcmx_rxmsk4_s cn30xx; struct cvmx_pcmx_rxmsk4_s cn31xx; struct cvmx_pcmx_rxmsk4_s cn50xx; } cvmx_pcmx_rxmsk4_t; /** * cvmx_pcm#_rxmsk5 */ typedef union { uint64_t u64; struct cvmx_pcmx_rxmsk5_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t mask : 64; /**< Receive mask bits for slots 383 to 320 (1 means receive, 0 means don't receive) */ #else uint64_t mask : 64; #endif } s; struct cvmx_pcmx_rxmsk5_s cn30xx; struct cvmx_pcmx_rxmsk5_s cn31xx; struct cvmx_pcmx_rxmsk5_s cn50xx; } cvmx_pcmx_rxmsk5_t; /** * cvmx_pcm#_rxmsk6 */ typedef union { uint64_t u64; struct cvmx_pcmx_rxmsk6_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t mask : 64; /**< Receive mask bits for slots 447 to 384 (1 means receive, 0 means don't receive) */ #else uint64_t mask : 64; #endif } s; struct cvmx_pcmx_rxmsk6_s cn30xx; struct cvmx_pcmx_rxmsk6_s cn31xx; struct cvmx_pcmx_rxmsk6_s cn50xx; } cvmx_pcmx_rxmsk6_t; /** * cvmx_pcm#_rxmsk7 */ typedef union { uint64_t u64; struct cvmx_pcmx_rxmsk7_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t mask : 64; /**< Receive mask bits for slots 511 to 448 (1 means receive, 0 means don't receive) */ #else uint64_t mask : 64; #endif } s; struct cvmx_pcmx_rxmsk7_s cn30xx; struct cvmx_pcmx_rxmsk7_s cn31xx; struct cvmx_pcmx_rxmsk7_s cn50xx; } cvmx_pcmx_rxmsk7_t; /** * cvmx_pcm#_rxstart */ typedef union { uint64_t u64; struct cvmx_pcmx_rxstart_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t addr : 33; /**< Starting address for the receive
memory region */ uint64_t reserved_0_2 : 3; #else uint64_t reserved_0_2 : 3; uint64_t addr : 33; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_pcmx_rxstart_s cn30xx; struct cvmx_pcmx_rxstart_s cn31xx; struct cvmx_pcmx_rxstart_s cn50xx; } cvmx_pcmx_rxstart_t; /** * cvmx_pcm#_tdm_cfg */ typedef union { uint64_t u64; struct cvmx_pcmx_tdm_cfg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t drvtim : 16; /**< Number of ECLKs from start of bit time to stop driving last bit of timeslot (if not driving next timeslot) */ uint64_t samppt : 16; /**< Number of ECLKs from start of bit time to sample data bit. */ uint64_t reserved_3_31 : 29; uint64_t lsbfirst : 1; /**< If 0, shift/receive MSB first 1, shift/receive LSB first */ uint64_t useclk1 : 1; /**< If 0, this PCM is based on BCLK/FSYNC0 1, this PCM is based on BCLK/FSYNC1 */ uint64_t enable : 1; /**< If 1, PCM is enabled, otherwise pins are GPIOs NOTE: when TDM is disabled by detection of an FSYNC error all transmission and reception is halted. In addition, PCMn_TX/RXADDR are updated to point to the position at which the error was detected. */ #else uint64_t enable : 1; uint64_t useclk1 : 1; uint64_t lsbfirst : 1; uint64_t reserved_3_31 : 29; uint64_t samppt : 16; uint64_t drvtim : 16; #endif } s; struct cvmx_pcmx_tdm_cfg_s cn30xx; struct cvmx_pcmx_tdm_cfg_s cn31xx; struct cvmx_pcmx_tdm_cfg_s cn50xx; } cvmx_pcmx_tdm_cfg_t; /** * cvmx_pcm#_tdm_dbg */ typedef union { uint64_t u64; struct cvmx_pcmx_tdm_dbg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t debuginfo : 64; /**< Miscellaneous debug information */ #else uint64_t debuginfo : 64; #endif } s; struct cvmx_pcmx_tdm_dbg_s cn30xx; struct cvmx_pcmx_tdm_dbg_s cn31xx; struct cvmx_pcmx_tdm_dbg_s cn50xx; } cvmx_pcmx_tdm_dbg_t; /** * cvmx_pcm#_txaddr */ typedef union { uint64_t u64; struct cvmx_pcmx_txaddr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t addr : 33; /**< Address of the next read from the transmit memory region */ uint64_t fram : 3; /**< Frame offset NOTE: this is used to extract the correct byte from each 64b word read from the transmit memory region */ #else uint64_t fram : 3; uint64_t addr : 33; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_pcmx_txaddr_s cn30xx; struct cvmx_pcmx_txaddr_s cn31xx; struct cvmx_pcmx_txaddr_s cn50xx; } cvmx_pcmx_txaddr_t; /** * cvmx_pcm#_txcnt */ typedef union { uint64_t u64; struct cvmx_pcmx_txcnt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t cnt : 16; /**< Number of superframes in transmit memory region */ #else uint64_t cnt : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_pcmx_txcnt_s cn30xx; struct cvmx_pcmx_txcnt_s cn31xx; struct cvmx_pcmx_txcnt_s cn50xx; } cvmx_pcmx_txcnt_t; /** * cvmx_pcm#_txmsk0 */ typedef union { uint64_t u64; struct cvmx_pcmx_txmsk0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t mask : 64; /**< Transmit mask bits for slots 63 to 0 (1 means transmit, 0 means don't transmit) */ #else uint64_t mask : 64; #endif } s; struct cvmx_pcmx_txmsk0_s cn30xx; struct cvmx_pcmx_txmsk0_s cn31xx; struct cvmx_pcmx_txmsk0_s cn50xx; } cvmx_pcmx_txmsk0_t; /** * cvmx_pcm#_txmsk1 */ typedef union { uint64_t u64; struct cvmx_pcmx_txmsk1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t mask : 64; /**< Transmit mask bits for slots 127 to 64 (1 means transmit, 0 means don't transmit) */ #else uint64_t mask : 64; #endif } s; struct cvmx_pcmx_txmsk1_s cn30xx; struct cvmx_pcmx_txmsk1_s cn31xx; struct cvmx_pcmx_txmsk1_s cn50xx; } cvmx_pcmx_txmsk1_t; /** * cvmx_pcm#_txmsk2 */ typedef 
union { uint64_t u64; struct cvmx_pcmx_txmsk2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t mask : 64; /**< Transmit mask bits for slots 191 to 128 (1 means transmit, 0 means don't transmit) */ #else uint64_t mask : 64; #endif } s; struct cvmx_pcmx_txmsk2_s cn30xx; struct cvmx_pcmx_txmsk2_s cn31xx; struct cvmx_pcmx_txmsk2_s cn50xx; } cvmx_pcmx_txmsk2_t; /** * cvmx_pcm#_txmsk3 */ typedef union { uint64_t u64; struct cvmx_pcmx_txmsk3_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t mask : 64; /**< Transmit mask bits for slots 255 to 192 (1 means transmit, 0 means don't transmit) */ #else uint64_t mask : 64; #endif } s; struct cvmx_pcmx_txmsk3_s cn30xx; struct cvmx_pcmx_txmsk3_s cn31xx; struct cvmx_pcmx_txmsk3_s cn50xx; } cvmx_pcmx_txmsk3_t; /** * cvmx_pcm#_txmsk4 */ typedef union { uint64_t u64; struct cvmx_pcmx_txmsk4_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t mask : 64; /**< Transmit mask bits for slots 319 to 256 (1 means transmit, 0 means don't transmit) */ #else uint64_t mask : 64; #endif } s; struct cvmx_pcmx_txmsk4_s cn30xx; struct cvmx_pcmx_txmsk4_s cn31xx; struct cvmx_pcmx_txmsk4_s cn50xx; } cvmx_pcmx_txmsk4_t; /** * cvmx_pcm#_txmsk5 */ typedef union { uint64_t u64; struct cvmx_pcmx_txmsk5_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t mask : 64; /**< Transmit mask bits for slots 383 to 320 (1 means transmit, 0 means don't transmit) */ #else uint64_t mask : 64; #endif } s; struct cvmx_pcmx_txmsk5_s cn30xx; struct cvmx_pcmx_txmsk5_s cn31xx; struct cvmx_pcmx_txmsk5_s cn50xx; } cvmx_pcmx_txmsk5_t; /** * cvmx_pcm#_txmsk6 */ typedef union { uint64_t u64; struct cvmx_pcmx_txmsk6_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t mask : 64; /**< Transmit mask bits for slots 447 to 384 (1 means transmit, 0 means don't transmit) */ #else uint64_t mask : 64; #endif } s; struct cvmx_pcmx_txmsk6_s cn30xx; struct cvmx_pcmx_txmsk6_s cn31xx; struct cvmx_pcmx_txmsk6_s cn50xx; } cvmx_pcmx_txmsk6_t; /** * cvmx_pcm#_txmsk7 */ typedef union { uint64_t u64; struct cvmx_pcmx_txmsk7_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t mask : 64; /**< Transmit mask bits for slots 511 to 448 (1 means transmit, 0 means don't transmit) */ #else uint64_t mask : 64; #endif } s; struct cvmx_pcmx_txmsk7_s cn30xx; struct cvmx_pcmx_txmsk7_s cn31xx; struct cvmx_pcmx_txmsk7_s cn50xx; } cvmx_pcmx_txmsk7_t; /** * cvmx_pcm#_txstart */ typedef union { uint64_t u64; struct cvmx_pcmx_txstart_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t addr : 33; /**< Starting address for the transmit memory region */ uint64_t reserved_0_2 : 3; #else uint64_t reserved_0_2 : 3; uint64_t addr : 33; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_pcmx_txstart_s cn30xx; struct cvmx_pcmx_txstart_s cn31xx; struct cvmx_pcmx_txstart_s cn50xx; } cvmx_pcmx_txstart_t; /** * cvmx_pcm_clk#_cfg */ typedef union { uint64_t u64; struct cvmx_pcm_clkx_cfg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t fsyncgood : 1; /**< FSYNC status If 1, the last frame had a correctly positioned fsync pulse If 0, none/extra fsync pulse seen on most recent frame NOTE: this is intended for startup. The FSYNCEXTRA and FSYNCMISSED interrupts are intended for detecting loss of sync during normal operation. */ uint64_t reserved_48_62 : 15; uint64_t fsyncsamp : 16; /**< Number of ECLKs from internal BCLK edge to sample FSYNC NOTE: used to sync to the start of a frame and to check for FSYNC errors.
*/ uint64_t reserved_26_31 : 6; uint64_t fsynclen : 5; /**< Number of 1/2 BCLKs FSYNC is asserted for NOTE: only used when GEN==1 */ uint64_t fsyncloc : 5; /**< FSYNC location, in 1/2 BCLKs before timeslot 0, bit 0. NOTE: also used to detect framing errors and therefore must have a correct value even if GEN==0 */ uint64_t numslots : 10; /**< Number of 8-bit slots in a frame NOTE: this, along with EXTRABIT and Fbclk determines FSYNC frequency when GEN == 1 NOTE: also used to detect framing errors and therefore must have a correct value even if GEN==0 */ uint64_t extrabit : 1; /**< If 0, no frame bit If 1, add one extra bit time for frame bit NOTE: if GEN == 1, then FSYNC will be delayed one extra bit time. NOTE: also used to detect framing errors and therefore must have a correct value even if GEN==0 NOTE: the extra bit comes from the LSB/MSB of the first byte of the frame in the transmit memory region. LSB vs MSB is determined from the setting of PCMn_TDM_CFG[LSBFIRST]. */ uint64_t bitlen : 2; /**< Number of BCLKs in a bit time. 0 : 1 BCLK 1 : 2 BCLKs 2 : 4 BCLKs 3 : operation undefined */ uint64_t bclkpol : 1; /**< If 0, BCLK rise edge is start of bit time If 1, BCLK fall edge is start of bit time NOTE: also used to detect framing errors and therefore must have a correct value even if GEN==0 */ uint64_t fsyncpol : 1; /**< If 0, FSYNC idles low, asserts high If 1, FSYNC idles high, asserts low NOTE: also used to detect framing errors and therefore must have a correct value even if GEN==0 */ uint64_t ena : 1; /**< If 0, Clock receiving logic is doing nothing 1, Clock receiving logic is looking for sync */ #else uint64_t ena : 1; uint64_t fsyncpol : 1; uint64_t bclkpol : 1; uint64_t bitlen : 2; uint64_t extrabit : 1; uint64_t numslots : 10; uint64_t fsyncloc : 5; uint64_t fsynclen : 5; uint64_t reserved_26_31 : 6; uint64_t fsyncsamp : 16; uint64_t reserved_48_62 : 15; uint64_t fsyncgood : 1; #endif } s; struct cvmx_pcm_clkx_cfg_s cn30xx; struct cvmx_pcm_clkx_cfg_s cn31xx; struct cvmx_pcm_clkx_cfg_s cn50xx; } cvmx_pcm_clkx_cfg_t; /** * cvmx_pcm_clk#_dbg */ typedef union { uint64_t u64; struct cvmx_pcm_clkx_dbg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t debuginfo : 64; /**< Miscellaneous debug information */ #else uint64_t debuginfo : 64; #endif } s; struct cvmx_pcm_clkx_dbg_s cn30xx; struct cvmx_pcm_clkx_dbg_s cn31xx; struct cvmx_pcm_clkx_dbg_s cn50xx; } cvmx_pcm_clkx_dbg_t; /** * cvmx_pcm_clk#_gen */ typedef union { uint64_t u64; struct cvmx_pcm_clkx_gen_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t deltasamp : 16; /**< Signed number of ECLKs to move sampled BCLK edge NOTE: the complete number of ECLKs to move is: NUMSAMP + 2 + 1 + DELTASAMP NUMSAMP to compensate for sampling delay + 2 to compensate for dual-rank synchronizer + 1 for uncertainty + DELTASAMP to CMA/debugging */ uint64_t numsamp : 16; /**< Number of ECLK samples to detect BCLK change when receiving clock. */ uint64_t n : 32; /**< Determines BCLK frequency when generating clock NOTE: Fbclk = Feclk * N / 2^32 N = (Fbclk / Feclk) * 2^32 NOTE: writing N == 0 stops the clock generator, and causes bclk and fsync to be RECEIVED */ #else uint64_t n : 32; uint64_t numsamp : 16; uint64_t deltasamp : 16; #endif } s; struct cvmx_pcm_clkx_gen_s cn30xx; struct cvmx_pcm_clkx_gen_s cn31xx; struct cvmx_pcm_clkx_gen_s cn50xx; } cvmx_pcm_clkx_gen_t; /** * cvmx_pcs#_an#_adv_reg * * Bits [15:9] in the Status Register indicate ability to operate as per those signalling specifications, * when misc ctl reg MAC_PHY bit is set to MAC mode.
Bits [15:9] will always read 1'b0, indicating * that the chip cannot operate in the corresponding modes. * * Bit [4] RM_FLT is a don't care when the selected mode is SGMII. * * * * PCS_AN_ADV_REG = AN Advertisement Register4 */ typedef union { uint64_t u64; struct cvmx_pcsx_anx_adv_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t np : 1; /**< Always 0, no next page capability supported */ uint64_t reserved_14_14 : 1; uint64_t rem_flt : 2; /**< [<13>,<12>] 0 0 Link OK XMIT=DATA 0 1 Link failure (loss of sync, XMIT != DATA) 1 0 local device Offline 1 1 AN Error (failure to complete AN) AN Error is set if resolution function precludes operation with link partner */ uint64_t reserved_9_11 : 3; uint64_t pause : 2; /**< [<8>, <7>] Pause frame flow capability across link Exchanged during Auto Negotiation 0 0 No Pause 0 1 Symmetric pause 1 0 Asymmetric Pause 1 1 Both symm and asymm pause to local device */ uint64_t hfd : 1; /**< 1 means local device Half Duplex capable */ uint64_t fd : 1; /**< 1 means local device Full Duplex capable */ uint64_t reserved_0_4 : 5; #else uint64_t reserved_0_4 : 5; uint64_t fd : 1; uint64_t hfd : 1; uint64_t pause : 2; uint64_t reserved_9_11 : 3; uint64_t rem_flt : 2; uint64_t reserved_14_14 : 1; uint64_t np : 1; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_pcsx_anx_adv_reg_s cn52xx; struct cvmx_pcsx_anx_adv_reg_s cn52xxp1; struct cvmx_pcsx_anx_adv_reg_s cn56xx; struct cvmx_pcsx_anx_adv_reg_s cn56xxp1; } cvmx_pcsx_anx_adv_reg_t; /** * cvmx_pcs#_an#_ext_st_reg * * NOTE: * an_results_reg is don't care when AN_OVRD is set to 1. If AN_OVRD=0 and AN_CPT=1 * the an_results_reg is valid. * * * PCS_AN_EXT_ST_REG = AN Extended Status Register15 * as per IEEE802.3 Clause 22 */ typedef union { uint64_t u64; struct cvmx_pcsx_anx_ext_st_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t thou_xfd : 1; /**< 1 means PHY is 1000BASE-X Full Dup capable */ uint64_t thou_xhd : 1; /**< 1 means PHY is 1000BASE-X Half Dup capable */ uint64_t thou_tfd : 1; /**< 1 means PHY is 1000BASE-T Full Dup capable */ uint64_t thou_thd : 1; /**< 1 means PHY is 1000BASE-T Half Dup capable */ uint64_t reserved_0_11 : 12; #else uint64_t reserved_0_11 : 12; uint64_t thou_thd : 1; uint64_t thou_tfd : 1; uint64_t thou_xhd : 1; uint64_t thou_xfd : 1; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_pcsx_anx_ext_st_reg_s cn52xx; struct cvmx_pcsx_anx_ext_st_reg_s cn52xxp1; struct cvmx_pcsx_anx_ext_st_reg_s cn56xx; struct cvmx_pcsx_anx_ext_st_reg_s cn56xxp1; } cvmx_pcsx_anx_ext_st_reg_t; /** * cvmx_pcs#_an#_lp_abil_reg * * PCS_AN_LP_ABIL_REG = AN Link Partner Ability Register5 * as per IEEE802.3 Clause 37 */ typedef union { uint64_t u64; struct cvmx_pcsx_anx_lp_abil_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t np : 1; /**< 1=lp next page capable, 0=lp not next page capable */ uint64_t ack : 1; /**< 1=Acknowledgement received */ uint64_t rem_flt : 2; /**< [<13>,<12>] Link Partner's link status 0 0 Link OK 0 1 Offline 1 0 Link failure 1 1 AN Error */ uint64_t reserved_9_11 : 3; uint64_t pause : 2; /**< [<8>, <7>] Link Partner Pause setting 0 0 No Pause 0 1 Symmetric pause 1 0 Asymmetric Pause 1 1 Both symm and asymm pause to local device */ uint64_t hfd : 1; /**< 1 means link partner Half Duplex capable */ uint64_t fd : 1; /**< 1 means link partner Full Duplex capable */ uint64_t reserved_0_4 : 5; #else uint64_t reserved_0_4 : 5; uint64_t fd : 1; uint64_t hfd : 1; uint64_t pause : 2; uint64_t
reserved_9_11 : 3; uint64_t rem_flt : 2; uint64_t ack : 1; uint64_t np : 1; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_pcsx_anx_lp_abil_reg_s cn52xx; struct cvmx_pcsx_anx_lp_abil_reg_s cn52xxp1; struct cvmx_pcsx_anx_lp_abil_reg_s cn56xx; struct cvmx_pcsx_anx_lp_abil_reg_s cn56xxp1; } cvmx_pcsx_anx_lp_abil_reg_t; /** * cvmx_pcs#_an#_results_reg * * PCS_AN_RESULTS_REG = AN Results Register * */ typedef union { uint64_t u64; struct cvmx_pcsx_anx_results_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_7_63 : 57; uint64_t pause : 2; /**< [<6>, <5>] PAUSE Selection (Don't care for SGMII) 0 0 Disable Pause, TX and RX 0 1 Enable pause frames RX only 1 0 Enable Pause frames TX only 1 1 Enable pause frames TX and RX */ uint64_t spd : 2; /**< [<4>, <3>] Link Speed Selection 0 0 10Mb/s 0 1 100Mb/s 1 0 1000Mb/s 1 1 RSVD */ uint64_t an_cpt : 1; /**< 1=AN Completed, 0=AN not completed or failed */ uint64_t dup : 1; /**< 1=Full Duplex, 0=Half Duplex */ uint64_t link_ok : 1; /**< 1=Link up (OK), 0=Link down */ #else uint64_t link_ok : 1; uint64_t dup : 1; uint64_t an_cpt : 1; uint64_t spd : 2; uint64_t pause : 2; uint64_t reserved_7_63 : 57; #endif } s; struct cvmx_pcsx_anx_results_reg_s cn52xx; struct cvmx_pcsx_anx_results_reg_s cn52xxp1; struct cvmx_pcsx_anx_results_reg_s cn56xx; struct cvmx_pcsx_anx_results_reg_s cn56xxp1; } cvmx_pcsx_anx_results_reg_t; /** * cvmx_pcs#_int#_en_reg * * NOTE: RXERR and TXERR conditions to be discussed with Dan before finalising * * * PCS Interrupt Enable Register */ typedef union { uint64_t u64; struct cvmx_pcsx_intx_en_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t dup : 1; /**< Enable duplex mode changed interrupt */ uint64_t sync_bad_en : 1; /**< Enable rx sync st machine in bad state interrupt */ uint64_t an_bad_en : 1; /**< Enable AN state machine bad state interrupt */ uint64_t rxlock_en : 1; /**< Enable rx code group sync/bit lock failure interrupt */ uint64_t rxbad_en : 1; /**< Enable rx state machine in bad state interrupt */ uint64_t rxerr_en : 1; /**< Enable RX error condition interrupt */ uint64_t txbad_en : 1; /**< Enable tx state machine in bad state interrupt */ uint64_t txfifo_en : 1; /**< Enable tx fifo overflow condition interrupt */ uint64_t txfifu_en : 1; /**< Enable tx fifo underflow condition interrupt */ uint64_t an_err_en : 1; /**< Enable AN Error condition interrupt */ uint64_t xmit_en : 1; /**< Enable XMIT variable state change interrupt */ uint64_t lnkspd_en : 1; /**< Enable Link Speed has changed interrupt */ #else uint64_t lnkspd_en : 1; uint64_t xmit_en : 1; uint64_t an_err_en : 1; uint64_t txfifu_en : 1; uint64_t txfifo_en : 1; uint64_t txbad_en : 1; uint64_t rxerr_en : 1; uint64_t rxbad_en : 1; uint64_t rxlock_en : 1; uint64_t an_bad_en : 1; uint64_t sync_bad_en : 1; uint64_t dup : 1; uint64_t reserved_12_63 : 52; #endif } s; struct cvmx_pcsx_intx_en_reg_s cn52xx; struct cvmx_pcsx_intx_en_reg_s cn52xxp1; struct cvmx_pcsx_intx_en_reg_s cn56xx; struct cvmx_pcsx_intx_en_reg_s cn56xxp1; } cvmx_pcsx_intx_en_reg_t; /** * cvmx_pcs#_int#_reg * * SGMII bit [12] is really a misnomer; it is a decode of pi_qlm_cfg pins to indicate SGMII or 1000Base-X modes. * * Note: MODE bit * When MODE=1, 1000Base-X mode is selected. Auto negotiation will follow IEEE 802.3 clause 37. * When MODE=0, SGMII mode is selected and the following note will apply.
 * Repeat note from SGM_AN_ADV register * NOTE: The SGMII AN Advertisement Register above will be sent during Auto Negotiation if the MAC_PHY mode bit in misc_ctl_reg * is set (1=PHY mode). If the bit is not set (0=MAC mode), the tx_config_reg[14] becomes ACK bit and [0] is always 1. * All other bits in tx_config_reg sent will be 0. The PHY dictates the Auto Negotiation results. * * PCS Interrupt Register */ typedef union { uint64_t u64; struct cvmx_pcsx_intx_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t dup : 1; /**< Set whenever Duplex mode changes on the link */ uint64_t sync_bad : 1; /**< Set by HW whenever rx sync st machine reaches a bad state. Should never be set during normal operation */ uint64_t an_bad : 1; /**< Set by HW whenever AN st machine reaches a bad state. Should never be set during normal operation */ uint64_t rxlock : 1; /**< Set by HW whenever code group Sync or bit lock failure occurs Cannot fire in loopback1 mode */ uint64_t rxbad : 1; /**< Set by HW whenever rx st machine reaches a bad state. Should never be set during normal operation */ uint64_t rxerr : 1; /**< Set whenever RX receives a code group error in 10 bit to 8 bit decode logic Cannot fire in loopback1 mode */ uint64_t txbad : 1; /**< Set by HW whenever tx st machine reaches a bad state. Should never be set during normal operation */ uint64_t txfifo : 1; /**< Set whenever HW detects a TX fifo overflow condition */ uint64_t txfifu : 1; /**< Set whenever HW detects a TX fifo underflow condition */ uint64_t an_err : 1; /**< AN Error, AN resolution function failed */ uint64_t xmit : 1; /**< Set whenever HW detects a change in the XMIT variable. XMIT variable states are IDLE, CONFIG and DATA */ uint64_t lnkspd : 1; /**< Set by HW whenever Link Speed has changed */ #else uint64_t lnkspd : 1; uint64_t xmit : 1; uint64_t an_err : 1; uint64_t txfifu : 1; uint64_t txfifo : 1; uint64_t txbad : 1; uint64_t rxerr : 1; uint64_t rxbad : 1; uint64_t rxlock : 1; uint64_t an_bad : 1; uint64_t sync_bad : 1; uint64_t dup : 1; uint64_t reserved_12_63 : 52; #endif } s; struct cvmx_pcsx_intx_reg_s cn52xx; struct cvmx_pcsx_intx_reg_s cn52xxp1; struct cvmx_pcsx_intx_reg_s cn56xx; struct cvmx_pcsx_intx_reg_s cn56xxp1; } cvmx_pcsx_intx_reg_t; /** * cvmx_pcs#_link#_timer_count_reg * * PCS_LINK_TIMER_COUNT_REG = 1.6ms nominal link timer register * */ typedef union { uint64_t u64; struct cvmx_pcsx_linkx_timer_count_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t count : 16; /**< (core clock period times 1024) times "COUNT" should be 1.6ms(SGMII)/10ms(otherwise) which is the link timer used in auto negotiation.
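As a hedged worked example: one COUNT unit is 1024 core-clock periods, so at a 700MHz eclk each unit is 1024 / 700MHz = 1.46us, giving COUNT = 1.6ms / 1.46us = about 1094 (0x446) for the 1.6ms SGMII timer and about 6836 (0x1AB4) for the 10ms timer.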
Reset assumes a 700MHz eclk for 1.6ms link timer */ #else uint64_t count : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_pcsx_linkx_timer_count_reg_s cn52xx; struct cvmx_pcsx_linkx_timer_count_reg_s cn52xxp1; struct cvmx_pcsx_linkx_timer_count_reg_s cn56xx; struct cvmx_pcsx_linkx_timer_count_reg_s cn56xxp1; } cvmx_pcsx_linkx_timer_count_reg_t; /** * cvmx_pcs#_log_anl#_reg * * PCS Logic Analyzer Register * */ typedef union { uint64_t u64; struct cvmx_pcsx_log_anlx_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t lafifovfl : 1; /**< 1=logic analyzer FIFO overflowed during packetization Write 1 to clear this bit */ uint64_t la_en : 1; /**< 1=Logic Analyzer enabled, 0=Logic Analyzer disabled */ uint64_t pkt_sz : 2; /**< [<1>, <0>] Logic Analyzer Packet Size 0 0 Packet size 1k bytes 0 1 Packet size 4k bytes 1 0 Packet size 8k bytes 1 1 Packet size 16k bytes */ #else uint64_t pkt_sz : 2; uint64_t la_en : 1; uint64_t lafifovfl : 1; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_pcsx_log_anlx_reg_s cn52xx; struct cvmx_pcsx_log_anlx_reg_s cn52xxp1; struct cvmx_pcsx_log_anlx_reg_s cn56xx; struct cvmx_pcsx_log_anlx_reg_s cn56xxp1; } cvmx_pcsx_log_anlx_reg_t; /** * cvmx_pcs#_misc#_ctl_reg * * SGMII Misc Control Register * */ typedef union { uint64_t u64; struct cvmx_pcsx_miscx_ctl_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_13_63 : 51; uint64_t sgmii : 1; /**< 1=SGMII or 1000Base-X mode selected, 0=XAUI or PCIE mode selected This bit represents pi_qlm1/3_cfg[1:0] pin status */ uint64_t gmxeno : 1; /**< GMX Enable override. When set to 1, forces GMX to appear disabled. The enable/disable status of GMX is checked only at SOP of every packet. */ uint64_t loopbck2 : 1; /**< Sets external loopback mode to return rx data back out via tx data path. 0=no loopback, 1=loopback */ uint64_t mac_phy : 1; /**< 0=MAC, 1=PHY decides the tx_config_reg value to be sent during auto negotiation. See SGMII spec ENG-46158 from CISCO */ uint64_t mode : 1; /**< 0=SGMII or 1=1000Base-X */ uint64_t an_ovrd : 1; /**< 0=disable, 1=enable override of AN results Auto negotiation is allowed to happen but the results are ignored when set. Duplex and Link speed values are set from the pcs_mr_ctrl reg */ uint64_t samp_pt : 7; /**< Byte# in elongated frames for 10/100Mb/s operation for data sampling on RX side in PCS. Recommended values are 0x5 for 100Mb/s operation and 0x32 for 10Mb/s operation. For 10Mb/s operation this field should be set to a value less than 99 and greater than 0. If set out of this range a value of 50 will be used for actual sampling internally without affecting the CSR field. For 100Mb/s operation this field should be set to a value less than 9 and greater than 0.
If set out of this range a value of 5 will be used for actual sampling internally without affecting the CSR field */ #else uint64_t samp_pt : 7; uint64_t an_ovrd : 1; uint64_t mode : 1; uint64_t mac_phy : 1; uint64_t loopbck2 : 1; uint64_t gmxeno : 1; uint64_t sgmii : 1; uint64_t reserved_13_63 : 51; #endif } s; struct cvmx_pcsx_miscx_ctl_reg_s cn52xx; struct cvmx_pcsx_miscx_ctl_reg_s cn52xxp1; struct cvmx_pcsx_miscx_ctl_reg_s cn56xx; struct cvmx_pcsx_miscx_ctl_reg_s cn56xxp1; } cvmx_pcsx_miscx_ctl_reg_t; /** * cvmx_pcs#_mr#_control_reg * * PCS_MR_CONTROL_REG = Control Register0 * */ typedef union { uint64_t u64; struct cvmx_pcsx_mrx_control_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t reset : 1; /**< 1=SW Reset, the bit will return to 0 after pcs has been reset. Takes 32 eclk cycles to reset pcs */ uint64_t loopbck1 : 1; /**< 0=normal operation, 1=loopback. The loopback mode will return (loopback) tx data from GMII tx back to GMII rx interface. The loopback happens in the pcs module. Auto Negotiation will be disabled even if the AN_EN bit is set, during loopback */ uint64_t spdlsb : 1; /**< See bit 6 description */ uint64_t an_en : 1; /**< 1=AN Enable, 0=AN Disable */ uint64_t pwr_dn : 1; /**< 1=Power Down (HW reset), 0=Normal operation */ uint64_t reserved_10_10 : 1; uint64_t rst_an : 1; /**< If bit 12 is set and bit 3 of status reg is 1, Auto Negotiation begins. Else, SW writes are ignored and this bit remains at 0. This bit clears itself to 0, when AN starts. */ uint64_t dup : 1; /**< 1=full duplex, 0=half duplex; effective only if AN disabled. If status register bits [15:9] and extended status reg bits [15:12] allow only one duplex mode, this bit will correspond to that value and any attempt to write will be ignored. */ uint64_t coltst : 1; /**< 1=enable COL signal test, 0=disable test During COL test, the COL signal will reflect the GMII TX_EN signal with less than 16BT delay */ uint64_t spdmsb : 1; /**< [<6>, <13>] Link Speed effective only if AN disabled 0 0 10Mb/s 0 1 100Mb/s 1 0 1000Mb/s 1 1 RSVD */ uint64_t uni : 1; /**< Unidirectional (Std 802.3-2005, Clause 66.2) This bit will override the AN_EN bit and disable the auto-negotiation variable mr_an_enable, when set. Used in both 1000Base-X and SGMII modes */ uint64_t reserved_0_4 : 5; #else uint64_t reserved_0_4 : 5; uint64_t uni : 1; uint64_t spdmsb : 1; uint64_t coltst : 1; uint64_t dup : 1; uint64_t rst_an : 1; uint64_t reserved_10_10 : 1; uint64_t pwr_dn : 1; uint64_t an_en : 1; uint64_t spdlsb : 1; uint64_t loopbck1 : 1; uint64_t reset : 1; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_pcsx_mrx_control_reg_s cn52xx; struct cvmx_pcsx_mrx_control_reg_s cn52xxp1; struct cvmx_pcsx_mrx_control_reg_s cn56xx; struct cvmx_pcsx_mrx_control_reg_s cn56xxp1; } cvmx_pcsx_mrx_control_reg_t; /** * cvmx_pcs#_mr#_status_reg * * NOTE: * Whenever AN_EN bit[12] is set, Auto negotiation is allowed to happen. The results * of the auto negotiation process set the fields in the AN_RESULTS reg. When AN_EN is not set, * AN_RESULTS reg is don't care. The effective SPD, DUP etc. get their values * from the pcs_mr_ctrl reg.
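 * A short illustrative restart-AN flow (a hedged sketch, not the SDK's canonical bring-up: it assumes the generic
 * cvmx_read_csr/cvmx_write_csr accessors and a CVMX_PCSX_MRX_CONTROL_REG address macro, with hypothetical
 * (index, interface) arguments):
 *   cvmx_pcsx_mrx_control_reg_t ctl;
 *   ctl.u64 = cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
 *   ctl.s.an_en = 1;
 *   ctl.s.rst_an = 1;   // self-clears once AN actually starts
 *   cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface), ctl.u64);
 * Then poll AN_CPT in the Status Register below until it reads 1 before trusting AN_RESULTS.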
* * PCS_MR_STATUS_REG = Status Register1 */ typedef union { uint64_t u64; struct cvmx_pcsx_mrx_status_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t hun_t4 : 1; /**< 1 means 100Base-T4 capable */ uint64_t hun_xfd : 1; /**< 1 means 100Base-X Full Duplex */ uint64_t hun_xhd : 1; /**< 1 means 100Base-X Half Duplex */ uint64_t ten_fd : 1; /**< 1 means 10Mb/s Full Duplex */ uint64_t ten_hd : 1; /**< 1 means 10Mb/s Half Duplex */ uint64_t hun_t2fd : 1; /**< 1 means 100Base-T2 Full Duplex */ uint64_t hun_t2hd : 1; /**< 1 means 100Base-T2 Half Duplex */ uint64_t ext_st : 1; /**< 1 means extended status info in reg15 */ uint64_t reserved_7_7 : 1; uint64_t prb_sup : 1; /**< 1 means able to work without preamble bytes at the beginning of frames. 0 means not able to accept frames without preamble bytes preceding them. */ uint64_t an_cpt : 1; /**< 1 means Auto Negotiation is complete and the contents of the an_results_reg are valid. */ uint64_t rm_flt : 1; /**< Set to 1 when remote flt condition occurs. This bit implements a latching Hi behavior. It is cleared by SW read of this reg or when reset bit [15] in Control Reg is asserted. See an adv reg[13:12] for flt conditions */ uint64_t an_abil : 1; /**< 1 means Auto Negotiation capable */ uint64_t lnk_st : 1; /**< 1=link up, 0=link down. Set during AN process Set whenever XMIT=DATA. Latching Lo behavior when link goes down. Link down value of the bit stays low until SW reads the reg. */ uint64_t reserved_1_1 : 1; uint64_t extnd : 1; /**< Always 0, no extended capability regs present */ #else uint64_t extnd : 1; uint64_t reserved_1_1 : 1; uint64_t lnk_st : 1; uint64_t an_abil : 1; uint64_t rm_flt : 1; uint64_t an_cpt : 1; uint64_t prb_sup : 1; uint64_t reserved_7_7 : 1; uint64_t ext_st : 1; uint64_t hun_t2hd : 1; uint64_t hun_t2fd : 1; uint64_t ten_hd : 1; uint64_t ten_fd : 1; uint64_t hun_xhd : 1; uint64_t hun_xfd : 1; uint64_t hun_t4 : 1; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_pcsx_mrx_status_reg_s cn52xx; struct cvmx_pcsx_mrx_status_reg_s cn52xxp1; struct cvmx_pcsx_mrx_status_reg_s cn56xx; struct cvmx_pcsx_mrx_status_reg_s cn56xxp1; } cvmx_pcsx_mrx_status_reg_t; /** * cvmx_pcs#_rx#_states_reg * * PCS_RX_STATES_REG = RX State Machines states register * */ typedef union { uint64_t u64; struct cvmx_pcsx_rxx_states_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t rx_bad : 1; /**< Receive state machine in an illegal state */ uint64_t rx_st : 5; /**< Receive state machine state */ uint64_t sync_bad : 1; /**< Receive synchronization SM in an illegal state */ uint64_t sync : 4; /**< Receive synchronization SM state */ uint64_t an_bad : 1; /**< Auto Negotiation state machine in an illegal state */ uint64_t an_st : 4; /**< Auto Negotiation state machine state */ #else uint64_t an_st : 4; uint64_t an_bad : 1; uint64_t sync : 4; uint64_t sync_bad : 1; uint64_t rx_st : 5; uint64_t rx_bad : 1; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_pcsx_rxx_states_reg_s cn52xx; struct cvmx_pcsx_rxx_states_reg_s cn52xxp1; struct cvmx_pcsx_rxx_states_reg_s cn56xx; struct cvmx_pcsx_rxx_states_reg_s cn56xxp1; } cvmx_pcsx_rxx_states_reg_t; /** * cvmx_pcs#_rx#_sync_reg * * Note: * r_tx_rx_polarity_reg bit [2] will show correct polarity needed on the link receive path after code grp synchronization is achieved. 
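 *
 * A hedged polling sketch of that sequence (assuming the generic
 * cvmx_read_csr() helper and the CVMX_PCSX_RXX_SYNC_REG /
 * CVMX_PCSX_TX_RXX_POLARITY_REG address macros from the companion
 * address header):
 *
 *   cvmx_pcsx_rxx_sync_reg_t sync;
 *   cvmx_pcsx_tx_rxx_polarity_reg_t pol;
 *   do
 *       sync.u64 = cvmx_read_csr(CVMX_PCSX_RXX_SYNC_REG(0, 0));
 *   while (!sync.s.bit_lock || !sync.s.sync);
 *   pol.u64 = cvmx_read_csr(CVMX_PCSX_TX_RXX_POLARITY_REG(0, 0));
 *   // pol.s.autorxpl now reports the RX polarity the link requires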
* * * PCS_RX_SYNC_REG = Code Group synchronization reg */ typedef union { uint64_t u64; struct cvmx_pcsx_rxx_sync_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t sync : 1; /**< 1 means code group synchronization achieved */ uint64_t bit_lock : 1; /**< 1 means bit lock achieved */ #else uint64_t bit_lock : 1; uint64_t sync : 1; uint64_t reserved_2_63 : 62; #endif } s; struct cvmx_pcsx_rxx_sync_reg_s cn52xx; struct cvmx_pcsx_rxx_sync_reg_s cn52xxp1; struct cvmx_pcsx_rxx_sync_reg_s cn56xx; struct cvmx_pcsx_rxx_sync_reg_s cn56xxp1; } cvmx_pcsx_rxx_sync_reg_t; /** * cvmx_pcs#_sgm#_an_adv_reg * * SGMII AN Advertisement Register (sent out as tx_config_reg) * */ typedef union { uint64_t u64; struct cvmx_pcsx_sgmx_an_adv_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t link : 1; /**< Link status 1 Link Up, 0 Link Down */ uint64_t ack : 1; /**< Auto negotiation ack */ uint64_t reserved_13_13 : 1; uint64_t dup : 1; /**< Duplex mode 1=full duplex, 0=half duplex */ uint64_t speed : 2; /**< Link Speed 0 0 10Mb/s 0 1 100Mb/s 1 0 1000Mb/s 1 1 RSVD */ uint64_t reserved_1_9 : 9; uint64_t one : 1; /**< Always set to match tx_config_reg<0> */ #else uint64_t one : 1; uint64_t reserved_1_9 : 9; uint64_t speed : 2; uint64_t dup : 1; uint64_t reserved_13_13 : 1; uint64_t ack : 1; uint64_t link : 1; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_pcsx_sgmx_an_adv_reg_s cn52xx; struct cvmx_pcsx_sgmx_an_adv_reg_s cn52xxp1; struct cvmx_pcsx_sgmx_an_adv_reg_s cn56xx; struct cvmx_pcsx_sgmx_an_adv_reg_s cn56xxp1; } cvmx_pcsx_sgmx_an_adv_reg_t; /** * cvmx_pcs#_sgm#_lp_adv_reg * * NOTE: The SGMII AN Advertisement Register above will be sent during Auto Negotiation if the MAC_PHY mode bit in misc_ctl_reg * is set (1=PHY mode). If the bit is not set (0=MAC mode), the tx_config_reg[14] becomes ACK bit and [0] is always 1. * All other bits in tx_config_reg sent will be 0. The PHY dictates the Auto Negotiation results. 
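 *
 * A minimal sketch of consuming the negotiated results in MAC mode
 * (hedged; assumes the generic cvmx_read_csr() helper and the
 * CVMX_PCSX_MRX_STATUS_REG / CVMX_PCSX_SGMX_LP_ADV_REG address macros):
 *
 *   cvmx_pcsx_mrx_status_reg_t st;
 *   cvmx_pcsx_sgmx_lp_adv_reg_t lp;
 *   do
 *       st.u64 = cvmx_read_csr(CVMX_PCSX_MRX_STATUS_REG(0, 0));
 *   while (!st.s.an_cpt);  // wait for auto negotiation to complete
 *   lp.u64 = cvmx_read_csr(CVMX_PCSX_SGMX_LP_ADV_REG(0, 0));
 *   // lp.s.speed and lp.s.dup now hold the PHY-dictated link parameters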
* * SGMII LP Advertisement Register (received as rx_config_reg) */ typedef union { uint64_t u64; struct cvmx_pcsx_sgmx_lp_adv_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t link : 1; /**< Link status 1 Link Up, 0 Link Down */ uint64_t reserved_13_14 : 2; uint64_t dup : 1; /**< Duplex mode 1=full duplex, 0=half duplex */ uint64_t speed : 2; /**< Link Speed 0 0 10Mb/s 0 1 100Mb/s 1 0 1000Mb/s 1 1 RSVD */ uint64_t reserved_1_9 : 9; uint64_t one : 1; /**< Always set to match tx_config_reg<0> */ #else uint64_t one : 1; uint64_t reserved_1_9 : 9; uint64_t speed : 2; uint64_t dup : 1; uint64_t reserved_13_14 : 2; uint64_t link : 1; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_pcsx_sgmx_lp_adv_reg_s cn52xx; struct cvmx_pcsx_sgmx_lp_adv_reg_s cn52xxp1; struct cvmx_pcsx_sgmx_lp_adv_reg_s cn56xx; struct cvmx_pcsx_sgmx_lp_adv_reg_s cn56xxp1; } cvmx_pcsx_sgmx_lp_adv_reg_t; /** * cvmx_pcs#_tx#_states_reg * * PCS_TX_STATES_REG = TX State Machines states register * */ typedef union { uint64_t u64; struct cvmx_pcsx_txx_states_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_7_63 : 57; uint64_t xmit : 2; /**< 0=undefined, 1=config, 2=idle, 3=data */ uint64_t tx_bad : 1; /**< Xmit state machine in a bad state */ uint64_t ord_st : 4; /**< Xmit ordered set state machine state */ #else uint64_t ord_st : 4; uint64_t tx_bad : 1; uint64_t xmit : 2; uint64_t reserved_7_63 : 57; #endif } s; struct cvmx_pcsx_txx_states_reg_s cn52xx; struct cvmx_pcsx_txx_states_reg_s cn52xxp1; struct cvmx_pcsx_txx_states_reg_s cn56xx; struct cvmx_pcsx_txx_states_reg_s cn56xxp1; } cvmx_pcsx_txx_states_reg_t; /** * cvmx_pcs#_tx_rx#_polarity_reg * * PCS_POLARITY_REG = TX_RX polarity reg * */ typedef union { uint64_t u64; struct cvmx_pcsx_tx_rxx_polarity_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t rxovrd : 1; /**< When 0, <2> determines polarity when 1, <1> determines polarity */ uint64_t autorxpl : 1; /**< Auto RX polarity detected. 
1=inverted, 0=normal This bit always represents the correct rx polarity setting needed for successful rx path operartion, once a successful code group sync is obtained */ uint64_t rxplrt : 1; /**< 1 is inverted polarity, 0 is normal polarity */ uint64_t txplrt : 1; /**< 1 is inverted polarity, 0 is normal polarity */ #else uint64_t txplrt : 1; uint64_t rxplrt : 1; uint64_t autorxpl : 1; uint64_t rxovrd : 1; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_pcsx_tx_rxx_polarity_reg_s cn52xx; struct cvmx_pcsx_tx_rxx_polarity_reg_s cn52xxp1; struct cvmx_pcsx_tx_rxx_polarity_reg_s cn56xx; struct cvmx_pcsx_tx_rxx_polarity_reg_s cn56xxp1; } cvmx_pcsx_tx_rxx_polarity_reg_t; /** * cvmx_pcsx#_10gbx_status_reg * * PCSX_10GBX_STATUS_REG = 10gbx_status_reg * */ typedef union { uint64_t u64; struct cvmx_pcsxx_10gbx_status_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_13_63 : 51; uint64_t alignd : 1; /**< 1=Lane alignment achieved, 0=Lanes not aligned */ uint64_t pattst : 1; /**< Always at 0, no pattern testing capability */ uint64_t reserved_4_10 : 7; uint64_t l3sync : 1; /**< 1=Rcv lane 3 code grp synchronized, 0=not sync'ed */ uint64_t l2sync : 1; /**< 1=Rcv lane 2 code grp synchronized, 0=not sync'ed */ uint64_t l1sync : 1; /**< 1=Rcv lane 1 code grp synchronized, 0=not sync'ed */ uint64_t l0sync : 1; /**< 1=Rcv lane 0 code grp synchronized, 0=not sync'ed */ #else uint64_t l0sync : 1; uint64_t l1sync : 1; uint64_t l2sync : 1; uint64_t l3sync : 1; uint64_t reserved_4_10 : 7; uint64_t pattst : 1; uint64_t alignd : 1; uint64_t reserved_13_63 : 51; #endif } s; struct cvmx_pcsxx_10gbx_status_reg_s cn52xx; struct cvmx_pcsxx_10gbx_status_reg_s cn52xxp1; struct cvmx_pcsxx_10gbx_status_reg_s cn56xx; struct cvmx_pcsxx_10gbx_status_reg_s cn56xxp1; } cvmx_pcsxx_10gbx_status_reg_t; /** * cvmx_pcsx#_bist_status_reg * * NOTE: Logic Analyzer is enabled with LA_EN for xaui only. PKT_SZ is effective only when LA_EN=1 * For normal operation(xaui), this bit must be 0. The dropped lane is used to send rxc[3:0]. * See pcs.csr for sgmii/1000Base-X logic analyzer mode. 
* For full description see document at .../rtl/pcs/readme_logic_analyzer.txt * * * PCSX Bist Status Register */ typedef union { uint64_t u64; struct cvmx_pcsxx_bist_status_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t bist_status : 1; /**< 1=bist failure, 0=bisted memory ok or bist in progress pcsx.tx_sm.drf8x36m1_async_bist */ #else uint64_t bist_status : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_pcsxx_bist_status_reg_s cn52xx; struct cvmx_pcsxx_bist_status_reg_s cn52xxp1; struct cvmx_pcsxx_bist_status_reg_s cn56xx; struct cvmx_pcsxx_bist_status_reg_s cn56xxp1; } cvmx_pcsxx_bist_status_reg_t; /** * cvmx_pcsx#_bit_lock_status_reg * * LN_SWAP for XAUI is to simplify interconnection layout between devices * * * PCSX Bit Lock Status Register */ typedef union { uint64_t u64; struct cvmx_pcsxx_bit_lock_status_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t bitlck3 : 1; /**< Receive Lane 3 bit lock status */ uint64_t bitlck2 : 1; /**< Receive Lane 2 bit lock status */ uint64_t bitlck1 : 1; /**< Receive Lane 1 bit lock status */ uint64_t bitlck0 : 1; /**< Receive Lane 0 bit lock status */ #else uint64_t bitlck0 : 1; uint64_t bitlck1 : 1; uint64_t bitlck2 : 1; uint64_t bitlck3 : 1; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_pcsxx_bit_lock_status_reg_s cn52xx; struct cvmx_pcsxx_bit_lock_status_reg_s cn52xxp1; struct cvmx_pcsxx_bit_lock_status_reg_s cn56xx; struct cvmx_pcsxx_bit_lock_status_reg_s cn56xxp1; } cvmx_pcsxx_bit_lock_status_reg_t; /** * cvmx_pcsx#_control1_reg * * NOTE: Logic Analyzer is enabled with LA_EN for the specified PCS lane only. PKT_SZ is effective only when LA_EN=1 * For normal operation(sgmii or 1000Base-X), this bit must be 0. * See pcsx.csr for xaui logic analyzer mode. * For full description see document at .../rtl/pcs/readme_logic_analyzer.txt * * * PCSX regs follow IEEE Std 802.3-2005, Section: 45.2.3 * * * PCSX_CONTROL1_REG = Control Register1 */ typedef union { uint64_t u64; struct cvmx_pcsxx_control1_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t reset : 1; /**< 1=SW PCSX Reset, the bit will return to 0 after pcs has been reset. Takes 32 eclk cycles to reset pcs 0=Normal operation */ uint64_t loopbck1 : 1; /**< 0=normal operation, 1=internal loopback mode xgmii tx data received from gmx tx port is returned back into gmx, xgmii rx port. */ uint64_t spdsel1 : 1; /**< See bit 6 description */ uint64_t reserved_12_12 : 1; uint64_t lo_pwr : 1; /**< The status of this bit has no effect on operation of the PCS sublayer. */ uint64_t reserved_7_10 : 4; uint64_t spdsel0 : 1; /**< SPDSEL1 and SPDSEL0 are always at 1'b1. Write has no effect. 
[<6>, <13>]Link Speed selection 1 1 Bits 5:2 select speed */ uint64_t spd : 4; /**< Always select 10Gb/s, writes have no effect */ uint64_t reserved_0_1 : 2; #else uint64_t reserved_0_1 : 2; uint64_t spd : 4; uint64_t spdsel0 : 1; uint64_t reserved_7_10 : 4; uint64_t lo_pwr : 1; uint64_t reserved_12_12 : 1; uint64_t spdsel1 : 1; uint64_t loopbck1 : 1; uint64_t reset : 1; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_pcsxx_control1_reg_s cn52xx; struct cvmx_pcsxx_control1_reg_s cn52xxp1; struct cvmx_pcsxx_control1_reg_s cn56xx; struct cvmx_pcsxx_control1_reg_s cn56xxp1; } cvmx_pcsxx_control1_reg_t; /** * cvmx_pcsx#_control2_reg * * PCSX_CONTROL2_REG = Control Register2 * */ typedef union { uint64_t u64; struct cvmx_pcsxx_control2_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t type : 2; /**< Always 2'b01, 10GBASE-X only supported */ #else uint64_t type : 2; uint64_t reserved_2_63 : 62; #endif } s; struct cvmx_pcsxx_control2_reg_s cn52xx; struct cvmx_pcsxx_control2_reg_s cn52xxp1; struct cvmx_pcsxx_control2_reg_s cn56xx; struct cvmx_pcsxx_control2_reg_s cn56xxp1; } cvmx_pcsxx_control2_reg_t; /** * cvmx_pcsx#_int_en_reg * * PCSX Interrupt Enable Register * */ typedef union { uint64_t u64; struct cvmx_pcsxx_int_en_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_6_63 : 58; uint64_t algnlos_en : 1; /**< Enable ALGNLOS interrupt */ uint64_t synlos_en : 1; /**< Enable SYNLOS interrupt */ uint64_t bitlckls_en : 1; /**< Enable BITLCKLS interrupt */ uint64_t rxsynbad_en : 1; /**< Enable RXSYNBAD interrupt */ uint64_t rxbad_en : 1; /**< Enable RXBAD interrupt */ uint64_t txflt_en : 1; /**< Enable TXFLT interrupt */ #else uint64_t txflt_en : 1; uint64_t rxbad_en : 1; uint64_t rxsynbad_en : 1; uint64_t bitlckls_en : 1; uint64_t synlos_en : 1; uint64_t algnlos_en : 1; uint64_t reserved_6_63 : 58; #endif } s; struct cvmx_pcsxx_int_en_reg_s cn52xx; struct cvmx_pcsxx_int_en_reg_s cn52xxp1; struct cvmx_pcsxx_int_en_reg_s cn56xx; struct cvmx_pcsxx_int_en_reg_s cn56xxp1; } cvmx_pcsxx_int_en_reg_t; /** * cvmx_pcsx#_int_reg * * PCSX Interrupt Register * */ typedef union { uint64_t u64; struct cvmx_pcsxx_int_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_6_63 : 58; uint64_t algnlos : 1; /**< Set when XAUI lanes lose alignment */ uint64_t synlos : 1; /**< Set when Code group sync lost on 1 or more lanes */ uint64_t bitlckls : 1; /**< Set when Bit lock lost on 1 or more xaui lanes */ uint64_t rxsynbad : 1; /**< Set when RX code grp sync st machine in bad state in one of the 4 xaui lanes */ uint64_t rxbad : 1; /**< Set when RX state machine in bad state */ uint64_t txflt : 1; /**< None defined at this time, always 0x0 */ #else uint64_t txflt : 1; uint64_t rxbad : 1; uint64_t rxsynbad : 1; uint64_t bitlckls : 1; uint64_t synlos : 1; uint64_t algnlos : 1; uint64_t reserved_6_63 : 58; #endif } s; struct cvmx_pcsxx_int_reg_s cn52xx; struct cvmx_pcsxx_int_reg_s cn52xxp1; struct cvmx_pcsxx_int_reg_s cn56xx; struct cvmx_pcsxx_int_reg_s cn56xxp1; } cvmx_pcsxx_int_reg_t; /** * cvmx_pcsx#_log_anl_reg * * PCSX Logic Analyzer Register * */ typedef union { uint64_t u64; struct cvmx_pcsxx_log_anl_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_7_63 : 57; uint64_t enc_mode : 1; /**< 1=send xaui encoded data, 0=send xaui raw data to GMX See .../rtl/pcs/readme_logic_analyzer.txt for details */ uint64_t drop_ln : 2; /**< xaui lane# to drop from logic analyzer packets [<5>, <4>] Drop lane \# 0 0 Drop lane 0 data 0 1 Drop lane 1 data 1 0 Drop lane 2 data 1 1 Drop 
lane 3 data */ uint64_t lafifovfl : 1; /**< 1=logic analyser fif overflowed one or more times during packetization. Write 1 to clear this bit */ uint64_t la_en : 1; /**< 1= Logic Analyzer enabled, 0=Logic Analyzer disabled */ uint64_t pkt_sz : 2; /**< [<1>, <0>] Logic Analyzer Packet Size 0 0 Packet size 1k bytes 0 1 Packet size 4k bytes 1 0 Packet size 8k bytes 1 1 Packet size 16k bytes */ #else uint64_t pkt_sz : 2; uint64_t la_en : 1; uint64_t lafifovfl : 1; uint64_t drop_ln : 2; uint64_t enc_mode : 1; uint64_t reserved_7_63 : 57; #endif } s; struct cvmx_pcsxx_log_anl_reg_s cn52xx; struct cvmx_pcsxx_log_anl_reg_s cn52xxp1; struct cvmx_pcsxx_log_anl_reg_s cn56xx; struct cvmx_pcsxx_log_anl_reg_s cn56xxp1; } cvmx_pcsxx_log_anl_reg_t; /** * cvmx_pcsx#_misc_ctl_reg * * RX lane polarity vector [3:0] = XOR_RXPLRT<9:6> ^ [4[RXPLRT<1>]]; * * TX lane polarity vector [3:0] = XOR_TXPLRT<5:2> ^ [4[TXPLRT<0>]]; * * In short keep <1:0> to 2'b00, and use <5:2> and <9:6> fields to define per lane polarities * * * * PCSX Misc Control Register */ typedef union { uint64_t u64; struct cvmx_pcsxx_misc_ctl_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t tx_swap : 1; /**< 0=do not swap xaui lanes going out to qlm's 1=swap lanes 3 <-> 0 and 2 <-> 1 */ uint64_t rx_swap : 1; /**< 0=do not swap xaui lanes coming in from qlm's 1=swap lanes 3 <-> 0 and 2 <-> 1 */ uint64_t xaui : 1; /**< 1=XAUI mode selected, 0=not XAUI mode selected This bit represents pi_qlm1/3_cfg[1:0] pin status */ uint64_t gmxeno : 1; /**< GMX port enable override, GMX en/dis status is held during data packet reception. */ #else uint64_t gmxeno : 1; uint64_t xaui : 1; uint64_t rx_swap : 1; uint64_t tx_swap : 1; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_pcsxx_misc_ctl_reg_s cn52xx; struct cvmx_pcsxx_misc_ctl_reg_s cn52xxp1; struct cvmx_pcsxx_misc_ctl_reg_s cn56xx; struct cvmx_pcsxx_misc_ctl_reg_s cn56xxp1; } cvmx_pcsxx_misc_ctl_reg_t; /** * cvmx_pcsx#_rx_sync_states_reg * * PCSX_RX_SYNC_STATES_REG = Receive Sync States Register * */ typedef union { uint64_t u64; struct cvmx_pcsxx_rx_sync_states_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t sync3st : 4; /**< Receive lane 3 code grp sync state machine state */ uint64_t sync2st : 4; /**< Receive lane 2 code grp sync state machine state */ uint64_t sync1st : 4; /**< Receive lane 1 code grp sync state machine state */ uint64_t sync0st : 4; /**< Receive lane 0 code grp sync state machine state */ #else uint64_t sync0st : 4; uint64_t sync1st : 4; uint64_t sync2st : 4; uint64_t sync3st : 4; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_pcsxx_rx_sync_states_reg_s cn52xx; struct cvmx_pcsxx_rx_sync_states_reg_s cn52xxp1; struct cvmx_pcsxx_rx_sync_states_reg_s cn56xx; struct cvmx_pcsxx_rx_sync_states_reg_s cn56xxp1; } cvmx_pcsxx_rx_sync_states_reg_t; /** * cvmx_pcsx#_spd_abil_reg * * PCSX_SPD_ABIL_REG = Speed ability register * */ typedef union { uint64_t u64; struct cvmx_pcsxx_spd_abil_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t tenpasst : 1; /**< Always 0, no 10PASS-TS/2BASE-TL capability support */ uint64_t tengb : 1; /**< Always 1, 10Gb/s supported */ #else uint64_t tengb : 1; uint64_t tenpasst : 1; uint64_t reserved_2_63 : 62; #endif } s; struct cvmx_pcsxx_spd_abil_reg_s cn52xx; struct cvmx_pcsxx_spd_abil_reg_s cn52xxp1; struct cvmx_pcsxx_spd_abil_reg_s cn56xx; struct cvmx_pcsxx_spd_abil_reg_s cn56xxp1; } cvmx_pcsxx_spd_abil_reg_t; /** * cvmx_pcsx#_status1_reg * * PCSX_STATUS1_REG = 
Status Register1 * */ typedef union { uint64_t u64; struct cvmx_pcsxx_status1_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t flt : 1; /**< 1=Fault condition detected, 0=No fault condition This bit is a logical OR of Status2 reg bits 11,10 */ uint64_t reserved_3_6 : 4; uint64_t rcv_lnk : 1; /**< 1=Receive Link up, 0=Receive Link down Latching Low version of r_10gbx_status_reg[12], Link down status continues until SW read. */ uint64_t lpable : 1; /**< Always set to 1 for Low Power ablility indication */ uint64_t reserved_0_0 : 1; #else uint64_t reserved_0_0 : 1; uint64_t lpable : 1; uint64_t rcv_lnk : 1; uint64_t reserved_3_6 : 4; uint64_t flt : 1; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_pcsxx_status1_reg_s cn52xx; struct cvmx_pcsxx_status1_reg_s cn52xxp1; struct cvmx_pcsxx_status1_reg_s cn56xx; struct cvmx_pcsxx_status1_reg_s cn56xxp1; } cvmx_pcsxx_status1_reg_t; /** * cvmx_pcsx#_status2_reg * * PCSX_STATUS2_REG = Status Register2 * */ typedef union { uint64_t u64; struct cvmx_pcsxx_status2_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t dev : 2; /**< Always at 2'b10, means a Device present at the addr */ uint64_t reserved_12_13 : 2; uint64_t xmtflt : 1; /**< 0=No xmit fault, 1=xmit fault. Implements latching High function until SW read. */ uint64_t rcvflt : 1; /**< 0=No rcv fault, 1=rcv fault. Implements latching High function until SW read */ uint64_t reserved_3_9 : 7; uint64_t tengb_w : 1; /**< Always 0, no 10GBASE-W capability */ uint64_t tengb_x : 1; /**< Always 1, 10GBASE-X capable */ uint64_t tengb_r : 1; /**< Always 0, no 10GBASE-R capability */ #else uint64_t tengb_r : 1; uint64_t tengb_x : 1; uint64_t tengb_w : 1; uint64_t reserved_3_9 : 7; uint64_t rcvflt : 1; uint64_t xmtflt : 1; uint64_t reserved_12_13 : 2; uint64_t dev : 2; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_pcsxx_status2_reg_s cn52xx; struct cvmx_pcsxx_status2_reg_s cn52xxp1; struct cvmx_pcsxx_status2_reg_s cn56xx; struct cvmx_pcsxx_status2_reg_s cn56xxp1; } cvmx_pcsxx_status2_reg_t; /** * cvmx_pcsx#_tx_rx_polarity_reg * * PCSX_POLARITY_REG = TX_RX polarity reg * */ typedef union { uint64_t u64; struct cvmx_pcsxx_tx_rx_polarity_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_10_63 : 54; uint64_t xor_rxplrt : 4; /**< Per lane RX polarity control */ uint64_t xor_txplrt : 4; /**< Per lane TX polarity control */ uint64_t rxplrt : 1; /**< 1 is inverted polarity, 0 is normal polarity */ uint64_t txplrt : 1; /**< 1 is inverted polarity, 0 is normal polarity */ #else uint64_t txplrt : 1; uint64_t rxplrt : 1; uint64_t xor_txplrt : 4; uint64_t xor_rxplrt : 4; uint64_t reserved_10_63 : 54; #endif } s; struct cvmx_pcsxx_tx_rx_polarity_reg_s cn52xx; struct cvmx_pcsxx_tx_rx_polarity_reg_cn52xxp1 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t rxplrt : 1; /**< 1 is inverted polarity, 0 is normal polarity */ uint64_t txplrt : 1; /**< 1 is inverted polarity, 0 is normal polarity */ #else uint64_t txplrt : 1; uint64_t rxplrt : 1; uint64_t reserved_2_63 : 62; #endif } cn52xxp1; struct cvmx_pcsxx_tx_rx_polarity_reg_s cn56xx; struct cvmx_pcsxx_tx_rx_polarity_reg_cn52xxp1 cn56xxp1; } cvmx_pcsxx_tx_rx_polarity_reg_t; /** * cvmx_pcsx#_tx_rx_states_reg * * PCSX_TX_RX_STATES_REG = Transmit Receive States Register * */ typedef union { uint64_t u64; struct cvmx_pcsxx_tx_rx_states_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_14_63 : 50; uint64_t term_err : 1; /**< 1=Check end function detected error in packet 
terminate ||T|| column or the one after it */ uint64_t syn3bad : 1; /**< 1=lane 3 code grp sync state machine in bad state */ uint64_t syn2bad : 1; /**< 1=lane 2 code grp sync state machine in bad state */ uint64_t syn1bad : 1; /**< 1=lane 1 code grp sync state machine in bad state */ uint64_t syn0bad : 1; /**< 1=lane 0 code grp sync state machine in bad state */ uint64_t rxbad : 1; /**< 1=Rcv state machine in a bad state, HW malfunction */ uint64_t algn_st : 3; /**< Lane alignment state machine state */ uint64_t rx_st : 2; /**< Receive state machine state */ uint64_t tx_st : 3; /**< Transmit state machine state */ #else uint64_t tx_st : 3; uint64_t rx_st : 2; uint64_t algn_st : 3; uint64_t rxbad : 1; uint64_t syn0bad : 1; uint64_t syn1bad : 1; uint64_t syn2bad : 1; uint64_t syn3bad : 1; uint64_t term_err : 1; uint64_t reserved_14_63 : 50; #endif } s; struct cvmx_pcsxx_tx_rx_states_reg_s cn52xx; struct cvmx_pcsxx_tx_rx_states_reg_cn52xxp1 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_13_63 : 51; uint64_t syn3bad : 1; /**< 1=lane 3 code grp sync state machine in bad state */ uint64_t syn2bad : 1; /**< 1=lane 2 code grp sync state machine in bad state */ uint64_t syn1bad : 1; /**< 1=lane 1 code grp sync state machine in bad state */ uint64_t syn0bad : 1; /**< 1=lane 0 code grp sync state machine in bad state */ uint64_t rxbad : 1; /**< 1=Rcv state machine in a bad state, HW malfunction */ uint64_t algn_st : 3; /**< Lane alignment state machine state */ uint64_t rx_st : 2; /**< Receive state machine state */ uint64_t tx_st : 3; /**< Transmit state machine state */ #else uint64_t tx_st : 3; uint64_t rx_st : 2; uint64_t algn_st : 3; uint64_t rxbad : 1; uint64_t syn0bad : 1; uint64_t syn1bad : 1; uint64_t syn2bad : 1; uint64_t syn3bad : 1; uint64_t reserved_13_63 : 51; #endif } cn52xxp1; struct cvmx_pcsxx_tx_rx_states_reg_s cn56xx; struct cvmx_pcsxx_tx_rx_states_reg_cn52xxp1 cn56xxp1; } cvmx_pcsxx_tx_rx_states_reg_t; /** * cvmx_pesc#_bist_status * * PESC_BIST_STATUS = PESC Bist Status * * Contains the BIST status bits for the PESC's memories. */ typedef union { uint64_t u64; struct cvmx_pescx_bist_status_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_13_63 : 51; uint64_t rqdata5 : 1; /**< Rx Queue Data Memory5. */ uint64_t ctlp_or : 1; /**< C-TLP Order Fifo. */ uint64_t ntlp_or : 1; /**< N-TLP Order Fifo. */ uint64_t ptlp_or : 1; /**< P-TLP Order Fifo. */ uint64_t retry : 1; /**< Retry Buffer. */ uint64_t rqdata0 : 1; /**< Rx Queue Data Memory0. */ uint64_t rqdata1 : 1; /**< Rx Queue Data Memory1. */ uint64_t rqdata2 : 1; /**< Rx Queue Data Memory2. */ uint64_t rqdata3 : 1; /**< Rx Queue Data Memory3. */ uint64_t rqdata4 : 1; /**< Rx Queue Data Memory4. */ uint64_t rqhdr1 : 1; /**< Rx Queue Header1. */ uint64_t rqhdr0 : 1; /**< Rx Queue Header0. */ uint64_t sot : 1; /**< SOT Buffer. */ #else uint64_t sot : 1; uint64_t rqhdr0 : 1; uint64_t rqhdr1 : 1; uint64_t rqdata4 : 1; uint64_t rqdata3 : 1; uint64_t rqdata2 : 1; uint64_t rqdata1 : 1; uint64_t rqdata0 : 1; uint64_t retry : 1; uint64_t ptlp_or : 1; uint64_t ntlp_or : 1; uint64_t ctlp_or : 1; uint64_t rqdata5 : 1; uint64_t reserved_13_63 : 51; #endif } s; struct cvmx_pescx_bist_status_s cn52xx; struct cvmx_pescx_bist_status_cn52xxp1 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t ctlp_or : 1; /**< C-TLP Order Fifo. */ uint64_t ntlp_or : 1; /**< N-TLP Order Fifo. */ uint64_t ptlp_or : 1; /**< P-TLP Order Fifo. */ uint64_t retry : 1; /**< Retry Buffer.
*/ uint64_t rqdata0 : 1; /**< Rx Queue Data Memory0. */ uint64_t rqdata1 : 1; /**< Rx Queue Data Memory1. */ uint64_t rqdata2 : 1; /**< Rx Queue Data Memory2. */ uint64_t rqdata3 : 1; /**< Rx Queue Data Memory3. */ uint64_t rqdata4 : 1; /**< Rx Queue Data Memory4. */ uint64_t rqhdr1 : 1; /**< Rx Queue Header1. */ uint64_t rqhdr0 : 1; /**< Rx Queue Header0. */ uint64_t sot : 1; /**< SOT Buffer. */ #else uint64_t sot : 1; uint64_t rqhdr0 : 1; uint64_t rqhdr1 : 1; uint64_t rqdata4 : 1; uint64_t rqdata3 : 1; uint64_t rqdata2 : 1; uint64_t rqdata1 : 1; uint64_t rqdata0 : 1; uint64_t retry : 1; uint64_t ptlp_or : 1; uint64_t ntlp_or : 1; uint64_t ctlp_or : 1; uint64_t reserved_12_63 : 52; #endif } cn52xxp1; struct cvmx_pescx_bist_status_s cn56xx; struct cvmx_pescx_bist_status_cn52xxp1 cn56xxp1; } cvmx_pescx_bist_status_t; /** * cvmx_pesc#_bist_status2 * * PESC(0..1)_BIST_STATUS2 = PESC BIST Status Register * * Results from BIST runs of PESC's memories. */ typedef union { uint64_t u64; struct cvmx_pescx_bist_status2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_14_63 : 50; uint64_t cto_p2e : 1; /**< BIST Status for the cto_p2e_fifo */ uint64_t e2p_cpl : 1; /**< BIST Status for the e2p_cpl_fifo */ uint64_t e2p_n : 1; /**< BIST Status for the e2p_n_fifo */ uint64_t e2p_p : 1; /**< BIST Status for the e2p_p_fifo */ uint64_t e2p_rsl : 1; /**< BIST Status for the e2p_rsl__fifo */ uint64_t dbg_p2e : 1; /**< BIST Status for the dbg_p2e_fifo */ uint64_t peai_p2e : 1; /**< BIST Status for the peai__pesc_fifo */ uint64_t rsl_p2e : 1; /**< BIST Status for the rsl_p2e_fifo */ uint64_t pef_tpf1 : 1; /**< BIST Status for the pef_tlp_p_fifo1 */ uint64_t pef_tpf0 : 1; /**< BIST Status for the pef_tlp_p_fifo0 */ uint64_t pef_tnf : 1; /**< BIST Status for the pef_tlp_n_fifo */ uint64_t pef_tcf1 : 1; /**< BIST Status for the pef_tlp_cpl_fifo1 */ uint64_t pef_tc0 : 1; /**< BIST Status for the pef_tlp_cpl_fifo0 */ uint64_t ppf : 1; /**< BIST Status for the ppf_fifo */ #else uint64_t ppf : 1; uint64_t pef_tc0 : 1; uint64_t pef_tcf1 : 1; uint64_t pef_tnf : 1; uint64_t pef_tpf0 : 1; uint64_t pef_tpf1 : 1; uint64_t rsl_p2e : 1; uint64_t peai_p2e : 1; uint64_t dbg_p2e : 1; uint64_t e2p_rsl : 1; uint64_t e2p_p : 1; uint64_t e2p_n : 1; uint64_t e2p_cpl : 1; uint64_t cto_p2e : 1; uint64_t reserved_14_63 : 50; #endif } s; struct cvmx_pescx_bist_status2_s cn52xx; struct cvmx_pescx_bist_status2_s cn52xxp1; struct cvmx_pescx_bist_status2_s cn56xx; struct cvmx_pescx_bist_status2_s cn56xxp1; } cvmx_pescx_bist_status2_t; /** * cvmx_pesc#_cfg_rd * * PESC_CFG_RD = PESC Configuration Read * * Allows read access to the configuration in the PCIe Core. */ typedef union { uint64_t u64; struct cvmx_pescx_cfg_rd_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t data : 32; /**< Data. */ uint64_t addr : 32; /**< Address to read. A write to this register starts a read operation. */ #else uint64_t addr : 32; uint64_t data : 32; #endif } s; struct cvmx_pescx_cfg_rd_s cn52xx; struct cvmx_pescx_cfg_rd_s cn52xxp1; struct cvmx_pescx_cfg_rd_s cn56xx; struct cvmx_pescx_cfg_rd_s cn56xxp1; } cvmx_pescx_cfg_rd_t; /** * cvmx_pesc#_cfg_wr * * PESC_CFG_WR = PESC Configuration Write * * Allows write access to the configuration in the PCIe Core. */ typedef union { uint64_t u64; struct cvmx_pescx_cfg_wr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t data : 32; /**< Data to write. A write to this register starts a write operation. */ uint64_t addr : 32; /**< Address to write. A write to this register starts a write operation. 
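        For example, writing 32-bit config space offset 0x10 is a hedged
        sketch (assumes the generic cvmx_write_csr() helper and the
        CVMX_PESCX_CFG_WR(block) address macro from the companion address
        header; the offset and value are illustrative only):

          cvmx_pescx_cfg_wr_t wr;
          wr.u64 = 0;
          wr.s.addr = 0x10;        // config space address to write
          wr.s.data = 0x12345678;  // value to write
          cvmx_write_csr(CVMX_PESCX_CFG_WR(0), wr.u64);  // starts the write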
*/ #else uint64_t addr : 32; uint64_t data : 32; #endif } s; struct cvmx_pescx_cfg_wr_s cn52xx; struct cvmx_pescx_cfg_wr_s cn52xxp1; struct cvmx_pescx_cfg_wr_s cn56xx; struct cvmx_pescx_cfg_wr_s cn56xxp1; } cvmx_pescx_cfg_wr_t; /** * cvmx_pesc#_cpl_lut_valid * * PESC_CPL_LUT_VALID = PESC Completion Lookup Table Valid * * Bit set for outstanding tag read. */ typedef union { uint64_t u64; struct cvmx_pescx_cpl_lut_valid_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t tag : 32; /**< Bit vector; a set bit corresponds to an outstanding tag expecting a completion. */ #else uint64_t tag : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_pescx_cpl_lut_valid_s cn52xx; struct cvmx_pescx_cpl_lut_valid_s cn52xxp1; struct cvmx_pescx_cpl_lut_valid_s cn56xx; struct cvmx_pescx_cpl_lut_valid_s cn56xxp1; } cvmx_pescx_cpl_lut_valid_t; /** * cvmx_pesc#_ctl_status * * PESC_CTL_STATUS = PESC Control Status * * General control and status of the PESC. */ typedef union { uint64_t u64; struct cvmx_pescx_ctl_status_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_28_63 : 36; uint64_t dnum : 5; /**< Primary bus device number. */ uint64_t pbus : 8; /**< Primary bus number. */ uint64_t qlm_cfg : 2; /**< The QLM configuration pad bits. */ uint64_t lane_swp : 1; /**< Lane Swap. For PEDC1, when '0' NO LANE SWAP, when '1' enables LANE SWAP. This bit has no effect on PEDC0. This bit should be set before enabling PEDC1. */ uint64_t pm_xtoff : 1; /**< When WRITTEN with a '1' a single cycle pulse is sent to the PCIe core pm_xmt_turnoff port. RC mode. */ uint64_t pm_xpme : 1; /**< When WRITTEN with a '1' a single cycle pulse is sent to the PCIe core pm_xmt_pme port. EP mode. */ uint64_t ob_p_cmd : 1; /**< When WRITTEN with a '1' a single cycle pulse is sent to the PCIe core outband_pwrup_cmd port. EP mode. */ uint64_t reserved_7_8 : 2; uint64_t nf_ecrc : 1; /**< Do not forward peer-to-peer ECRC TLPs. */ uint64_t dly_one : 1; /**< When set the output client state machines will wait one cycle before starting a new TLP out. */ uint64_t lnk_enb : 1; /**< When set '1' the link is enabled, when '0' the link is disabled. This bit is only active when in RC mode. */ uint64_t ro_ctlp : 1; /**< When set '1' C-TLPs that have the RO bit set will not wait for P-TLPs that normally would be sent first. */ uint64_t reserved_2_2 : 1; uint64_t inv_ecrc : 1; /**< When '1' causes the LSB of the ECRC to be inverted. */ uint64_t inv_lcrc : 1; /**< When '1' causes the LSB of the LCRC to be inverted. */ #else uint64_t inv_lcrc : 1; uint64_t inv_ecrc : 1; uint64_t reserved_2_2 : 1; uint64_t ro_ctlp : 1; uint64_t lnk_enb : 1; uint64_t dly_one : 1; uint64_t nf_ecrc : 1; uint64_t reserved_7_8 : 2; uint64_t ob_p_cmd : 1; uint64_t pm_xpme : 1; uint64_t pm_xtoff : 1; uint64_t lane_swp : 1; uint64_t qlm_cfg : 2; uint64_t pbus : 8; uint64_t dnum : 5; uint64_t reserved_28_63 : 36; #endif } s; struct cvmx_pescx_ctl_status_s cn52xx; struct cvmx_pescx_ctl_status_s cn52xxp1; struct cvmx_pescx_ctl_status_cn56xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_28_63 : 36; uint64_t dnum : 5; /**< Primary bus device number. */ uint64_t pbus : 8; /**< Primary bus number. */ uint64_t qlm_cfg : 2; /**< The QLM configuration pad bits. */ uint64_t reserved_12_12 : 1; uint64_t pm_xtoff : 1; /**< When WRITTEN with a '1' a single cycle pulse is sent to the PCIe core pm_xmt_turnoff port. RC mode. */ uint64_t pm_xpme : 1; /**< When WRITTEN with a '1' a single cycle pulse is sent to the PCIe core pm_xmt_pme port. EP mode.
*/ uint64_t ob_p_cmd : 1; /**< When WRITTEN with a '1' a single cycle pulse is sent to the PCIe core outband_pwrup_cmd port. EP mode. */ uint64_t reserved_7_8 : 2; uint64_t nf_ecrc : 1; /**< Do not forward peer-to-peer ECRC TLPs. */ uint64_t dly_one : 1; /**< When set the output client state machines will wait one cycle before starting a new TLP out. */ uint64_t lnk_enb : 1; /**< When set '1' the link is enabled, when '0' the link is disabled. This bit is only active when in RC mode. */ uint64_t ro_ctlp : 1; /**< When set '1' C-TLPs that have the RO bit set will not wait for P-TLPs that normally would be sent first. */ uint64_t reserved_2_2 : 1; uint64_t inv_ecrc : 1; /**< When '1' causes the LSB of the ECRC to be inverted. */ uint64_t inv_lcrc : 1; /**< When '1' causes the LSB of the LCRC to be inverted. */ #else uint64_t inv_lcrc : 1; uint64_t inv_ecrc : 1; uint64_t reserved_2_2 : 1; uint64_t ro_ctlp : 1; uint64_t lnk_enb : 1; uint64_t dly_one : 1; uint64_t nf_ecrc : 1; uint64_t reserved_7_8 : 2; uint64_t ob_p_cmd : 1; uint64_t pm_xpme : 1; uint64_t pm_xtoff : 1; uint64_t reserved_12_12 : 1; uint64_t qlm_cfg : 2; uint64_t pbus : 8; uint64_t dnum : 5; uint64_t reserved_28_63 : 36; #endif } cn56xx; struct cvmx_pescx_ctl_status_cn56xx cn56xxp1; } cvmx_pescx_ctl_status_t; /** * cvmx_pesc#_ctl_status2 * * PESC(0..1)_CTL_STATUS2 = PESC Control Status2 * * Additional control and status of the PESC. */ typedef union { uint64_t u64; struct cvmx_pescx_ctl_status2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t pclk_run : 1; /**< When the pce_clk is running this bit will be '1'. Writing a '1' to this location will cause the bit to be cleared, but if the pce_clk is running this bit will be re-set. */ uint64_t pcierst : 1; /**< Set to '1' when PCIe is in reset. */ #else uint64_t pcierst : 1; uint64_t pclk_run : 1; uint64_t reserved_2_63 : 62; #endif } s; struct cvmx_pescx_ctl_status2_s cn52xx; struct cvmx_pescx_ctl_status2_cn52xxp1 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t pcierst : 1; /**< Set to '1' when PCIe is in reset. */ #else uint64_t pcierst : 1; uint64_t reserved_1_63 : 63; #endif } cn52xxp1; struct cvmx_pescx_ctl_status2_s cn56xx; struct cvmx_pescx_ctl_status2_cn52xxp1 cn56xxp1; } cvmx_pescx_ctl_status2_t; /** * cvmx_pesc#_dbg_info * * PESC(0..1)_DBG_INFO = PESC Debug Information * * General debug info. */ typedef union { uint64_t u64; struct cvmx_pescx_dbg_info_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_31_63 : 33; uint64_t ecrc_e : 1; /**< Received an ECRC error. radm_ecrc_err */ uint64_t rawwpp : 1; /**< Received a write with poisoned payload radm_rcvd_wreq_poisoned */ uint64_t racpp : 1; /**< Received a completion with poisoned payload radm_rcvd_cpl_poisoned */ uint64_t ramtlp : 1; /**< Received a malformed TLP radm_mlf_tlp_err */ uint64_t rarwdns : 1; /**< Received a request which the device does not support radm_rcvd_ur_req */ uint64_t caar : 1; /**< Completer aborted a request radm_rcvd_ca_req This bit will never be set because Octeon does not generate Completer Aborts. */ uint64_t racca : 1; /**< Received a completion with CA status radm_rcvd_cpl_ca */ uint64_t racur : 1; /**< Received a completion with UR status radm_rcvd_cpl_ur */ uint64_t rauc : 1; /**< Received an unexpected completion radm_unexp_cpl_err */ uint64_t rqo : 1; /**< Receive queue overflow.
Normally happens only when flow control advertisements are ignored radm_qoverflow */ uint64_t fcuv : 1; /**< Flow Control Update Violation (opt. checks) int_xadm_fc_prot_err */ uint64_t rpe : 1; /**< When the PHY reports 8B/10B decode error (RxStatus = 3'b100) or disparity error (RxStatus = 3'b111), the signal rmlh_rcvd_err will be asserted. rmlh_rcvd_err */ uint64_t fcpvwt : 1; /**< Flow Control Protocol Violation (Watchdog Timer) rtlh_fc_prot_err */ uint64_t dpeoosd : 1; /**< DLLP protocol error (out of sequence DLLP) rdlh_prot_err */ uint64_t rtwdle : 1; /**< Received TLP with DataLink Layer Error rdlh_bad_tlp_err */ uint64_t rdwdle : 1; /**< Received DLLP with DataLink Layer Error rdlh_bad_dllp_err */ uint64_t mre : 1; /**< Max Retries Exceeded xdlh_replay_num_rlover_err */ uint64_t rte : 1; /**< Replay Timer Expired xdlh_replay_timeout_err This bit is set when the REPLAY_TIMER expires in the PCIE core. The probability of this bit being set will increase with the traffic load. */ uint64_t acto : 1; /**< A Completion Timeout Occurred pedc_radm_cpl_timeout */ uint64_t rvdm : 1; /**< Received Vendor-Defined Message pedc_radm_vendor_msg */ uint64_t rumep : 1; /**< Received Unlock Message (EP Mode Only) pedc_radm_msg_unlock */ uint64_t rptamrc : 1; /**< Received PME Turnoff Acknowledge Message (RC Mode only) pedc_radm_pm_to_ack */ uint64_t rpmerc : 1; /**< Received PME Message (RC Mode only) pedc_radm_pm_pme */ uint64_t rfemrc : 1; /**< Received Fatal Error Message (RC Mode only) pedc_radm_fatal_err Bit set when a message with ERR_FATAL is set. */ uint64_t rnfemrc : 1; /**< Received Non-Fatal Error Message (RC Mode only) pedc_radm_nonfatal_err */ uint64_t rcemrc : 1; /**< Received Correctable Error Message (RC Mode only) pedc_radm_correctable_err */ uint64_t rpoison : 1; /**< Received Poisoned TLP pedc__radm_trgt1_poisoned & pedc__radm_trgt1_hv */ uint64_t recrce : 1; /**< Received ECRC Error pedc_radm_trgt1_ecrc_err & pedc__radm_trgt1_eot */ uint64_t rtlplle : 1; /**< Received TLP has link layer error pedc_radm_trgt1_dllp_abort & pedc__radm_trgt1_eot */ uint64_t rtlpmal : 1; /**< Received TLP is malformed or a message. pedc_radm_trgt1_tlp_abort & pedc__radm_trgt1_eot If the core receives a MSG (or Vendor Message) this bit will be set. */ uint64_t spoison : 1; /**< Poisoned TLP sent peai__client0_tlp_ep & peai__client0_tlp_hv */ #else uint64_t spoison : 1; uint64_t rtlpmal : 1; uint64_t rtlplle : 1; uint64_t recrce : 1; uint64_t rpoison : 1; uint64_t rcemrc : 1; uint64_t rnfemrc : 1; uint64_t rfemrc : 1; uint64_t rpmerc : 1; uint64_t rptamrc : 1; uint64_t rumep : 1; uint64_t rvdm : 1; uint64_t acto : 1; uint64_t rte : 1; uint64_t mre : 1; uint64_t rdwdle : 1; uint64_t rtwdle : 1; uint64_t dpeoosd : 1; uint64_t fcpvwt : 1; uint64_t rpe : 1; uint64_t fcuv : 1; uint64_t rqo : 1; uint64_t rauc : 1; uint64_t racur : 1; uint64_t racca : 1; uint64_t caar : 1; uint64_t rarwdns : 1; uint64_t ramtlp : 1; uint64_t racpp : 1; uint64_t rawwpp : 1; uint64_t ecrc_e : 1; uint64_t reserved_31_63 : 33; #endif } s; struct cvmx_pescx_dbg_info_s cn52xx; struct cvmx_pescx_dbg_info_s cn52xxp1; struct cvmx_pescx_dbg_info_s cn56xx; struct cvmx_pescx_dbg_info_s cn56xxp1; } cvmx_pescx_dbg_info_t; /** * cvmx_pesc#_dbg_info_en * * PESC(0..1)_DBG_INFO_EN = PESC Debug Information Enable * * Allows PESC_DBG_INFO to generate interrupts when the corresponding enable bit is set.
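 *
 * For example (a hedged sketch assuming the generic cvmx_write_csr()
 * helper and the CVMX_PESCX_DBG_INFO_EN(block) address macro from the
 * companion address header), unmasking only the receive-queue-overflow
 * and replay-timer-expired sources:
 *
 *   cvmx_pescx_dbg_info_en_t en;
 *   en.u64 = 0;      // leave every other source masked
 *   en.s.rqo = 1;    // PESC_DBG_INFO[21] may now interrupt
 *   en.s.rte = 1;    // PESC_DBG_INFO[13] may now interrupt
 *   cvmx_write_csr(CVMX_PESCX_DBG_INFO_EN(0), en.u64);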
*/ typedef union { uint64_t u64; struct cvmx_pescx_dbg_info_en_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_31_63 : 33; uint64_t ecrc_e : 1; /**< Allows PESC_DBG_INFO[30] to generate an interrupt. */ uint64_t rawwpp : 1; /**< Allows PESC_DBG_INFO[29] to generate an interrupt. */ uint64_t racpp : 1; /**< Allows PESC_DBG_INFO[28] to generate an interrupt. */ uint64_t ramtlp : 1; /**< Allows PESC_DBG_INFO[27] to generate an interrupt. */ uint64_t rarwdns : 1; /**< Allows PESC_DBG_INFO[26] to generate an interrupt. */ uint64_t caar : 1; /**< Allows PESC_DBG_INFO[25] to generate an interrupt. */ uint64_t racca : 1; /**< Allows PESC_DBG_INFO[24] to generate an interrupt. */ uint64_t racur : 1; /**< Allows PESC_DBG_INFO[23] to generate an interrupt. */ uint64_t rauc : 1; /**< Allows PESC_DBG_INFO[22] to generate an interrupt. */ uint64_t rqo : 1; /**< Allows PESC_DBG_INFO[21] to generate an interrupt. */ uint64_t fcuv : 1; /**< Allows PESC_DBG_INFO[20] to generate an interrupt. */ uint64_t rpe : 1; /**< Allows PESC_DBG_INFO[19] to generate an interrupt. */ uint64_t fcpvwt : 1; /**< Allows PESC_DBG_INFO[18] to generate an interrupt. */ uint64_t dpeoosd : 1; /**< Allows PESC_DBG_INFO[17] to generate an interrupt. */ uint64_t rtwdle : 1; /**< Allows PESC_DBG_INFO[16] to generate an interrupt. */ uint64_t rdwdle : 1; /**< Allows PESC_DBG_INFO[15] to generate an interrupt. */ uint64_t mre : 1; /**< Allows PESC_DBG_INFO[14] to generate an interrupt. */ uint64_t rte : 1; /**< Allows PESC_DBG_INFO[13] to generate an interrupt. */ uint64_t acto : 1; /**< Allows PESC_DBG_INFO[12] to generate an interrupt. */ uint64_t rvdm : 1; /**< Allows PESC_DBG_INFO[11] to generate an interrupt. */ uint64_t rumep : 1; /**< Allows PESC_DBG_INFO[10] to generate an interrupt. */ uint64_t rptamrc : 1; /**< Allows PESC_DBG_INFO[9] to generate an interrupt. */ uint64_t rpmerc : 1; /**< Allows PESC_DBG_INFO[8] to generate an interrupt. */ uint64_t rfemrc : 1; /**< Allows PESC_DBG_INFO[7] to generate an interrupt. */ uint64_t rnfemrc : 1; /**< Allows PESC_DBG_INFO[6] to generate an interrupt. */ uint64_t rcemrc : 1; /**< Allows PESC_DBG_INFO[5] to generate an interrupt. */ uint64_t rpoison : 1; /**< Allows PESC_DBG_INFO[4] to generate an interrupt. */ uint64_t recrce : 1; /**< Allows PESC_DBG_INFO[3] to generate an interrupt. */ uint64_t rtlplle : 1; /**< Allows PESC_DBG_INFO[2] to generate an interrupt. */ uint64_t rtlpmal : 1; /**< Allows PESC_DBG_INFO[1] to generate an interrupt. */ uint64_t spoison : 1; /**< Allows PESC_DBG_INFO[0] to generate an interrupt. 
*/ #else uint64_t spoison : 1; uint64_t rtlpmal : 1; uint64_t rtlplle : 1; uint64_t recrce : 1; uint64_t rpoison : 1; uint64_t rcemrc : 1; uint64_t rnfemrc : 1; uint64_t rfemrc : 1; uint64_t rpmerc : 1; uint64_t rptamrc : 1; uint64_t rumep : 1; uint64_t rvdm : 1; uint64_t acto : 1; uint64_t rte : 1; uint64_t mre : 1; uint64_t rdwdle : 1; uint64_t rtwdle : 1; uint64_t dpeoosd : 1; uint64_t fcpvwt : 1; uint64_t rpe : 1; uint64_t fcuv : 1; uint64_t rqo : 1; uint64_t rauc : 1; uint64_t racur : 1; uint64_t racca : 1; uint64_t caar : 1; uint64_t rarwdns : 1; uint64_t ramtlp : 1; uint64_t racpp : 1; uint64_t rawwpp : 1; uint64_t ecrc_e : 1; uint64_t reserved_31_63 : 33; #endif } s; struct cvmx_pescx_dbg_info_en_s cn52xx; struct cvmx_pescx_dbg_info_en_s cn52xxp1; struct cvmx_pescx_dbg_info_en_s cn56xx; struct cvmx_pescx_dbg_info_en_s cn56xxp1; } cvmx_pescx_dbg_info_en_t; /** * cvmx_pesc#_diag_status * * PESC_DIAG_STATUS = PESC Diagnostic Status * * Selection control for the core's diagnostic bus. */ typedef union { uint64_t u64; struct cvmx_pescx_diag_status_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t pm_dst : 1; /**< Current power management DSTATE. */ uint64_t pm_stat : 1; /**< Power Management Status. */ uint64_t pm_en : 1; /**< Power Management Event Enable. */ uint64_t aux_en : 1; /**< Auxiliary Power Enable. */ #else uint64_t aux_en : 1; uint64_t pm_en : 1; uint64_t pm_stat : 1; uint64_t pm_dst : 1; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_pescx_diag_status_s cn52xx; struct cvmx_pescx_diag_status_s cn52xxp1; struct cvmx_pescx_diag_status_s cn56xx; struct cvmx_pescx_diag_status_s cn56xxp1; } cvmx_pescx_diag_status_t; /** * cvmx_pesc#_p2n_bar0_start * * PESC_P2N_BAR0_START = PESC PCIe to Npei BAR0 Start * * The starting address for addresses to be forwarded to the NPEI in RC Mode. */ typedef union { uint64_t u64; struct cvmx_pescx_p2n_bar0_start_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t addr : 50; /**< The starting address of the 16KB address space that is the BAR0 address space. */ uint64_t reserved_0_13 : 14; #else uint64_t reserved_0_13 : 14; uint64_t addr : 50; #endif } s; struct cvmx_pescx_p2n_bar0_start_s cn52xx; struct cvmx_pescx_p2n_bar0_start_s cn52xxp1; struct cvmx_pescx_p2n_bar0_start_s cn56xx; struct cvmx_pescx_p2n_bar0_start_s cn56xxp1; } cvmx_pescx_p2n_bar0_start_t; /** * cvmx_pesc#_p2n_bar1_start * * PESC_P2N_BAR1_START = PESC PCIe to Npei BAR1 Start * * The starting address for addresses to be forwarded to the NPEI in RC Mode. */ typedef union { uint64_t u64; struct cvmx_pescx_p2n_bar1_start_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t addr : 38; /**< The starting address of the 64KB address space that is the BAR1 address space. */ uint64_t reserved_0_25 : 26; #else uint64_t reserved_0_25 : 26; uint64_t addr : 38; #endif } s; struct cvmx_pescx_p2n_bar1_start_s cn52xx; struct cvmx_pescx_p2n_bar1_start_s cn52xxp1; struct cvmx_pescx_p2n_bar1_start_s cn56xx; struct cvmx_pescx_p2n_bar1_start_s cn56xxp1; } cvmx_pescx_p2n_bar1_start_t; /** * cvmx_pesc#_p2n_bar2_start * * PESC_P2N_BAR2_START = PESC PCIe to Npei BAR2 Start * * The starting address for addresses to be forwarded to the NPEI in RC Mode. */ typedef union { uint64_t u64; struct cvmx_pescx_p2n_bar2_start_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t addr : 25; /**< The starting address of the 2^39 address space that is the BAR2 address space.
*/ uint64_t reserved_0_38 : 39; #else uint64_t reserved_0_38 : 39; uint64_t addr : 25; #endif } s; struct cvmx_pescx_p2n_bar2_start_s cn52xx; struct cvmx_pescx_p2n_bar2_start_s cn52xxp1; struct cvmx_pescx_p2n_bar2_start_s cn56xx; struct cvmx_pescx_p2n_bar2_start_s cn56xxp1; } cvmx_pescx_p2n_bar2_start_t; /** * cvmx_pesc#_p2p_bar#_end * * PESC_P2P_BAR#_END = PESC Peer-To-Peer BAR0 End * * The ending address for addresses to forwarded to the PCIe peer port. */ typedef union { uint64_t u64; struct cvmx_pescx_p2p_barx_end_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t addr : 52; /**< The ending address of the address window created this field and the PESC_P2P_BAR0_START[63:12] field. The full 64-bits of address are created by: [ADDR[63:12], 12'b0]. */ uint64_t reserved_0_11 : 12; #else uint64_t reserved_0_11 : 12; uint64_t addr : 52; #endif } s; struct cvmx_pescx_p2p_barx_end_s cn52xx; struct cvmx_pescx_p2p_barx_end_s cn52xxp1; struct cvmx_pescx_p2p_barx_end_s cn56xx; struct cvmx_pescx_p2p_barx_end_s cn56xxp1; } cvmx_pescx_p2p_barx_end_t; /** * cvmx_pesc#_p2p_bar#_start * * PESC_P2P_BAR#_START = PESC Peer-To-Peer BAR0 Start * * The starting address and enable for addresses to forwarded to the PCIe peer port. */ typedef union { uint64_t u64; struct cvmx_pescx_p2p_barx_start_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t addr : 52; /**< The starting address of the address window created this field and the PESC_P2P_BAR0_END[63:12] field. The full 64-bits of address are created by: [ADDR[63:12], 12'b0]. */ uint64_t reserved_0_11 : 12; #else uint64_t reserved_0_11 : 12; uint64_t addr : 52; #endif } s; struct cvmx_pescx_p2p_barx_start_s cn52xx; struct cvmx_pescx_p2p_barx_start_s cn52xxp1; struct cvmx_pescx_p2p_barx_start_s cn56xx; struct cvmx_pescx_p2p_barx_start_s cn56xxp1; } cvmx_pescx_p2p_barx_start_t; /** * cvmx_pesc#_tlp_credits * * PESC_TLP_CREDITS = PESC TLP Credits * * Specifies the number of credits the PESC for use in moving TLPs. When this register is written the credit values are * reset to the register value. A write to this register should take place BEFORE traffic flow starts. */ typedef union { uint64_t u64; struct cvmx_pescx_tlp_credits_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_0_63 : 64; #else uint64_t reserved_0_63 : 64; #endif } s; struct cvmx_pescx_tlp_credits_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_56_63 : 8; uint64_t peai_ppf : 8; /**< TLP credits for Completion TLPs in the Peer. Legal values are 0x24 to 0x80. */ uint64_t pesc_cpl : 8; /**< TLP credits for Completion TLPs in the Peer. Legal values are 0x24 to 0x80. */ uint64_t pesc_np : 8; /**< TLP credits for Non-Posted TLPs in the Peer. Legal values are 0x4 to 0x10. */ uint64_t pesc_p : 8; /**< TLP credits for Posted TLPs in the Peer. Legal values are 0x24 to 0x80. */ uint64_t npei_cpl : 8; /**< TLP credits for Completion TLPs in the NPEI. Legal values are 0x24 to 0x80. */ uint64_t npei_np : 8; /**< TLP credits for Non-Posted TLPs in the NPEI. Legal values are 0x4 to 0x10. */ uint64_t npei_p : 8; /**< TLP credits for Posted TLPs in the NPEI. Legal values are 0x24 to 0x80. 
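        A hedged programming sketch (assumes the generic cvmx_write_csr()
        helper and the CVMX_PESCX_TLP_CREDITS(block) address macro from the
        companion address header) that loads the minimum legal credits
        before any traffic flows:

          cvmx_pescx_tlp_credits_t tlp;
          tlp.u64 = 0;
          tlp.cn52xx.npei_p   = 0x24;  // posted, minimum legal value
          tlp.cn52xx.npei_np  = 0x4;   // non-posted, minimum legal value
          tlp.cn52xx.npei_cpl = 0x24;  // completion, minimum legal value
          tlp.cn52xx.pesc_p   = 0x24;
          tlp.cn52xx.pesc_np  = 0x4;
          tlp.cn52xx.pesc_cpl = 0x24;
          tlp.cn52xx.peai_ppf = 0x24;
          cvmx_write_csr(CVMX_PESCX_TLP_CREDITS(0), tlp.u64);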
*/ #else uint64_t npei_p : 8; uint64_t npei_np : 8; uint64_t npei_cpl : 8; uint64_t pesc_p : 8; uint64_t pesc_np : 8; uint64_t pesc_cpl : 8; uint64_t peai_ppf : 8; uint64_t reserved_56_63 : 8; #endif } cn52xx; struct cvmx_pescx_tlp_credits_cn52xxp1 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_38_63 : 26; uint64_t peai_ppf : 8; /**< TLP credits in core clk pre-buffer that holds TLPs being sent from PCIe Core to NPEI or PEER. */ uint64_t pesc_cpl : 5; /**< TLP credits for Completion TLPs in the Peer. */ uint64_t pesc_np : 5; /**< TLP credits for Non-Posted TLPs in the Peer. */ uint64_t pesc_p : 5; /**< TLP credits for Posted TLPs in the Peer. */ uint64_t npei_cpl : 5; /**< TLP credits for Completion TLPs in the NPEI. */ uint64_t npei_np : 5; /**< TLP credits for Non-Posted TLPs in the NPEI. */ uint64_t npei_p : 5; /**< TLP credits for Posted TLPs in the NPEI. */ #else uint64_t npei_p : 5; uint64_t npei_np : 5; uint64_t npei_cpl : 5; uint64_t pesc_p : 5; uint64_t pesc_np : 5; uint64_t pesc_cpl : 5; uint64_t peai_ppf : 8; uint64_t reserved_38_63 : 26; #endif } cn52xxp1; struct cvmx_pescx_tlp_credits_cn52xx cn56xx; struct cvmx_pescx_tlp_credits_cn52xxp1 cn56xxp1; } cvmx_pescx_tlp_credits_t; /** * cvmx_pip_bck_prs * * PIP_BCK_PRS = PIP's Back Pressure Register * * When to assert backpressure based on the todo list filling up */ typedef union { uint64_t u64; struct cvmx_pip_bck_prs_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t bckprs : 1; /**< PIP is currently asserting backpressure to IOB Backpressure from PIP will assert when the entries to the todo list exceed HIWATER. Backpressure will be held until the todo entries are less than or equal to LOWATER. */ uint64_t reserved_13_62 : 50; uint64_t hiwater : 5; /**< Water mark in the todo list to assert backpressure Legal values are 1-26. A 0 value will deadlock the machine. A value > 26 will trash memory */ uint64_t reserved_5_7 : 3; uint64_t lowater : 5; /**< Water mark in the todo list to release backpressure The LOWATER value should be < HIWATER. */ #else uint64_t lowater : 5; uint64_t reserved_5_7 : 3; uint64_t hiwater : 5; uint64_t reserved_13_62 : 50; uint64_t bckprs : 1; #endif } s; struct cvmx_pip_bck_prs_s cn38xx; struct cvmx_pip_bck_prs_s cn38xxp2; struct cvmx_pip_bck_prs_s cn56xx; struct cvmx_pip_bck_prs_s cn56xxp1; struct cvmx_pip_bck_prs_s cn58xx; struct cvmx_pip_bck_prs_s cn58xxp1; } cvmx_pip_bck_prs_t; /** * cvmx_pip_bist_status * * PIP_BIST_STATUS = PIP's BIST Results * */ typedef union { uint64_t u64; struct cvmx_pip_bist_status_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_18_63 : 46; uint64_t bist : 18; /**< BIST Results. HW sets a bit in BIST for memory that fails BIST. */ #else uint64_t bist : 18; uint64_t reserved_18_63 : 46; #endif } s; struct cvmx_pip_bist_status_s cn30xx; struct cvmx_pip_bist_status_s cn31xx; struct cvmx_pip_bist_status_s cn38xx; struct cvmx_pip_bist_status_s cn38xxp2; struct cvmx_pip_bist_status_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_17_63 : 47; uint64_t bist : 17; /**< BIST Results. HW sets a bit in BIST for memory that fails BIST.
*/ #else uint64_t bist : 17; uint64_t reserved_17_63 : 47; #endif } cn50xx; struct cvmx_pip_bist_status_s cn52xx; struct cvmx_pip_bist_status_s cn52xxp1; struct cvmx_pip_bist_status_s cn56xx; struct cvmx_pip_bist_status_s cn56xxp1; struct cvmx_pip_bist_status_s cn58xx; struct cvmx_pip_bist_status_s cn58xxp1; } cvmx_pip_bist_status_t; /** * cvmx_pip_crc_ctl# * * PIP_CRC_CTL = PIP CRC Control Register * * Controls datapath reflection when calculating CRC */ typedef union { uint64_t u64; struct cvmx_pip_crc_ctlx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t invres : 1; /**< Invert the result */ uint64_t reflect : 1; /**< Reflect the bits in each byte. Byte order does not change. - 0: CRC is calculated MSB to LSB - 1: CRC is calculated LSB to MSB */ #else uint64_t reflect : 1; uint64_t invres : 1; uint64_t reserved_2_63 : 62; #endif } s; struct cvmx_pip_crc_ctlx_s cn38xx; struct cvmx_pip_crc_ctlx_s cn38xxp2; struct cvmx_pip_crc_ctlx_s cn58xx; struct cvmx_pip_crc_ctlx_s cn58xxp1; } cvmx_pip_crc_ctlx_t; /** * cvmx_pip_crc_iv# * * PIP_CRC_IV = PIP CRC IV Register * * Determines the IV used by the CRC algorithm * * Notes: * * PIP_CRC_IV * PIP_CRC_IV controls the initial state of the CRC algorithm. Octane can * support a wide range of CRC algorithms and as such, the IV must be * carefully constructed to meet the specific algorithm. The code below * determines the value to program into Octane based on the algorithm's IV * and width. In the case of Octane, the width should always be 32. * * PIP_CRC_IV0 sets the IV for ports 0-15 while PIP_CRC_IV1 sets the IV for * ports 16-31. * * unsigned octane_crc_iv(unsigned algorithm_iv, unsigned poly, unsigned w) * { * int i; * int doit; * unsigned int current_val = algorithm_iv; * * for(i = 0; i < w; i++) { * doit = current_val & 0x1; * * if(doit) current_val ^= poly; * assert(!(current_val & 0x1)); * * current_val = (current_val >> 1) | (doit << (w-1)); * } * * return current_val; * } */ typedef union { uint64_t u64; struct cvmx_pip_crc_ivx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t iv : 32; /**< IV used by the CRC algorithm. Default is FCS32. */ #else uint64_t iv : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_pip_crc_ivx_s cn38xx; struct cvmx_pip_crc_ivx_s cn38xxp2; struct cvmx_pip_crc_ivx_s cn58xx; struct cvmx_pip_crc_ivx_s cn58xxp1; } cvmx_pip_crc_ivx_t; /** * cvmx_pip_dec_ipsec# * * PIP_DEC_IPSEC = UDP or TCP ports to watch for DEC IPSEC * * PIP sets the dec_ipsec based on TCP or UDP destination port.
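 *
 * For example (a hedged sketch assuming the generic cvmx_write_csr()
 * helper and the CVMX_PIP_DEC_IPSECX(offset) address macro from the
 * companion address header), watching UDP destination port 4500
 * (IPsec NAT-T) in entry 0:
 *
 *   cvmx_pip_dec_ipsecx_t dec;
 *   dec.u64 = 0;
 *   dec.s.udp = 1;       // match UDP packets only
 *   dec.s.dprt = 4500;   // destination port to watch
 *   cvmx_write_csr(CVMX_PIP_DEC_IPSECX(0), dec.u64);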
*/ typedef union { uint64_t u64; struct cvmx_pip_dec_ipsecx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_18_63 : 46; uint64_t tcp : 1; /**< This DPRT should be used for TCP packets */ uint64_t udp : 1; /**< This DPRT should be used for UDP packets */ uint64_t dprt : 16; /**< UDP or TCP destination port to match on */ #else uint64_t dprt : 16; uint64_t udp : 1; uint64_t tcp : 1; uint64_t reserved_18_63 : 46; #endif } s; struct cvmx_pip_dec_ipsecx_s cn30xx; struct cvmx_pip_dec_ipsecx_s cn31xx; struct cvmx_pip_dec_ipsecx_s cn38xx; struct cvmx_pip_dec_ipsecx_s cn38xxp2; struct cvmx_pip_dec_ipsecx_s cn50xx; struct cvmx_pip_dec_ipsecx_s cn52xx; struct cvmx_pip_dec_ipsecx_s cn52xxp1; struct cvmx_pip_dec_ipsecx_s cn56xx; struct cvmx_pip_dec_ipsecx_s cn56xxp1; struct cvmx_pip_dec_ipsecx_s cn58xx; struct cvmx_pip_dec_ipsecx_s cn58xxp1; } cvmx_pip_dec_ipsecx_t; /** * cvmx_pip_dsa_src_grp */ typedef union { uint64_t u64; struct cvmx_pip_dsa_src_grp_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t map15 : 4; /**< DSA Group Algorithm */ uint64_t map14 : 4; /**< DSA Group Algorithm */ uint64_t map13 : 4; /**< DSA Group Algorithm */ uint64_t map12 : 4; /**< DSA Group Algorithm */ uint64_t map11 : 4; /**< DSA Group Algorithm */ uint64_t map10 : 4; /**< DSA Group Algorithm */ uint64_t map9 : 4; /**< DSA Group Algorithm */ uint64_t map8 : 4; /**< DSA Group Algorithm */ uint64_t map7 : 4; /**< DSA Group Algorithm */ uint64_t map6 : 4; /**< DSA Group Algorithm */ uint64_t map5 : 4; /**< DSA Group Algorithm */ uint64_t map4 : 4; /**< DSA Group Algorithm */ uint64_t map3 : 4; /**< DSA Group Algorithm */ uint64_t map2 : 4; /**< DSA Group Algorithm */ uint64_t map1 : 4; /**< DSA Group Algorithm */ uint64_t map0 : 4; /**< DSA Group Algorithm Use the DSA source id to compute GRP (56xx pass2 only) */ #else uint64_t map0 : 4; uint64_t map1 : 4; uint64_t map2 : 4; uint64_t map3 : 4; uint64_t map4 : 4; uint64_t map5 : 4; uint64_t map6 : 4; uint64_t map7 : 4; uint64_t map8 : 4; uint64_t map9 : 4; uint64_t map10 : 4; uint64_t map11 : 4; uint64_t map12 : 4; uint64_t map13 : 4; uint64_t map14 : 4; uint64_t map15 : 4; #endif } s; struct cvmx_pip_dsa_src_grp_s cn52xx; struct cvmx_pip_dsa_src_grp_s cn52xxp1; struct cvmx_pip_dsa_src_grp_s cn56xx; } cvmx_pip_dsa_src_grp_t; /** * cvmx_pip_dsa_vid_grp */ typedef union { uint64_t u64; struct cvmx_pip_dsa_vid_grp_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t map15 : 4; /**< DSA Group Algorithm */ uint64_t map14 : 4; /**< DSA Group Algorithm */ uint64_t map13 : 4; /**< DSA Group Algorithm */ uint64_t map12 : 4; /**< DSA Group Algorithm */ uint64_t map11 : 4; /**< DSA Group Algorithm */ uint64_t map10 : 4; /**< DSA Group Algorithm */ uint64_t map9 : 4; /**< DSA Group Algorithm */ uint64_t map8 : 4; /**< DSA Group Algorithm */ uint64_t map7 : 4; /**< DSA Group Algorithm */ uint64_t map6 : 4; /**< DSA Group Algorithm */ uint64_t map5 : 4; /**< DSA Group Algorithm */ uint64_t map4 : 4; /**< DSA Group Algorithm */ uint64_t map3 : 4; /**< DSA Group Algorithm */ uint64_t map2 : 4; /**< DSA Group Algorithm */ uint64_t map1 : 4; /**< DSA Group Algorithm */ uint64_t map0 : 4; /**< DSA Group Algorithm Use the DSA source id to compute GRP (56xx pass2 only) */ #else uint64_t map0 : 4; uint64_t map1 : 4; uint64_t map2 : 4; uint64_t map3 : 4; uint64_t map4 : 4; uint64_t map5 : 4; uint64_t map6 : 4; uint64_t map7 : 4; uint64_t map8 : 4; uint64_t map9 : 4; uint64_t map10 : 4; uint64_t map11 : 4; uint64_t map12 : 4; uint64_t map13 : 4; uint64_t map14 : 4; uint64_t map15 : 4; #endif } s; struct 
/** * cvmx_pip_frm_len_chk# * * Notes: * PIP_FRM_LEN_CHK0 is used for packets on packet interface0, PCI, and PKO loopback ports. * PIP_FRM_LEN_CHK1 is used for PCI RAW packets. */ typedef union { uint64_t u64; struct cvmx_pip_frm_len_chkx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t maxlen : 16; /**< Byte count for Max-sized frame check Failing packets set the MAXERR interrupt and are optionally sent with opcode==MAXERR The effective MAXLEN used by HW is PIP_FRM_LEN_CHK[MAXLEN] + 4*VV + 4*VS */ uint64_t minlen : 16; /**< Byte count for Min-sized frame check Failing packets set the MINERR interrupt and are optionally sent with opcode==MINERR */ #else uint64_t minlen : 16; uint64_t maxlen : 16; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_pip_frm_len_chkx_s cn50xx; struct cvmx_pip_frm_len_chkx_s cn52xx; struct cvmx_pip_frm_len_chkx_s cn52xxp1; struct cvmx_pip_frm_len_chkx_s cn56xx; struct cvmx_pip_frm_len_chkx_s cn56xxp1; } cvmx_pip_frm_len_chkx_t; /** * cvmx_pip_gbl_cfg * * PIP_GBL_CFG = PIP's Global Config Register * * Global config information that applies to all ports. * * Notes: * * IP6_UDP * IPv4 allows optional UDP checksum by sending the all 0's patterns. IPv6 * outlaws this and the spec says to always check UDP checksum. This mode * bit allows the user to treat IPv6 as IPv4, meaning that the all 0's * pattern will cause a UDP checksum pass. */ typedef union { uint64_t u64; struct cvmx_pip_gbl_cfg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_19_63 : 45; uint64_t tag_syn : 1; /**< Do not include src_crc for TCP/SYN&!ACK packets 0 = include src_crc 1 = tag hash is dst_crc for TCP/SYN&!ACK packets */ uint64_t ip6_udp : 1; /**< IPv6/UDP checksum is not optional 0 = Allow optional checksum code 1 = Do not allow optional checksum code */ uint64_t max_l2 : 1; /**< Config bit to choose the largest L2 frame size Chooses the value of the L2 Type/Length field to classify the frame as length. 0 = 1500 / 0x5dc 1 = 1535 / 0x5ff */ uint64_t reserved_11_15 : 5; uint64_t raw_shf : 3; /**< RAW Packet shift amount Number of bytes to pad a packet that has been received on a PCI RAW port. */ uint64_t reserved_3_7 : 5; uint64_t nip_shf : 3; /**< Non-IP shift amount Number of bytes to pad a packet that has been classified as not IP. */ #else uint64_t nip_shf : 3; uint64_t reserved_3_7 : 5; uint64_t raw_shf : 3; uint64_t reserved_11_15 : 5; uint64_t max_l2 : 1; uint64_t ip6_udp : 1; uint64_t tag_syn : 1; uint64_t reserved_19_63 : 45; #endif } s; struct cvmx_pip_gbl_cfg_s cn30xx; struct cvmx_pip_gbl_cfg_s cn31xx; struct cvmx_pip_gbl_cfg_s cn38xx; struct cvmx_pip_gbl_cfg_s cn38xxp2; struct cvmx_pip_gbl_cfg_s cn50xx; struct cvmx_pip_gbl_cfg_s cn52xx; struct cvmx_pip_gbl_cfg_s cn52xxp1; struct cvmx_pip_gbl_cfg_s cn56xx; struct cvmx_pip_gbl_cfg_s cn56xxp1; struct cvmx_pip_gbl_cfg_s cn58xx; struct cvmx_pip_gbl_cfg_s cn58xxp1; } cvmx_pip_gbl_cfg_t; /** * cvmx_pip_gbl_ctl * * PIP_GBL_CTL = PIP's Global Control Register * * Global control information. These are the global checker enables for * IPv4/IPv6 and TCP/UDP parsing. The enables affect all ports. * * Notes: * The following text describes the conditions in which each checker will * assert and flag an exception. By disabling the checker, the exception will * not be flagged and the packet will be parsed as best it can.
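 *
 * For example, a minimal sketch of turning one checker off (illustrative
 * only; it assumes the generic cvmx_read_csr()/cvmx_write_csr() accessors
 * and the CVMX_PIP_GBL_CTL address macro from the companion cvmx-csr
 * headers):
 *
 *   cvmx_pip_gbl_ctl_t gbl_ctl;
 *   gbl_ctl.u64 = cvmx_read_csr(CVMX_PIP_GBL_CTL);
 *   gbl_ctl.s.l4_chk = 0;              stop flagging TCP/UDP checksum errors
 *   cvmx_write_csr(CVMX_PIP_GBL_CTL, gbl_ctl.u64);
 *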
 * Note, by disabling conditions, packets can be parsed incorrectly (i.e. IP_MAL and * L4_MAL could cause bits to be seen in the wrong place; IP_CHK and L4_CHK * mean that the packet was corrupted). * * * IP_CHK * Indicates that an IPv4 packet contained an IPv4 header checksum * violation. Only applies to packets classified as IPv4. * * * IP_MAL * Indicates that the packet was malformed. Malformed packets are defined as * packets that are not long enough to cover the IP header or not long enough * to cover the length in the IP header. * * * IP_HOP * Indicates that the IPv4 TTL field or IPv6 HOP field is zero. * * * IP4_OPTS * Indicates the presence of IPv4 options. It is set when the length != 5. * This only applies to packets classified as IPv4. * * * IP6_EEXT * Indicates the presence of IPv6 early extension headers. These bits only * apply to packets classified as IPv6. Bit 0 will flag early extensions * when next_header is any one of the following... * * - hop-by-hop (0) * - destination (60) * - routing (43) * * Bit 1 will flag early extensions when next_header is NOT any of the * following... * * - TCP (6) * - UDP (17) * - fragmentation (44) * - ICMP (58) * - IPSEC ESP (50) * - IPSEC AH (51) * - IPCOMP * * * L4_MAL * Indicates that a TCP or UDP packet is not long enough to cover the TCP or * UDP header. * * * L4_PRT * Indicates that a TCP or UDP packet has an illegal port number - either the * source or destination port is zero. * * * L4_CHK * Indicates that a packet classified as either TCP or UDP contains an L4 * checksum failure. * * * L4_LEN * Indicates that the TCP or UDP length does not match the IP length. * * * TCP_FLAG * Indicates any of the following conditions... * * [URG, ACK, PSH, RST, SYN, FIN] : tcp_flag * 6'b000001: (FIN only) * 6'b000000: (0) * 6'bxxx1x1: (RST+FIN+*) * 6'b1xxx1x: (URG+SYN+*) * 6'bxxx11x: (RST+SYN+*) * 6'bxxxx11: (SYN+FIN+*) */ typedef union { uint64_t u64; struct cvmx_pip_gbl_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_27_63 : 37; uint64_t dsa_grp_tvid : 1; /**< DSA Group Algorithm Use the DSA VLAN id to compute GRP (56xx pass2 only) */ uint64_t dsa_grp_scmd : 1; /**< DSA Group Algorithm Use the DSA source id to compute GRP when the DSA tag command is TO_CPU (56xx pass2 only) */ uint64_t dsa_grp_sid : 1; /**< DSA Group Algorithm Use the DSA source id to compute GRP (56xx pass2 only) */ uint64_t reserved_21_23 : 3; uint64_t ring_en : 1; /**< Enable PCIe ring information in WQE */ uint64_t reserved_17_19 : 3; uint64_t ignrs : 1; /**< Ignore the PKT_INST_HDR[RS] bit when set Only applies to the packet interface prts (0-31) (PASS2 only) */ uint64_t vs_wqe : 1; /**< Which VLAN CFI and ID to use when VLAN Stacking 0=use the 1st (network order) VLAN 1=use the 2nd (network order) VLAN (PASS2 only) */ uint64_t vs_qos : 1; /**< Which VLAN priority to use when VLAN Stacking 0=use the 1st (network order) VLAN 1=use the 2nd (network order) VLAN (PASS2 only) */ uint64_t l2_mal : 1; /**< Enable L2 malformed packet check */ uint64_t tcp_flag : 1; /**< Enable TCP flags checks */ uint64_t l4_len : 1; /**< Enable TCP/UDP length check */ uint64_t l4_chk : 1; /**< Enable TCP/UDP checksum check */ uint64_t l4_prt : 1; /**< Enable TCP/UDP illegal port check */ uint64_t l4_mal : 1; /**< Enable TCP/UDP malformed packet check */ uint64_t reserved_6_7 : 2; uint64_t ip6_eext : 2; /**< Enable IPv6 early extension headers */ uint64_t ip4_opts : 1; /**< Enable IPv4 options check */ uint64_t ip_hop : 1; /**< Enable TTL (IPv4) / hop (IPv6) check */ uint64_t ip_mal : 
1; /**< Enable malformed check */ uint64_t ip_chk : 1; /**< Enable IPv4 header checksum check */ #else uint64_t ip_chk : 1; uint64_t ip_mal : 1; uint64_t ip_hop : 1; uint64_t ip4_opts : 1; uint64_t ip6_eext : 2; uint64_t reserved_6_7 : 2; uint64_t l4_mal : 1; uint64_t l4_prt : 1; uint64_t l4_chk : 1; uint64_t l4_len : 1; uint64_t tcp_flag : 1; uint64_t l2_mal : 1; uint64_t vs_qos : 1; uint64_t vs_wqe : 1; uint64_t ignrs : 1; uint64_t reserved_17_19 : 3; uint64_t ring_en : 1; uint64_t reserved_21_23 : 3; uint64_t dsa_grp_sid : 1; uint64_t dsa_grp_scmd : 1; uint64_t dsa_grp_tvid : 1; uint64_t reserved_27_63 : 37; #endif } s; struct cvmx_pip_gbl_ctl_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_17_63 : 47; uint64_t ignrs : 1; /**< Ignore the PKT_INST_HDR[RS] bit when set Only applies to the packet interface prts (0-31) */ uint64_t vs_wqe : 1; /**< Which VLAN CFI and ID to use when VLAN Stacking 0=use the 1st (network order) VLAN 1=use the 2nd (network order) VLAN */ uint64_t vs_qos : 1; /**< Which VLAN priority to use when VLAN Stacking 0=use the 1st (network order) VLAN 1=use the 2nd (network order) VLAN */ uint64_t l2_mal : 1; /**< Enable L2 malformed packet check */ uint64_t tcp_flag : 1; /**< Enable TCP flags checks */ uint64_t l4_len : 1; /**< Enable TCP/UDP length check */ uint64_t l4_chk : 1; /**< Enable TCP/UDP checksum check */ uint64_t l4_prt : 1; /**< Enable TCP/UDP illegal port check */ uint64_t l4_mal : 1; /**< Enable TCP/UDP malformed packet check */ uint64_t reserved_6_7 : 2; uint64_t ip6_eext : 2; /**< Enable IPv6 early extension headers */ uint64_t ip4_opts : 1; /**< Enable IPv4 options check */ uint64_t ip_hop : 1; /**< Enable TTL (IPv4) / hop (IPv6) check */ uint64_t ip_mal : 1; /**< Enable malformed check */ uint64_t ip_chk : 1; /**< Enable IPv4 header checksum check */ #else uint64_t ip_chk : 1; uint64_t ip_mal : 1; uint64_t ip_hop : 1; uint64_t ip4_opts : 1; uint64_t ip6_eext : 2; uint64_t reserved_6_7 : 2; uint64_t l4_mal : 1; uint64_t l4_prt : 1; uint64_t l4_chk : 1; uint64_t l4_len : 1; uint64_t tcp_flag : 1; uint64_t l2_mal : 1; uint64_t vs_qos : 1; uint64_t vs_wqe : 1; uint64_t ignrs : 1; uint64_t reserved_17_63 : 47; #endif } cn30xx; struct cvmx_pip_gbl_ctl_cn30xx cn31xx; struct cvmx_pip_gbl_ctl_cn30xx cn38xx; struct cvmx_pip_gbl_ctl_cn30xx cn38xxp2; struct cvmx_pip_gbl_ctl_cn30xx cn50xx; struct cvmx_pip_gbl_ctl_s cn52xx; struct cvmx_pip_gbl_ctl_s cn52xxp1; struct cvmx_pip_gbl_ctl_s cn56xx; struct cvmx_pip_gbl_ctl_cn56xxp1 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_21_63 : 43; uint64_t ring_en : 1; /**< Enable PCIe ring information in WQE */ uint64_t reserved_17_19 : 3; uint64_t ignrs : 1; /**< Ignore the PKT_INST_HDR[RS] bit when set Only applies to the packet interface prts (0-31) */ uint64_t vs_wqe : 1; /**< Which VLAN CFI and ID to use when VLAN Stacking 0=use the 1st (network order) VLAN 1=use the 2nd (network order) VLAN */ uint64_t vs_qos : 1; /**< Which VLAN priority to use when VLAN Stacking 0=use the 1st (network order) VLAN 1=use the 2nd (network order) VLAN */ uint64_t l2_mal : 1; /**< Enable L2 malformed packet check */ uint64_t tcp_flag : 1; /**< Enable TCP flags checks */ uint64_t l4_len : 1; /**< Enable TCP/UDP length check */ uint64_t l4_chk : 1; /**< Enable TCP/UDP checksum check */ uint64_t l4_prt : 1; /**< Enable TCP/UDP illegal port check */ uint64_t l4_mal : 1; /**< Enable TCP/UDP malformed packet check */ uint64_t reserved_6_7 : 2; uint64_t ip6_eext : 2; /**< Enable IPv6 early extension headers */ uint64_t ip4_opts : 
1; /**< Enable IPv4 options check */ uint64_t ip_hop : 1; /**< Enable TTL (IPv4) / hop (IPv6) check */ uint64_t ip_mal : 1; /**< Enable malformed check */ uint64_t ip_chk : 1; /**< Enable IPv4 header checksum check */ #else uint64_t ip_chk : 1; uint64_t ip_mal : 1; uint64_t ip_hop : 1; uint64_t ip4_opts : 1; uint64_t ip6_eext : 2; uint64_t reserved_6_7 : 2; uint64_t l4_mal : 1; uint64_t l4_prt : 1; uint64_t l4_chk : 1; uint64_t l4_len : 1; uint64_t tcp_flag : 1; uint64_t l2_mal : 1; uint64_t vs_qos : 1; uint64_t vs_wqe : 1; uint64_t ignrs : 1; uint64_t reserved_17_19 : 3; uint64_t ring_en : 1; uint64_t reserved_21_63 : 43; #endif } cn56xxp1; struct cvmx_pip_gbl_ctl_cn30xx cn58xx; struct cvmx_pip_gbl_ctl_cn30xx cn58xxp1; } cvmx_pip_gbl_ctl_t; /** * cvmx_pip_hg_pri_qos * * Notes: * This register controls accesses to the HG_QOS_TABLE. To write an entry of * the table, write PIP_HG_PRI_QOS with PRI=table address, QOS=priority level, * UP_QOS=1. To read an entry of the table, write PIP_HG_PRI_QOS with * PRI=table address, QOS=dont_care, UP_QOS=0 and then read * PIP_HG_PRI_QOS. The table data will be in PIP_HG_PRI_QOS[QOS]. */ typedef union { uint64_t u64; struct cvmx_pip_hg_pri_qos_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_13_63 : 51; uint64_t up_qos : 1; /**< When written to '1', updates the entry in the HG_QOS_TABLE as specified by PRI to a value of QOS as follows HG_QOS_TABLE[PRI] = QOS */ uint64_t reserved_11_11 : 1; uint64_t qos : 3; /**< QOS Map level to priority (56xx pass2 only) */ uint64_t reserved_6_7 : 2; uint64_t pri : 6; /**< The priority level from HiGig header HiGig/HiGig+ PRI = [1'b0, CNG[1:0], COS[2:0]] HiGig2 PRI = [DP[1:0], TC[3:0]] (56xx pass2 only) */ #else uint64_t pri : 6; uint64_t reserved_6_7 : 2; uint64_t qos : 3; uint64_t reserved_11_11 : 1; uint64_t up_qos : 1; uint64_t reserved_13_63 : 51; #endif } s; struct cvmx_pip_hg_pri_qos_s cn52xx; struct cvmx_pip_hg_pri_qos_s cn52xxp1; struct cvmx_pip_hg_pri_qos_s cn56xx; } cvmx_pip_hg_pri_qos_t;
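/*
 * Illustrative usage sketch (not part of the auto-generated definitions):
 * the table read/write protocol described in the notes above, assuming the
 * generic cvmx_read_csr()/cvmx_write_csr() accessors and the
 * CVMX_PIP_HG_PRI_QOS address macro from the companion cvmx-csr headers.
 *
 *   cvmx_pip_hg_pri_qos_t hg;
 *
 *   hg.u64 = 0;                write HG_QOS_TABLE[9] = 5
 *   hg.s.pri = 9;
 *   hg.s.qos = 5;
 *   hg.s.up_qos = 1;
 *   cvmx_write_csr(CVMX_PIP_HG_PRI_QOS, hg.u64);
 *
 *   hg.u64 = 0;                read HG_QOS_TABLE[9] back
 *   hg.s.pri = 9;
 *   hg.s.up_qos = 0;
 *   cvmx_write_csr(CVMX_PIP_HG_PRI_QOS, hg.u64);
 *   hg.u64 = cvmx_read_csr(CVMX_PIP_HG_PRI_QOS);
 *   hg.s.qos now holds the table entry
 */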
/** * cvmx_pip_int_en * * PIP_INT_EN = PIP's Interrupt Enable Register * * Determines if hardware should raise an interrupt to software * when an exception event occurs. */ typedef union { uint64_t u64; struct cvmx_pip_int_en_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_13_63 : 51; uint64_t punyerr : 1; /**< Frame was received with length <=4B when CRC stripping in IPD is enabled */ uint64_t lenerr : 1; /**< Frame was received with length error */ uint64_t maxerr : 1; /**< Frame was received with length > max_length */ uint64_t minerr : 1; /**< Frame was received with length < min_length */ uint64_t beperr : 1; /**< Parity Error in back end memory */ uint64_t feperr : 1; /**< Parity Error in front end memory */ uint64_t todoovr : 1; /**< Todo list overflow (see PIP_BCK_PRS[HIWATER]) */ uint64_t skprunt : 1; /**< Packet was engulfed by skipper */ uint64_t badtag : 1; /**< A bad tag was sent from IPD */ uint64_t prtnxa : 1; /**< Non-existent port */ uint64_t bckprs : 1; /**< PIP asserted backpressure */ uint64_t crcerr : 1; /**< PIP calculated bad CRC */ uint64_t pktdrp : 1; /**< Packet Dropped due to QOS */ #else uint64_t pktdrp : 1; uint64_t crcerr : 1; uint64_t bckprs : 1; uint64_t prtnxa : 1; uint64_t badtag : 1; uint64_t skprunt : 1; uint64_t todoovr : 1; uint64_t feperr : 1; uint64_t beperr : 1; uint64_t minerr : 1; uint64_t maxerr : 1; uint64_t lenerr : 1; uint64_t punyerr : 1; uint64_t reserved_13_63 : 51; #endif } s; struct cvmx_pip_int_en_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_9_63 : 55; uint64_t beperr : 1; /**< Parity Error in back end memory */ uint64_t feperr : 1; /**< Parity Error in front end memory */ uint64_t todoovr : 1; /**< Todo list overflow (not used in O2P) */ uint64_t skprunt : 1; /**< Packet was engulfed by skipper */ uint64_t badtag : 1; /**< A bad tag was sent from IPD */ uint64_t prtnxa : 1; /**< Non-existent port */ uint64_t bckprs : 1; /**< PIP asserted backpressure (not used in O2P) */ uint64_t crcerr : 1; /**< PIP calculated bad CRC (not used in O2P) */ uint64_t pktdrp : 1; /**< Packet Dropped due to QOS */ #else uint64_t pktdrp : 1; uint64_t crcerr : 1; uint64_t bckprs : 1; uint64_t prtnxa : 1; uint64_t badtag : 1; uint64_t skprunt : 1; uint64_t todoovr : 1; uint64_t feperr : 1; uint64_t beperr : 1; uint64_t reserved_9_63 : 55; #endif } cn30xx; struct cvmx_pip_int_en_cn30xx cn31xx; struct cvmx_pip_int_en_cn30xx cn38xx; struct cvmx_pip_int_en_cn30xx cn38xxp2; struct cvmx_pip_int_en_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t lenerr : 1; /**< Frame was received with length error */ uint64_t maxerr : 1; /**< Frame was received with length > max_length */ uint64_t minerr : 1; /**< Frame was received with length < min_length */ uint64_t beperr : 1; /**< Parity Error in back end memory */ uint64_t feperr : 1; /**< Parity Error in front end memory */ uint64_t todoovr : 1; /**< Todo list overflow */ uint64_t skprunt : 1; /**< Packet was engulfed by skipper */ uint64_t badtag : 1; /**< A bad tag was sent from IPD */ uint64_t prtnxa : 1; /**< Non-existent port */ uint64_t bckprs : 1; /**< PIP asserted backpressure */ uint64_t reserved_1_1 : 1; uint64_t pktdrp : 1; /**< Packet Dropped due to QOS */ #else uint64_t pktdrp : 1; uint64_t reserved_1_1 : 1; uint64_t bckprs : 1; uint64_t prtnxa : 1; uint64_t badtag : 1; uint64_t skprunt : 1; uint64_t todoovr : 1; uint64_t feperr : 1; uint64_t beperr : 1; uint64_t minerr : 1; uint64_t maxerr : 1; uint64_t lenerr : 1; uint64_t reserved_12_63 : 52; #endif } cn50xx; struct cvmx_pip_int_en_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_13_63 : 51; uint64_t punyerr : 1; /**< Frame was received with length <=4B when CRC 
stripping in IPD is enabled */ uint64_t lenerr : 1; /**< Frame was received with length error */ uint64_t maxerr : 1; /**< Frame was received with length > max_length */ uint64_t minerr : 1; /**< Frame was received with length < min_length */ uint64_t beperr : 1; /**< Parity Error in back end memory */ uint64_t feperr : 1; /**< Parity Error in front end memory */ uint64_t todoovr : 1; /**< Todo list overflow */ uint64_t skprunt : 1; /**< Packet was engulfed by skipper */ uint64_t badtag : 1; /**< A bad tag was sent from IPD */ uint64_t prtnxa : 1; /**< Non-existent port */ uint64_t bckprs : 1; /**< PIP asserted backpressure */ uint64_t reserved_1_1 : 1; uint64_t pktdrp : 1; /**< Packet Dropped due to QOS */ #else uint64_t pktdrp : 1; uint64_t reserved_1_1 : 1; uint64_t bckprs : 1; uint64_t prtnxa : 1; uint64_t badtag : 1; uint64_t skprunt : 1; uint64_t todoovr : 1; uint64_t feperr : 1; uint64_t beperr : 1; uint64_t minerr : 1; uint64_t maxerr : 1; uint64_t lenerr : 1; uint64_t punyerr : 1; uint64_t reserved_13_63 : 51; #endif } cn52xx; struct cvmx_pip_int_en_cn52xx cn52xxp1; struct cvmx_pip_int_en_s cn56xx; struct cvmx_pip_int_en_cn56xxp1 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t lenerr : 1; /**< Frame was received with length error */ uint64_t maxerr : 1; /**< Frame was received with length > max_length */ uint64_t minerr : 1; /**< Frame was received with length < min_length */ uint64_t beperr : 1; /**< Parity Error in back end memory */ uint64_t feperr : 1; /**< Parity Error in front end memory */ uint64_t todoovr : 1; /**< Todo list overflow (see PIP_BCK_PRS[HIWATER]) */ uint64_t skprunt : 1; /**< Packet was engulfed by skipper */ uint64_t badtag : 1; /**< A bad tag was sent from IPD */ uint64_t prtnxa : 1; /**< Non-existent port */ uint64_t bckprs : 1; /**< PIP asserted backpressure */ uint64_t crcerr : 1; /**< PIP calculated bad CRC (Disabled in 56xx) */ uint64_t pktdrp : 1; /**< Packet Dropped due to QOS */ #else uint64_t pktdrp : 1; uint64_t crcerr : 1; uint64_t bckprs : 1; uint64_t prtnxa : 1; uint64_t badtag : 1; uint64_t skprunt : 1; uint64_t todoovr : 1; uint64_t feperr : 1; uint64_t beperr : 1; uint64_t minerr : 1; uint64_t maxerr : 1; uint64_t lenerr : 1; uint64_t reserved_12_63 : 52; #endif } cn56xxp1; struct cvmx_pip_int_en_cn58xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_13_63 : 51; uint64_t punyerr : 1; /**< Frame was received with length <=4B when CRC stripping in IPD is enabled */ uint64_t reserved_9_11 : 3; uint64_t beperr : 1; /**< Parity Error in back end memory */ uint64_t feperr : 1; /**< Parity Error in front end memory */ uint64_t todoovr : 1; /**< Todo list overflow (see PIP_BCK_PRS[HIWATER]) */ uint64_t skprunt : 1; /**< Packet was engulfed by skipper */ uint64_t badtag : 1; /**< A bad tag was sent from IPD */ uint64_t prtnxa : 1; /**< Non-existent port */ uint64_t bckprs : 1; /**< PIP asserted backpressure */ uint64_t crcerr : 1; /**< PIP calculated bad CRC */ uint64_t pktdrp : 1; /**< Packet Dropped due to QOS */ #else uint64_t pktdrp : 1; uint64_t crcerr : 1; uint64_t bckprs : 1; uint64_t prtnxa : 1; uint64_t badtag : 1; uint64_t skprunt : 1; uint64_t todoovr : 1; uint64_t feperr : 1; uint64_t beperr : 1; uint64_t reserved_9_11 : 3; uint64_t punyerr : 1; uint64_t reserved_13_63 : 51; #endif } cn58xx; struct cvmx_pip_int_en_cn30xx cn58xxp1; } cvmx_pip_int_en_t; /** * cvmx_pip_int_reg * * PIP_INT_REG = PIP's Interrupt Register * * Any exception event that occurs is captured in the PIP_INT_REG.
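 *
 * A minimal service sketch (illustrative only; it assumes the generic
 * cvmx_read_csr()/cvmx_write_csr() accessors, the CVMX_PIP_INT_REG address
 * macro from the companion cvmx-csr headers, and the usual Octeon
 * write-one-to-clear convention for interrupt summary CSRs):
 *
 *   cvmx_pip_int_reg_t isr;
 *   isr.u64 = cvmx_read_csr(CVMX_PIP_INT_REG);
 *   if (isr.s.badtag)
 *       ... log and recover ...
 *   cvmx_write_csr(CVMX_PIP_INT_REG, isr.u64);   acknowledge what was seen
 *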
 * PIP_INT_REG will set the exception bit regardless of the value * of PIP_INT_EN. PIP_INT_EN only controls if an interrupt is * raised to software. * * Notes: * * TODOOVR * The PIP Todo list stores packets that have been received and require work * queue entry generation. * * * SKPRUNT * If a packet size is less than the amount programmed in the per port * skippers, then there will be nothing to parse and the entire packet will * basically be skipped over. This is probably not what the user desired, so * there is an indication to software. * * * BADTAG * A tag is considered bad when it is reused by a new packet before it was * released by PIP. PIP considers a tag released by one of two methods. * . QOS dropped so that it is released over the pip__ipd_release bus. * . WorkQ entry is validated by the pip__ipd_done signal * * * PRTNXA * If PIP receives a packet that is not in the valid port range, the port * processed will be mapped into the valid port space (the mapping is * currently unpredictable) and the PRTNXA bit will be set. PRTNXA will be * set for packets received under the following conditions: * * * packet ports (ports 0-31) * - GMX_INF_MODE[TYPE]==0 (SGMII), received port is 4-31 * - GMX_INF_MODE[TYPE]==1 (XAUI), received port is 1-31 * * upper ports (pci and loopback ports 32-63) * - received port is 40-47 or 52-63 * * * BCKPRS * PIP can assert backpressure to the receive logic when the todo list * exceeds a high-water mark. When this * occurs, PIP can raise an interrupt to software. * * * PKTDRP * PIP can drop packets based on QOS results received from IPD. If the QOS * algorithm decides to drop a packet, PIP will assert an interrupt. */ typedef union { uint64_t u64; struct cvmx_pip_int_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_13_63 : 51; uint64_t punyerr : 1; /**< Frame was received with length <=4B when CRC stripping in IPD is enabled */ uint64_t lenerr : 1; /**< Frame was received with length error */ uint64_t maxerr : 1; /**< Frame was received with length > max_length */ uint64_t minerr : 1; /**< Frame was received with length < min_length */ uint64_t beperr : 1; /**< Parity Error in back end memory */ uint64_t feperr : 1; /**< Parity Error in front end memory */ uint64_t todoovr : 1; /**< Todo list overflow (see PIP_BCK_PRS[HIWATER]) */ uint64_t skprunt : 1; /**< Packet was engulfed by skipper This interrupt can occur with received PARTIAL packets that are truncated to SKIP bytes or smaller. */ uint64_t badtag : 1; /**< A bad tag was sent from IPD */ uint64_t prtnxa : 1; /**< Non-existent port */ uint64_t bckprs : 1; /**< PIP asserted backpressure */ uint64_t crcerr : 1; /**< PIP calculated bad CRC */ uint64_t pktdrp : 1; /**< Packet Dropped due to QOS */ #else uint64_t pktdrp : 1; uint64_t crcerr : 1; uint64_t bckprs : 1; uint64_t prtnxa : 1; uint64_t badtag : 1; uint64_t skprunt : 1; uint64_t todoovr : 1; uint64_t feperr : 1; uint64_t beperr : 1; uint64_t minerr : 1; uint64_t maxerr : 1; uint64_t lenerr : 1; uint64_t punyerr : 1; uint64_t reserved_13_63 : 51; #endif } s; struct cvmx_pip_int_reg_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_9_63 : 55; uint64_t beperr : 1; /**< Parity Error in back end memory */ uint64_t feperr : 1; /**< Parity Error in front end memory */ uint64_t todoovr : 1; /**< Todo list overflow (not used in O2P) */ uint64_t skprunt : 1; /**< Packet was engulfed by skipper This interrupt can occur with received PARTIAL packets that are truncated to SKIP bytes or smaller. 
*/ uint64_t badtag : 1; /**< A bad tag was sent from IPD */ uint64_t prtnxa : 1; /**< Non-existent port */ uint64_t bckprs : 1; /**< PIP asserted backpressure (not used in O2P) */ uint64_t crcerr : 1; /**< PIP calculated bad CRC (not used in O2P) */ uint64_t pktdrp : 1; /**< Packet Dropped due to QOS */ #else uint64_t pktdrp : 1; uint64_t crcerr : 1; uint64_t bckprs : 1; uint64_t prtnxa : 1; uint64_t badtag : 1; uint64_t skprunt : 1; uint64_t todoovr : 1; uint64_t feperr : 1; uint64_t beperr : 1; uint64_t reserved_9_63 : 55; #endif } cn30xx; struct cvmx_pip_int_reg_cn30xx cn31xx; struct cvmx_pip_int_reg_cn30xx cn38xx; struct cvmx_pip_int_reg_cn30xx cn38xxp2; struct cvmx_pip_int_reg_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t lenerr : 1; /**< Frame was received with length error */ uint64_t maxerr : 1; /**< Frame was received with length > max_length */ uint64_t minerr : 1; /**< Frame was received with length < min_length */ uint64_t beperr : 1; /**< Parity Error in back end memory */ uint64_t feperr : 1; /**< Parity Error in front end memory */ uint64_t todoovr : 1; /**< Todo list overflow */ uint64_t skprunt : 1; /**< Packet was engulfed by skipper This interrupt can occur with received PARTIAL packets that are truncated to SKIP bytes or smaller. */ uint64_t badtag : 1; /**< A bad tag was sent from IPD */ uint64_t prtnxa : 1; /**< Non-existent port */ uint64_t bckprs : 1; /**< PIP asserted backpressure */ uint64_t reserved_1_1 : 1; uint64_t pktdrp : 1; /**< Packet Dropped due to QOS */ #else uint64_t pktdrp : 1; uint64_t reserved_1_1 : 1; uint64_t bckprs : 1; uint64_t prtnxa : 1; uint64_t badtag : 1; uint64_t skprunt : 1; uint64_t todoovr : 1; uint64_t feperr : 1; uint64_t beperr : 1; uint64_t minerr : 1; uint64_t maxerr : 1; uint64_t lenerr : 1; uint64_t reserved_12_63 : 52; #endif } cn50xx; struct cvmx_pip_int_reg_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_13_63 : 51; uint64_t punyerr : 1; /**< Frame was received with length <=4B when CRC stripping in IPD is enabled */ uint64_t lenerr : 1; /**< Frame was received with length error */ uint64_t maxerr : 1; /**< Frame was received with length > max_length */ uint64_t minerr : 1; /**< Frame was received with length < min_length */ uint64_t beperr : 1; /**< Parity Error in back end memory */ uint64_t feperr : 1; /**< Parity Error in front end memory */ uint64_t todoovr : 1; /**< Todo list overflow */ uint64_t skprunt : 1; /**< Packet was engulfed by skipper This interrupt can occur with received PARTIAL packets that are truncated to SKIP bytes or smaller. 
*/ uint64_t badtag : 1; /**< A bad tag was sent from IPD */ uint64_t prtnxa : 1; /**< Non-existent port */ uint64_t bckprs : 1; /**< PIP asserted backpressure */ uint64_t reserved_1_1 : 1; uint64_t pktdrp : 1; /**< Packet Dropped due to QOS */ #else uint64_t pktdrp : 1; uint64_t reserved_1_1 : 1; uint64_t bckprs : 1; uint64_t prtnxa : 1; uint64_t badtag : 1; uint64_t skprunt : 1; uint64_t todoovr : 1; uint64_t feperr : 1; uint64_t beperr : 1; uint64_t minerr : 1; uint64_t maxerr : 1; uint64_t lenerr : 1; uint64_t punyerr : 1; uint64_t reserved_13_63 : 51; #endif } cn52xx; struct cvmx_pip_int_reg_cn52xx cn52xxp1; struct cvmx_pip_int_reg_s cn56xx; struct cvmx_pip_int_reg_cn56xxp1 { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t lenerr : 1; /**< Frame was received with length error */ uint64_t maxerr : 1; /**< Frame was received with length > max_length */ uint64_t minerr : 1; /**< Frame was received with length < min_length */ uint64_t beperr : 1; /**< Parity Error in back end memory */ uint64_t feperr : 1; /**< Parity Error in front end memory */ uint64_t todoovr : 1; /**< Todo list overflow (see PIP_BCK_PRS[HIWATER]) */ uint64_t skprunt : 1; /**< Packet was engulfed by skipper This interrupt can occur with received PARTIAL packets that are truncated to SKIP bytes or smaller. */ uint64_t badtag : 1; /**< A bad tag was sent from IPD */ uint64_t prtnxa : 1; /**< Non-existent port */ uint64_t bckprs : 1; /**< PIP asserted backpressure */ uint64_t crcerr : 1; /**< PIP calculated bad CRC (Disabled in 56xx) */ uint64_t pktdrp : 1; /**< Packet Dropped due to QOS */ #else uint64_t pktdrp : 1; uint64_t crcerr : 1; uint64_t bckprs : 1; uint64_t prtnxa : 1; uint64_t badtag : 1; uint64_t skprunt : 1; uint64_t todoovr : 1; uint64_t feperr : 1; uint64_t beperr : 1; uint64_t minerr : 1; uint64_t maxerr : 1; uint64_t lenerr : 1; uint64_t reserved_12_63 : 52; #endif } cn56xxp1; struct cvmx_pip_int_reg_cn58xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_13_63 : 51; uint64_t punyerr : 1; /**< Frame was received with length <=4B when CRC stripping in IPD is enabled */ uint64_t reserved_9_11 : 3; uint64_t beperr : 1; /**< Parity Error in back end memory */ uint64_t feperr : 1; /**< Parity Error in front end memory */ uint64_t todoovr : 1; /**< Todo list overflow (see PIP_BCK_PRS[HIWATER]) */ uint64_t skprunt : 1; /**< Packet was engulfed by skipper This interrupt can occur with received PARTIAL packets that are truncated to SKIP bytes or smaller. */ uint64_t badtag : 1; /**< A bad tag was sent from IPD */ uint64_t prtnxa : 1; /**< Non-existent port */ uint64_t bckprs : 1; /**< PIP asserted backpressure */ uint64_t crcerr : 1; /**< PIP calculated bad CRC */ uint64_t pktdrp : 1; /**< Packet Dropped due to QOS */ #else uint64_t pktdrp : 1; uint64_t crcerr : 1; uint64_t bckprs : 1; uint64_t prtnxa : 1; uint64_t badtag : 1; uint64_t skprunt : 1; uint64_t todoovr : 1; uint64_t feperr : 1; uint64_t beperr : 1; uint64_t reserved_9_11 : 3; uint64_t punyerr : 1; uint64_t reserved_13_63 : 51; #endif } cn58xx; struct cvmx_pip_int_reg_cn30xx cn58xxp1; } cvmx_pip_int_reg_t; /** * cvmx_pip_ip_offset * * PIP_IP_OFFSET = Location of the IP in the workQ entry * * An 8-byte offset to find the start of the IP header in the data portion of IP workQ entries * * Notes: * In normal configurations, OFFSET must be set in the 0..4 range to allow the * entire IP and TCP/UDP headers to be buffered in HW and calculate the L4 * checksum for TCP/UDP packets.
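 *
 * For example, a minimal sketch (illustrative only; it assumes the
 * CVMX_PIP_IP_OFFSET address macro and the generic cvmx_write_csr()
 * accessor from the companion cvmx-csr headers) that places the IP header
 * at WORD6 of the work queue entry:
 *
 *   cvmx_pip_ip_offset_t ip_off;
 *   ip_off.u64 = 0;
 *   ip_off.s.offset = 2;       16 bytes of pre-IP data, IP starts at WORD6
 *   cvmx_write_csr(CVMX_PIP_IP_OFFSET, ip_off.u64);
 *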
 * * The MAX value of OFFSET is determined by the types of packets that can * be sent to PIP as follows... * * Packet Type MAX OFFSET * IPv4/TCP/UDP 7 * IPv6/TCP/UDP 5 * IPv6/without L4 parsing 6 * * If the L4 can be ignored, then the MAX OFFSET for IPv6 packets can increase * to 6. The following programming restrictions apply for IPv6 packets and * OFFSET==6: * * . PIP_GBL_CTL[TCP_FLAG] == 0 * . PIP_GBL_CTL[L4_LEN] == 0 * . PIP_GBL_CTL[L4_CHK] == 0 * . PIP_GBL_CTL[L4_PRT] == 0 * . PIP_GBL_CTL[L4_MAL] == 0 * . PIP_DEC_IPSEC[TCP] == 0 * . PIP_DEC_IPSEC[UDP] == 0 * . PIP_PRT_TAG[IP6_DPRT] == 0 * . PIP_PRT_TAG[IP6_SPRT] == 0 * . PIP_PRT_TAG[TCP6_TAG] == 0 * . PIP_GBL_CFG[TAG_SYN] == 0 */ typedef union { uint64_t u64; struct cvmx_pip_ip_offset_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_3_63 : 61; uint64_t offset : 3; /**< Number of 8B ticks to include in workQ entry prior to IP data - 0: 0 Bytes / IP start at WORD4 of workQ entry - 1: 8 Bytes / IP start at WORD5 of workQ entry - 2: 16 Bytes / IP start at WORD6 of workQ entry - 3: 24 Bytes / IP start at WORD7 of workQ entry - 4: 32 Bytes / IP start at WORD8 of workQ entry - 5: 40 Bytes / IP start at WORD9 of workQ entry - 6: 48 Bytes / IP start at WORD10 of workQ entry - 7: 56 Bytes / IP start at WORD11 of workQ entry */ #else uint64_t offset : 3; uint64_t reserved_3_63 : 61; #endif } s; struct cvmx_pip_ip_offset_s cn30xx; struct cvmx_pip_ip_offset_s cn31xx; struct cvmx_pip_ip_offset_s cn38xx; struct cvmx_pip_ip_offset_s cn38xxp2; struct cvmx_pip_ip_offset_s cn50xx; struct cvmx_pip_ip_offset_s cn52xx; struct cvmx_pip_ip_offset_s cn52xxp1; struct cvmx_pip_ip_offset_s cn56xx; struct cvmx_pip_ip_offset_s cn56xxp1; struct cvmx_pip_ip_offset_s cn58xx; struct cvmx_pip_ip_offset_s cn58xxp1; } cvmx_pip_ip_offset_t; /** * cvmx_pip_prt_cfg# * * PIP_PRT_CFGX = Per port config information * */ typedef union { uint64_t u64; struct cvmx_pip_prt_cfgx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_53_63 : 11; uint64_t pad_len : 1; /**< When set, disables the length check for pkts with padding in the client data */ uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */ uint64_t lenerr_en : 1; /**< L2 length error check enable Frame was received with length error */ uint64_t maxerr_en : 1; /**< Max frame error check enable Frame was received with length > max_length */ uint64_t minerr_en : 1; /**< Min frame error check enable Frame was received with length < min_length */ uint64_t grp_wat_47 : 4; /**< GRP Watcher enable (Watchers 4-7) */ uint64_t qos_wat_47 : 4; /**< QOS Watcher enable (Watchers 4-7) */ uint64_t reserved_37_39 : 3; uint64_t rawdrp : 1; /**< Allow the IPD to RED drop a packet. Normally, IPD will never drop a packet that PIP indicates is RAW. 
0=never drop RAW packets based on RED algorithm 1=allow RAW packet drops based on RED algorithm (PASS2 only) */ uint64_t tag_inc : 2; /**< Which of the 4 PIP_TAG_INC to use when calculating mask tag hash (PASS2 only) */ uint64_t dyn_rs : 1; /**< Dynamically calculate RS based on pkt size (PASS2 only) */ uint64_t inst_hdr : 1; /**< 8-byte INST_HDR is present on all packets (not for PCI prts, 32-35) (PASS2 only) */ uint64_t grp_wat : 4; /**< GRP Watcher enable (PASS2 only) */ uint64_t hg_qos : 1; /**< When set, uses the HiGig priority bits as a lookup into the HG_QOS_TABLE (PIP_HG_PRI_QOS) to determine the QOS value HG_QOS must not be set when HIGIG_EN=0 (56xx pass2 only) */ uint64_t qos : 3; /**< Default QOS level of the port */ uint64_t qos_wat : 4; /**< QOS Watcher enable */ uint64_t qos_vsel : 1; /**< Which QOS in PIP_QOS_VLAN to use 0 = PIP_QOS_VLAN[QOS] 1 = PIP_QOS_VLAN[QOS1] (56xx pass2 only) */ uint64_t qos_vod : 1; /**< QOS VLAN over Diffserv if VLAN exists, it is used else if IP exists, Diffserv is used else the per port default is used Watchers are still highest priority */ uint64_t qos_diff : 1; /**< QOS Diffserv */ uint64_t qos_vlan : 1; /**< QOS VLAN */ uint64_t reserved_13_15 : 3; uint64_t crc_en : 1; /**< CRC Checking enabled (for ports 0-31 only) */ uint64_t higig_en : 1; /**< Enable HiGig parsing Should not be set for PCIe ports (ports 32-35) When HIGIG_EN=1: DSA_EN field below must be zero SKIP field below is both Skip I size and the size of the HiGig* header (12 or 16 bytes) (56xx pass2 only) */ uint64_t dsa_en : 1; /**< Enable DSA tag parsing When DSA_EN=1: HIGIG_EN field above must be zero SKIP field below is size of DSA tag (4, 8, or 12 bytes) rather than the size of Skip I total SKIP (Skip I + header + Skip II) must be zero INST_HDR field above must be zero (non-PCIe ports) For PCIe ports, NPEI_PKT*_INSTR_HDR[USE_IHDR] and PCIE_INST_HDR[R] should be clear MODE field below must be "skip to L2" (56xx pass2 only) */ cvmx_pip_port_parse_mode_t mode : 2; /**< Parse Mode 0 = no packet inspection (Uninterpreted) 1 = L2 parsing / skip to L2 2 = IP parsing / skip to L3 3 = PCI Raw (illegal for software to set) */ uint64_t reserved_7_7 : 1; uint64_t skip : 7; /**< Optional Skip I amount for packets. Does not apply to packets on PCI ports when a PKT_INST_HDR is present. See section 7.2.7 - Legal Skip Values for further details. */ #else uint64_t skip : 7; uint64_t reserved_7_7 : 1; cvmx_pip_port_parse_mode_t mode : 2; uint64_t dsa_en : 1; uint64_t higig_en : 1; uint64_t crc_en : 1; uint64_t reserved_13_15 : 3; uint64_t qos_vlan : 1; uint64_t qos_diff : 1; uint64_t qos_vod : 1; uint64_t qos_vsel : 1; uint64_t qos_wat : 4; uint64_t qos : 3; uint64_t hg_qos : 1; uint64_t grp_wat : 4; uint64_t inst_hdr : 1; uint64_t dyn_rs : 1; uint64_t tag_inc : 2; uint64_t rawdrp : 1; uint64_t reserved_37_39 : 3; uint64_t qos_wat_47 : 4; uint64_t grp_wat_47 : 4; uint64_t minerr_en : 1; uint64_t maxerr_en : 1; uint64_t lenerr_en : 1; uint64_t vlan_len : 1; uint64_t pad_len : 1; uint64_t reserved_53_63 : 11; #endif } s; struct cvmx_pip_prt_cfgx_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_37_63 : 27; uint64_t rawdrp : 1; /**< Allow the IPD to RED drop a packet. Normally, IPD will never drop a packet that PIP indicates is RAW. 
0=never drop RAW packets based on RED algorithm 1=allow RAW packet drops based on RED algorithm */ uint64_t tag_inc : 2; /**< Which of the 4 PIP_TAG_INC to use when calculating mask tag hash */ uint64_t dyn_rs : 1; /**< Dynamically calculate RS based on pkt size */ uint64_t inst_hdr : 1; /**< 8-byte INST_HDR is present on all packets (not for PCI prts, 32-35) */ uint64_t grp_wat : 4; /**< GRP Watcher enable */ uint64_t reserved_27_27 : 1; uint64_t qos : 3; /**< Default QOS level of the port */ uint64_t qos_wat : 4; /**< QOS Watcher enable */ uint64_t reserved_18_19 : 2; uint64_t qos_diff : 1; /**< QOS Diffserv */ uint64_t qos_vlan : 1; /**< QOS VLAN */ uint64_t reserved_10_15 : 6; cvmx_pip_port_parse_mode_t mode : 2; /**< Parse Mode 0 = no packet inspection (Uninterpreted) 1 = L2 parsing / skip to L2 2 = IP parsing / skip to L3 3 = PCI Raw (illegal for software to set) */ uint64_t reserved_7_7 : 1; uint64_t skip : 7; /**< Optional Skip I amount for packets. Does not apply to packets on PCI ports when a PKT_INST_HDR is present. See section 7.2.7 - Legal Skip Values for further details. */ #else uint64_t skip : 7; uint64_t reserved_7_7 : 1; cvmx_pip_port_parse_mode_t mode : 2; uint64_t reserved_10_15 : 6; uint64_t qos_vlan : 1; uint64_t qos_diff : 1; uint64_t reserved_18_19 : 2; uint64_t qos_wat : 4; uint64_t qos : 3; uint64_t reserved_27_27 : 1; uint64_t grp_wat : 4; uint64_t inst_hdr : 1; uint64_t dyn_rs : 1; uint64_t tag_inc : 2; uint64_t rawdrp : 1; uint64_t reserved_37_63 : 27; #endif } cn30xx; struct cvmx_pip_prt_cfgx_cn30xx cn31xx; struct cvmx_pip_prt_cfgx_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_37_63 : 27; uint64_t rawdrp : 1; /**< Allow the IPD to RED drop a packet. Normally, IPD will never drop a packet that PIP indicates is RAW. 0=never drop RAW packets based on RED algorithm 1=allow RAW packet drops based on RED algorithm (PASS2 only) */ uint64_t tag_inc : 2; /**< Which of the 4 PIP_TAG_INC to use when calculating mask tag hash (PASS2 only) */ uint64_t dyn_rs : 1; /**< Dynamically calculate RS based on pkt size (PASS2 only) */ uint64_t inst_hdr : 1; /**< 8-byte INST_HDR is present on all packets (not for PCI prts, 32-35) (PASS2 only) */ uint64_t grp_wat : 4; /**< GRP Watcher enable (PASS2 only) */ uint64_t reserved_27_27 : 1; uint64_t qos : 3; /**< Default QOS level of the port */ uint64_t qos_wat : 4; /**< QOS Watcher enable */ uint64_t reserved_18_19 : 2; uint64_t qos_diff : 1; /**< QOS Diffserv */ uint64_t qos_vlan : 1; /**< QOS VLAN */ uint64_t reserved_13_15 : 3; uint64_t crc_en : 1; /**< CRC Checking enabled (for ports 0-31 only) */ uint64_t reserved_10_11 : 2; cvmx_pip_port_parse_mode_t mode : 2; /**< Parse Mode 0 = no packet inspection (Uninterpreted) 1 = L2 parsing / skip to L2 2 = IP parsing / skip to L3 3 = PCI Raw (illegal for software to set) */ uint64_t reserved_7_7 : 1; uint64_t skip : 7; /**< Optional Skip I amount for packets. Does not apply to packets on PCI ports when a PKT_INST_HDR is present. See section 7.2.7 - Legal Skip Values for further details. 
*/ #else uint64_t skip : 7; uint64_t reserved_7_7 : 1; cvmx_pip_port_parse_mode_t mode : 2; uint64_t reserved_10_11 : 2; uint64_t crc_en : 1; uint64_t reserved_13_15 : 3; uint64_t qos_vlan : 1; uint64_t qos_diff : 1; uint64_t reserved_18_19 : 2; uint64_t qos_wat : 4; uint64_t qos : 3; uint64_t reserved_27_27 : 1; uint64_t grp_wat : 4; uint64_t inst_hdr : 1; uint64_t dyn_rs : 1; uint64_t tag_inc : 2; uint64_t rawdrp : 1; uint64_t reserved_37_63 : 27; #endif } cn38xx; struct cvmx_pip_prt_cfgx_cn38xx cn38xxp2; struct cvmx_pip_prt_cfgx_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_53_63 : 11; uint64_t pad_len : 1; /**< When set, disables the length check for pkts with padding in the client data */ uint64_t vlan_len : 1; /**< When set, disables the length check for VLAN pkts */ uint64_t lenerr_en : 1; /**< L2 length error check enable Frame was received with length error */ uint64_t maxerr_en : 1; /**< Max frame error check enable Frame was received with length > max_length */ uint64_t minerr_en : 1; /**< Min frame error check enable Frame was received with length < min_length */ uint64_t grp_wat_47 : 4; /**< GRP Watcher enable (Watchers 4-7) */ uint64_t qos_wat_47 : 4; /**< QOS Watcher enable (Watchers 4-7) */ uint64_t reserved_37_39 : 3; uint64_t rawdrp : 1; /**< Allow the IPD to RED drop a packet. Normally, IPD will never drop a packet that PIP indicates is RAW. 0=never drop RAW packets based on RED algorithm 1=allow RAW packet drops based on RED algorithm */ uint64_t tag_inc : 2; /**< Which of the 4 PIP_TAG_INC to use when calculating mask tag hash */ uint64_t dyn_rs : 1; /**< Dynamically calculate RS based on pkt size */ uint64_t inst_hdr : 1; /**< 8-byte INST_HDR is present on all packets (not for PCI prts, 32-35) */ uint64_t grp_wat : 4; /**< GRP Watcher enable */ uint64_t reserved_27_27 : 1; uint64_t qos : 3; /**< Default QOS level of the port */ uint64_t qos_wat : 4; /**< QOS Watcher enable (Watchers 0-3) */ uint64_t reserved_19_19 : 1; uint64_t qos_vod : 1; /**< QOS VLAN over Diffserv if VLAN exists, it is used else if IP exists, Diffserv is used else the per port default is used Watchers are still highest priority */ uint64_t qos_diff : 1; /**< QOS Diffserv */ uint64_t qos_vlan : 1; /**< QOS VLAN */ uint64_t reserved_13_15 : 3; uint64_t crc_en : 1; /**< CRC Checking enabled (Disabled in 5020) */ uint64_t reserved_10_11 : 2; cvmx_pip_port_parse_mode_t mode : 2; /**< Parse Mode 0 = no packet inspection (Uninterpreted) 1 = L2 parsing / skip to L2 2 = IP parsing / skip to L3 3 = PCI Raw (illegal for software to set) */ uint64_t reserved_7_7 : 1; uint64_t skip : 7; /**< Optional Skip I amount for packets. Does not apply to packets on PCI ports when a PKT_INST_HDR is present. See section 7.2.7 - Legal Skip Values for further details. 
*/ #else uint64_t skip : 7; uint64_t reserved_7_7 : 1; cvmx_pip_port_parse_mode_t mode : 2; uint64_t reserved_10_11 : 2; uint64_t crc_en : 1; uint64_t reserved_13_15 : 3; uint64_t qos_vlan : 1; uint64_t qos_diff : 1; uint64_t qos_vod : 1; uint64_t reserved_19_19 : 1; uint64_t qos_wat : 4; uint64_t qos : 3; uint64_t reserved_27_27 : 1; uint64_t grp_wat : 4; uint64_t inst_hdr : 1; uint64_t dyn_rs : 1; uint64_t tag_inc : 2; uint64_t rawdrp : 1; uint64_t reserved_37_39 : 3; uint64_t qos_wat_47 : 4; uint64_t grp_wat_47 : 4; uint64_t minerr_en : 1; uint64_t maxerr_en : 1; uint64_t lenerr_en : 1; uint64_t vlan_len : 1; uint64_t pad_len : 1; uint64_t reserved_53_63 : 11; #endif } cn50xx; struct cvmx_pip_prt_cfgx_s cn52xx; struct cvmx_pip_prt_cfgx_s cn52xxp1; struct cvmx_pip_prt_cfgx_s cn56xx; struct cvmx_pip_prt_cfgx_cn50xx cn56xxp1; struct cvmx_pip_prt_cfgx_cn58xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_37_63 : 27; uint64_t rawdrp : 1; /**< Allow the IPD to RED drop a packet. Normally, IPD will never drop a packet that PIP indicates is RAW. 0=never drop RAW packets based on RED algorithm 1=allow RAW packet drops based on RED algorithm (PASS2 only) */ uint64_t tag_inc : 2; /**< Which of the 4 PIP_TAG_INC to use when calculating mask tag hash (PASS2 only) */ uint64_t dyn_rs : 1; /**< Dynamically calculate RS based on pkt size (PASS2 only) */ uint64_t inst_hdr : 1; /**< 8-byte INST_HDR is present on all packets (not for PCI prts, 32-35) (PASS2 only) */ uint64_t grp_wat : 4; /**< GRP Watcher enable (PASS2 only) */ uint64_t reserved_27_27 : 1; uint64_t qos : 3; /**< Default QOS level of the port */ uint64_t qos_wat : 4; /**< QOS Watcher enable */ uint64_t reserved_19_19 : 1; uint64_t qos_vod : 1; /**< QOS VLAN over Diffserv if VLAN exists, it is used else if IP exists, Diffserv is used else the per port default is used Watchers are still highest priority */ uint64_t qos_diff : 1; /**< QOS Diffserv */ uint64_t qos_vlan : 1; /**< QOS VLAN */ uint64_t reserved_13_15 : 3; uint64_t crc_en : 1; /**< CRC Checking enabled (for ports 0-31 only) */ uint64_t reserved_10_11 : 2; cvmx_pip_port_parse_mode_t mode : 2; /**< Parse Mode 0 = no packet inspection (Uninterpreted) 1 = L2 parsing / skip to L2 2 = IP parsing / skip to L3 3 = PCI Raw (illegal for software to set) */ uint64_t reserved_7_7 : 1; uint64_t skip : 7; /**< Optional Skip I amount for packets. Does not apply to packets on PCI ports when a PKT_INST_HDR is present. See section 7.2.7 - Legal Skip Values for further details. */ #else uint64_t skip : 7; uint64_t reserved_7_7 : 1; cvmx_pip_port_parse_mode_t mode : 2; uint64_t reserved_10_11 : 2; uint64_t crc_en : 1; uint64_t reserved_13_15 : 3; uint64_t qos_vlan : 1; uint64_t qos_diff : 1; uint64_t qos_vod : 1; uint64_t reserved_19_19 : 1; uint64_t qos_wat : 4; uint64_t qos : 3; uint64_t reserved_27_27 : 1; uint64_t grp_wat : 4; uint64_t inst_hdr : 1; uint64_t dyn_rs : 1; uint64_t tag_inc : 2; uint64_t rawdrp : 1; uint64_t reserved_37_63 : 27; #endif } cn58xx; struct cvmx_pip_prt_cfgx_cn58xx cn58xxp1; } cvmx_pip_prt_cfgx_t; /** * cvmx_pip_prt_tag# * * PIP_PRT_TAGX = Per port config information * */ typedef union { uint64_t u64; struct cvmx_pip_prt_tagx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_40_63 : 24; uint64_t grptagbase : 4; /**< Offset to use when computing group from tag bits when GRPTAG is set. (PASS2 only) */ uint64_t grptagmask : 4; /**< Which bits of the tag to exclude when computing group when GRPTAG is set. 
(PASS2 only) */ uint64_t grptag : 1; /**< When set, use the lower bit of the tag to compute the group in the work queue entry GRP = WQE[TAG[3:0]] & ~GRPTAGMASK + GRPTAGBASE (PASS2 only) */ uint64_t grptag_mskip : 1; /**< When set, GRPTAG will be used regardless if the packet IS_IP. */ uint64_t tag_mode : 2; /**< Which tag algorithm to use 0 = always use tuple tag algorithm 1 = always use mask tag algorithm 2 = if packet is IP, use tuple else use mask 3 = tuple XOR mask (PASS2 only) */ uint64_t inc_vs : 2; /**< determines the VLAN ID (VID) to be included in tuple tag when VLAN stacking is detected 0 = do not include VID in tuple tag generation 1 = include VID (VLAN0) in hash 2 = include VID (VLAN1) in hash 3 = include VID ([VLAN0,VLAN1]) in hash (PASS2 only) */ uint64_t inc_vlan : 1; /**< when set, the VLAN ID is included in tuple tag when VLAN stacking is not detected 0 = do not include VID in tuple tag generation 1 = include VID in hash (PASS2 only) */ uint64_t inc_prt_flag : 1; /**< sets whether the port is included in tuple tag */ uint64_t ip6_dprt_flag : 1; /**< sets whether the TCP/UDP dst port is included in tuple tag for IPv6 packets */ uint64_t ip4_dprt_flag : 1; /**< sets whether the TCP/UDP dst port is included in tuple tag for IPv4 */ uint64_t ip6_sprt_flag : 1; /**< sets whether the TCP/UDP src port is included in tuple tag for IPv6 packets */ uint64_t ip4_sprt_flag : 1; /**< sets whether the TCP/UDP src port is included in tuple tag for IPv4 */ uint64_t ip6_nxth_flag : 1; /**< sets whether ipv6 includes next header in tuple tag hash */ uint64_t ip4_pctl_flag : 1; /**< sets whether ipv4 includes protocol in tuple tag hash */ uint64_t ip6_dst_flag : 1; /**< sets whether ipv6 includes dst address in tuple tag hash */ uint64_t ip4_dst_flag : 1; /**< sets whether ipv4 includes dst address in tuple tag hash */ uint64_t ip6_src_flag : 1; /**< sets whether ipv6 includes src address in tuple tag hash */ uint64_t ip4_src_flag : 1; /**< sets whether ipv4 includes src address in tuple tag hash */ cvmx_pow_tag_type_t tcp6_tag_type : 2; /**< sets the tag_type of a TCP packet (IPv6) 0 = ordered tags 1 = atomic tags 2 = Null tags */ cvmx_pow_tag_type_t tcp4_tag_type : 2; /**< sets the tag_type of a TCP packet (IPv4) 0 = ordered tags 1 = atomic tags 2 = Null tags */ cvmx_pow_tag_type_t ip6_tag_type : 2; /**< sets whether IPv6 packet tag type 0 = ordered tags 1 = atomic tags 2 = Null tags */ cvmx_pow_tag_type_t ip4_tag_type : 2; /**< sets whether IPv4 packet tag type 0 = ordered tags 1 = atomic tags 2 = Null tags */ cvmx_pow_tag_type_t non_tag_type : 2; /**< sets whether non-IP packet tag type 0 = ordered tags 1 = atomic tags 2 = Null tags */ uint64_t grp : 4; /**< 4-bit value indicating the group to schedule to */ #else uint64_t grp : 4; cvmx_pow_tag_type_t non_tag_type : 2; cvmx_pow_tag_type_t ip4_tag_type : 2; cvmx_pow_tag_type_t ip6_tag_type : 2; cvmx_pow_tag_type_t tcp4_tag_type : 2; cvmx_pow_tag_type_t tcp6_tag_type : 2; uint64_t ip4_src_flag : 1; uint64_t ip6_src_flag : 1; uint64_t ip4_dst_flag : 1; uint64_t ip6_dst_flag : 1; uint64_t ip4_pctl_flag : 1; uint64_t ip6_nxth_flag : 1; uint64_t ip4_sprt_flag : 1; uint64_t ip6_sprt_flag : 1; uint64_t ip4_dprt_flag : 1; uint64_t ip6_dprt_flag : 1; uint64_t inc_prt_flag : 1; uint64_t inc_vlan : 1; uint64_t inc_vs : 2; uint64_t tag_mode : 2; uint64_t grptag_mskip : 1; uint64_t grptag : 1; uint64_t grptagmask : 4; uint64_t grptagbase : 4; uint64_t reserved_40_63 : 24; #endif } s; struct cvmx_pip_prt_tagx_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN 
uint64_t reserved_40_63 : 24; uint64_t grptagbase : 4; /**< Offset to use when computing group from tag bits when GRPTAG is set. */ uint64_t grptagmask : 4; /**< Which bits of the tag to exclude when computing group when GRPTAG is set. */ uint64_t grptag : 1; /**< When set, use the lower bit of the tag to compute the group in the work queue entry GRP = WQE[TAG[3:0]] & ~GRPTAGMASK + GRPTAGBASE */ uint64_t reserved_30_30 : 1; uint64_t tag_mode : 2; /**< Which tag algorithm to use 0 = always use tuple tag algorithm 1 = always use mask tag algorithm 2 = if packet is IP, use tuple else use mask 3 = tuple XOR mask */ uint64_t inc_vs : 2; /**< determines the VLAN ID (VID) to be included in tuple tag when VLAN stacking is detected 0 = do not include VID in tuple tag generation 1 = include VID (VLAN0) in hash 2 = include VID (VLAN1) in hash 3 = include VID ([VLAN0,VLAN1]) in hash */ uint64_t inc_vlan : 1; /**< when set, the VLAN ID is included in tuple tag when VLAN stacking is not detected 0 = do not include VID in tuple tag generation 1 = include VID in hash */ uint64_t inc_prt_flag : 1; /**< sets whether the port is included in tuple tag */ uint64_t ip6_dprt_flag : 1; /**< sets whether the TCP/UDP dst port is included in tuple tag for IPv6 packets */ uint64_t ip4_dprt_flag : 1; /**< sets whether the TCP/UDP dst port is included in tuple tag for IPv4 */ uint64_t ip6_sprt_flag : 1; /**< sets whether the TCP/UDP src port is included in tuple tag for IPv6 packets */ uint64_t ip4_sprt_flag : 1; /**< sets whether the TCP/UDP src port is included in tuple tag for IPv4 */ uint64_t ip6_nxth_flag : 1; /**< sets whether ipv6 includes next header in tuple tag hash */ uint64_t ip4_pctl_flag : 1; /**< sets whether ipv4 includes protocol in tuple tag hash */ uint64_t ip6_dst_flag : 1; /**< sets whether ipv6 includes dst address in tuple tag hash */ uint64_t ip4_dst_flag : 1; /**< sets whether ipv4 includes dst address in tuple tag hash */ uint64_t ip6_src_flag : 1; /**< sets whether ipv6 includes src address in tuple tag hash */ uint64_t ip4_src_flag : 1; /**< sets whether ipv4 includes src address in tuple tag hash */ cvmx_pow_tag_type_t tcp6_tag_type : 2; /**< sets the tag_type of a TCP packet (IPv6) 0 = ordered tags 1 = atomic tags 2 = Null tags */ cvmx_pow_tag_type_t tcp4_tag_type : 2; /**< sets the tag_type of a TCP packet (IPv4) 0 = ordered tags 1 = atomic tags 2 = Null tags */ cvmx_pow_tag_type_t ip6_tag_type : 2; /**< sets whether IPv6 packet tag type 0 = ordered tags 1 = atomic tags 2 = Null tags */ cvmx_pow_tag_type_t ip4_tag_type : 2; /**< sets whether IPv4 packet tag type 0 = ordered tags 1 = atomic tags 2 = Null tags */ cvmx_pow_tag_type_t non_tag_type : 2; /**< sets whether non-IP packet tag type 0 = ordered tags 1 = atomic tags 2 = Null tags */ uint64_t grp : 4; /**< 4-bit value indicating the group to schedule to */ #else uint64_t grp : 4; cvmx_pow_tag_type_t non_tag_type : 2; cvmx_pow_tag_type_t ip4_tag_type : 2; cvmx_pow_tag_type_t ip6_tag_type : 2; cvmx_pow_tag_type_t tcp4_tag_type : 2; cvmx_pow_tag_type_t tcp6_tag_type : 2; uint64_t ip4_src_flag : 1; uint64_t ip6_src_flag : 1; uint64_t ip4_dst_flag : 1; uint64_t ip6_dst_flag : 1; uint64_t ip4_pctl_flag : 1; uint64_t ip6_nxth_flag : 1; uint64_t ip4_sprt_flag : 1; uint64_t ip6_sprt_flag : 1; uint64_t ip4_dprt_flag : 1; uint64_t ip6_dprt_flag : 1; uint64_t inc_prt_flag : 1; uint64_t inc_vlan : 1; uint64_t inc_vs : 2; uint64_t tag_mode : 2; uint64_t reserved_30_30 : 1; uint64_t grptag : 1; uint64_t grptagmask : 4; uint64_t grptagbase : 4; 
uint64_t reserved_40_63 : 24; #endif } cn30xx; struct cvmx_pip_prt_tagx_cn30xx cn31xx; struct cvmx_pip_prt_tagx_cn30xx cn38xx; struct cvmx_pip_prt_tagx_cn30xx cn38xxp2; struct cvmx_pip_prt_tagx_s cn50xx; struct cvmx_pip_prt_tagx_s cn52xx; struct cvmx_pip_prt_tagx_s cn52xxp1; struct cvmx_pip_prt_tagx_s cn56xx; struct cvmx_pip_prt_tagx_s cn56xxp1; struct cvmx_pip_prt_tagx_s cn58xx; struct cvmx_pip_prt_tagx_s cn58xxp1; } cvmx_pip_prt_tagx_t; /** * cvmx_pip_qos_diff# * * PIP_QOS_DIFFX = QOS Diffserv Tables * */ typedef union { uint64_t u64; struct cvmx_pip_qos_diffx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_3_63 : 61; uint64_t qos : 3; /**< Diffserv QOS level */ #else uint64_t qos : 3; uint64_t reserved_3_63 : 61; #endif } s; struct cvmx_pip_qos_diffx_s cn30xx; struct cvmx_pip_qos_diffx_s cn31xx; struct cvmx_pip_qos_diffx_s cn38xx; struct cvmx_pip_qos_diffx_s cn38xxp2; struct cvmx_pip_qos_diffx_s cn50xx; struct cvmx_pip_qos_diffx_s cn52xx; struct cvmx_pip_qos_diffx_s cn52xxp1; struct cvmx_pip_qos_diffx_s cn56xx; struct cvmx_pip_qos_diffx_s cn56xxp1; struct cvmx_pip_qos_diffx_s cn58xx; struct cvmx_pip_qos_diffx_s cn58xxp1; } cvmx_pip_qos_diffx_t; /** * cvmx_pip_qos_vlan# * * PIP_QOS_VLANX = QOS VLAN Tables * * If the PIP identifies a packet to be DSA/VLAN tagged, then the QOS * can be set based on the DSA/VLAN user priority. These eight registers * comprise the QOS values for all DSA/VLAN user priority values. */ typedef union { uint64_t u64; struct cvmx_pip_qos_vlanx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_7_63 : 57; uint64_t qos1 : 3; /**< DSA/VLAN QOS level Selected when PIP_PRT_CFGx[QOS_VSEL] = 1 (56xx pass2 only) */ uint64_t reserved_3_3 : 1; uint64_t qos : 3; /**< VLAN QOS level */ #else uint64_t qos : 3; uint64_t reserved_3_3 : 1; uint64_t qos1 : 3; uint64_t reserved_7_63 : 57; #endif } s; struct cvmx_pip_qos_vlanx_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_3_63 : 61; uint64_t qos : 3; /**< VLAN QOS level */ #else uint64_t qos : 3; uint64_t reserved_3_63 : 61; #endif } cn30xx; struct cvmx_pip_qos_vlanx_cn30xx cn31xx; struct cvmx_pip_qos_vlanx_cn30xx cn38xx; struct cvmx_pip_qos_vlanx_cn30xx cn38xxp2; struct cvmx_pip_qos_vlanx_cn30xx cn50xx; struct cvmx_pip_qos_vlanx_s cn52xx; struct cvmx_pip_qos_vlanx_s cn52xxp1; struct cvmx_pip_qos_vlanx_s cn56xx; struct cvmx_pip_qos_vlanx_cn30xx cn56xxp1; struct cvmx_pip_qos_vlanx_cn30xx cn58xx; struct cvmx_pip_qos_vlanx_cn30xx cn58xxp1; } cvmx_pip_qos_vlanx_t; /** * cvmx_pip_qos_watch# * * PIP_QOS_WATCHX = QOS Watcher Tables * * Sets up the Configuration CSRs for the four QOS Watchers. * Each Watcher can be set to look for a specific protocol, * TCP/UDP destination port, or Ethertype to override the * default QOS value.
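 *
 * For example, a minimal sketch (illustrative only; it assumes the
 * CVMX_PIP_QOS_WATCHX(index) address macro and the generic cvmx_write_csr()
 * accessor from the companion cvmx-csr headers) that uses watcher 0 to map
 * all TCP destination port 80 traffic to QOS level 2:
 *
 *   cvmx_pip_qos_watchx_t watch;
 *   watch.u64 = 0;
 *   watch.s.match_type = 2;    TCP destination port
 *   watch.s.match_value = 80;
 *   watch.s.qos = 2;
 *   cvmx_write_csr(CVMX_PIP_QOS_WATCHX(0), watch.u64);
 *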
*/ typedef union { uint64_t u64; struct cvmx_pip_qos_watchx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_48_63 : 16; uint64_t mask : 16; /**< Mask off a range of values (PASS2 only) */ uint64_t reserved_28_31 : 4; uint64_t grp : 4; /**< The GRP number of the watcher (PASS2 only) */ uint64_t reserved_23_23 : 1; uint64_t qos : 3; /**< The QOS level of the watcher */ uint64_t reserved_19_19 : 1; cvmx_pip_qos_watch_types match_type : 3; /**< The field for the watcher match against 0 = disable across all ports 1 = protocol (ipv4) = next_header (ipv6) 2 = TCP destination port 3 = UDP destination port */ uint64_t match_value : 16; /**< The value to watch for */ #else uint64_t match_value : 16; cvmx_pip_qos_watch_types match_type : 3; uint64_t reserved_19_19 : 1; uint64_t qos : 3; uint64_t reserved_23_23 : 1; uint64_t grp : 4; uint64_t reserved_28_31 : 4; uint64_t mask : 16; uint64_t reserved_48_63 : 16; #endif } s; struct cvmx_pip_qos_watchx_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_48_63 : 16; uint64_t mask : 16; /**< Mask off a range of values */ uint64_t reserved_28_31 : 4; uint64_t grp : 4; /**< The GRP number of the watcher */ uint64_t reserved_23_23 : 1; uint64_t qos : 3; /**< The QOS level of the watcher */ uint64_t reserved_18_19 : 2; cvmx_pip_qos_watch_types match_type : 2; /**< The field for the watcher match against 0 = disable across all ports 1 = protocol (ipv4) = next_header (ipv6) 2 = TCP destination port 3 = UDP destination port */ uint64_t match_value : 16; /**< The value to watch for */ #else uint64_t match_value : 16; cvmx_pip_qos_watch_types match_type : 2; uint64_t reserved_18_19 : 2; uint64_t qos : 3; uint64_t reserved_23_23 : 1; uint64_t grp : 4; uint64_t reserved_28_31 : 4; uint64_t mask : 16; uint64_t reserved_48_63 : 16; #endif } cn30xx; struct cvmx_pip_qos_watchx_cn30xx cn31xx; struct cvmx_pip_qos_watchx_cn30xx cn38xx; struct cvmx_pip_qos_watchx_cn30xx cn38xxp2; struct cvmx_pip_qos_watchx_s cn50xx; struct cvmx_pip_qos_watchx_s cn52xx; struct cvmx_pip_qos_watchx_s cn52xxp1; struct cvmx_pip_qos_watchx_s cn56xx; struct cvmx_pip_qos_watchx_s cn56xxp1; struct cvmx_pip_qos_watchx_cn30xx cn58xx; struct cvmx_pip_qos_watchx_cn30xx cn58xxp1; } cvmx_pip_qos_watchx_t; /** * cvmx_pip_raw_word * * PIP_RAW_WORD = The RAW Word2 of the workQ entry. * * The RAW Word2 to be inserted into the workQ entry of RAWFULL packets. */ typedef union { uint64_t u64; struct cvmx_pip_raw_word_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_56_63 : 8; uint64_t word : 56; /**< Word2 of the workQ entry The 8-bit bufs field is still set by HW (IPD) */ #else uint64_t word : 56; uint64_t reserved_56_63 : 8; #endif } s; struct cvmx_pip_raw_word_s cn30xx; struct cvmx_pip_raw_word_s cn31xx; struct cvmx_pip_raw_word_s cn38xx; struct cvmx_pip_raw_word_s cn38xxp2; struct cvmx_pip_raw_word_s cn50xx; struct cvmx_pip_raw_word_s cn52xx; struct cvmx_pip_raw_word_s cn52xxp1; struct cvmx_pip_raw_word_s cn56xx; struct cvmx_pip_raw_word_s cn56xxp1; struct cvmx_pip_raw_word_s cn58xx; struct cvmx_pip_raw_word_s cn58xxp1; } cvmx_pip_raw_word_t; /** * cvmx_pip_sft_rst * * PIP_SFT_RST = PIP Soft Reset * * When written to a '1', resets the pip block * * Notes: * When RST is set to a '1' by SW, PIP will get a short reset pulse (3 cycles * in duration). Although this will reset much of PIP's internal state, some * CSRs will not reset. * * . PIP_BIST_STATUS * . PIP_STAT0_PRT* * . PIP_STAT1_PRT* * . PIP_STAT2_PRT* * . PIP_STAT3_PRT* * . PIP_STAT4_PRT* * . PIP_STAT5_PRT* * . PIP_STAT6_PRT* * . 
PIP_STAT7_PRT* * . PIP_STAT8_PRT* * . PIP_STAT9_PRT* * . PIP_STAT_INB_PKTS* * . PIP_STAT_INB_OCTS* * . PIP_STAT_INB_ERRS* * . PIP_TAG_INC* */ typedef union { uint64_t u64; struct cvmx_pip_sft_rst_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t rst : 1; /**< Soft Reset */ #else uint64_t rst : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_pip_sft_rst_s cn30xx; struct cvmx_pip_sft_rst_s cn31xx; struct cvmx_pip_sft_rst_s cn38xx; struct cvmx_pip_sft_rst_s cn50xx; struct cvmx_pip_sft_rst_s cn52xx; struct cvmx_pip_sft_rst_s cn52xxp1; struct cvmx_pip_sft_rst_s cn56xx; struct cvmx_pip_sft_rst_s cn56xxp1; struct cvmx_pip_sft_rst_s cn58xx; struct cvmx_pip_sft_rst_s cn58xxp1; } cvmx_pip_sft_rst_t; /** * cvmx_pip_stat0_prt# * * PIP Statistics Counters * * Note: special stat counter behavior * * 1) Read and write operations must arbitrate for the statistics resources * along with the packet engines which are incrementing the counters. * In order to not drop packet information, the packet HW always has * higher priority and the CSR requests will only be satisfied when * there are idle cycles. This can potentially cause long delays if the * system becomes full. * * 2) stat counters can be cleared in two ways. If PIP_STAT_CTL[RDCLR] is * set, then all read accesses will clear the register. In addition, * any write to a stats register will also reset the register to zero. * Please note that the clearing operations must obey rule \#1 above. * * 3) all counters are wrapping - software must ensure they are read periodically * PIP_STAT0_PRT = PIP_STAT_DRP_PKTS / PIP_STAT_DRP_OCTS */ typedef union { uint64_t u64; struct cvmx_pip_stat0_prtx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t drp_pkts : 32; /**< Inbound packets marked to be dropped by the IPD QOS widget per port */ uint64_t drp_octs : 32; /**< Inbound octets marked to be dropped by the IPD QOS widget per port */ #else uint64_t drp_octs : 32; uint64_t drp_pkts : 32; #endif } s; struct cvmx_pip_stat0_prtx_s cn30xx; struct cvmx_pip_stat0_prtx_s cn31xx; struct cvmx_pip_stat0_prtx_s cn38xx; struct cvmx_pip_stat0_prtx_s cn38xxp2; struct cvmx_pip_stat0_prtx_s cn50xx; struct cvmx_pip_stat0_prtx_s cn52xx; struct cvmx_pip_stat0_prtx_s cn52xxp1; struct cvmx_pip_stat0_prtx_s cn56xx; struct cvmx_pip_stat0_prtx_s cn56xxp1; struct cvmx_pip_stat0_prtx_s cn58xx; struct cvmx_pip_stat0_prtx_s cn58xxp1; } cvmx_pip_stat0_prtx_t; /** * cvmx_pip_stat1_prt# * * PIP_STAT1_PRTX = PIP_STAT_OCTS * */ typedef union { uint64_t u64; struct cvmx_pip_stat1_prtx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_48_63 : 16; uint64_t octs : 48; /**< Number of octets received by PIP (good and bad) */ #else uint64_t octs : 48; uint64_t reserved_48_63 : 16; #endif } s; struct cvmx_pip_stat1_prtx_s cn30xx; struct cvmx_pip_stat1_prtx_s cn31xx; struct cvmx_pip_stat1_prtx_s cn38xx; struct cvmx_pip_stat1_prtx_s cn38xxp2; struct cvmx_pip_stat1_prtx_s cn50xx; struct cvmx_pip_stat1_prtx_s cn52xx; struct cvmx_pip_stat1_prtx_s cn52xxp1; struct cvmx_pip_stat1_prtx_s cn56xx; struct cvmx_pip_stat1_prtx_s cn56xxp1; struct cvmx_pip_stat1_prtx_s cn58xx; struct cvmx_pip_stat1_prtx_s cn58xxp1; } cvmx_pip_stat1_prtx_t; /** * cvmx_pip_stat2_prt# * * PIP_STAT2_PRTX = PIP_STAT_PKTS / PIP_STAT_RAW * */ typedef union { uint64_t u64; struct cvmx_pip_stat2_prtx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t pkts : 32; /**< Number of packets processed by PIP */ uint64_t raw : 32; /**< RAWFULL + RAWSCH Packets without an L1/L2 error received by PIP per port */ #else uint64_t
raw : 32; uint64_t pkts : 32; #endif } s; struct cvmx_pip_stat2_prtx_s cn30xx; struct cvmx_pip_stat2_prtx_s cn31xx; struct cvmx_pip_stat2_prtx_s cn38xx; struct cvmx_pip_stat2_prtx_s cn38xxp2; struct cvmx_pip_stat2_prtx_s cn50xx; struct cvmx_pip_stat2_prtx_s cn52xx; struct cvmx_pip_stat2_prtx_s cn52xxp1; struct cvmx_pip_stat2_prtx_s cn56xx; struct cvmx_pip_stat2_prtx_s cn56xxp1; struct cvmx_pip_stat2_prtx_s cn58xx; struct cvmx_pip_stat2_prtx_s cn58xxp1; } cvmx_pip_stat2_prtx_t; /** * cvmx_pip_stat3_prt# * * PIP_STAT3_PRTX = PIP_STAT_BCST / PIP_STAT_MCST * */ typedef union { uint64_t u64; struct cvmx_pip_stat3_prtx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t bcst : 32; /**< Number of identified L2 broadcast packets Does not include multicast packets Only includes packets whose parse mode is SKIP_TO_L2. */ uint64_t mcst : 32; /**< Number of identified L2 multicast packets Does not include broadcast packets Only includes packets whose parse mode is SKIP_TO_L2. */ #else uint64_t mcst : 32; uint64_t bcst : 32; #endif } s; struct cvmx_pip_stat3_prtx_s cn30xx; struct cvmx_pip_stat3_prtx_s cn31xx; struct cvmx_pip_stat3_prtx_s cn38xx; struct cvmx_pip_stat3_prtx_s cn38xxp2; struct cvmx_pip_stat3_prtx_s cn50xx; struct cvmx_pip_stat3_prtx_s cn52xx; struct cvmx_pip_stat3_prtx_s cn52xxp1; struct cvmx_pip_stat3_prtx_s cn56xx; struct cvmx_pip_stat3_prtx_s cn56xxp1; struct cvmx_pip_stat3_prtx_s cn58xx; struct cvmx_pip_stat3_prtx_s cn58xxp1; } cvmx_pip_stat3_prtx_t; /** * cvmx_pip_stat4_prt# * * PIP_STAT4_PRTX = PIP_STAT_HIST1 / PIP_STAT_HIST0 * */ typedef union { uint64_t u64; struct cvmx_pip_stat4_prtx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t h65to127 : 32; /**< Number of 65-127B packets */ uint64_t h64 : 32; /**< Number of 1-64B packets */ #else uint64_t h64 : 32; uint64_t h65to127 : 32; #endif } s; struct cvmx_pip_stat4_prtx_s cn30xx; struct cvmx_pip_stat4_prtx_s cn31xx; struct cvmx_pip_stat4_prtx_s cn38xx; struct cvmx_pip_stat4_prtx_s cn38xxp2; struct cvmx_pip_stat4_prtx_s cn50xx; struct cvmx_pip_stat4_prtx_s cn52xx; struct cvmx_pip_stat4_prtx_s cn52xxp1; struct cvmx_pip_stat4_prtx_s cn56xx; struct cvmx_pip_stat4_prtx_s cn56xxp1; struct cvmx_pip_stat4_prtx_s cn58xx; struct cvmx_pip_stat4_prtx_s cn58xxp1; } cvmx_pip_stat4_prtx_t; /** * cvmx_pip_stat5_prt# * * PIP_STAT5_PRTX = PIP_STAT_HIST3 / PIP_STAT_HIST2 * */ typedef union { uint64_t u64; struct cvmx_pip_stat5_prtx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t h256to511 : 32; /**< Number of 256-511B packets */ uint64_t h128to255 : 32; /**< Number of 128-255B packets */ #else uint64_t h128to255 : 32; uint64_t h256to511 : 32; #endif } s; struct cvmx_pip_stat5_prtx_s cn30xx; struct cvmx_pip_stat5_prtx_s cn31xx; struct cvmx_pip_stat5_prtx_s cn38xx; struct cvmx_pip_stat5_prtx_s cn38xxp2; struct cvmx_pip_stat5_prtx_s cn50xx; struct cvmx_pip_stat5_prtx_s cn52xx; struct cvmx_pip_stat5_prtx_s cn52xxp1; struct cvmx_pip_stat5_prtx_s cn56xx; struct cvmx_pip_stat5_prtx_s cn56xxp1; struct cvmx_pip_stat5_prtx_s cn58xx; struct cvmx_pip_stat5_prtx_s cn58xxp1; } cvmx_pip_stat5_prtx_t; /** * cvmx_pip_stat6_prt# * * PIP_STAT6_PRTX = PIP_STAT_HIST5 / PIP_STAT_HIST4 * */ typedef union { uint64_t u64; struct cvmx_pip_stat6_prtx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t h1024to1518 : 32; /**< Number of 1024-1518B packets */ uint64_t h512to1023 : 32; /**< Number of 512-1023B packets */ #else uint64_t h512to1023 : 32; uint64_t h1024to1518 : 32; #endif } s; struct cvmx_pip_stat6_prtx_s cn30xx; struct cvmx_pip_stat6_prtx_s cn31xx; struct cvmx_pip_stat6_prtx_s
cn38xx; struct cvmx_pip_stat6_prtx_s cn38xxp2; struct cvmx_pip_stat6_prtx_s cn50xx; struct cvmx_pip_stat6_prtx_s cn52xx; struct cvmx_pip_stat6_prtx_s cn52xxp1; struct cvmx_pip_stat6_prtx_s cn56xx; struct cvmx_pip_stat6_prtx_s cn56xxp1; struct cvmx_pip_stat6_prtx_s cn58xx; struct cvmx_pip_stat6_prtx_s cn58xxp1; } cvmx_pip_stat6_prtx_t; /** * cvmx_pip_stat7_prt# * * PIP_STAT7_PRTX = PIP_STAT_FCS / PIP_STAT_HIST6 * * * Notes: * FCS is not checked on the PCI ports 32..35. * */ typedef union { uint64_t u64; struct cvmx_pip_stat7_prtx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t fcs : 32; /**< Number of packets with FCS or Align opcode errors */ uint64_t h1519 : 32; /**< Number of 1519-max packets */ #else uint64_t h1519 : 32; uint64_t fcs : 32; #endif } s; struct cvmx_pip_stat7_prtx_s cn30xx; struct cvmx_pip_stat7_prtx_s cn31xx; struct cvmx_pip_stat7_prtx_s cn38xx; struct cvmx_pip_stat7_prtx_s cn38xxp2; struct cvmx_pip_stat7_prtx_s cn50xx; struct cvmx_pip_stat7_prtx_s cn52xx; struct cvmx_pip_stat7_prtx_s cn52xxp1; struct cvmx_pip_stat7_prtx_s cn56xx; struct cvmx_pip_stat7_prtx_s cn56xxp1; struct cvmx_pip_stat7_prtx_s cn58xx; struct cvmx_pip_stat7_prtx_s cn58xxp1; } cvmx_pip_stat7_prtx_t; /** * cvmx_pip_stat8_prt# * * PIP_STAT8_PRTX = PIP_STAT_FRAG / PIP_STAT_UNDER * * * Notes: * FCS is not checked on the PCI ports 32..35. * */ typedef union { uint64_t u64; struct cvmx_pip_stat8_prtx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t frag : 32; /**< Number of packets with length < min and FCS error */ uint64_t undersz : 32; /**< Number of packets with length < min */ #else uint64_t undersz : 32; uint64_t frag : 32; #endif } s; struct cvmx_pip_stat8_prtx_s cn30xx; struct cvmx_pip_stat8_prtx_s cn31xx; struct cvmx_pip_stat8_prtx_s cn38xx; struct cvmx_pip_stat8_prtx_s cn38xxp2; struct cvmx_pip_stat8_prtx_s cn50xx; struct cvmx_pip_stat8_prtx_s cn52xx; struct cvmx_pip_stat8_prtx_s cn52xxp1; struct cvmx_pip_stat8_prtx_s cn56xx; struct cvmx_pip_stat8_prtx_s cn56xxp1; struct cvmx_pip_stat8_prtx_s cn58xx; struct cvmx_pip_stat8_prtx_s cn58xxp1; } cvmx_pip_stat8_prtx_t; /** * cvmx_pip_stat9_prt# * * PIP_STAT9_PRTX = PIP_STAT_JABBER / PIP_STAT_OVER * * * Notes: * FCS is not checked on the PCI ports 32..35. * */ typedef union { uint64_t u64; struct cvmx_pip_stat9_prtx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t jabber : 32; /**< Number of packets with length > max and FCS error */ uint64_t oversz : 32; /**< Number of packets with length > max */ #else uint64_t oversz : 32; uint64_t jabber : 32; #endif } s; struct cvmx_pip_stat9_prtx_s cn30xx; struct cvmx_pip_stat9_prtx_s cn31xx; struct cvmx_pip_stat9_prtx_s cn38xx; struct cvmx_pip_stat9_prtx_s cn38xxp2; struct cvmx_pip_stat9_prtx_s cn50xx; struct cvmx_pip_stat9_prtx_s cn52xx; struct cvmx_pip_stat9_prtx_s cn52xxp1; struct cvmx_pip_stat9_prtx_s cn56xx; struct cvmx_pip_stat9_prtx_s cn56xxp1; struct cvmx_pip_stat9_prtx_s cn58xx; struct cvmx_pip_stat9_prtx_s cn58xxp1; } cvmx_pip_stat9_prtx_t; /** * cvmx_pip_stat_ctl * * PIP_STAT_CTL = PIP's Stat Control Register * * Controls how the PIP statistics counters are handled. 
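*
* For illustration, a minimal sketch of enabling read-to-clear polling
* (assumes the CVMX_PIP_STAT_CTL address macro and the cvmx_read_csr()/
* cvmx_write_csr() accessors declared in the SDK's companion headers):
*
*   cvmx_pip_stat_ctl_t stat_ctl;
*   stat_ctl.u64 = cvmx_read_csr(CVMX_PIP_STAT_CTL);
*   stat_ctl.s.rdclr = 1;       // every stat register read now clears it
*   cvmx_write_csr(CVMX_PIP_STAT_CTL, stat_ctl.u64);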
*/ typedef union { uint64_t u64; struct cvmx_pip_stat_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t rdclr : 1; /**< Stat registers are read and clear 0 = stat registers hold value when read 1 = stat registers are cleared when read */ #else uint64_t rdclr : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_pip_stat_ctl_s cn30xx; struct cvmx_pip_stat_ctl_s cn31xx; struct cvmx_pip_stat_ctl_s cn38xx; struct cvmx_pip_stat_ctl_s cn38xxp2; struct cvmx_pip_stat_ctl_s cn50xx; struct cvmx_pip_stat_ctl_s cn52xx; struct cvmx_pip_stat_ctl_s cn52xxp1; struct cvmx_pip_stat_ctl_s cn56xx; struct cvmx_pip_stat_ctl_s cn56xxp1; struct cvmx_pip_stat_ctl_s cn58xx; struct cvmx_pip_stat_ctl_s cn58xxp1; } cvmx_pip_stat_ctl_t; /** * cvmx_pip_stat_inb_errs# * * PIP_STAT_INB_ERRSX = Inbound error packets received by PIP per port * * Inbound stats collect all data sent to PIP from all packet interfaces. * These are the raw counts of everything that comes into the block. The counts * will reflect all error packets and packets dropped by the PKI RED engine. * These counts are intended for system debug, but could convey useful * information in production systems. */ typedef union { uint64_t u64; struct cvmx_pip_stat_inb_errsx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t errs : 16; /**< Number of packets with GMX/SPX/PCI errors received by PIP */ #else uint64_t errs : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_pip_stat_inb_errsx_s cn30xx; struct cvmx_pip_stat_inb_errsx_s cn31xx; struct cvmx_pip_stat_inb_errsx_s cn38xx; struct cvmx_pip_stat_inb_errsx_s cn38xxp2; struct cvmx_pip_stat_inb_errsx_s cn50xx; struct cvmx_pip_stat_inb_errsx_s cn52xx; struct cvmx_pip_stat_inb_errsx_s cn52xxp1; struct cvmx_pip_stat_inb_errsx_s cn56xx; struct cvmx_pip_stat_inb_errsx_s cn56xxp1; struct cvmx_pip_stat_inb_errsx_s cn58xx; struct cvmx_pip_stat_inb_errsx_s cn58xxp1; } cvmx_pip_stat_inb_errsx_t; /** * cvmx_pip_stat_inb_octs# * * PIP_STAT_INB_OCTSX = Inbound octets received by PIP per port * * Inbound stats collect all data sent to PIP from all packet interfaces. * These are the raw counts of everything that comes into the block. The counts * will reflect all error packets and packets dropped by the PKI RED engine. * These counts are intended for system debug, but could convey useful * information in production systems. */ typedef union { uint64_t u64; struct cvmx_pip_stat_inb_octsx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_48_63 : 16; uint64_t octs : 48; /**< Total number of octets from all packets received by PIP */ #else uint64_t octs : 48; uint64_t reserved_48_63 : 16; #endif } s; struct cvmx_pip_stat_inb_octsx_s cn30xx; struct cvmx_pip_stat_inb_octsx_s cn31xx; struct cvmx_pip_stat_inb_octsx_s cn38xx; struct cvmx_pip_stat_inb_octsx_s cn38xxp2; struct cvmx_pip_stat_inb_octsx_s cn50xx; struct cvmx_pip_stat_inb_octsx_s cn52xx; struct cvmx_pip_stat_inb_octsx_s cn52xxp1; struct cvmx_pip_stat_inb_octsx_s cn56xx; struct cvmx_pip_stat_inb_octsx_s cn56xxp1; struct cvmx_pip_stat_inb_octsx_s cn58xx; struct cvmx_pip_stat_inb_octsx_s cn58xxp1; } cvmx_pip_stat_inb_octsx_t; /** * cvmx_pip_stat_inb_pkts# * * PIP_STAT_INB_PKTSX = Inbound packets received by PIP per port * * Inbound stats collect all data sent to PIP from all packet interfaces. * These are the raw counts of everything that comes into the block. The counts * will reflect all error packets and packets dropped by the PKI RED engine.
* These counts are intended for system debug, but could convey useful * information in production systems. */ typedef union { uint64_t u64; struct cvmx_pip_stat_inb_pktsx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t pkts : 32; /**< Number of packets without GMX/SPX/PCI errors received by PIP */ #else uint64_t pkts : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_pip_stat_inb_pktsx_s cn30xx; struct cvmx_pip_stat_inb_pktsx_s cn31xx; struct cvmx_pip_stat_inb_pktsx_s cn38xx; struct cvmx_pip_stat_inb_pktsx_s cn38xxp2; struct cvmx_pip_stat_inb_pktsx_s cn50xx; struct cvmx_pip_stat_inb_pktsx_s cn52xx; struct cvmx_pip_stat_inb_pktsx_s cn52xxp1; struct cvmx_pip_stat_inb_pktsx_s cn56xx; struct cvmx_pip_stat_inb_pktsx_s cn56xxp1; struct cvmx_pip_stat_inb_pktsx_s cn58xx; struct cvmx_pip_stat_inb_pktsx_s cn58xxp1; } cvmx_pip_stat_inb_pktsx_t; /** * cvmx_pip_tag_inc# * * PIP_TAG_INC = Which bytes to include in the new tag hash algorithm * * # $PIP_TAG_INCX = 0x300+X X=(0..63) RegType=(RSL) RtlReg=(pip_tag_inc_csr_direct_TestbuilderTask) */ typedef union { uint64_t u64; struct cvmx_pip_tag_incx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t en : 8; /**< Which bytes to include in mask tag algorithm Broken into 4, 16-entry masks to cover 128B PIP_PRT_CFG[TAG_INC] selects 1 of 4 to use registers 0-15 map to PIP_PRT_CFG[TAG_INC] == 0 registers 16-31 map to PIP_PRT_CFG[TAG_INC] == 1 registers 32-47 map to PIP_PRT_CFG[TAG_INC] == 2 registers 48-63 map to PIP_PRT_CFG[TAG_INC] == 3 [7] corresponds to the MSB of the 8B word [0] corresponds to the LSB of the 8B word (PASS2 only) */ #else uint64_t en : 8; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_pip_tag_incx_s cn30xx; struct cvmx_pip_tag_incx_s cn31xx; struct cvmx_pip_tag_incx_s cn38xx; struct cvmx_pip_tag_incx_s cn38xxp2; struct cvmx_pip_tag_incx_s cn50xx; struct cvmx_pip_tag_incx_s cn52xx; struct cvmx_pip_tag_incx_s cn52xxp1; struct cvmx_pip_tag_incx_s cn56xx; struct cvmx_pip_tag_incx_s cn56xxp1; struct cvmx_pip_tag_incx_s cn58xx; struct cvmx_pip_tag_incx_s cn58xxp1; } cvmx_pip_tag_incx_t; /** * cvmx_pip_tag_mask * * PIP_TAG_MASK = Mask bit in the tag generation * */ typedef union { uint64_t u64; struct cvmx_pip_tag_mask_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t mask : 16; /**< When set, MASK clears individual bits of lower 16 bits of the computed tag. Does not affect RAW or INSTR HDR packets. */ #else uint64_t mask : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_pip_tag_mask_s cn30xx; struct cvmx_pip_tag_mask_s cn31xx; struct cvmx_pip_tag_mask_s cn38xx; struct cvmx_pip_tag_mask_s cn38xxp2; struct cvmx_pip_tag_mask_s cn50xx; struct cvmx_pip_tag_mask_s cn52xx; struct cvmx_pip_tag_mask_s cn52xxp1; struct cvmx_pip_tag_mask_s cn56xx; struct cvmx_pip_tag_mask_s cn56xxp1; struct cvmx_pip_tag_mask_s cn58xx; struct cvmx_pip_tag_mask_s cn58xxp1; } cvmx_pip_tag_mask_t; /** * cvmx_pip_tag_secret * * PIP_TAG_SECRET = Initial value in tag generation * * The source and destination IV's provide a mechanism for each Octeon to be unique.
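*
* For illustration, a minimal sketch that loads per-chip IVs (assumes
* the CVMX_PIP_TAG_SECRET address macro and cvmx_write_csr() declared
* in the SDK's companion headers; the secret values are arbitrary
* placeholders):
*
*   cvmx_pip_tag_secret_t secret;
*   secret.u64 = 0;
*   secret.s.src = 0x1234;      // IV for the source tuple CRC
*   secret.s.dst = 0x5678;      // IV for the destination tuple CRC
*   cvmx_write_csr(CVMX_PIP_TAG_SECRET, secret.u64);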
*/ typedef union { uint64_t u64; struct cvmx_pip_tag_secret_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t dst : 16; /**< Secret for the destination tuple tag CRC calc */ uint64_t src : 16; /**< Secret for the source tuple tag CRC calc */ #else uint64_t src : 16; uint64_t dst : 16; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_pip_tag_secret_s cn30xx; struct cvmx_pip_tag_secret_s cn31xx; struct cvmx_pip_tag_secret_s cn38xx; struct cvmx_pip_tag_secret_s cn38xxp2; struct cvmx_pip_tag_secret_s cn50xx; struct cvmx_pip_tag_secret_s cn52xx; struct cvmx_pip_tag_secret_s cn52xxp1; struct cvmx_pip_tag_secret_s cn56xx; struct cvmx_pip_tag_secret_s cn56xxp1; struct cvmx_pip_tag_secret_s cn58xx; struct cvmx_pip_tag_secret_s cn58xxp1; } cvmx_pip_tag_secret_t; /** * cvmx_pip_todo_entry * * PIP_TODO_ENTRY = Head entry of the Todo list (debug only) * * Summary of the current packet that has completed and is waiting to be processed */ typedef union { uint64_t u64; struct cvmx_pip_todo_entry_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t val : 1; /**< Entry is valid */ uint64_t reserved_62_62 : 1; uint64_t entry : 62; /**< Todo list entry summary */ #else uint64_t entry : 62; uint64_t reserved_62_62 : 1; uint64_t val : 1; #endif } s; struct cvmx_pip_todo_entry_s cn30xx; struct cvmx_pip_todo_entry_s cn31xx; struct cvmx_pip_todo_entry_s cn38xx; struct cvmx_pip_todo_entry_s cn38xxp2; struct cvmx_pip_todo_entry_s cn50xx; struct cvmx_pip_todo_entry_s cn52xx; struct cvmx_pip_todo_entry_s cn52xxp1; struct cvmx_pip_todo_entry_s cn56xx; struct cvmx_pip_todo_entry_s cn56xxp1; struct cvmx_pip_todo_entry_s cn58xx; struct cvmx_pip_todo_entry_s cn58xxp1; } cvmx_pip_todo_entry_t; /** * cvmx_pko_mem_count0 * * Notes: * Total number of packets seen by PKO, per port * A write to this address will clear the entry whose index is specified as COUNT[5:0]. * This CSR is a memory of 40 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. A read of any entry that has not been * previously written is illegal and will result in unpredictable CSR read data. */ typedef union { uint64_t u64; struct cvmx_pko_mem_count0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t count : 32; /**< Total number of packets seen by PKO */ #else uint64_t count : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_pko_mem_count0_s cn30xx; struct cvmx_pko_mem_count0_s cn31xx; struct cvmx_pko_mem_count0_s cn38xx; struct cvmx_pko_mem_count0_s cn38xxp2; struct cvmx_pko_mem_count0_s cn50xx; struct cvmx_pko_mem_count0_s cn52xx; struct cvmx_pko_mem_count0_s cn52xxp1; struct cvmx_pko_mem_count0_s cn56xx; struct cvmx_pko_mem_count0_s cn56xxp1; struct cvmx_pko_mem_count0_s cn58xx; struct cvmx_pko_mem_count0_s cn58xxp1; } cvmx_pko_mem_count0_t; /** * cvmx_pko_mem_count1 * * Notes: * Total number of bytes seen by PKO, per port * A write to this address will clear the entry whose index is specified as COUNT[5:0]. * This CSR is a memory of 40 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. A read of any entry that has not been * previously written is illegal and will result in unpredictable CSR read data.
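*
* For illustration, a minimal sketch of the required sequence for one
* port (assumes the CVMX_PKO_REG_READ_IDX and CVMX_PKO_MEM_COUNT1
* address macros, the cvmx_pko_reg_read_idx_t type, and the
* cvmx_read_csr()/cvmx_write_csr() accessors declared in the SDK's
* companion headers):
*
*   cvmx_pko_reg_read_idx_t idx;
*   cvmx_pko_mem_count1_t count1;
*   idx.u64 = 0;
*   idx.s.index = 3;            // entry (port) 3 of the 40 entries
*   cvmx_write_csr(CVMX_PKO_REG_READ_IDX, idx.u64);
*   count1.u64 = cvmx_read_csr(CVMX_PKO_MEM_COUNT1);
*   // count1.s.count now holds the byte total for that port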
*/ typedef union { uint64_t u64; struct cvmx_pko_mem_count1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_48_63 : 16; uint64_t count : 48; /**< Total number of bytes seen by PKO */ #else uint64_t count : 48; uint64_t reserved_48_63 : 16; #endif } s; struct cvmx_pko_mem_count1_s cn30xx; struct cvmx_pko_mem_count1_s cn31xx; struct cvmx_pko_mem_count1_s cn38xx; struct cvmx_pko_mem_count1_s cn38xxp2; struct cvmx_pko_mem_count1_s cn50xx; struct cvmx_pko_mem_count1_s cn52xx; struct cvmx_pko_mem_count1_s cn52xxp1; struct cvmx_pko_mem_count1_s cn56xx; struct cvmx_pko_mem_count1_s cn56xxp1; struct cvmx_pko_mem_count1_s cn58xx; struct cvmx_pko_mem_count1_s cn58xxp1; } cvmx_pko_mem_count1_t; /** * cvmx_pko_mem_debug0 * * Notes: * Internal per-port state intended for debug use only - pko_prt_psb.cmnd[63:0] * This CSR is a memory of 10 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. */ typedef union { uint64_t u64; struct cvmx_pko_mem_debug0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t fau : 28; /**< Fetch and add command words */ uint64_t cmd : 14; /**< Command word */ uint64_t segs : 6; /**< Number of segments/gather size */ uint64_t size : 16; /**< Packet length in bytes */ #else uint64_t size : 16; uint64_t segs : 6; uint64_t cmd : 14; uint64_t fau : 28; #endif } s; struct cvmx_pko_mem_debug0_s cn30xx; struct cvmx_pko_mem_debug0_s cn31xx; struct cvmx_pko_mem_debug0_s cn38xx; struct cvmx_pko_mem_debug0_s cn38xxp2; struct cvmx_pko_mem_debug0_s cn50xx; struct cvmx_pko_mem_debug0_s cn52xx; struct cvmx_pko_mem_debug0_s cn52xxp1; struct cvmx_pko_mem_debug0_s cn56xx; struct cvmx_pko_mem_debug0_s cn56xxp1; struct cvmx_pko_mem_debug0_s cn58xx; struct cvmx_pko_mem_debug0_s cn58xxp1; } cvmx_pko_mem_debug0_t; /** * cvmx_pko_mem_debug1 * * Notes: * Internal per-port state intended for debug use only - pko_prt_psb.curr[63:0] * This CSR is a memory of 10 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. */ typedef union { uint64_t u64; struct cvmx_pko_mem_debug1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t i : 1; /**< "I" value used for free operation */ uint64_t back : 4; /**< Back value used for free operation */ uint64_t pool : 3; /**< Pool value used for free operation */ uint64_t size : 16; /**< Size in bytes */ uint64_t ptr : 40; /**< Data pointer */ #else uint64_t ptr : 40; uint64_t size : 16; uint64_t pool : 3; uint64_t back : 4; uint64_t i : 1; #endif } s; struct cvmx_pko_mem_debug1_s cn30xx; struct cvmx_pko_mem_debug1_s cn31xx; struct cvmx_pko_mem_debug1_s cn38xx; struct cvmx_pko_mem_debug1_s cn38xxp2; struct cvmx_pko_mem_debug1_s cn50xx; struct cvmx_pko_mem_debug1_s cn52xx; struct cvmx_pko_mem_debug1_s cn52xxp1; struct cvmx_pko_mem_debug1_s cn56xx; struct cvmx_pko_mem_debug1_s cn56xxp1; struct cvmx_pko_mem_debug1_s cn58xx; struct cvmx_pko_mem_debug1_s cn58xxp1; } cvmx_pko_mem_debug1_t; /** * cvmx_pko_mem_debug10 * * Notes: * Internal per-port state intended for debug use only - pko.dat.ptr.ptrs1, pko.dat.ptr.ptrs2 * This CSR is a memory of 40 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. 
*/ typedef union { uint64_t u64; struct cvmx_pko_mem_debug10_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_0_63 : 64; #else uint64_t reserved_0_63 : 64; #endif } s; struct cvmx_pko_mem_debug10_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t fau : 28; /**< Fetch and add command words */ uint64_t cmd : 14; /**< Command word */ uint64_t segs : 6; /**< Number of segments/gather size */ uint64_t size : 16; /**< Packet length in bytes */ #else uint64_t size : 16; uint64_t segs : 6; uint64_t cmd : 14; uint64_t fau : 28; #endif } cn30xx; struct cvmx_pko_mem_debug10_cn30xx cn31xx; struct cvmx_pko_mem_debug10_cn30xx cn38xx; struct cvmx_pko_mem_debug10_cn30xx cn38xxp2; struct cvmx_pko_mem_debug10_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_49_63 : 15; uint64_t ptrs1 : 17; /**< Internal state */ uint64_t reserved_17_31 : 15; uint64_t ptrs2 : 17; /**< Internal state */ #else uint64_t ptrs2 : 17; uint64_t reserved_17_31 : 15; uint64_t ptrs1 : 17; uint64_t reserved_49_63 : 15; #endif } cn50xx; struct cvmx_pko_mem_debug10_cn50xx cn52xx; struct cvmx_pko_mem_debug10_cn50xx cn52xxp1; struct cvmx_pko_mem_debug10_cn50xx cn56xx; struct cvmx_pko_mem_debug10_cn50xx cn56xxp1; struct cvmx_pko_mem_debug10_cn50xx cn58xx; struct cvmx_pko_mem_debug10_cn50xx cn58xxp1; } cvmx_pko_mem_debug10_t; /** * cvmx_pko_mem_debug11 * * Notes: * Internal per-port state intended for debug use only - pko.out.sta.state[22:0] * This CSR is a memory of 40 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. */ typedef union { uint64_t u64; struct cvmx_pko_mem_debug11_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t i : 1; /**< "I" value used for free operation */ uint64_t back : 4; /**< Back value used for free operation */ uint64_t pool : 3; /**< Pool value used for free operation */ uint64_t size : 16; /**< Size in bytes */ uint64_t reserved_0_39 : 40; #else uint64_t reserved_0_39 : 40; uint64_t size : 16; uint64_t pool : 3; uint64_t back : 4; uint64_t i : 1; #endif } s; struct cvmx_pko_mem_debug11_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t i : 1; /**< "I" value used for free operation */ uint64_t back : 4; /**< Back value used for free operation */ uint64_t pool : 3; /**< Pool value used for free operation */ uint64_t size : 16; /**< Size in bytes */ uint64_t ptr : 40; /**< Data pointer */ #else uint64_t ptr : 40; uint64_t size : 16; uint64_t pool : 3; uint64_t back : 4; uint64_t i : 1; #endif } cn30xx; struct cvmx_pko_mem_debug11_cn30xx cn31xx; struct cvmx_pko_mem_debug11_cn30xx cn38xx; struct cvmx_pko_mem_debug11_cn30xx cn38xxp2; struct cvmx_pko_mem_debug11_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_23_63 : 41; uint64_t maj : 1; /**< Internal state */ uint64_t uid : 3; /**< Internal state */ uint64_t sop : 1; /**< Internal state */ uint64_t len : 1; /**< Internal state */ uint64_t chk : 1; /**< Internal state */ uint64_t cnt : 13; /**< Internal state */ uint64_t mod : 3; /**< Internal state */ #else uint64_t mod : 3; uint64_t cnt : 13; uint64_t chk : 1; uint64_t len : 1; uint64_t sop : 1; uint64_t uid : 3; uint64_t maj : 1; uint64_t reserved_23_63 : 41; #endif } cn50xx; struct cvmx_pko_mem_debug11_cn50xx cn52xx; struct cvmx_pko_mem_debug11_cn50xx cn52xxp1; struct cvmx_pko_mem_debug11_cn50xx cn56xx; struct cvmx_pko_mem_debug11_cn50xx cn56xxp1; struct cvmx_pko_mem_debug11_cn50xx cn58xx; struct cvmx_pko_mem_debug11_cn50xx cn58xxp1; } cvmx_pko_mem_debug11_t; /** * cvmx_pko_mem_debug12 * * Notes: * Internal per-port 
state intended for debug use only - pko.out.ctl.cmnd[63:0] * This CSR is a memory of 40 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. */ typedef union { uint64_t u64; struct cvmx_pko_mem_debug12_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_0_63 : 64; #else uint64_t reserved_0_63 : 64; #endif } s; struct cvmx_pko_mem_debug12_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t data : 64; /**< WorkQ data or Store0 pointer */ #else uint64_t data : 64; #endif } cn30xx; struct cvmx_pko_mem_debug12_cn30xx cn31xx; struct cvmx_pko_mem_debug12_cn30xx cn38xx; struct cvmx_pko_mem_debug12_cn30xx cn38xxp2; struct cvmx_pko_mem_debug12_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t fau : 28; /**< Fetch and add command words */ uint64_t cmd : 14; /**< Command word */ uint64_t segs : 6; /**< Number of segments/gather size */ uint64_t size : 16; /**< Packet length in bytes */ #else uint64_t size : 16; uint64_t segs : 6; uint64_t cmd : 14; uint64_t fau : 28; #endif } cn50xx; struct cvmx_pko_mem_debug12_cn50xx cn52xx; struct cvmx_pko_mem_debug12_cn50xx cn52xxp1; struct cvmx_pko_mem_debug12_cn50xx cn56xx; struct cvmx_pko_mem_debug12_cn50xx cn56xxp1; struct cvmx_pko_mem_debug12_cn50xx cn58xx; struct cvmx_pko_mem_debug12_cn50xx cn58xxp1; } cvmx_pko_mem_debug12_t; /** * cvmx_pko_mem_debug13 * * Notes: * Internal per-port state intended for debug use only - pko.out.ctl.head[63:0] * This CSR is a memory of 40 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. */ typedef union { uint64_t u64; struct cvmx_pko_mem_debug13_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t i : 1; /**< "I" value used for free operation */ uint64_t back : 4; /**< Back value used for free operation */ uint64_t pool : 3; /**< Pool value used for free operation */ uint64_t reserved_0_55 : 56; #else uint64_t reserved_0_55 : 56; uint64_t pool : 3; uint64_t back : 4; uint64_t i : 1; #endif } s; struct cvmx_pko_mem_debug13_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_51_63 : 13; uint64_t widx : 17; /**< PDB widx */ uint64_t ridx2 : 17; /**< PDB ridx2 */ uint64_t widx2 : 17; /**< PDB widx2 */ #else uint64_t widx2 : 17; uint64_t ridx2 : 17; uint64_t widx : 17; uint64_t reserved_51_63 : 13; #endif } cn30xx; struct cvmx_pko_mem_debug13_cn30xx cn31xx; struct cvmx_pko_mem_debug13_cn30xx cn38xx; struct cvmx_pko_mem_debug13_cn30xx cn38xxp2; struct cvmx_pko_mem_debug13_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t i : 1; /**< "I" value used for free operation */ uint64_t back : 4; /**< Back value used for free operation */ uint64_t pool : 3; /**< Pool value used for free operation */ uint64_t size : 16; /**< Size in bytes */ uint64_t ptr : 40; /**< Data pointer */ #else uint64_t ptr : 40; uint64_t size : 16; uint64_t pool : 3; uint64_t back : 4; uint64_t i : 1; #endif } cn50xx; struct cvmx_pko_mem_debug13_cn50xx cn52xx; struct cvmx_pko_mem_debug13_cn50xx cn52xxp1; struct cvmx_pko_mem_debug13_cn50xx cn56xx; struct cvmx_pko_mem_debug13_cn50xx cn56xxp1; struct cvmx_pko_mem_debug13_cn50xx cn58xx; struct cvmx_pko_mem_debug13_cn50xx cn58xxp1; } cvmx_pko_mem_debug13_t; /** * cvmx_pko_mem_debug14 * * Notes: * Internal per-port state intended for debug use only - pko.prt.psb.save[63:0] * This CSR is a memory of 120 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. 
*/ typedef union { uint64_t u64; struct cvmx_pko_mem_debug14_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_0_63 : 64; #else uint64_t reserved_0_63 : 64; #endif } s; struct cvmx_pko_mem_debug14_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_17_63 : 47; uint64_t ridx : 17; /**< PDB ridx */ #else uint64_t ridx : 17; uint64_t reserved_17_63 : 47; #endif } cn30xx; struct cvmx_pko_mem_debug14_cn30xx cn31xx; struct cvmx_pko_mem_debug14_cn30xx cn38xx; struct cvmx_pko_mem_debug14_cn30xx cn38xxp2; struct cvmx_pko_mem_debug14_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t data : 64; /**< Command words */ #else uint64_t data : 64; #endif } cn52xx; struct cvmx_pko_mem_debug14_cn52xx cn52xxp1; struct cvmx_pko_mem_debug14_cn52xx cn56xx; struct cvmx_pko_mem_debug14_cn52xx cn56xxp1; } cvmx_pko_mem_debug14_t; /** * cvmx_pko_mem_debug2 * * Notes: * Internal per-port state intended for debug use only - pko_prt_psb.head[63:0] * This CSR is a memory of 10 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. */ typedef union { uint64_t u64; struct cvmx_pko_mem_debug2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t i : 1; /**< "I" value used for free operation */ uint64_t back : 4; /**< Back value used for free operation */ uint64_t pool : 3; /**< Pool value used for free operation */ uint64_t size : 16; /**< Size in bytes */ uint64_t ptr : 40; /**< Data pointer */ #else uint64_t ptr : 40; uint64_t size : 16; uint64_t pool : 3; uint64_t back : 4; uint64_t i : 1; #endif } s; struct cvmx_pko_mem_debug2_s cn30xx; struct cvmx_pko_mem_debug2_s cn31xx; struct cvmx_pko_mem_debug2_s cn38xx; struct cvmx_pko_mem_debug2_s cn38xxp2; struct cvmx_pko_mem_debug2_s cn50xx; struct cvmx_pko_mem_debug2_s cn52xx; struct cvmx_pko_mem_debug2_s cn52xxp1; struct cvmx_pko_mem_debug2_s cn56xx; struct cvmx_pko_mem_debug2_s cn56xxp1; struct cvmx_pko_mem_debug2_s cn58xx; struct cvmx_pko_mem_debug2_s cn58xxp1; } cvmx_pko_mem_debug2_t; /** * cvmx_pko_mem_debug3 * * Notes: * Internal per-port state intended for debug use only - pko_prt_psb.resp[63:0] * This CSR is a memory of 10 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. 
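*
* For illustration, the same READ_IDX-then-read sequence applies to all
* of these debug memories; a minimal sketch that snapshots the head
* pointer state of entry 0 from PKO_MEM_DEBUG2 (assumes the
* CVMX_PKO_REG_READ_IDX and CVMX_PKO_MEM_DEBUG2 address macros and the
* cvmx_read_csr()/cvmx_write_csr() accessors declared in the SDK's
* companion headers):
*
*   cvmx_pko_reg_read_idx_t idx;
*   cvmx_pko_mem_debug2_t dbg2;
*   idx.u64 = 0;
*   idx.s.index = 0;            // entry 0 of the 10 per-port entries
*   cvmx_write_csr(CVMX_PKO_REG_READ_IDX, idx.u64);
*   dbg2.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG2);
*   // dbg2.s.ptr, dbg2.s.size, and dbg2.s.pool describe the current
*   // head buffer exactly as PKO would free it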
*/ typedef union { uint64_t u64; struct cvmx_pko_mem_debug3_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_0_63 : 64; #else uint64_t reserved_0_63 : 64; #endif } s; struct cvmx_pko_mem_debug3_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t i : 1; /**< "I" value used for free operation */ uint64_t back : 4; /**< Back value used for free operation */ uint64_t pool : 3; /**< Pool value used for free operation */ uint64_t size : 16; /**< Size in bytes */ uint64_t ptr : 40; /**< Data pointer */ #else uint64_t ptr : 40; uint64_t size : 16; uint64_t pool : 3; uint64_t back : 4; uint64_t i : 1; #endif } cn30xx; struct cvmx_pko_mem_debug3_cn30xx cn31xx; struct cvmx_pko_mem_debug3_cn30xx cn38xx; struct cvmx_pko_mem_debug3_cn30xx cn38xxp2; struct cvmx_pko_mem_debug3_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t data : 64; /**< WorkQ data or Store0 pointer */ #else uint64_t data : 64; #endif } cn50xx; struct cvmx_pko_mem_debug3_cn50xx cn52xx; struct cvmx_pko_mem_debug3_cn50xx cn52xxp1; struct cvmx_pko_mem_debug3_cn50xx cn56xx; struct cvmx_pko_mem_debug3_cn50xx cn56xxp1; struct cvmx_pko_mem_debug3_cn50xx cn58xx; struct cvmx_pko_mem_debug3_cn50xx cn58xxp1; } cvmx_pko_mem_debug3_t; /** * cvmx_pko_mem_debug4 * * Notes: * Internal per-port state intended for debug use only - pko_prt_psb.state[63:0] * This CSR is a memory of 10 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. */ typedef union { uint64_t u64; struct cvmx_pko_mem_debug4_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_0_63 : 64; #else uint64_t reserved_0_63 : 64; #endif } s; struct cvmx_pko_mem_debug4_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t data : 64; /**< WorkQ data or Store0 pointer */ #else uint64_t data : 64; #endif } cn30xx; struct cvmx_pko_mem_debug4_cn30xx cn31xx; struct cvmx_pko_mem_debug4_cn30xx cn38xx; struct cvmx_pko_mem_debug4_cn30xx cn38xxp2; struct cvmx_pko_mem_debug4_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t cmnd_segs : 3; /**< Internal state */ uint64_t cmnd_siz : 16; /**< Internal state */ uint64_t cmnd_off : 6; /**< Internal state */ uint64_t uid : 3; /**< Internal state */ uint64_t dread_sop : 1; /**< Internal state */ uint64_t init_dwrite : 1; /**< Internal state */ uint64_t chk_once : 1; /**< Internal state */ uint64_t chk_mode : 1; /**< Internal state */ uint64_t active : 1; /**< Internal state */ uint64_t static_p : 1; /**< Internal state */ uint64_t qos : 3; /**< Internal state */ uint64_t qcb_ridx : 5; /**< Internal state */ uint64_t qid_off_max : 4; /**< Internal state */ uint64_t qid_off : 4; /**< Internal state */ uint64_t qid_base : 8; /**< Internal state */ uint64_t wait : 1; /**< Internal state */ uint64_t minor : 2; /**< Internal state */ uint64_t major : 3; /**< Internal state */ #else uint64_t major : 3; uint64_t minor : 2; uint64_t wait : 1; uint64_t qid_base : 8; uint64_t qid_off : 4; uint64_t qid_off_max : 4; uint64_t qcb_ridx : 5; uint64_t qos : 3; uint64_t static_p : 1; uint64_t active : 1; uint64_t chk_mode : 1; uint64_t chk_once : 1; uint64_t init_dwrite : 1; uint64_t dread_sop : 1; uint64_t uid : 3; uint64_t cmnd_off : 6; uint64_t cmnd_siz : 16; uint64_t cmnd_segs : 3; #endif } cn50xx; struct cvmx_pko_mem_debug4_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t curr_siz : 8; /**< Internal state */ uint64_t curr_off : 16; /**< Internal state */ uint64_t cmnd_segs : 6; /**< Internal state */ uint64_t cmnd_siz : 16; /**< Internal state */ uint64_t cmnd_off : 6; /**< Internal state */ 
uint64_t uid : 2; /**< Internal state */ uint64_t dread_sop : 1; /**< Internal state */ uint64_t init_dwrite : 1; /**< Internal state */ uint64_t chk_once : 1; /**< Internal state */ uint64_t chk_mode : 1; /**< Internal state */ uint64_t wait : 1; /**< Internal state */ uint64_t minor : 2; /**< Internal state */ uint64_t major : 3; /**< Internal state */ #else uint64_t major : 3; uint64_t minor : 2; uint64_t wait : 1; uint64_t chk_mode : 1; uint64_t chk_once : 1; uint64_t init_dwrite : 1; uint64_t dread_sop : 1; uint64_t uid : 2; uint64_t cmnd_off : 6; uint64_t cmnd_siz : 16; uint64_t cmnd_segs : 6; uint64_t curr_off : 16; uint64_t curr_siz : 8; #endif } cn52xx; struct cvmx_pko_mem_debug4_cn52xx cn52xxp1; struct cvmx_pko_mem_debug4_cn52xx cn56xx; struct cvmx_pko_mem_debug4_cn52xx cn56xxp1; struct cvmx_pko_mem_debug4_cn50xx cn58xx; struct cvmx_pko_mem_debug4_cn50xx cn58xxp1; } cvmx_pko_mem_debug4_t; /** * cvmx_pko_mem_debug5 * * Notes: * Internal per-port state intended for debug use only - pko_prt_psb.state[127:64] * This CSR is a memory of 10 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. */ typedef union { uint64_t u64; struct cvmx_pko_mem_debug5_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_0_63 : 64; #else uint64_t reserved_0_63 : 64; #endif } s; struct cvmx_pko_mem_debug5_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t dwri_mod : 1; /**< Dwrite mod */ uint64_t dwri_sop : 1; /**< Dwrite sop needed */ uint64_t dwri_len : 1; /**< Dwrite len */ uint64_t dwri_cnt : 13; /**< Dwrite count */ uint64_t cmnd_siz : 16; /**< Copy of cmnd.size */ uint64_t uid : 1; /**< UID */ uint64_t xfer_wor : 1; /**< Transfer work needed */ uint64_t xfer_dwr : 1; /**< Transfer dwrite needed */ uint64_t cbuf_fre : 1; /**< Cbuf needs free */ uint64_t reserved_27_27 : 1; uint64_t chk_mode : 1; /**< Checksum mode */ uint64_t active : 1; /**< Port is active */ uint64_t qos : 3; /**< Current QOS round */ uint64_t qcb_ridx : 5; /**< Buffer read index for QCB */ uint64_t qid_off : 3; /**< Offset to be added to QID_BASE for current queue */ uint64_t qid_base : 7; /**< Absolute QID of the queue array base = &QUEUES[0] */ uint64_t wait : 1; /**< State wait when set */ uint64_t minor : 2; /**< State minor code */ uint64_t major : 4; /**< State major code */ #else uint64_t major : 4; uint64_t minor : 2; uint64_t wait : 1; uint64_t qid_base : 7; uint64_t qid_off : 3; uint64_t qcb_ridx : 5; uint64_t qos : 3; uint64_t active : 1; uint64_t chk_mode : 1; uint64_t reserved_27_27 : 1; uint64_t cbuf_fre : 1; uint64_t xfer_dwr : 1; uint64_t xfer_wor : 1; uint64_t uid : 1; uint64_t cmnd_siz : 16; uint64_t dwri_cnt : 13; uint64_t dwri_len : 1; uint64_t dwri_sop : 1; uint64_t dwri_mod : 1; #endif } cn30xx; struct cvmx_pko_mem_debug5_cn30xx cn31xx; struct cvmx_pko_mem_debug5_cn30xx cn38xx; struct cvmx_pko_mem_debug5_cn30xx cn38xxp2; struct cvmx_pko_mem_debug5_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t curr_ptr : 29; /**< Internal state */ uint64_t curr_siz : 16; /**< Internal state */ uint64_t curr_off : 16; /**< Internal state */ uint64_t cmnd_segs : 3; /**< Internal state */ #else uint64_t cmnd_segs : 3; uint64_t curr_off : 16; uint64_t curr_siz : 16; uint64_t curr_ptr : 29; #endif } cn50xx; struct cvmx_pko_mem_debug5_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_54_63 : 10; uint64_t nxt_inflt : 6; /**< Internal state */ uint64_t curr_ptr : 40; /**< Internal state */ uint64_t curr_siz : 8; /**< Internal state */ #else 
uint64_t curr_siz : 8; uint64_t curr_ptr : 40; uint64_t nxt_inflt : 6; uint64_t reserved_54_63 : 10; #endif } cn52xx; struct cvmx_pko_mem_debug5_cn52xx cn52xxp1; struct cvmx_pko_mem_debug5_cn52xx cn56xx; struct cvmx_pko_mem_debug5_cn52xx cn56xxp1; struct cvmx_pko_mem_debug5_cn50xx cn58xx; struct cvmx_pko_mem_debug5_cn50xx cn58xxp1; } cvmx_pko_mem_debug5_t; /** * cvmx_pko_mem_debug6 * * Notes: * Internal per-port state intended for debug use only - pko_prt_psb.port[63:0] * This CSR is a memory of 40 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. */ typedef union { uint64_t u64; struct cvmx_pko_mem_debug6_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_37_63 : 27; uint64_t qid_offres : 4; /**< Internal state */ uint64_t qid_offths : 4; /**< Internal state */ uint64_t preempter : 1; /**< Internal state */ uint64_t preemptee : 1; /**< Internal state */ uint64_t preempted : 1; /**< Internal state */ uint64_t active : 1; /**< Internal state */ uint64_t statc : 1; /**< Internal state */ uint64_t qos : 3; /**< Internal state */ uint64_t qcb_ridx : 5; /**< Internal state */ uint64_t qid_offmax : 4; /**< Internal state */ uint64_t reserved_0_11 : 12; #else uint64_t reserved_0_11 : 12; uint64_t qid_offmax : 4; uint64_t qcb_ridx : 5; uint64_t qos : 3; uint64_t statc : 1; uint64_t active : 1; uint64_t preempted : 1; uint64_t preemptee : 1; uint64_t preempter : 1; uint64_t qid_offths : 4; uint64_t qid_offres : 4; uint64_t reserved_37_63 : 27; #endif } s; struct cvmx_pko_mem_debug6_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_11_63 : 53; uint64_t qid_offm : 3; /**< Qid offset max */ uint64_t static_p : 1; /**< Static port when set */ uint64_t work_min : 3; /**< Work minor */ uint64_t dwri_chk : 1; /**< Dwrite checksum mode */ uint64_t dwri_uid : 1; /**< Dwrite UID */ uint64_t dwri_mod : 2; /**< Dwrite mod */ #else uint64_t dwri_mod : 2; uint64_t dwri_uid : 1; uint64_t dwri_chk : 1; uint64_t work_min : 3; uint64_t static_p : 1; uint64_t qid_offm : 3; uint64_t reserved_11_63 : 53; #endif } cn30xx; struct cvmx_pko_mem_debug6_cn30xx cn31xx; struct cvmx_pko_mem_debug6_cn30xx cn38xx; struct cvmx_pko_mem_debug6_cn30xx cn38xxp2; struct cvmx_pko_mem_debug6_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_11_63 : 53; uint64_t curr_ptr : 11; /**< Internal state */ #else uint64_t curr_ptr : 11; uint64_t reserved_11_63 : 53; #endif } cn50xx; struct cvmx_pko_mem_debug6_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_37_63 : 27; uint64_t qid_offres : 4; /**< Internal state */ uint64_t qid_offths : 4; /**< Internal state */ uint64_t preempter : 1; /**< Internal state */ uint64_t preemptee : 1; /**< Internal state */ uint64_t preempted : 1; /**< Internal state */ uint64_t active : 1; /**< Internal state */ uint64_t statc : 1; /**< Internal state */ uint64_t qos : 3; /**< Internal state */ uint64_t qcb_ridx : 5; /**< Internal state */ uint64_t qid_offmax : 4; /**< Internal state */ uint64_t qid_off : 4; /**< Internal state */ uint64_t qid_base : 8; /**< Internal state */ #else uint64_t qid_base : 8; uint64_t qid_off : 4; uint64_t qid_offmax : 4; uint64_t qcb_ridx : 5; uint64_t qos : 3; uint64_t statc : 1; uint64_t active : 1; uint64_t preempted : 1; uint64_t preemptee : 1; uint64_t preempter : 1; uint64_t qid_offths : 4; uint64_t qid_offres : 4; uint64_t reserved_37_63 : 27; #endif } cn52xx; struct cvmx_pko_mem_debug6_cn52xx cn52xxp1; struct cvmx_pko_mem_debug6_cn52xx cn56xx; struct 
cvmx_pko_mem_debug6_cn52xx cn56xxp1; struct cvmx_pko_mem_debug6_cn50xx cn58xx; struct cvmx_pko_mem_debug6_cn50xx cn58xxp1; } cvmx_pko_mem_debug6_t; /** * cvmx_pko_mem_debug7 * * Notes: * Internal per-queue state intended for debug use only - pko_prt_qsb.state[63:0] * This CSR is a memory of 256 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. */ typedef union { uint64_t u64; struct cvmx_pko_mem_debug7_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t qos : 5; /**< QOS mask to enable the queue when set */ uint64_t tail : 1; /**< This queue is the last (tail) in the queue array */ uint64_t reserved_0_57 : 58; #else uint64_t reserved_0_57 : 58; uint64_t tail : 1; uint64_t qos : 5; #endif } s; struct cvmx_pko_mem_debug7_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_58_63 : 6; uint64_t dwb : 9; /**< Calculated DWB count used for free operation */ uint64_t start : 33; /**< Calculated start address used for free operation */ uint64_t size : 16; /**< Packet length in bytes */ #else uint64_t size : 16; uint64_t start : 33; uint64_t dwb : 9; uint64_t reserved_58_63 : 6; #endif } cn30xx; struct cvmx_pko_mem_debug7_cn30xx cn31xx; struct cvmx_pko_mem_debug7_cn30xx cn38xx; struct cvmx_pko_mem_debug7_cn30xx cn38xxp2; struct cvmx_pko_mem_debug7_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t qos : 5; /**< QOS mask to enable the queue when set */ uint64_t tail : 1; /**< This queue is the last (tail) in the queue array */ uint64_t buf_siz : 13; /**< Command buffer remaining size in words */ uint64_t buf_ptr : 33; /**< Command word pointer */ uint64_t qcb_widx : 6; /**< Buffer write index for QCB */ uint64_t qcb_ridx : 6; /**< Buffer read index for QCB */ #else uint64_t qcb_ridx : 6; uint64_t qcb_widx : 6; uint64_t buf_ptr : 33; uint64_t buf_siz : 13; uint64_t tail : 1; uint64_t qos : 5; #endif } cn50xx; struct cvmx_pko_mem_debug7_cn50xx cn52xx; struct cvmx_pko_mem_debug7_cn50xx cn52xxp1; struct cvmx_pko_mem_debug7_cn50xx cn56xx; struct cvmx_pko_mem_debug7_cn50xx cn56xxp1; struct cvmx_pko_mem_debug7_cn50xx cn58xx; struct cvmx_pko_mem_debug7_cn50xx cn58xxp1; } cvmx_pko_mem_debug7_t; /** * cvmx_pko_mem_debug8 * * Notes: * Internal per-queue state intended for debug use only - pko_prt_qsb.state[91:64] * This CSR is a memory of 256 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. 
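*
* For illustration, a minimal sketch that scans the per-queue doorbell
* counts on a CN52XX-family part (assumes the CVMX_PKO_REG_READ_IDX and
* CVMX_PKO_MEM_DEBUG8 address macros and the cvmx_read_csr()/
* cvmx_write_csr() accessors declared in the SDK's companion headers):
*
*   cvmx_pko_reg_read_idx_t idx;
*   cvmx_pko_mem_debug8_t dbg8;
*   int qid;
*   for (qid = 0; qid < 256; qid++) {
*       idx.u64 = 0;
*       idx.s.index = qid;      // one of the 256 queue entries
*       cvmx_write_csr(CVMX_PKO_REG_READ_IDX, idx.u64);
*       dbg8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
*       // dbg8.cn52xx.doorbell = command words still pending for qid
*   }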
*/ typedef union { uint64_t u64; struct cvmx_pko_mem_debug8_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_59_63 : 5; uint64_t tail : 1; /**< This queue is the last (tail) in the queue array */ uint64_t buf_siz : 13; /**< Command buffer remaining size in words */ uint64_t reserved_0_44 : 45; #else uint64_t reserved_0_44 : 45; uint64_t buf_siz : 13; uint64_t tail : 1; uint64_t reserved_59_63 : 5; #endif } s; struct cvmx_pko_mem_debug8_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t qos : 5; /**< QOS mask to enable the queue when set */ uint64_t tail : 1; /**< This queue is the last (tail) in the queue array */ uint64_t buf_siz : 13; /**< Command buffer remaining size in words */ uint64_t buf_ptr : 33; /**< Command word pointer */ uint64_t qcb_widx : 6; /**< Buffer write index for QCB */ uint64_t qcb_ridx : 6; /**< Buffer read index for QCB */ #else uint64_t qcb_ridx : 6; uint64_t qcb_widx : 6; uint64_t buf_ptr : 33; uint64_t buf_siz : 13; uint64_t tail : 1; uint64_t qos : 5; #endif } cn30xx; struct cvmx_pko_mem_debug8_cn30xx cn31xx; struct cvmx_pko_mem_debug8_cn30xx cn38xx; struct cvmx_pko_mem_debug8_cn30xx cn38xxp2; struct cvmx_pko_mem_debug8_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_28_63 : 36; uint64_t doorbell : 20; /**< Doorbell count */ uint64_t reserved_6_7 : 2; uint64_t static_p : 1; /**< Static priority */ uint64_t s_tail : 1; /**< Static tail */ uint64_t static_q : 1; /**< Static priority */ uint64_t qos : 3; /**< QOS mask to enable the queue when set */ #else uint64_t qos : 3; uint64_t static_q : 1; uint64_t s_tail : 1; uint64_t static_p : 1; uint64_t reserved_6_7 : 2; uint64_t doorbell : 20; uint64_t reserved_28_63 : 36; #endif } cn50xx; struct cvmx_pko_mem_debug8_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_29_63 : 35; uint64_t preempter : 1; /**< Preempter */ uint64_t doorbell : 20; /**< Doorbell count */ uint64_t reserved_7_7 : 1; uint64_t preemptee : 1; /**< Preemptee */ uint64_t static_p : 1; /**< Static priority */ uint64_t s_tail : 1; /**< Static tail */ uint64_t static_q : 1; /**< Static priority */ uint64_t qos : 3; /**< QOS mask to enable the queue when set */ #else uint64_t qos : 3; uint64_t static_q : 1; uint64_t s_tail : 1; uint64_t static_p : 1; uint64_t preemptee : 1; uint64_t reserved_7_7 : 1; uint64_t doorbell : 20; uint64_t preempter : 1; uint64_t reserved_29_63 : 35; #endif } cn52xx; struct cvmx_pko_mem_debug8_cn52xx cn52xxp1; struct cvmx_pko_mem_debug8_cn52xx cn56xx; struct cvmx_pko_mem_debug8_cn52xx cn56xxp1; struct cvmx_pko_mem_debug8_cn50xx cn58xx; struct cvmx_pko_mem_debug8_cn50xx cn58xxp1; } cvmx_pko_mem_debug8_t; /** * cvmx_pko_mem_debug9 * * Notes: * Internal per-port state intended for debug use only - pko.dat.ptr.ptrs0, pko.dat.ptr.ptrs3 * This CSR is a memory of 40 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. 
*/ typedef union { uint64_t u64; struct cvmx_pko_mem_debug9_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_49_63 : 15; uint64_t ptrs0 : 17; /**< Internal state */ uint64_t reserved_0_31 : 32; #else uint64_t reserved_0_31 : 32; uint64_t ptrs0 : 17; uint64_t reserved_49_63 : 15; #endif } s; struct cvmx_pko_mem_debug9_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_28_63 : 36; uint64_t doorbell : 20; /**< Doorbell count */ uint64_t reserved_5_7 : 3; uint64_t s_tail : 1; /**< reads as zero (S_TAIL cannot be read) */ uint64_t static_q : 1; /**< reads as zero (STATIC_Q cannot be read) */ uint64_t qos : 3; /**< QOS mask to enable the queue when set */ #else uint64_t qos : 3; uint64_t static_q : 1; uint64_t s_tail : 1; uint64_t reserved_5_7 : 3; uint64_t doorbell : 20; uint64_t reserved_28_63 : 36; #endif } cn30xx; struct cvmx_pko_mem_debug9_cn30xx cn31xx; struct cvmx_pko_mem_debug9_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_28_63 : 36; uint64_t doorbell : 20; /**< Doorbell count */ uint64_t reserved_6_7 : 2; uint64_t static_p : 1; /**< Static priority (port) */ uint64_t s_tail : 1; /**< Static tail */ uint64_t static_q : 1; /**< Static priority */ uint64_t qos : 3; /**< QOS mask to enable the queue when set */ #else uint64_t qos : 3; uint64_t static_q : 1; uint64_t s_tail : 1; uint64_t static_p : 1; uint64_t reserved_6_7 : 2; uint64_t doorbell : 20; uint64_t reserved_28_63 : 36; #endif } cn38xx; struct cvmx_pko_mem_debug9_cn38xx cn38xxp2; struct cvmx_pko_mem_debug9_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_49_63 : 15; uint64_t ptrs0 : 17; /**< Internal state */ uint64_t reserved_17_31 : 15; uint64_t ptrs3 : 17; /**< Internal state */ #else uint64_t ptrs3 : 17; uint64_t reserved_17_31 : 15; uint64_t ptrs0 : 17; uint64_t reserved_49_63 : 15; #endif } cn50xx; struct cvmx_pko_mem_debug9_cn50xx cn52xx; struct cvmx_pko_mem_debug9_cn50xx cn52xxp1; struct cvmx_pko_mem_debug9_cn50xx cn56xx; struct cvmx_pko_mem_debug9_cn50xx cn56xxp1; struct cvmx_pko_mem_debug9_cn50xx cn58xx; struct cvmx_pko_mem_debug9_cn50xx cn58xxp1; } cvmx_pko_mem_debug9_t; /** * cvmx_pko_mem_port_ptrs * * Notes: * Sets the port to engine mapping, per port. Ports marked as static priority need not be contiguous, * but they must be the lowest numbered PIDs mapped to this EID and must have QOS_MASK=0xff. If EID==8 * or EID==9, then PID[1:0] is used to direct the packet to the correct port on that interface. * EID==15 can be used for unused PKO-internal ports. * BP_PORT==63 means that the PKO-internal port is not backpressured. 
* BP_PORTs are assumed to belong to an interface as follows: * 36 <= BP_PORT < 40 -> loopback interface * 32 <= BP_PORT < 36 -> PCIe interface * 0 <= BP_PORT < 16 -> SGMII/Xaui interface 0 * The reset configuration is the following: * PID EID(ext port) BP_PORT QOS_MASK STATIC_P * ------------------------------------------- * 0 0( 0) 0 0xff 0 * 1 1( 1) 1 0xff 0 * 2 2( 2) 2 0xff 0 * 3 3( 3) 3 0xff 0 * 4 0( 0) 4 0xff 0 * 5 1( 1) 5 0xff 0 * 6 2( 2) 6 0xff 0 * 7 3( 3) 7 0xff 0 * 8 0( 0) 8 0xff 0 * 9 1( 1) 9 0xff 0 * 10 2( 2) 10 0xff 0 * 11 3( 3) 11 0xff 0 * 12 0( 0) 12 0xff 0 * 13 1( 1) 13 0xff 0 * 14 2( 2) 14 0xff 0 * 15 3( 3) 15 0xff 0 * ------------------------------------------- * 16 0( 0) 0 0xff 0 * 17 1( 1) 1 0xff 0 * 18 2( 2) 2 0xff 0 * 19 3( 3) 3 0xff 0 * 20 0( 0) 4 0xff 0 * 21 1( 1) 5 0xff 0 * 22 2( 2) 6 0xff 0 * 23 3( 3) 7 0xff 0 * 24 0( 0) 8 0xff 0 * 25 1( 1) 9 0xff 0 * 26 2( 2) 10 0xff 0 * 27 3( 3) 11 0xff 0 * 28 0( 0) 12 0xff 0 * 29 1( 1) 13 0xff 0 * 30 2( 2) 14 0xff 0 * 31 3( 3) 15 0xff 0 * ------------------------------------------- * 32 8(32) 32 0xff 0 * 33 8(33) 33 0xff 0 * 34 8(34) 34 0xff 0 * 35 8(35) 35 0xff 0 * ------------------------------------------- * 36 9(36) 36 0xff 0 * 37 9(37) 37 0xff 0 * 38 9(38) 38 0xff 0 * 39 9(39) 39 0xff 0 * * This CSR is a memory of 40 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. A read of any entry that has not been * previously written is illegal and will result in unpredictable CSR read data. */ typedef union { uint64_t u64; struct cvmx_pko_mem_port_ptrs_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_62_63 : 2; uint64_t static_p : 1; /**< Set if this PID has static priority */ uint64_t qos_mask : 8; /**< Mask to control priority across 8 QOS rounds */ uint64_t reserved_16_52 : 37; uint64_t bp_port : 6; /**< PID listens to BP_PORT for per-packet backpressure Legal BP_PORTs: 0-39, 63 (63 means no BP) */ uint64_t eid : 4; /**< Engine ID to which this port is mapped Legal EIDs: 0-9, 15 (15 only if port not used) */ uint64_t pid : 6; /**< Port ID[5:0] */ #else uint64_t pid : 6; uint64_t eid : 4; uint64_t bp_port : 6; uint64_t reserved_16_52 : 37; uint64_t qos_mask : 8; uint64_t static_p : 1; uint64_t reserved_62_63 : 2; #endif } s; struct cvmx_pko_mem_port_ptrs_s cn52xx; struct cvmx_pko_mem_port_ptrs_s cn52xxp1; struct cvmx_pko_mem_port_ptrs_s cn56xx; struct cvmx_pko_mem_port_ptrs_s cn56xxp1; } cvmx_pko_mem_port_ptrs_t; /** * cvmx_pko_mem_port_qos * * Notes: * Sets the QOS mask, per port. These QOS_MASK bits are logically and physically the same QOS_MASK * bits in PKO_MEM_PORT_PTRS. This CSR address allows the QOS_MASK bits to be written during PKO * operation without affecting any other port state. The engine to which port PID is mapped is engine * EID. Note that the port to engine mapping must be the same as was previously programmed via the * PKO_MEM_PORT_PTRS CSR. * This CSR is a memory of 40 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. A read of any entry that has not been * previously written is illegal and will result in unpredictable CSR read data. 
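*
* For illustration, a minimal sketch that thins port 3 down to every
* other QOS round at runtime (per the reset table above, PID 3 maps to
* EID 3; assumes the CVMX_PKO_MEM_PORT_QOS address macro and
* cvmx_write_csr() declared in the SDK's companion headers):
*
*   cvmx_pko_mem_port_qos_t port_qos;
*   port_qos.u64 = 0;
*   port_qos.s.pid = 3;         // port being updated
*   port_qos.s.eid = 3;         // must match the PKO_MEM_PORT_PTRS mapping
*   port_qos.s.qos_mask = 0x55; // participate in QOS rounds 0, 2, 4, 6
*   cvmx_write_csr(CVMX_PKO_MEM_PORT_QOS, port_qos.u64);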
*/ typedef union { uint64_t u64; struct cvmx_pko_mem_port_qos_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_61_63 : 3; uint64_t qos_mask : 8; /**< Mask to control priority across 8 QOS rounds */ uint64_t reserved_10_52 : 43; uint64_t eid : 4; /**< Engine ID to which this port is mapped Legal EIDs: 0-9 */ uint64_t pid : 6; /**< Port ID[5:0] */ #else uint64_t pid : 6; uint64_t eid : 4; uint64_t reserved_10_52 : 43; uint64_t qos_mask : 8; uint64_t reserved_61_63 : 3; #endif } s; struct cvmx_pko_mem_port_qos_s cn52xx; struct cvmx_pko_mem_port_qos_s cn52xxp1; struct cvmx_pko_mem_port_qos_s cn56xx; struct cvmx_pko_mem_port_qos_s cn56xxp1; } cvmx_pko_mem_port_qos_t; /** * cvmx_pko_mem_port_rate0 * * Notes: * This CSR is a memory of 40 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. A read of any entry that has not been * previously written is illegal and will result in unpredictable CSR read data. */ typedef union { uint64_t u64; struct cvmx_pko_mem_port_rate0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_51_63 : 13; uint64_t rate_word : 19; /**< Rate limiting adder per 8 byte */ uint64_t rate_pkt : 24; /**< Rate limiting adder per packet */ uint64_t reserved_6_7 : 2; uint64_t pid : 6; /**< Port ID[5:0] */ #else uint64_t pid : 6; uint64_t reserved_6_7 : 2; uint64_t rate_pkt : 24; uint64_t rate_word : 19; uint64_t reserved_51_63 : 13; #endif } s; struct cvmx_pko_mem_port_rate0_s cn52xx; struct cvmx_pko_mem_port_rate0_s cn52xxp1; struct cvmx_pko_mem_port_rate0_s cn56xx; struct cvmx_pko_mem_port_rate0_s cn56xxp1; } cvmx_pko_mem_port_rate0_t; /** * cvmx_pko_mem_port_rate1 * * Notes: * Writing PKO_MEM_PORT_RATE1[PID,RATE_LIM] has the side effect of setting the corresponding * accumulator to zero. * This CSR is a memory of 40 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. A read of any entry that has not been * previously written is illegal and will result in unpredictable CSR read data. */ typedef union { uint64_t u64; struct cvmx_pko_mem_port_rate1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t rate_lim : 24; /**< Rate limiting accumulator limit */ uint64_t reserved_6_7 : 2; uint64_t pid : 6; /**< Port ID[5:0] */ #else uint64_t pid : 6; uint64_t reserved_6_7 : 2; uint64_t rate_lim : 24; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_pko_mem_port_rate1_s cn52xx; struct cvmx_pko_mem_port_rate1_s cn52xxp1; struct cvmx_pko_mem_port_rate1_s cn56xx; struct cvmx_pko_mem_port_rate1_s cn56xxp1; } cvmx_pko_mem_port_rate1_t; /** * cvmx_pko_mem_queue_ptrs * * Notes: * Sets the queue to port mapping and the initial command buffer pointer, per queue * Each queue may map to at most one port. No more than 16 queues may map to a port. The set of * queues that is mapped to a port must be a contiguous array of queues. The port to which queue QID * is mapped is port PID. The index of queue QID in port PID's queue list is IDX. The last queue in * port PID's queue array must have its TAIL bit set. Unused queues must be mapped to port 63. * STATIC_Q marks queue QID as having static priority. STATIC_P marks the port PID to which QID is * mapped as having at least one queue with static priority. If any QID that maps to PID has static * priority, then all QID that map to PID must have STATIC_P set. Queues marked as static priority * must be contiguous and begin at IDX 0. 
The last queue that is marked as having static priority * must have its S_TAIL bit set. * This CSR is a memory of 256 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. A read of any entry that has not been * previously written is illegal and will result in unpredictable CSR read data. */ typedef union { uint64_t u64; struct cvmx_pko_mem_queue_ptrs_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t s_tail : 1; /**< Set if this QID is the tail of the static queues */ uint64_t static_p : 1; /**< Set if any QID in this PID has static priority */ uint64_t static_q : 1; /**< Set if this QID has static priority */ uint64_t qos_mask : 8; /**< Mask to control priority across 8 QOS rounds */ uint64_t buf_ptr : 36; /**< Command buffer pointer, <23:17> MBZ */ uint64_t tail : 1; /**< Set if this QID is the tail of the queue array */ uint64_t index : 3; /**< Index[2:0] (distance from head) in the queue array */ uint64_t port : 6; /**< Port ID to which this queue is mapped */ uint64_t queue : 7; /**< Queue ID[6:0] */ #else uint64_t queue : 7; uint64_t port : 6; uint64_t index : 3; uint64_t tail : 1; uint64_t buf_ptr : 36; uint64_t qos_mask : 8; uint64_t static_q : 1; uint64_t static_p : 1; uint64_t s_tail : 1; #endif } s; struct cvmx_pko_mem_queue_ptrs_s cn30xx; struct cvmx_pko_mem_queue_ptrs_s cn31xx; struct cvmx_pko_mem_queue_ptrs_s cn38xx; struct cvmx_pko_mem_queue_ptrs_s cn38xxp2; struct cvmx_pko_mem_queue_ptrs_s cn50xx; struct cvmx_pko_mem_queue_ptrs_s cn52xx; struct cvmx_pko_mem_queue_ptrs_s cn52xxp1; struct cvmx_pko_mem_queue_ptrs_s cn56xx; struct cvmx_pko_mem_queue_ptrs_s cn56xxp1; struct cvmx_pko_mem_queue_ptrs_s cn58xx; struct cvmx_pko_mem_queue_ptrs_s cn58xxp1; } cvmx_pko_mem_queue_ptrs_t; /** * cvmx_pko_mem_queue_qos * * Notes: * Sets the QOS mask, per queue. These QOS_MASK bits are logically and physically the same QOS_MASK * bits in PKO_MEM_QUEUE_PTRS. This CSR address allows the QOS_MASK bits to be written during PKO * operation without affecting any other queue state. The port to which queue QID is mapped is port * PID. Note that the queue to port mapping must be the same as was previously programmed via the * PKO_MEM_QUEUE_PTRS CSR. * This CSR is a memory of 256 entries, and thus, the PKO_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. A read of any entry that has not been * previously written is illegal and will result in unpredictable CSR read data. 
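 *
 * A minimal sketch of an in-flight QOS_MASK update, assuming the
 * cvmx_write_csr() helper and the CVMX_PKO_MEM_QUEUE_QOS address macro from
 * the companion cvmx headers. The QID/PID pair must match the mapping that
 * was already programmed via PKO_MEM_QUEUE_PTRS.
 *
 * @verbatim
 * void pko_set_queue_qos_mask(int qid, int pid, uint64_t mask)
 * {
 *     cvmx_pko_mem_queue_qos_t qos;
 *
 *     qos.u64 = 0;
 *     qos.s.qid = qid;        // queue whose mask is updated
 *     qos.s.pid = pid;        // must equal the queue's existing port
 *     qos.s.qos_mask = mask;  // one bit per QOS round
 *     cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS, qos.u64);
 * }
 * @endverbatim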
*/ typedef union { uint64_t u64; struct cvmx_pko_mem_queue_qos_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_61_63 : 3; uint64_t qos_mask : 8; /**< Mask to control priority across 8 QOS rounds */ uint64_t reserved_13_52 : 40; uint64_t pid : 6; /**< Port ID to which this queue is mapped */ uint64_t qid : 7; /**< Queue ID */ #else uint64_t qid : 7; uint64_t pid : 6; uint64_t reserved_13_52 : 40; uint64_t qos_mask : 8; uint64_t reserved_61_63 : 3; #endif } s; struct cvmx_pko_mem_queue_qos_s cn30xx; struct cvmx_pko_mem_queue_qos_s cn31xx; struct cvmx_pko_mem_queue_qos_s cn38xx; struct cvmx_pko_mem_queue_qos_s cn38xxp2; struct cvmx_pko_mem_queue_qos_s cn50xx; struct cvmx_pko_mem_queue_qos_s cn52xx; struct cvmx_pko_mem_queue_qos_s cn52xxp1; struct cvmx_pko_mem_queue_qos_s cn56xx; struct cvmx_pko_mem_queue_qos_s cn56xxp1; struct cvmx_pko_mem_queue_qos_s cn58xx; struct cvmx_pko_mem_queue_qos_s cn58xxp1; } cvmx_pko_mem_queue_qos_t; /** * cvmx_pko_reg_bist_result * * Notes: * Access to the internal BiST results * Each bit is the BiST result of an individual memory (per bit, 0=pass and 1=fail). */ typedef union { uint64_t u64; struct cvmx_pko_reg_bist_result_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_0_63 : 64; #else uint64_t reserved_0_63 : 64; #endif } s; struct cvmx_pko_reg_bist_result_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_27_63 : 37; uint64_t psb2 : 5; /**< BiST result of the PSB memories (0=pass, !0=fail) */ uint64_t count : 1; /**< BiST result of the COUNT memories (0=pass, !0=fail) */ uint64_t rif : 1; /**< BiST result of the RIF memories (0=pass, !0=fail) */ uint64_t wif : 1; /**< BiST result of the WIF memories (0=pass, !0=fail) */ uint64_t ncb : 1; /**< BiST result of the NCB memories (0=pass, !0=fail) */ uint64_t out : 1; /**< BiST result of the OUT memories (0=pass, !0=fail) */ uint64_t crc : 1; /**< BiST result of the CRC memories (0=pass, !0=fail) */ uint64_t chk : 1; /**< BiST result of the CHK memories (0=pass, !0=fail) */ uint64_t qsb : 2; /**< BiST result of the QSB memories (0=pass, !0=fail) */ uint64_t qcb : 2; /**< BiST result of the QCB memories (0=pass, !0=fail) */ uint64_t pdb : 4; /**< BiST result of the PDB memories (0=pass, !0=fail) */ uint64_t psb : 7; /**< BiST result of the PSB memories (0=pass, !0=fail) */ #else uint64_t psb : 7; uint64_t pdb : 4; uint64_t qcb : 2; uint64_t qsb : 2; uint64_t chk : 1; uint64_t crc : 1; uint64_t out : 1; uint64_t ncb : 1; uint64_t wif : 1; uint64_t rif : 1; uint64_t count : 1; uint64_t psb2 : 5; uint64_t reserved_27_63 : 37; #endif } cn30xx; struct cvmx_pko_reg_bist_result_cn30xx cn31xx; struct cvmx_pko_reg_bist_result_cn30xx cn38xx; struct cvmx_pko_reg_bist_result_cn30xx cn38xxp2; struct cvmx_pko_reg_bist_result_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_33_63 : 31; uint64_t csr : 1; /**< BiST result of CSR memories (0=pass, !0=fail) */ uint64_t iob : 1; /**< BiST result of IOB memories (0=pass, !0=fail) */ uint64_t out_crc : 1; /**< BiST result of OUT_CRC memories (0=pass, !0=fail) */ uint64_t out_ctl : 3; /**< BiST result of OUT_CTL memories (0=pass, !0=fail) */ uint64_t out_sta : 1; /**< BiST result of OUT_STA memories (0=pass, !0=fail) */ uint64_t out_wif : 1; /**< BiST result of OUT_WIF memories (0=pass, !0=fail) */ uint64_t prt_chk : 3; /**< BiST result of PRT_CHK memories (0=pass, !0=fail) */ uint64_t prt_nxt : 1; /**< BiST result of PRT_NXT memories (0=pass, !0=fail) */ uint64_t prt_psb : 6; /**< BiST result of PRT_PSB memories (0=pass, !0=fail) */ uint64_t ncb_inb : 2; 
/**< BiST result of NCB_INB memories (0=pass, !0=fail) */ uint64_t prt_qcb : 2; /**< BiST result of PRT_QCB memories (0=pass, !0=fail) */ uint64_t prt_qsb : 3; /**< BiST result of PRT_QSB memories (0=pass, !0=fail) */ uint64_t dat_dat : 4; /**< BiST result of DAT_DAT memories (0=pass, !0=fail) */ uint64_t dat_ptr : 4; /**< BiST result of DAT_PTR memories (0=pass, !0=fail) */ #else uint64_t dat_ptr : 4; uint64_t dat_dat : 4; uint64_t prt_qsb : 3; uint64_t prt_qcb : 2; uint64_t ncb_inb : 2; uint64_t prt_psb : 6; uint64_t prt_nxt : 1; uint64_t prt_chk : 3; uint64_t out_wif : 1; uint64_t out_sta : 1; uint64_t out_ctl : 3; uint64_t out_crc : 1; uint64_t iob : 1; uint64_t csr : 1; uint64_t reserved_33_63 : 31; #endif } cn50xx; struct cvmx_pko_reg_bist_result_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_35_63 : 29; uint64_t csr : 1; /**< BiST result of CSR memories (0=pass, !0=fail) */ uint64_t iob : 1; /**< BiST result of IOB memories (0=pass, !0=fail) */ uint64_t out_dat : 1; /**< BiST result of OUT_DAT memories (0=pass, !0=fail) */ uint64_t out_ctl : 3; /**< BiST result of OUT_CTL memories (0=pass, !0=fail) */ uint64_t out_sta : 1; /**< BiST result of OUT_STA memories (0=pass, !0=fail) */ uint64_t out_wif : 1; /**< BiST result of OUT_WIF memories (0=pass, !0=fail) */ uint64_t prt_chk : 3; /**< BiST result of PRT_CHK memories (0=pass, !0=fail) */ uint64_t prt_nxt : 1; /**< BiST result of PRT_NXT memories (0=pass, !0=fail) */ uint64_t prt_psb : 8; /**< BiST result of PRT_PSB memories (0=pass, !0=fail) */ uint64_t ncb_inb : 2; /**< BiST result of NCB_INB memories (0=pass, !0=fail) */ uint64_t prt_qcb : 2; /**< BiST result of PRT_QCB memories (0=pass, !0=fail) */ uint64_t prt_qsb : 3; /**< BiST result of PRT_QSB memories (0=pass, !0=fail) */ uint64_t prt_ctl : 2; /**< BiST result of PRT_CTL memories (0=pass, !0=fail) */ uint64_t dat_dat : 2; /**< BiST result of DAT_DAT memories (0=pass, !0=fail) */ uint64_t dat_ptr : 4; /**< BiST result of DAT_PTR memories (0=pass, !0=fail) */ #else uint64_t dat_ptr : 4; uint64_t dat_dat : 2; uint64_t prt_ctl : 2; uint64_t prt_qsb : 3; uint64_t prt_qcb : 2; uint64_t ncb_inb : 2; uint64_t prt_psb : 8; uint64_t prt_nxt : 1; uint64_t prt_chk : 3; uint64_t out_wif : 1; uint64_t out_sta : 1; uint64_t out_ctl : 3; uint64_t out_dat : 1; uint64_t iob : 1; uint64_t csr : 1; uint64_t reserved_35_63 : 29; #endif } cn52xx; struct cvmx_pko_reg_bist_result_cn52xx cn52xxp1; struct cvmx_pko_reg_bist_result_cn52xx cn56xx; struct cvmx_pko_reg_bist_result_cn52xx cn56xxp1; struct cvmx_pko_reg_bist_result_cn50xx cn58xx; struct cvmx_pko_reg_bist_result_cn50xx cn58xxp1; } cvmx_pko_reg_bist_result_t; /** * cvmx_pko_reg_cmd_buf * * Notes: * Sets the command buffer parameters * The size of the command buffer segments is measured in uint64s. The pool specifies which of the 8 free * lists is used when freeing command buffer segments.
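 *
 * For illustration only, assuming cvmx_write_csr(), the CVMX_PKO_REG_CMD_BUF
 * address macro, and 1024-byte FPA buffers in free list 1 (all of these names
 * come from the companion cvmx headers, not from this file):
 *
 * @verbatim
 * void pko_config_cmd_buf(void)
 * {
 *     cvmx_pko_reg_cmd_buf_t cmd_buf;
 *
 *     cmd_buf.u64 = 0;
 *     cmd_buf.s.pool = 1;             // free list used to free segments
 *     // 1024 bytes = 128 uint64s; reserving one word per segment for the
 *     // link to the next segment mirrors common cvmx PKO init code
 *     cmd_buf.s.size = 1024 / 8 - 1;
 *     cvmx_write_csr(CVMX_PKO_REG_CMD_BUF, cmd_buf.u64);
 * }
 * @endverbatim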
*/ typedef union { uint64_t u64; struct cvmx_pko_reg_cmd_buf_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_23_63 : 41; uint64_t pool : 3; /**< Free list used to free command buffer segments */ uint64_t reserved_13_19 : 7; uint64_t size : 13; /**< Number of uint64s per command buffer segment */ #else uint64_t size : 13; uint64_t reserved_13_19 : 7; uint64_t pool : 3; uint64_t reserved_23_63 : 41; #endif } s; struct cvmx_pko_reg_cmd_buf_s cn30xx; struct cvmx_pko_reg_cmd_buf_s cn31xx; struct cvmx_pko_reg_cmd_buf_s cn38xx; struct cvmx_pko_reg_cmd_buf_s cn38xxp2; struct cvmx_pko_reg_cmd_buf_s cn50xx; struct cvmx_pko_reg_cmd_buf_s cn52xx; struct cvmx_pko_reg_cmd_buf_s cn52xxp1; struct cvmx_pko_reg_cmd_buf_s cn56xx; struct cvmx_pko_reg_cmd_buf_s cn56xxp1; struct cvmx_pko_reg_cmd_buf_s cn58xx; struct cvmx_pko_reg_cmd_buf_s cn58xxp1; } cvmx_pko_reg_cmd_buf_t; /** * cvmx_pko_reg_crc_ctl# * * Notes: * Controls datapath reflection when calculating CRC * */ typedef union { uint64_t u64; struct cvmx_pko_reg_crc_ctlx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t invres : 1; /**< Invert the result */ uint64_t refin : 1; /**< Reflect the bits in each byte. Byte order does not change. - 0: CRC is calculated MSB to LSB - 1: CRC is calculated LSB to MSB */ #else uint64_t refin : 1; uint64_t invres : 1; uint64_t reserved_2_63 : 62; #endif } s; struct cvmx_pko_reg_crc_ctlx_s cn38xx; struct cvmx_pko_reg_crc_ctlx_s cn38xxp2; struct cvmx_pko_reg_crc_ctlx_s cn58xx; struct cvmx_pko_reg_crc_ctlx_s cn58xxp1; } cvmx_pko_reg_crc_ctlx_t; /** * cvmx_pko_reg_crc_enable * * Notes: * Enables CRC for the GMX ports. * */ typedef union { uint64_t u64; struct cvmx_pko_reg_crc_enable_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t enable : 32; /**< Mask for ports 31-0 to enable CRC Mask bit==0 means CRC not enabled Mask bit==1 means CRC enabled Note that CRC should be enabled only when using SPI4.2 */ #else uint64_t enable : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_pko_reg_crc_enable_s cn38xx; struct cvmx_pko_reg_crc_enable_s cn38xxp2; struct cvmx_pko_reg_crc_enable_s cn58xx; struct cvmx_pko_reg_crc_enable_s cn58xxp1; } cvmx_pko_reg_crc_enable_t; /** * cvmx_pko_reg_crc_iv# * * Notes: * Determines the IV used by the CRC algorithm * * PKO_CRC_IV * PKO_CRC_IV controls the initial state of the CRC algorithm. Octane can * support a wide range of CRC algorithms and as such, the IV must be * carefully constructed to meet the specific algorithm. The code below * determines the value to program into Octane based on the algorithm's IV * and width. In the case of Octane, the width should always be 32. * * PKO_CRC_IV0 sets the IV for ports 0-15 while PKO_CRC_IV1 sets the IV for * ports 16-31. * * @verbatim * unsigned octane_crc_iv(unsigned algorithm_iv, unsigned poly, unsigned w) * { * int i; * int doit; * unsigned int current_val = algorithm_iv; * * for(i = 0; i < w; i++) { * doit = current_val & 0x1; * * if(doit) current_val ^= poly; * assert(!(current_val & 0x1)); * * current_val = (current_val >> 1) | (doit << (w-1)); * } * * return current_val; * } * @endverbatim */ typedef union { uint64_t u64; struct cvmx_pko_reg_crc_ivx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t iv : 32; /**< IV used by the CRC algorithm. Default is FCS32.
*/ #else uint64_t iv : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_pko_reg_crc_ivx_s cn38xx; struct cvmx_pko_reg_crc_ivx_s cn38xxp2; struct cvmx_pko_reg_crc_ivx_s cn58xx; struct cvmx_pko_reg_crc_ivx_s cn58xxp1; } cvmx_pko_reg_crc_ivx_t; /** * cvmx_pko_reg_debug0 * * Notes: * Note that this CSR is present only in chip revisions beginning with pass2. * */ typedef union { uint64_t u64; struct cvmx_pko_reg_debug0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t asserts : 64; /**< Various assertion checks */ #else uint64_t asserts : 64; #endif } s; struct cvmx_pko_reg_debug0_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_17_63 : 47; uint64_t asserts : 17; /**< Various assertion checks */ #else uint64_t asserts : 17; uint64_t reserved_17_63 : 47; #endif } cn30xx; struct cvmx_pko_reg_debug0_cn30xx cn31xx; struct cvmx_pko_reg_debug0_cn30xx cn38xx; struct cvmx_pko_reg_debug0_cn30xx cn38xxp2; struct cvmx_pko_reg_debug0_s cn50xx; struct cvmx_pko_reg_debug0_s cn52xx; struct cvmx_pko_reg_debug0_s cn52xxp1; struct cvmx_pko_reg_debug0_s cn56xx; struct cvmx_pko_reg_debug0_s cn56xxp1; struct cvmx_pko_reg_debug0_s cn58xx; struct cvmx_pko_reg_debug0_s cn58xxp1; } cvmx_pko_reg_debug0_t; /** * cvmx_pko_reg_debug1 */ typedef union { uint64_t u64; struct cvmx_pko_reg_debug1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t asserts : 64; /**< Various assertion checks */ #else uint64_t asserts : 64; #endif } s; struct cvmx_pko_reg_debug1_s cn50xx; struct cvmx_pko_reg_debug1_s cn52xx; struct cvmx_pko_reg_debug1_s cn52xxp1; struct cvmx_pko_reg_debug1_s cn56xx; struct cvmx_pko_reg_debug1_s cn56xxp1; struct cvmx_pko_reg_debug1_s cn58xx; struct cvmx_pko_reg_debug1_s cn58xxp1; } cvmx_pko_reg_debug1_t; /** * cvmx_pko_reg_debug2 */ typedef union { uint64_t u64; struct cvmx_pko_reg_debug2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t asserts : 64; /**< Various assertion checks */ #else uint64_t asserts : 64; #endif } s; struct cvmx_pko_reg_debug2_s cn50xx; struct cvmx_pko_reg_debug2_s cn52xx; struct cvmx_pko_reg_debug2_s cn52xxp1; struct cvmx_pko_reg_debug2_s cn56xx; struct cvmx_pko_reg_debug2_s cn56xxp1; struct cvmx_pko_reg_debug2_s cn58xx; struct cvmx_pko_reg_debug2_s cn58xxp1; } cvmx_pko_reg_debug2_t; /** * cvmx_pko_reg_debug3 */ typedef union { uint64_t u64; struct cvmx_pko_reg_debug3_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t asserts : 64; /**< Various assertion checks */ #else uint64_t asserts : 64; #endif } s; struct cvmx_pko_reg_debug3_s cn50xx; struct cvmx_pko_reg_debug3_s cn52xx; struct cvmx_pko_reg_debug3_s cn52xxp1; struct cvmx_pko_reg_debug3_s cn56xx; struct cvmx_pko_reg_debug3_s cn56xxp1; struct cvmx_pko_reg_debug3_s cn58xx; struct cvmx_pko_reg_debug3_s cn58xxp1; } cvmx_pko_reg_debug3_t; /** * cvmx_pko_reg_engine_inflight * * Notes: * Sets the maximum number of inflight packets, per engine. Values greater than 4 are illegal. * Setting an engine's value to 0 effectively stops the engine. 
* Note that engines 4-7 do not exist */ typedef union { uint64_t u64; struct cvmx_pko_reg_engine_inflight_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_40_63 : 24; uint64_t engine9 : 4; /**< Maximum number of inflight packets for engine9 */ uint64_t engine8 : 4; /**< Maximum number of inflight packets for engine8 */ uint64_t engine7 : 4; /**< Maximum number of inflight packets for engine7 */ uint64_t engine6 : 4; /**< Maximum number of inflight packets for engine6 */ uint64_t engine5 : 4; /**< Maximum number of inflight packets for engine5 */ uint64_t engine4 : 4; /**< Maximum number of inflight packets for engine4 */ uint64_t engine3 : 4; /**< Maximum number of inflight packets for engine3 */ uint64_t engine2 : 4; /**< Maximum number of inflight packets for engine2 */ uint64_t engine1 : 4; /**< Maximum number of inflight packets for engine1 */ uint64_t engine0 : 4; /**< Maximum number of inflight packets for engine0 */ #else uint64_t engine0 : 4; uint64_t engine1 : 4; uint64_t engine2 : 4; uint64_t engine3 : 4; uint64_t engine4 : 4; uint64_t engine5 : 4; uint64_t engine6 : 4; uint64_t engine7 : 4; uint64_t engine8 : 4; uint64_t engine9 : 4; uint64_t reserved_40_63 : 24; #endif } s; struct cvmx_pko_reg_engine_inflight_s cn52xx; struct cvmx_pko_reg_engine_inflight_s cn52xxp1; struct cvmx_pko_reg_engine_inflight_s cn56xx; struct cvmx_pko_reg_engine_inflight_s cn56xxp1; } cvmx_pko_reg_engine_inflight_t; /** * cvmx_pko_reg_engine_thresh * * Notes: * When not enabled, packet data may be sent as soon as it is written into PKO's internal buffers. * When enabled and the packet fits entirely in the PKO's internal buffer, none of the packet data will * be sent until all of it has been written into the PKO's internal buffer. Note that a packet is * considered to fit entirely only if the packet's size is <= BUFFER_SIZE-8. When enabled and the * packet does not fit entirely in the PKO's internal buffer, none of the packet data will be sent until * at least BUFFER_SIZE-256 bytes of the packet have been written into the PKO's internal buffer * (note that BUFFER_SIZE is a function of PKO_REG_GMX_PORT_MODE above) * Note that engines 4-7 do not exist, so MASK<7:4> MBZ */ typedef union { uint64_t u64; struct cvmx_pko_reg_engine_thresh_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_10_63 : 54; uint64_t mask : 10; /**< Mask[n]=0 disables packet send threshold for engine n Mask[n]=1 enables packet send threshold for engine n */ #else uint64_t mask : 10; uint64_t reserved_10_63 : 54; #endif } s; struct cvmx_pko_reg_engine_thresh_s cn52xx; struct cvmx_pko_reg_engine_thresh_s cn52xxp1; struct cvmx_pko_reg_engine_thresh_s cn56xx; struct cvmx_pko_reg_engine_thresh_s cn56xxp1; } cvmx_pko_reg_engine_thresh_t; /** * cvmx_pko_reg_error * * Notes: * Note that this CSR is present only in chip revisions beginning with pass2.
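 *
 * A hedged service-routine sketch, assuming cvmx_read_csr()/cvmx_write_csr(),
 * the CVMX_PKO_REG_ERROR address macro, and the write-1-to-clear handling
 * these error bits typically receive in cvmx error code:
 *
 * @verbatim
 * uint64_t pko_service_error(void)
 * {
 *     cvmx_pko_reg_error_t err;
 *
 *     err.u64 = cvmx_read_csr(CVMX_PKO_REG_ERROR);
 *     // PARITY, DOORBELL and CURRZERO report the conditions listed in the
 *     // field descriptions below; write the set bits back to acknowledge
 *     // them (write-1-to-clear is assumed here)
 *     if (err.u64)
 *         cvmx_write_csr(CVMX_PKO_REG_ERROR, err.u64);
 *     return err.u64;
 * }
 * @endverbatim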
* */ typedef union { uint64_t u64; struct cvmx_pko_reg_error_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_3_63 : 61; uint64_t currzero : 1; /**< A packet data pointer has size=0 */ uint64_t doorbell : 1; /**< A doorbell count has overflowed */ uint64_t parity : 1; /**< Read parity error at port data buffer */ #else uint64_t parity : 1; uint64_t doorbell : 1; uint64_t currzero : 1; uint64_t reserved_3_63 : 61; #endif } s; struct cvmx_pko_reg_error_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t doorbell : 1; /**< A doorbell count has overflowed */ uint64_t parity : 1; /**< Read parity error at port data buffer */ #else uint64_t parity : 1; uint64_t doorbell : 1; uint64_t reserved_2_63 : 62; #endif } cn30xx; struct cvmx_pko_reg_error_cn30xx cn31xx; struct cvmx_pko_reg_error_cn30xx cn38xx; struct cvmx_pko_reg_error_cn30xx cn38xxp2; struct cvmx_pko_reg_error_s cn50xx; struct cvmx_pko_reg_error_s cn52xx; struct cvmx_pko_reg_error_s cn52xxp1; struct cvmx_pko_reg_error_s cn56xx; struct cvmx_pko_reg_error_s cn56xxp1; struct cvmx_pko_reg_error_s cn58xx; struct cvmx_pko_reg_error_s cn58xxp1; } cvmx_pko_reg_error_t; /** * cvmx_pko_reg_flags * * Notes: * When set, ENA_PKO enables the PKO picker and places the PKO in normal operation. When set, ENA_DWB * enables the use of DontWriteBacks during the buffer freeing operations. When not set, STORE_BE inverts * bits[2:0] of the STORE0 byte write address. When set, RESET causes a 4-cycle reset pulse to the * entire box. */ typedef union { uint64_t u64; struct cvmx_pko_reg_flags_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t reset : 1; /**< Reset oneshot pulse */ uint64_t store_be : 1; /**< Force STORE0 byte write address to big endian */ uint64_t ena_dwb : 1; /**< Set to enable DontWriteBacks */ uint64_t ena_pko : 1; /**< Set to enable the PKO picker */ #else uint64_t ena_pko : 1; uint64_t ena_dwb : 1; uint64_t store_be : 1; uint64_t reset : 1; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_pko_reg_flags_s cn30xx; struct cvmx_pko_reg_flags_s cn31xx; struct cvmx_pko_reg_flags_s cn38xx; struct cvmx_pko_reg_flags_s cn38xxp2; struct cvmx_pko_reg_flags_s cn50xx; struct cvmx_pko_reg_flags_s cn52xx; struct cvmx_pko_reg_flags_s cn52xxp1; struct cvmx_pko_reg_flags_s cn56xx; struct cvmx_pko_reg_flags_s cn56xxp1; struct cvmx_pko_reg_flags_s cn58xx; struct cvmx_pko_reg_flags_s cn58xxp1; } cvmx_pko_reg_flags_t; /** * cvmx_pko_reg_gmx_port_mode * * Notes: * The system has a total of 4 + 0 + 4 + 4 ports and 4 + 0 + 1 + 1 engines (GM0 + GM1 + PCI + LOOP). * This CSR sets the number of GMX0 ports and amount of local storage per engine. * It has no effect on the number of ports or amount of local storage per engine for * PCI or LOOP. When all GMX ports are used (MODE0=2), each GMX engine has 2.5kB of local * storage. Increasing the value of MODEn by 1 decreases the number of GMX ports by a power of 2 and * increases the local storage per PKO GMX engine by a power of 2. * Modes 0 and 1 are illegal and, if selected, are treated as mode 2. 
* * MODE[n] GM[0] PCI LOOP GM[0] PCI LOOP * ports ports ports storage/engine storage/engine storage/engine * 0 4 4 4 2.5kB 2.5kB 2.5kB * 1 4 4 4 2.5kB 2.5kB 2.5kB * 2 4 4 4 2.5kB 2.5kB 2.5kB * 3 2 4 4 5.0kB 2.5kB 2.5kB * 4 1 4 4 10.0kB 2.5kB 2.5kB */ typedef union { uint64_t u64; struct cvmx_pko_reg_gmx_port_mode_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_6_63 : 58; uint64_t mode1 : 3; /**< # of GM1 ports = 16 >> MODE1, 0 <= MODE1 <= 5 */ uint64_t mode0 : 3; /**< # of GM0 ports = 16 >> MODE0, 0 <= MODE0 <= 5 */ #else uint64_t mode0 : 3; uint64_t mode1 : 3; uint64_t reserved_6_63 : 58; #endif } s; struct cvmx_pko_reg_gmx_port_mode_s cn30xx; struct cvmx_pko_reg_gmx_port_mode_s cn31xx; struct cvmx_pko_reg_gmx_port_mode_s cn38xx; struct cvmx_pko_reg_gmx_port_mode_s cn38xxp2; struct cvmx_pko_reg_gmx_port_mode_s cn50xx; struct cvmx_pko_reg_gmx_port_mode_s cn52xx; struct cvmx_pko_reg_gmx_port_mode_s cn52xxp1; struct cvmx_pko_reg_gmx_port_mode_s cn56xx; struct cvmx_pko_reg_gmx_port_mode_s cn56xxp1; struct cvmx_pko_reg_gmx_port_mode_s cn58xx; struct cvmx_pko_reg_gmx_port_mode_s cn58xxp1; } cvmx_pko_reg_gmx_port_mode_t; /** * cvmx_pko_reg_int_mask * * Notes: * When a mask bit is set, the corresponding interrupt is enabled. * */ typedef union { uint64_t u64; struct cvmx_pko_reg_int_mask_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_3_63 : 61; uint64_t currzero : 1; /**< Bit mask corresponding to PKO_REG_ERROR[2] above */ uint64_t doorbell : 1; /**< Bit mask corresponding to PKO_REG_ERROR[1] above */ uint64_t parity : 1; /**< Bit mask corresponding to PKO_REG_ERROR[0] above */ #else uint64_t parity : 1; uint64_t doorbell : 1; uint64_t currzero : 1; uint64_t reserved_3_63 : 61; #endif } s; struct cvmx_pko_reg_int_mask_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t doorbell : 1; /**< Bit mask corresponding to PKO_REG_ERROR[1] above */ uint64_t parity : 1; /**< Bit mask corresponding to PKO_REG_ERROR[0] above */ #else uint64_t parity : 1; uint64_t doorbell : 1; uint64_t reserved_2_63 : 62; #endif } cn30xx; struct cvmx_pko_reg_int_mask_cn30xx cn31xx; struct cvmx_pko_reg_int_mask_cn30xx cn38xx; struct cvmx_pko_reg_int_mask_cn30xx cn38xxp2; struct cvmx_pko_reg_int_mask_s cn50xx; struct cvmx_pko_reg_int_mask_s cn52xx; struct cvmx_pko_reg_int_mask_s cn52xxp1; struct cvmx_pko_reg_int_mask_s cn56xx; struct cvmx_pko_reg_int_mask_s cn56xxp1; struct cvmx_pko_reg_int_mask_s cn58xx; struct cvmx_pko_reg_int_mask_s cn58xxp1; } cvmx_pko_reg_int_mask_t; /** * cvmx_pko_reg_queue_mode * * Notes: * Sets the number of queues and amount of local storage per queue * The system has a total of 256 queues and (256*8) words of local command storage. This CSR sets the * number of queues that are used. Increasing the value of MODE by 1 decreases the number of queues * by a power of 2 and increases the local storage per queue by a power of 2. 
* MODEn queues storage/queue * 0 256 64B ( 8 words) * 1 128 128B (16 words) * 2 64 256B (32 words) */ typedef union { uint64_t u64; struct cvmx_pko_reg_queue_mode_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t mode : 2; /**< # of queues = 256 >> MODE, 0 <= MODE <= 2 */ #else uint64_t mode : 2; uint64_t reserved_2_63 : 62; #endif } s; struct cvmx_pko_reg_queue_mode_s cn30xx; struct cvmx_pko_reg_queue_mode_s cn31xx; struct cvmx_pko_reg_queue_mode_s cn38xx; struct cvmx_pko_reg_queue_mode_s cn38xxp2; struct cvmx_pko_reg_queue_mode_s cn50xx; struct cvmx_pko_reg_queue_mode_s cn52xx; struct cvmx_pko_reg_queue_mode_s cn52xxp1; struct cvmx_pko_reg_queue_mode_s cn56xx; struct cvmx_pko_reg_queue_mode_s cn56xxp1; struct cvmx_pko_reg_queue_mode_s cn58xx; struct cvmx_pko_reg_queue_mode_s cn58xxp1; } cvmx_pko_reg_queue_mode_t; /** * cvmx_pko_reg_queue_ptrs1 * * Notes: * This CSR is used with PKO_MEM_QUEUE_PTRS and PKO_MEM_QUEUE_QOS to allow access to queues 128-255 * and to allow mapping of up to 16 queues per port. When programming queues 128-255, the * programming sequence must first write PKO_REG_QUEUE_PTRS1 and then write PKO_MEM_QUEUE_PTRS or * PKO_MEM_QUEUE_QOS for each queue. * See the descriptions of PKO_MEM_QUEUE_PTRS and PKO_MEM_QUEUE_QOS for further explanation of queue * programming. */ typedef union { uint64_t u64; struct cvmx_pko_reg_queue_ptrs1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t idx3 : 1; /**< [3] of Index (distance from head) in the queue array */ uint64_t qid7 : 1; /**< [7] of Queue ID */ #else uint64_t qid7 : 1; uint64_t idx3 : 1; uint64_t reserved_2_63 : 62; #endif } s; struct cvmx_pko_reg_queue_ptrs1_s cn50xx; struct cvmx_pko_reg_queue_ptrs1_s cn52xx; struct cvmx_pko_reg_queue_ptrs1_s cn52xxp1; struct cvmx_pko_reg_queue_ptrs1_s cn56xx; struct cvmx_pko_reg_queue_ptrs1_s cn56xxp1; struct cvmx_pko_reg_queue_ptrs1_s cn58xx; struct cvmx_pko_reg_queue_ptrs1_s cn58xxp1; } cvmx_pko_reg_queue_ptrs1_t; /** * cvmx_pko_reg_read_idx * * Notes: * Provides the read index during a CSR read operation to any of the CSRs that are physically stored * as memories. The names of these CSRs begin with the prefix "PKO_MEM_". * IDX[7:0] is the read index. INC[7:0] is an increment that is added to IDX[7:0] after any CSR read. * The intended use is to initially write this CSR such that IDX=0 and INC=1. Then, the entire * contents of a CSR memory can be read with consecutive CSR read commands. */ typedef union { uint64_t u64; struct cvmx_pko_reg_read_idx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t inc : 8; /**< Increment to add to current index for next index */ uint64_t index : 8; /**< Index to use for next memory CSR read */ #else uint64_t index : 8; uint64_t inc : 8; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_pko_reg_read_idx_s cn30xx; struct cvmx_pko_reg_read_idx_s cn31xx; struct cvmx_pko_reg_read_idx_s cn38xx; struct cvmx_pko_reg_read_idx_s cn38xxp2; struct cvmx_pko_reg_read_idx_s cn50xx; struct cvmx_pko_reg_read_idx_s cn52xx; struct cvmx_pko_reg_read_idx_s cn52xxp1; struct cvmx_pko_reg_read_idx_s cn56xx; struct cvmx_pko_reg_read_idx_s cn56xxp1; struct cvmx_pko_reg_read_idx_s cn58xx; struct cvmx_pko_reg_read_idx_s cn58xxp1; } cvmx_pko_reg_read_idx_t; /** * cvmx_pow_bist_stat * * POW_BIST_STAT = POW BIST Status Register * * Contains the BIST status for the POW memories ('0' = pass, '1' = fail). * * Also contains the BIST status for the PP's.
Each bit in the PP field is the OR of all BIST * results for the corresponding physical PP ('0' = pass, '1' = fail). */ typedef union { uint64_t u64; struct cvmx_pow_bist_stat_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t pp : 16; /**< Physical PP BIST status */ uint64_t reserved_0_15 : 16; #else uint64_t reserved_0_15 : 16; uint64_t pp : 16; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_pow_bist_stat_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_17_63 : 47; uint64_t pp : 1; /**< Physical PP BIST status */ uint64_t reserved_9_15 : 7; uint64_t cam : 1; /**< POW CAM BIST status */ uint64_t nbt1 : 1; /**< NCB transmitter memory 1 BIST status */ uint64_t nbt0 : 1; /**< NCB transmitter memory 0 BIST status */ uint64_t index : 1; /**< Index memory BIST status */ uint64_t fidx : 1; /**< Forward index memory BIST status */ uint64_t nbr1 : 1; /**< NCB receiver memory 1 BIST status */ uint64_t nbr0 : 1; /**< NCB receiver memory 0 BIST status */ uint64_t pend : 1; /**< Pending switch memory BIST status */ uint64_t adr : 1; /**< Address memory BIST status */ #else uint64_t adr : 1; uint64_t pend : 1; uint64_t nbr0 : 1; uint64_t nbr1 : 1; uint64_t fidx : 1; uint64_t index : 1; uint64_t nbt0 : 1; uint64_t nbt1 : 1; uint64_t cam : 1; uint64_t reserved_9_15 : 7; uint64_t pp : 1; uint64_t reserved_17_63 : 47; #endif } cn30xx; struct cvmx_pow_bist_stat_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_18_63 : 46; uint64_t pp : 2; /**< Physical PP BIST status */ uint64_t reserved_9_15 : 7; uint64_t cam : 1; /**< POW CAM BIST status */ uint64_t nbt1 : 1; /**< NCB transmitter memory 1 BIST status */ uint64_t nbt0 : 1; /**< NCB transmitter memory 0 BIST status */ uint64_t index : 1; /**< Index memory BIST status */ uint64_t fidx : 1; /**< Forward index memory BIST status */ uint64_t nbr1 : 1; /**< NCB receiver memory 1 BIST status */ uint64_t nbr0 : 1; /**< NCB receiver memory 0 BIST status */ uint64_t pend : 1; /**< Pending switch memory BIST status */ uint64_t adr : 1; /**< Address memory BIST status */ #else uint64_t adr : 1; uint64_t pend : 1; uint64_t nbr0 : 1; uint64_t nbr1 : 1; uint64_t fidx : 1; uint64_t index : 1; uint64_t nbt0 : 1; uint64_t nbt1 : 1; uint64_t cam : 1; uint64_t reserved_9_15 : 7; uint64_t pp : 2; uint64_t reserved_18_63 : 46; #endif } cn31xx; struct cvmx_pow_bist_stat_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t pp : 16; /**< Physical PP BIST status */ uint64_t reserved_10_15 : 6; uint64_t cam : 1; /**< POW CAM BIST status */ uint64_t nbt : 1; /**< NCB transmitter memory BIST status */ uint64_t index : 1; /**< Index memory BIST status */ uint64_t fidx : 1; /**< Forward index memory BIST status */ uint64_t nbr1 : 1; /**< NCB receiver memory 1 BIST status */ uint64_t nbr0 : 1; /**< NCB receiver memory 0 BIST status */ uint64_t pend1 : 1; /**< Pending switch memory 1 BIST status */ uint64_t pend0 : 1; /**< Pending switch memory 0 BIST status */ uint64_t adr1 : 1; /**< Address memory 1 BIST status */ uint64_t adr0 : 1; /**< Address memory 0 BIST status */ #else uint64_t adr0 : 1; uint64_t adr1 : 1; uint64_t pend0 : 1; uint64_t pend1 : 1; uint64_t nbr0 : 1; uint64_t nbr1 : 1; uint64_t fidx : 1; uint64_t index : 1; uint64_t nbt : 1; uint64_t cam : 1; uint64_t reserved_10_15 : 6; uint64_t pp : 16; uint64_t reserved_32_63 : 32; #endif } cn38xx; struct cvmx_pow_bist_stat_cn38xx cn38xxp2; struct cvmx_pow_bist_stat_cn31xx cn50xx; struct cvmx_pow_bist_stat_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN 
uint64_t reserved_20_63 : 44; uint64_t pp : 4; /**< Physical PP BIST status */ uint64_t reserved_9_15 : 7; uint64_t cam : 1; /**< POW CAM BIST status */ uint64_t nbt1 : 1; /**< NCB transmitter memory 1 BIST status */ uint64_t nbt0 : 1; /**< NCB transmitter memory 0 BIST status */ uint64_t index : 1; /**< Index memory BIST status */ uint64_t fidx : 1; /**< Forward index memory BIST status */ uint64_t nbr1 : 1; /**< NCB receiver memory 1 BIST status */ uint64_t nbr0 : 1; /**< NCB receiver memory 0 BIST status */ uint64_t pend : 1; /**< Pending switch memory BIST status */ uint64_t adr : 1; /**< Address memory BIST status */ #else uint64_t adr : 1; uint64_t pend : 1; uint64_t nbr0 : 1; uint64_t nbr1 : 1; uint64_t fidx : 1; uint64_t index : 1; uint64_t nbt0 : 1; uint64_t nbt1 : 1; uint64_t cam : 1; uint64_t reserved_9_15 : 7; uint64_t pp : 4; uint64_t reserved_20_63 : 44; #endif } cn52xx; struct cvmx_pow_bist_stat_cn52xx cn52xxp1; struct cvmx_pow_bist_stat_cn56xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_28_63 : 36; uint64_t pp : 12; /**< Physical PP BIST status */ uint64_t reserved_10_15 : 6; uint64_t cam : 1; /**< POW CAM BIST status */ uint64_t nbt : 1; /**< NCB transmitter memory BIST status */ uint64_t index : 1; /**< Index memory BIST status */ uint64_t fidx : 1; /**< Forward index memory BIST status */ uint64_t nbr1 : 1; /**< NCB receiver memory 1 BIST status */ uint64_t nbr0 : 1; /**< NCB receiver memory 0 BIST status */ uint64_t pend1 : 1; /**< Pending switch memory 1 BIST status */ uint64_t pend0 : 1; /**< Pending switch memory 0 BIST status */ uint64_t adr1 : 1; /**< Address memory 1 BIST status */ uint64_t adr0 : 1; /**< Address memory 0 BIST status */ #else uint64_t adr0 : 1; uint64_t adr1 : 1; uint64_t pend0 : 1; uint64_t pend1 : 1; uint64_t nbr0 : 1; uint64_t nbr1 : 1; uint64_t fidx : 1; uint64_t index : 1; uint64_t nbt : 1; uint64_t cam : 1; uint64_t reserved_10_15 : 6; uint64_t pp : 12; uint64_t reserved_28_63 : 36; #endif } cn56xx; struct cvmx_pow_bist_stat_cn56xx cn56xxp1; struct cvmx_pow_bist_stat_cn38xx cn58xx; struct cvmx_pow_bist_stat_cn38xx cn58xxp1; } cvmx_pow_bist_stat_t; /** * cvmx_pow_ds_pc * * POW_DS_PC = POW De-Schedule Performance Counter * * Counts the number of de-schedule requests. Write to clear. */ typedef union { uint64_t u64; struct cvmx_pow_ds_pc_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t ds_pc : 32; /**< De-schedule performance counter */ #else uint64_t ds_pc : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_pow_ds_pc_s cn30xx; struct cvmx_pow_ds_pc_s cn31xx; struct cvmx_pow_ds_pc_s cn38xx; struct cvmx_pow_ds_pc_s cn38xxp2; struct cvmx_pow_ds_pc_s cn50xx; struct cvmx_pow_ds_pc_s cn52xx; struct cvmx_pow_ds_pc_s cn52xxp1; struct cvmx_pow_ds_pc_s cn56xx; struct cvmx_pow_ds_pc_s cn56xxp1; struct cvmx_pow_ds_pc_s cn58xx; struct cvmx_pow_ds_pc_s cn58xxp1; } cvmx_pow_ds_pc_t; /** * cvmx_pow_ecc_err * * POW_ECC_ERR = POW ECC Error Register * * Contains the single and double error bits and the corresponding interrupt enables for the ECC- * protected POW index memory. Also contains the syndrome value in the event of an ECC error. * * Also contains the remote pointer error bit and interrupt enable. RPE is set when the POW detected * corruption on one or more of the input queue lists in L2/DRAM (POW's local copy of the tail pointer * for the L2/DRAM input queue did not match the last entry on the list).
This is caused by * L2/DRAM corruption, and is generally a fatal error because it likely caused POW to load bad work * queue entries. * * This register also contains the illegal operation error bits and the corresponding interrupt * enables as follows: * * <0> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/DESCH/UPD_WQP from PP in NULL_NULL state * <1> Received SWTAG/SWTAG_DESCH/DESCH/UPD_WQP from PP in NULL state * <2> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/GET_WORK from PP with pending tag switch to ORDERED or ATOMIC * <3> Received SWTAG/SWTAG_FULL/SWTAG_DESCH from PP with tag specified as NULL_NULL * <4> Received SWTAG_FULL/SWTAG_DESCH from PP with tag specified as NULL * <5> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/DESCH/UPD_WQP/GET_WORK/NULL_RD from PP with GET_WORK pending * <6> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/DESCH/UPD_WQP/GET_WORK/NULL_RD from PP with NULL_RD pending * <7> Received CLR_NSCHED from PP with SWTAG_DESCH/DESCH/CLR_NSCHED pending * <8> Received SWTAG/SWTAG_FULL/SWTAG_DESCH/DESCH/UPD_WQP/GET_WORK/NULL_RD from PP with CLR_NSCHED pending * <9> Received illegal opcode * <10> Received ADD_WORK with tag specified as NULL_NULL * <11> Received DBG load from PP with DBG load pending * <12> Received CSR load from PP with CSR load pending */ typedef union { uint64_t u64; struct cvmx_pow_ecc_err_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_45_63 : 19; uint64_t iop_ie : 13; /**< Illegal operation interrupt enables */ uint64_t reserved_29_31 : 3; uint64_t iop : 13; /**< Illegal operation errors */ uint64_t reserved_14_15 : 2; uint64_t rpe_ie : 1; /**< Remote pointer error interrupt enable */ uint64_t rpe : 1; /**< Remote pointer error */ uint64_t reserved_9_11 : 3; uint64_t syn : 5; /**< Syndrome value (only valid when DBE or SBE is set) */ uint64_t dbe_ie : 1; /**< Double bit error interrupt enable */ uint64_t sbe_ie : 1; /**< Single bit error interrupt enable */ uint64_t dbe : 1; /**< Double bit error */ uint64_t sbe : 1; /**< Single bit error */ #else uint64_t sbe : 1; uint64_t dbe : 1; uint64_t sbe_ie : 1; uint64_t dbe_ie : 1; uint64_t syn : 5; uint64_t reserved_9_11 : 3; uint64_t rpe : 1; uint64_t rpe_ie : 1; uint64_t reserved_14_15 : 2; uint64_t iop : 13; uint64_t reserved_29_31 : 3; uint64_t iop_ie : 13; uint64_t reserved_45_63 : 19; #endif } s; struct cvmx_pow_ecc_err_s cn30xx; struct cvmx_pow_ecc_err_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_14_63 : 50; uint64_t rpe_ie : 1; /**< Remote pointer error interrupt enable */ uint64_t rpe : 1; /**< Remote pointer error */ uint64_t reserved_9_11 : 3; uint64_t syn : 5; /**< Syndrome value (only valid when DBE or SBE is set) */ uint64_t dbe_ie : 1; /**< Double bit error interrupt enable */ uint64_t sbe_ie : 1; /**< Single bit error interrupt enable */ uint64_t dbe : 1; /**< Double bit error */ uint64_t sbe : 1; /**< Single bit error */ #else uint64_t sbe : 1; uint64_t dbe : 1; uint64_t sbe_ie : 1; uint64_t dbe_ie : 1; uint64_t syn : 5; uint64_t reserved_9_11 : 3; uint64_t rpe : 1; uint64_t rpe_ie : 1; uint64_t reserved_14_63 : 50; #endif } cn31xx; struct cvmx_pow_ecc_err_s cn38xx; struct cvmx_pow_ecc_err_cn31xx cn38xxp2; struct cvmx_pow_ecc_err_s cn50xx; struct cvmx_pow_ecc_err_s cn52xx; struct cvmx_pow_ecc_err_s cn52xxp1; struct cvmx_pow_ecc_err_s cn56xx; struct cvmx_pow_ecc_err_s cn56xxp1; struct cvmx_pow_ecc_err_s cn58xx; struct cvmx_pow_ecc_err_s cn58xxp1; } cvmx_pow_ecc_err_t; /** * cvmx_pow_int_ctl * * POW_INT_CTL = POW Internal Control Register * * Contains POW internal control values (for internal use, not 
typically for customer use): * * PFR_DIS = Disable high-performance pre-fetch reset mode. * * NBR_THR = Assert ncb__busy when the number of remaining coherent bus NBR credits is less * than or equal to this value. */ typedef union { uint64_t u64; struct cvmx_pow_int_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_6_63 : 58; uint64_t pfr_dis : 1; /**< High-perf pre-fetch reset mode disable */ uint64_t nbr_thr : 5; /**< NBR busy threshold */ #else uint64_t nbr_thr : 5; uint64_t pfr_dis : 1; uint64_t reserved_6_63 : 58; #endif } s; struct cvmx_pow_int_ctl_s cn30xx; struct cvmx_pow_int_ctl_s cn31xx; struct cvmx_pow_int_ctl_s cn38xx; struct cvmx_pow_int_ctl_s cn38xxp2; struct cvmx_pow_int_ctl_s cn50xx; struct cvmx_pow_int_ctl_s cn52xx; struct cvmx_pow_int_ctl_s cn52xxp1; struct cvmx_pow_int_ctl_s cn56xx; struct cvmx_pow_int_ctl_s cn56xxp1; struct cvmx_pow_int_ctl_s cn58xx; struct cvmx_pow_int_ctl_s cn58xxp1; } cvmx_pow_int_ctl_t; /** * cvmx_pow_iq_cnt# * * POW_IQ_CNTX = POW Input Queue Count Register (1 per QOS level) * * Contains a read-only count of the number of work queue entries for each QOS level. */ typedef union { uint64_t u64; struct cvmx_pow_iq_cntx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t iq_cnt : 32; /**< Input queue count for QOS level X */ #else uint64_t iq_cnt : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_pow_iq_cntx_s cn30xx; struct cvmx_pow_iq_cntx_s cn31xx; struct cvmx_pow_iq_cntx_s cn38xx; struct cvmx_pow_iq_cntx_s cn38xxp2; struct cvmx_pow_iq_cntx_s cn50xx; struct cvmx_pow_iq_cntx_s cn52xx; struct cvmx_pow_iq_cntx_s cn52xxp1; struct cvmx_pow_iq_cntx_s cn56xx; struct cvmx_pow_iq_cntx_s cn56xxp1; struct cvmx_pow_iq_cntx_s cn58xx; struct cvmx_pow_iq_cntx_s cn58xxp1; } cvmx_pow_iq_cntx_t; /** * cvmx_pow_iq_com_cnt * * POW_IQ_COM_CNT = POW Input Queue Combined Count Register * * Contains a read-only count of the total number of work queue entries in all QOS levels. */ typedef union { uint64_t u64; struct cvmx_pow_iq_com_cnt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t iq_cnt : 32; /**< Input queue combined count */ #else uint64_t iq_cnt : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_pow_iq_com_cnt_s cn30xx; struct cvmx_pow_iq_com_cnt_s cn31xx; struct cvmx_pow_iq_com_cnt_s cn38xx; struct cvmx_pow_iq_com_cnt_s cn38xxp2; struct cvmx_pow_iq_com_cnt_s cn50xx; struct cvmx_pow_iq_com_cnt_s cn52xx; struct cvmx_pow_iq_com_cnt_s cn52xxp1; struct cvmx_pow_iq_com_cnt_s cn56xx; struct cvmx_pow_iq_com_cnt_s cn56xxp1; struct cvmx_pow_iq_com_cnt_s cn58xx; struct cvmx_pow_iq_com_cnt_s cn58xxp1; } cvmx_pow_iq_com_cnt_t; /** * cvmx_pow_iq_int * * POW_IQ_INT = POW Input Queue Interrupt Register * * Contains the bits (1 per QOS level) that can trigger the input queue interrupt. An IQ_INT bit * will be set if POW_IQ_CNT#QOS# changes and the resulting value is equal to POW_IQ_THR#QOS#. */ typedef union { uint64_t u64; struct cvmx_pow_iq_int_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t iq_int : 8; /**< Input queue interrupt bits */ #else uint64_t iq_int : 8; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_pow_iq_int_s cn52xx; struct cvmx_pow_iq_int_s cn52xxp1; struct cvmx_pow_iq_int_s cn56xx; struct cvmx_pow_iq_int_s cn56xxp1; } cvmx_pow_iq_int_t; /** * cvmx_pow_iq_int_en * * POW_IQ_INT_EN = POW Input Queue Interrupt Enable Register * * Contains the bits (1 per QOS level) that enable the input queue interrupt.
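 *
 * A configuration sketch, assuming the cvmx_read_csr()/cvmx_write_csr()
 * helpers and the CVMX_POW_IQ_THRX()/CVMX_POW_IQ_INT_EN address macros from
 * the companion cvmx headers:
 *
 * @verbatim
 * void pow_enable_iq_interrupt(int qos, uint32_t threshold)
 * {
 *     cvmx_pow_iq_thrx_t thr;
 *     cvmx_pow_iq_int_en_t en;
 *
 *     thr.u64 = 0;
 *     thr.s.iq_thr = threshold;   // IQ_INT fires when the count reaches this
 *     cvmx_write_csr(CVMX_POW_IQ_THRX(qos), thr.u64);
 *
 *     en.u64 = cvmx_read_csr(CVMX_POW_IQ_INT_EN);
 *     en.s.int_en |= 1 << qos;    // enable the interrupt for this QOS level
 *     cvmx_write_csr(CVMX_POW_IQ_INT_EN, en.u64);
 * }
 * @endverbatim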
*/ typedef union { uint64_t u64; struct cvmx_pow_iq_int_en_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t int_en : 8; /**< Input queue interrupt enable bits */ #else uint64_t int_en : 8; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_pow_iq_int_en_s cn52xx; struct cvmx_pow_iq_int_en_s cn52xxp1; struct cvmx_pow_iq_int_en_s cn56xx; struct cvmx_pow_iq_int_en_s cn56xxp1; } cvmx_pow_iq_int_en_t; /** * cvmx_pow_iq_thr# * * POW_IQ_THRX = POW Input Queue Threshold Register (1 per QOS level) * * Threshold value for triggering input queue interrupts. */ typedef union { uint64_t u64; struct cvmx_pow_iq_thrx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t iq_thr : 32; /**< Input queue threshold for QOS level X */ #else uint64_t iq_thr : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_pow_iq_thrx_s cn52xx; struct cvmx_pow_iq_thrx_s cn52xxp1; struct cvmx_pow_iq_thrx_s cn56xx; struct cvmx_pow_iq_thrx_s cn56xxp1; } cvmx_pow_iq_thrx_t; /** * cvmx_pow_nos_cnt * * POW_NOS_CNT = POW No-schedule Count Register * * Contains the number of work queue entries on the no-schedule list. */ typedef union { uint64_t u64; struct cvmx_pow_nos_cnt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t nos_cnt : 12; /**< # of work queue entries on the no-schedule list */ #else uint64_t nos_cnt : 12; uint64_t reserved_12_63 : 52; #endif } s; struct cvmx_pow_nos_cnt_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_7_63 : 57; uint64_t nos_cnt : 7; /**< # of work queue entries on the no-schedule list */ #else uint64_t nos_cnt : 7; uint64_t reserved_7_63 : 57; #endif } cn30xx; struct cvmx_pow_nos_cnt_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_9_63 : 55; uint64_t nos_cnt : 9; /**< # of work queue entries on the no-schedule list */ #else uint64_t nos_cnt : 9; uint64_t reserved_9_63 : 55; #endif } cn31xx; struct cvmx_pow_nos_cnt_s cn38xx; struct cvmx_pow_nos_cnt_s cn38xxp2; struct cvmx_pow_nos_cnt_cn31xx cn50xx; struct cvmx_pow_nos_cnt_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_10_63 : 54; uint64_t nos_cnt : 10; /**< # of work queue entries on the no-schedule list */ #else uint64_t nos_cnt : 10; uint64_t reserved_10_63 : 54; #endif } cn52xx; struct cvmx_pow_nos_cnt_cn52xx cn52xxp1; struct cvmx_pow_nos_cnt_s cn56xx; struct cvmx_pow_nos_cnt_s cn56xxp1; struct cvmx_pow_nos_cnt_s cn58xx; struct cvmx_pow_nos_cnt_s cn58xxp1; } cvmx_pow_nos_cnt_t; /** * cvmx_pow_nw_tim * * POW_NW_TIM = POW New Work Timer Period Register * * Sets the minimum period for a new work request timeout. Period is specified in n-1 notation * where the increment value is 1024 clock cycles. Thus, a value of 0x0 in this register translates * to 1024 cycles, 0x1 translates to 2048 cycles, 0x2 translates to 3072 cycles, etc... Note: the * maximum period for a new work request timeout is 2 times the minimum period. Note: the new work * request timeout counter is reset when this register is written. 
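 *
 * Worked example of the n-1 encoding: a minimum timeout of 65536 cycles needs
 * NW_TIM = 65536/1024 - 1 = 63. A sketch, assuming cvmx_write_csr() and the
 * CVMX_POW_NW_TIM address macro from the companion headers:
 *
 * @verbatim
 * void pow_set_new_work_timeout(uint64_t min_cycles)
 * {
 *     cvmx_pow_nw_tim_t nw_tim;
 *
 *     nw_tim.u64 = 0;
 *     // n-1 notation in 1024-cycle steps; caller passes a multiple of 1024
 *     nw_tim.s.nw_tim = min_cycles / 1024 - 1;
 *     cvmx_write_csr(CVMX_POW_NW_TIM, nw_tim.u64);
 * }
 * @endverbatim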
*/ typedef union { uint64_t u64; struct cvmx_pow_nw_tim_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_10_63 : 54; uint64_t nw_tim : 10; /**< New work timer period */ #else uint64_t nw_tim : 10; uint64_t reserved_10_63 : 54; #endif } s; struct cvmx_pow_nw_tim_s cn30xx; struct cvmx_pow_nw_tim_s cn31xx; struct cvmx_pow_nw_tim_s cn38xx; struct cvmx_pow_nw_tim_s cn38xxp2; struct cvmx_pow_nw_tim_s cn50xx; struct cvmx_pow_nw_tim_s cn52xx; struct cvmx_pow_nw_tim_s cn52xxp1; struct cvmx_pow_nw_tim_s cn56xx; struct cvmx_pow_nw_tim_s cn56xxp1; struct cvmx_pow_nw_tim_s cn58xx; struct cvmx_pow_nw_tim_s cn58xxp1; } cvmx_pow_nw_tim_t; /** * cvmx_pow_pf_rst_msk * * POW_PF_RST_MSK = POW Prefetch Reset Mask * * Resets the work prefetch engine when work is stored in an internal buffer (either when the add * work arrives or when the work is reloaded from an external buffer) for an enabled QOS level * (1 bit per QOS level). */ typedef union { uint64_t u64; struct cvmx_pow_pf_rst_msk_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t rst_msk : 8; /**< Prefetch engine reset mask */ #else uint64_t rst_msk : 8; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_pow_pf_rst_msk_s cn50xx; struct cvmx_pow_pf_rst_msk_s cn52xx; struct cvmx_pow_pf_rst_msk_s cn52xxp1; struct cvmx_pow_pf_rst_msk_s cn56xx; struct cvmx_pow_pf_rst_msk_s cn56xxp1; struct cvmx_pow_pf_rst_msk_s cn58xx; struct cvmx_pow_pf_rst_msk_s cn58xxp1; } cvmx_pow_pf_rst_msk_t; /** * cvmx_pow_pp_grp_msk# * * POW_PP_GRP_MSKX = POW PP Group Mask Register (1 per PP) * * Selects which group(s) a PP belongs to. A '1' in any bit position sets the PP's membership in * the corresponding group. A value of 0x0 will prevent the PP from receiving new work. Note: * disabled or non-existent PP's should have this field set to 0xffff (the reset value) in order to * maximize POW performance. * * Also contains the QOS level priorities for each PP. 0x0 is highest priority, and 0x7 the lowest. * Setting the priority to 0xf will prevent that PP from receiving work from that QOS level. * Priority values 0x8 through 0xe are reserved and should not be used. For a given PP, priorities * should begin at 0x0 and remain contiguous throughout the range. 
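 *
 * A sketch restricting one PP to groups 0 and 1, with QOS level 0 above QOS
 * level 1 and all other QOS levels disabled for this PP (assuming
 * cvmx_write_csr() and the CVMX_POW_PP_GRP_MSKX() address macro):
 *
 * @verbatim
 * void pow_set_pp_groups(int ppid)
 * {
 *     cvmx_pow_pp_grp_mskx_t msk;
 *
 *     msk.u64 = 0;
 *     msk.s.grp_msk = 0x3;   // member of groups 0 and 1 only
 *     msk.s.qos0_pri = 0x0;  // highest priority
 *     msk.s.qos1_pri = 0x1;  // priorities stay contiguous, as required
 *     msk.s.qos2_pri = 0xf;  // 0xf: receive no work from this QOS level
 *     msk.s.qos3_pri = 0xf;
 *     msk.s.qos4_pri = 0xf;
 *     msk.s.qos5_pri = 0xf;
 *     msk.s.qos6_pri = 0xf;
 *     msk.s.qos7_pri = 0xf;
 *     cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(ppid), msk.u64);
 * }
 * @endverbatim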
*/ typedef union { uint64_t u64; struct cvmx_pow_pp_grp_mskx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_48_63 : 16; uint64_t qos7_pri : 4; /**< PPX priority for QOS level 7 */ uint64_t qos6_pri : 4; /**< PPX priority for QOS level 6 */ uint64_t qos5_pri : 4; /**< PPX priority for QOS level 5 */ uint64_t qos4_pri : 4; /**< PPX priority for QOS level 4 */ uint64_t qos3_pri : 4; /**< PPX priority for QOS level 3 */ uint64_t qos2_pri : 4; /**< PPX priority for QOS level 2 */ uint64_t qos1_pri : 4; /**< PPX priority for QOS level 1 */ uint64_t qos0_pri : 4; /**< PPX priority for QOS level 0 */ uint64_t grp_msk : 16; /**< PPX group mask */ #else uint64_t grp_msk : 16; uint64_t qos0_pri : 4; uint64_t qos1_pri : 4; uint64_t qos2_pri : 4; uint64_t qos3_pri : 4; uint64_t qos4_pri : 4; uint64_t qos5_pri : 4; uint64_t qos6_pri : 4; uint64_t qos7_pri : 4; uint64_t reserved_48_63 : 16; #endif } s; struct cvmx_pow_pp_grp_mskx_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t grp_msk : 16; /**< PPX group mask */ #else uint64_t grp_msk : 16; uint64_t reserved_16_63 : 48; #endif } cn30xx; struct cvmx_pow_pp_grp_mskx_cn30xx cn31xx; struct cvmx_pow_pp_grp_mskx_cn30xx cn38xx; struct cvmx_pow_pp_grp_mskx_cn30xx cn38xxp2; struct cvmx_pow_pp_grp_mskx_s cn50xx; struct cvmx_pow_pp_grp_mskx_s cn52xx; struct cvmx_pow_pp_grp_mskx_s cn52xxp1; struct cvmx_pow_pp_grp_mskx_s cn56xx; struct cvmx_pow_pp_grp_mskx_s cn56xxp1; struct cvmx_pow_pp_grp_mskx_s cn58xx; struct cvmx_pow_pp_grp_mskx_s cn58xxp1; } cvmx_pow_pp_grp_mskx_t; /** * cvmx_pow_qos_rnd# * * POW_QOS_RNDX = POW QOS Issue Round Register (4 rounds per register x 8 registers = 32 rounds) * * Contains the round definitions for issuing new work. Each round consists of 8 bits with each bit * corresponding to a QOS level. There are 4 rounds contained in each register for a total of 32 * rounds. The issue logic traverses through the rounds sequentially (lowest round to highest round) * in an attempt to find new work for each PP. Within each round, the issue logic traverses through * the QOS levels sequentially (highest QOS to lowest QOS) skipping over each QOS level with a clear * bit in the round mask. Note: setting a QOS level to all zeroes in all issue round registers will * prevent work from being issued from that QOS level. */ typedef union { uint64_t u64; struct cvmx_pow_qos_rndx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t rnd_p3 : 8; /**< Round mask for round Xx4+3 */ uint64_t rnd_p2 : 8; /**< Round mask for round Xx4+2 */ uint64_t rnd_p1 : 8; /**< Round mask for round Xx4+1 */ uint64_t rnd : 8; /**< Round mask for round Xx4 */ #else uint64_t rnd : 8; uint64_t rnd_p1 : 8; uint64_t rnd_p2 : 8; uint64_t rnd_p3 : 8; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_pow_qos_rndx_s cn30xx; struct cvmx_pow_qos_rndx_s cn31xx; struct cvmx_pow_qos_rndx_s cn38xx; struct cvmx_pow_qos_rndx_s cn38xxp2; struct cvmx_pow_qos_rndx_s cn50xx; struct cvmx_pow_qos_rndx_s cn52xx; struct cvmx_pow_qos_rndx_s cn52xxp1; struct cvmx_pow_qos_rndx_s cn56xx; struct cvmx_pow_qos_rndx_s cn56xxp1; struct cvmx_pow_qos_rndx_s cn58xx; struct cvmx_pow_qos_rndx_s cn58xxp1; } cvmx_pow_qos_rndx_t; /** * cvmx_pow_qos_thr# * * POW_QOS_THRX = POW QOS Threshold Register (1 per QOS level) * * Contains the thresholds for allocating POW internal storage buffers. 
If the number of remaining * free buffers drops below the minimum threshold (MIN_THR) or the number of allocated buffers for * this QOS level rises above the maximum threshold (MAX_THR), future incoming work queue entries * will be buffered externally rather than internally. This register also contains a read-only count * of the current number of free buffers (FREE_CNT), the number of internal buffers currently * allocated to this QOS level (BUF_CNT), and the total number of buffers on the de-schedule list * (DES_CNT) (which is not the same as the total number of de-scheduled buffers). */ typedef union { uint64_t u64; struct cvmx_pow_qos_thrx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_60_63 : 4; uint64_t des_cnt : 12; /**< # of buffers on de-schedule list */ uint64_t buf_cnt : 12; /**< # of internal buffers allocated to QOS level X */ uint64_t free_cnt : 12; /**< # of total free buffers */ uint64_t reserved_23_23 : 1; uint64_t max_thr : 11; /**< Max threshold for QOS level X */ uint64_t reserved_11_11 : 1; uint64_t min_thr : 11; /**< Min threshold for QOS level X */ #else uint64_t min_thr : 11; uint64_t reserved_11_11 : 1; uint64_t max_thr : 11; uint64_t reserved_23_23 : 1; uint64_t free_cnt : 12; uint64_t buf_cnt : 12; uint64_t des_cnt : 12; uint64_t reserved_60_63 : 4; #endif } s; struct cvmx_pow_qos_thrx_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_55_63 : 9; uint64_t des_cnt : 7; /**< # of buffers on de-schedule list */ uint64_t reserved_43_47 : 5; uint64_t buf_cnt : 7; /**< # of internal buffers allocated to QOS level X */ uint64_t reserved_31_35 : 5; uint64_t free_cnt : 7; /**< # of total free buffers */ uint64_t reserved_18_23 : 6; uint64_t max_thr : 6; /**< Max threshold for QOS level X */ uint64_t reserved_6_11 : 6; uint64_t min_thr : 6; /**< Min threshold for QOS level X */ #else uint64_t min_thr : 6; uint64_t reserved_6_11 : 6; uint64_t max_thr : 6; uint64_t reserved_18_23 : 6; uint64_t free_cnt : 7; uint64_t reserved_31_35 : 5; uint64_t buf_cnt : 7; uint64_t reserved_43_47 : 5; uint64_t des_cnt : 7; uint64_t reserved_55_63 : 9; #endif } cn30xx; struct cvmx_pow_qos_thrx_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_57_63 : 7; uint64_t des_cnt : 9; /**< # of buffers on de-schedule list */ uint64_t reserved_45_47 : 3; uint64_t buf_cnt : 9; /**< # of internal buffers allocated to QOS level X */ uint64_t reserved_33_35 : 3; uint64_t free_cnt : 9; /**< # of total free buffers */ uint64_t reserved_20_23 : 4; uint64_t max_thr : 8; /**< Max threshold for QOS level X */ uint64_t reserved_8_11 : 4; uint64_t min_thr : 8; /**< Min threshold for QOS level X */ #else uint64_t min_thr : 8; uint64_t reserved_8_11 : 4; uint64_t max_thr : 8; uint64_t reserved_20_23 : 4; uint64_t free_cnt : 9; uint64_t reserved_33_35 : 3; uint64_t buf_cnt : 9; uint64_t reserved_45_47 : 3; uint64_t des_cnt : 9; uint64_t reserved_57_63 : 7; #endif } cn31xx; struct cvmx_pow_qos_thrx_s cn38xx; struct cvmx_pow_qos_thrx_s cn38xxp2; struct cvmx_pow_qos_thrx_cn31xx cn50xx; struct cvmx_pow_qos_thrx_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_58_63 : 6; uint64_t des_cnt : 10; /**< # of buffers on de-schedule list */ uint64_t reserved_46_47 : 2; uint64_t buf_cnt : 10; /**< # of internal buffers allocated to QOS level X */ uint64_t reserved_34_35 : 2; uint64_t free_cnt : 10; /**< # of total free buffers */ uint64_t reserved_21_23 : 3; uint64_t max_thr : 9; /**< Max threshold for QOS level X */ uint64_t reserved_9_11 : 3; uint64_t min_thr : 9; /**< Min threshold for QOS 
level X */ #else uint64_t min_thr : 9; uint64_t reserved_9_11 : 3; uint64_t max_thr : 9; uint64_t reserved_21_23 : 3; uint64_t free_cnt : 10; uint64_t reserved_34_35 : 2; uint64_t buf_cnt : 10; uint64_t reserved_46_47 : 2; uint64_t des_cnt : 10; uint64_t reserved_58_63 : 6; #endif } cn52xx; struct cvmx_pow_qos_thrx_cn52xx cn52xxp1; struct cvmx_pow_qos_thrx_s cn56xx; struct cvmx_pow_qos_thrx_s cn56xxp1; struct cvmx_pow_qos_thrx_s cn58xx; struct cvmx_pow_qos_thrx_s cn58xxp1; } cvmx_pow_qos_thrx_t; /** * cvmx_pow_ts_pc * * POW_TS_PC = POW Tag Switch Performance Counter * * Counts the number of tag switch requests. Write to clear. */ typedef union { uint64_t u64; struct cvmx_pow_ts_pc_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t ts_pc : 32; /**< Tag switch performance counter */ #else uint64_t ts_pc : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_pow_ts_pc_s cn30xx; struct cvmx_pow_ts_pc_s cn31xx; struct cvmx_pow_ts_pc_s cn38xx; struct cvmx_pow_ts_pc_s cn38xxp2; struct cvmx_pow_ts_pc_s cn50xx; struct cvmx_pow_ts_pc_s cn52xx; struct cvmx_pow_ts_pc_s cn52xxp1; struct cvmx_pow_ts_pc_s cn56xx; struct cvmx_pow_ts_pc_s cn56xxp1; struct cvmx_pow_ts_pc_s cn58xx; struct cvmx_pow_ts_pc_s cn58xxp1; } cvmx_pow_ts_pc_t; /** * cvmx_pow_wa_com_pc * * POW_WA_COM_PC = POW Work Add Combined Performance Counter * * Counts the number of add new work requests for all QOS levels. Write to clear. */ typedef union { uint64_t u64; struct cvmx_pow_wa_com_pc_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t wa_pc : 32; /**< Work add combined performance counter */ #else uint64_t wa_pc : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_pow_wa_com_pc_s cn30xx; struct cvmx_pow_wa_com_pc_s cn31xx; struct cvmx_pow_wa_com_pc_s cn38xx; struct cvmx_pow_wa_com_pc_s cn38xxp2; struct cvmx_pow_wa_com_pc_s cn50xx; struct cvmx_pow_wa_com_pc_s cn52xx; struct cvmx_pow_wa_com_pc_s cn52xxp1; struct cvmx_pow_wa_com_pc_s cn56xx; struct cvmx_pow_wa_com_pc_s cn56xxp1; struct cvmx_pow_wa_com_pc_s cn58xx; struct cvmx_pow_wa_com_pc_s cn58xxp1; } cvmx_pow_wa_com_pc_t; /** * cvmx_pow_wa_pc# * * POW_WA_PCX = POW Work Add Performance Counter (1 per QOS level) * * Counts the number of add new work requests for each QOS level. Write to clear. */ typedef union { uint64_t u64; struct cvmx_pow_wa_pcx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t wa_pc : 32; /**< Work add performance counter for QOS level X */ #else uint64_t wa_pc : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_pow_wa_pcx_s cn30xx; struct cvmx_pow_wa_pcx_s cn31xx; struct cvmx_pow_wa_pcx_s cn38xx; struct cvmx_pow_wa_pcx_s cn38xxp2; struct cvmx_pow_wa_pcx_s cn50xx; struct cvmx_pow_wa_pcx_s cn52xx; struct cvmx_pow_wa_pcx_s cn52xxp1; struct cvmx_pow_wa_pcx_s cn56xx; struct cvmx_pow_wa_pcx_s cn56xxp1; struct cvmx_pow_wa_pcx_s cn58xx; struct cvmx_pow_wa_pcx_s cn58xxp1; } cvmx_pow_wa_pcx_t; /** * cvmx_pow_wq_int * * POW_WQ_INT = POW Work Queue Interrupt Register * * Contains the bits (1 per group) that set work queue interrupts and are used to clear these * interrupts. Also contains the input queue interrupt temporary disable bits (1 per group). For * more information regarding this register, see the interrupt section. 
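 *
 * A minimal acknowledge sketch, assuming cvmx_write_csr(), the
 * CVMX_POW_WQ_INT address macro, and the write-1-to-clear behavior described
 * above:
 *
 * @verbatim
 * void pow_ack_wq_interrupt(int group)
 * {
 *     cvmx_pow_wq_int_t wq_int;
 *
 *     wq_int.u64 = 0;
 *     wq_int.s.wq_int = 1ull << group;  // clear only this group's interrupt
 *     cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
 * }
 * @endverbatim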
*/ typedef union { uint64_t u64; struct cvmx_pow_wq_int_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t iq_dis : 16; /**< Input queue interrupt temporary disable mask Corresponding WQ_INT<*> bit cannot be set due to IQ_CNT/IQ_THR check when this bit is set. Corresponding IQ_DIS bit is cleared by HW whenever: - POW_WQ_INT_CNT*[IQ_CNT] is zero, or - POW_WQ_INT_CNT*[TC_CNT]==1 when periodic counter POW_WQ_INT_PC[PC]==0 */ uint64_t wq_int : 16; /**< Work queue interrupt bits Corresponding WQ_INT bit is set by HW whenever: - POW_WQ_INT_CNT*[IQ_CNT] >= POW_WQ_INT_THR*[IQ_THR] and the threshold interrupt is not disabled. IQ_DIS<*>==1 disables the interrupt. POW_WQ_INT_THR*[IQ_THR]==0 disables the int. - POW_WQ_INT_CNT*[DS_CNT] >= POW_WQ_INT_THR*[DS_THR] and the threshold interrupt is not disabled POW_WQ_INT_THR*[DS_THR]==0 disables the int. - POW_WQ_INT_CNT*[TC_CNT]==1 when periodic counter POW_WQ_INT_PC[PC]==0 and POW_WQ_INT_THR*[TC_EN]==1 and at least one of: - POW_WQ_INT_CNT*[IQ_CNT] > 0 - POW_WQ_INT_CNT*[DS_CNT] > 0 */ #else uint64_t wq_int : 16; uint64_t iq_dis : 16; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_pow_wq_int_s cn30xx; struct cvmx_pow_wq_int_s cn31xx; struct cvmx_pow_wq_int_s cn38xx; struct cvmx_pow_wq_int_s cn38xxp2; struct cvmx_pow_wq_int_s cn50xx; struct cvmx_pow_wq_int_s cn52xx; struct cvmx_pow_wq_int_s cn52xxp1; struct cvmx_pow_wq_int_s cn56xx; struct cvmx_pow_wq_int_s cn56xxp1; struct cvmx_pow_wq_int_s cn58xx; struct cvmx_pow_wq_int_s cn58xxp1; } cvmx_pow_wq_int_t; /** * cvmx_pow_wq_int_cnt# * * POW_WQ_INT_CNTX = POW Work Queue Interrupt Count Register (1 per group) * * Contains a read-only copy of the counts used to trigger work queue interrupts. For more * information regarding this register, see the interrupt section. */ typedef union { uint64_t u64; struct cvmx_pow_wq_int_cntx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_28_63 : 36; uint64_t tc_cnt : 4; /**< Time counter current value for group X HW sets TC_CNT to POW_WQ_INT_THR*[TC_THR] whenever: - corresponding POW_WQ_INT_CNT*[IQ_CNT]==0 and corresponding POW_WQ_INT_CNT*[DS_CNT]==0 - corresponding POW_WQ_INT[WQ_INT<*>] is written with a 1 by SW - corresponding POW_WQ_INT[IQ_DIS<*>] is written with a 1 by SW - corresponding POW_WQ_INT_THR* is written by SW - TC_CNT==1 and periodic counter POW_WQ_INT_PC[PC]==0 Otherwise, HW decrements TC_CNT whenever the periodic counter POW_WQ_INT_PC[PC]==0. TC_CNT is 0 whenever POW_WQ_INT_THR*[TC_THR]==0. */ uint64_t ds_cnt : 12; /**< De-schedule executable count for group X */ uint64_t iq_cnt : 12; /**< Input queue executable count for group X */ #else uint64_t iq_cnt : 12; uint64_t ds_cnt : 12; uint64_t tc_cnt : 4; uint64_t reserved_28_63 : 36; #endif } s; struct cvmx_pow_wq_int_cntx_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_28_63 : 36; uint64_t tc_cnt : 4; /**< Time counter current value for group X HW sets TC_CNT to POW_WQ_INT_THR*[TC_THR] whenever: - corresponding POW_WQ_INT_CNT*[IQ_CNT]==0 and corresponding POW_WQ_INT_CNT*[DS_CNT]==0 - corresponding POW_WQ_INT[WQ_INT<*>] is written with a 1 by SW - corresponding POW_WQ_INT[IQ_DIS<*>] is written with a 1 by SW - corresponding POW_WQ_INT_THR* is written by SW - TC_CNT==1 and periodic counter POW_WQ_INT_PC[PC]==0 Otherwise, HW decrements TC_CNT whenever the periodic counter POW_WQ_INT_PC[PC]==0. TC_CNT is 0 whenever POW_WQ_INT_THR*[TC_THR]==0. 
*/ uint64_t reserved_19_23 : 5; uint64_t ds_cnt : 7; /**< De-schedule executable count for group X */ uint64_t reserved_7_11 : 5; uint64_t iq_cnt : 7; /**< Input queue executable count for group X */ #else uint64_t iq_cnt : 7; uint64_t reserved_7_11 : 5; uint64_t ds_cnt : 7; uint64_t reserved_19_23 : 5; uint64_t tc_cnt : 4; uint64_t reserved_28_63 : 36; #endif } cn30xx; struct cvmx_pow_wq_int_cntx_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_28_63 : 36; uint64_t tc_cnt : 4; /**< Time counter current value for group X HW sets TC_CNT to POW_WQ_INT_THR*[TC_THR] whenever: - corresponding POW_WQ_INT_CNT*[IQ_CNT]==0 and corresponding POW_WQ_INT_CNT*[DS_CNT]==0 - corresponding POW_WQ_INT[WQ_INT<*>] is written with a 1 by SW - corresponding POW_WQ_INT[IQ_DIS<*>] is written with a 1 by SW - corresponding POW_WQ_INT_THR* is written by SW - TC_CNT==1 and periodic counter POW_WQ_INT_PC[PC]==0 Otherwise, HW decrements TC_CNT whenever the periodic counter POW_WQ_INT_PC[PC]==0. TC_CNT is 0 whenever POW_WQ_INT_THR*[TC_THR]==0. */ uint64_t reserved_21_23 : 3; uint64_t ds_cnt : 9; /**< De-schedule executable count for group X */ uint64_t reserved_9_11 : 3; uint64_t iq_cnt : 9; /**< Input queue executable count for group X */ #else uint64_t iq_cnt : 9; uint64_t reserved_9_11 : 3; uint64_t ds_cnt : 9; uint64_t reserved_21_23 : 3; uint64_t tc_cnt : 4; uint64_t reserved_28_63 : 36; #endif } cn31xx; struct cvmx_pow_wq_int_cntx_s cn38xx; struct cvmx_pow_wq_int_cntx_s cn38xxp2; struct cvmx_pow_wq_int_cntx_cn31xx cn50xx; struct cvmx_pow_wq_int_cntx_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_28_63 : 36; uint64_t tc_cnt : 4; /**< Time counter current value for group X HW sets TC_CNT to POW_WQ_INT_THR*[TC_THR] whenever: - corresponding POW_WQ_INT_CNT*[IQ_CNT]==0 and corresponding POW_WQ_INT_CNT*[DS_CNT]==0 - corresponding POW_WQ_INT[WQ_INT<*>] is written with a 1 by SW - corresponding POW_WQ_INT[IQ_DIS<*>] is written with a 1 by SW - corresponding POW_WQ_INT_THR* is written by SW - TC_CNT==1 and periodic counter POW_WQ_INT_PC[PC]==0 Otherwise, HW decrements TC_CNT whenever the periodic counter POW_WQ_INT_PC[PC]==0. TC_CNT is 0 whenever POW_WQ_INT_THR*[TC_THR]==0. */ uint64_t reserved_22_23 : 2; uint64_t ds_cnt : 10; /**< De-schedule executable count for group X */ uint64_t reserved_10_11 : 2; uint64_t iq_cnt : 10; /**< Input queue executable count for group X */ #else uint64_t iq_cnt : 10; uint64_t reserved_10_11 : 2; uint64_t ds_cnt : 10; uint64_t reserved_22_23 : 2; uint64_t tc_cnt : 4; uint64_t reserved_28_63 : 36; #endif } cn52xx; struct cvmx_pow_wq_int_cntx_cn52xx cn52xxp1; struct cvmx_pow_wq_int_cntx_s cn56xx; struct cvmx_pow_wq_int_cntx_s cn56xxp1; struct cvmx_pow_wq_int_cntx_s cn58xx; struct cvmx_pow_wq_int_cntx_s cn58xxp1; } cvmx_pow_wq_int_cntx_t; /** * cvmx_pow_wq_int_pc * * POW_WQ_INT_PC = POW Work Queue Interrupt Periodic Counter Register * * Contains the threshold value for the work queue interrupt periodic counter and also a read-only * copy of the periodic counter. For more information regarding this register, see the interrupt * section. 
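 *
 * A minimal programming sketch (illustrative only; cvmx_write_csr() and
 * the CVMX_POW_WQ_INT_PC address macro come from the companion cvmx
 * headers, and <interval> is a hypothetical threshold value):
 *
 *   cvmx_pow_wq_int_pc_t int_pc;
 *   int_pc.u64 = 0;
 *   int_pc.s.pc_thr = interval;   // PC_THR sits at register bits <27:8>,
 *                                 // so the low 8 bits of the period are 0
 *   cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);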
*/ typedef union { uint64_t u64; struct cvmx_pow_wq_int_pc_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_60_63 : 4; uint64_t pc : 28; /**< Work queue interrupt periodic counter */ uint64_t reserved_28_31 : 4; uint64_t pc_thr : 20; /**< Work queue interrupt periodic counter threshold */ uint64_t reserved_0_7 : 8; #else uint64_t reserved_0_7 : 8; uint64_t pc_thr : 20; uint64_t reserved_28_31 : 4; uint64_t pc : 28; uint64_t reserved_60_63 : 4; #endif } s; struct cvmx_pow_wq_int_pc_s cn30xx; struct cvmx_pow_wq_int_pc_s cn31xx; struct cvmx_pow_wq_int_pc_s cn38xx; struct cvmx_pow_wq_int_pc_s cn38xxp2; struct cvmx_pow_wq_int_pc_s cn50xx; struct cvmx_pow_wq_int_pc_s cn52xx; struct cvmx_pow_wq_int_pc_s cn52xxp1; struct cvmx_pow_wq_int_pc_s cn56xx; struct cvmx_pow_wq_int_pc_s cn56xxp1; struct cvmx_pow_wq_int_pc_s cn58xx; struct cvmx_pow_wq_int_pc_s cn58xxp1; } cvmx_pow_wq_int_pc_t; /** * cvmx_pow_wq_int_thr# * * POW_WQ_INT_THRX = POW Work Queue Interrupt Threshold Register (1 per group) * * Contains the thresholds for enabling and setting work queue interrupts. For more information * regarding this register, see the interrupt section. * * Note: Up to 4 of the POW's internal storage buffers can be allocated for hardware use and are * therefore not available for incoming work queue entries. Additionally, any PP that is not in the * NULL_NULL state consumes a buffer. Thus in a 4 PP system, it is not advisable to set either * IQ_THR or DS_THR to greater than 512 - 4 - 4 = 504. Doing so may prevent the interrupt from * ever triggering. */ typedef union { uint64_t u64; struct cvmx_pow_wq_int_thrx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_29_63 : 35; uint64_t tc_en : 1; /**< Time counter interrupt enable for group X TC_EN must be zero when TC_THR==0 */ uint64_t tc_thr : 4; /**< Time counter interrupt threshold for group X When TC_THR==0, POW_WQ_INT_CNT*[TC_CNT] is zero */ uint64_t reserved_23_23 : 1; uint64_t ds_thr : 11; /**< De-schedule count threshold for group X DS_THR==0 disables the threshold interrupt */ uint64_t reserved_11_11 : 1; uint64_t iq_thr : 11; /**< Input queue count threshold for group X IQ_THR==0 disables the threshold interrupt */ #else uint64_t iq_thr : 11; uint64_t reserved_11_11 : 1; uint64_t ds_thr : 11; uint64_t reserved_23_23 : 1; uint64_t tc_thr : 4; uint64_t tc_en : 1; uint64_t reserved_29_63 : 35; #endif } s; struct cvmx_pow_wq_int_thrx_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_29_63 : 35; uint64_t tc_en : 1; /**< Time counter interrupt enable for group X TC_EN must be zero when TC_THR==0 */ uint64_t tc_thr : 4; /**< Time counter interrupt threshold for group X When TC_THR==0, POW_WQ_INT_CNT*[TC_CNT] is zero */ uint64_t reserved_18_23 : 6; uint64_t ds_thr : 6; /**< De-schedule count threshold for group X DS_THR==0 disables the threshold interrupt */ uint64_t reserved_6_11 : 6; uint64_t iq_thr : 6; /**< Input queue count threshold for group X IQ_THR==0 disables the threshold interrupt */ #else uint64_t iq_thr : 6; uint64_t reserved_6_11 : 6; uint64_t ds_thr : 6; uint64_t reserved_18_23 : 6; uint64_t tc_thr : 4; uint64_t tc_en : 1; uint64_t reserved_29_63 : 35; #endif } cn30xx; struct cvmx_pow_wq_int_thrx_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_29_63 : 35; uint64_t tc_en : 1; /**< Time counter interrupt enable for group X TC_EN must be zero when TC_THR==0 */ uint64_t tc_thr : 4; /**< Time counter interrupt threshold for group X When TC_THR==0, POW_WQ_INT_CNT*[TC_CNT] is zero */ uint64_t reserved_20_23 : 4; uint64_t 
ds_thr : 8; /**< De-schedule count threshold for group X DS_THR==0 disables the threshold interrupt */ uint64_t reserved_8_11 : 4; uint64_t iq_thr : 8; /**< Input queue count threshold for group X IQ_THR==0 disables the threshold interrupt */ #else uint64_t iq_thr : 8; uint64_t reserved_8_11 : 4; uint64_t ds_thr : 8; uint64_t reserved_20_23 : 4; uint64_t tc_thr : 4; uint64_t tc_en : 1; uint64_t reserved_29_63 : 35; #endif } cn31xx; struct cvmx_pow_wq_int_thrx_s cn38xx; struct cvmx_pow_wq_int_thrx_s cn38xxp2; struct cvmx_pow_wq_int_thrx_cn31xx cn50xx; struct cvmx_pow_wq_int_thrx_cn52xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_29_63 : 35; uint64_t tc_en : 1; /**< Time counter interrupt enable for group X TC_EN must be zero when TC_THR==0 */ uint64_t tc_thr : 4; /**< Time counter interrupt threshold for group X When TC_THR==0, POW_WQ_INT_CNT*[TC_CNT] is zero */ uint64_t reserved_21_23 : 3; uint64_t ds_thr : 9; /**< De-schedule count threshold for group X DS_THR==0 disables the threshold interrupt */ uint64_t reserved_9_11 : 3; uint64_t iq_thr : 9; /**< Input queue count threshold for group X IQ_THR==0 disables the threshold interrupt */ #else uint64_t iq_thr : 9; uint64_t reserved_9_11 : 3; uint64_t ds_thr : 9; uint64_t reserved_21_23 : 3; uint64_t tc_thr : 4; uint64_t tc_en : 1; uint64_t reserved_29_63 : 35; #endif } cn52xx; struct cvmx_pow_wq_int_thrx_cn52xx cn52xxp1; struct cvmx_pow_wq_int_thrx_s cn56xx; struct cvmx_pow_wq_int_thrx_s cn56xxp1; struct cvmx_pow_wq_int_thrx_s cn58xx; struct cvmx_pow_wq_int_thrx_s cn58xxp1; } cvmx_pow_wq_int_thrx_t; /** * cvmx_pow_ws_pc# * * POW_WS_PCX = POW Work Schedule Performance Counter (1 per group) * * Counts the number of work schedules for each group. Write to clear. */ typedef union { uint64_t u64; struct cvmx_pow_ws_pcx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t ws_pc : 32; /**< Work schedule performance counter for group X */ #else uint64_t ws_pc : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_pow_ws_pcx_s cn30xx; struct cvmx_pow_ws_pcx_s cn31xx; struct cvmx_pow_ws_pcx_s cn38xx; struct cvmx_pow_ws_pcx_s cn38xxp2; struct cvmx_pow_ws_pcx_s cn50xx; struct cvmx_pow_ws_pcx_s cn52xx; struct cvmx_pow_ws_pcx_s cn52xxp1; struct cvmx_pow_ws_pcx_s cn56xx; struct cvmx_pow_ws_pcx_s cn56xxp1; struct cvmx_pow_ws_pcx_s cn58xx; struct cvmx_pow_ws_pcx_s cn58xxp1; } cvmx_pow_ws_pcx_t; /** * cvmx_rad_mem_debug0 * * Notes: * This CSR is a memory of 32 entries, and thus, the RAD_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. A read of any entry that has not been * previously written is illegal and will result in unpredictable CSR read data. */ typedef union { uint64_t u64; struct cvmx_rad_mem_debug0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t iword : 64; /**< IWord */ #else uint64_t iword : 64; #endif } s; struct cvmx_rad_mem_debug0_s cn52xx; struct cvmx_rad_mem_debug0_s cn52xxp1; struct cvmx_rad_mem_debug0_s cn56xx; struct cvmx_rad_mem_debug0_s cn56xxp1; } cvmx_rad_mem_debug0_t; /** * cvmx_rad_mem_debug1 * * Notes: * This CSR is a memory of 256 entries, and thus, the RAD_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. A read of any entry that has not been * previously written is illegal and will result in unpredictable CSR read data. 
*/ typedef union { uint64_t u64; struct cvmx_rad_mem_debug1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t p_dat : 64; /**< P data */ #else uint64_t p_dat : 64; #endif } s; struct cvmx_rad_mem_debug1_s cn52xx; struct cvmx_rad_mem_debug1_s cn52xxp1; struct cvmx_rad_mem_debug1_s cn56xx; struct cvmx_rad_mem_debug1_s cn56xxp1; } cvmx_rad_mem_debug1_t; /** * cvmx_rad_mem_debug2 * * Notes: * This CSR is a memory of 256 entries, and thus, the RAD_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. A read of any entry that has not been * previously written is illegal and will result in unpredictable CSR read data. */ typedef union { uint64_t u64; struct cvmx_rad_mem_debug2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t q_dat : 64; /**< Q data */ #else uint64_t q_dat : 64; #endif } s; struct cvmx_rad_mem_debug2_s cn52xx; struct cvmx_rad_mem_debug2_s cn52xxp1; struct cvmx_rad_mem_debug2_s cn56xx; struct cvmx_rad_mem_debug2_s cn56xxp1; } cvmx_rad_mem_debug2_t; /** * cvmx_rad_reg_bist_result * * Notes: * Access to the internal BiST results * Each bit is the BiST result of an individual memory (per bit, 0=pass and 1=fail). */ typedef union { uint64_t u64; struct cvmx_rad_reg_bist_result_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_6_63 : 58; uint64_t sta : 1; /**< BiST result of the STA memories */ uint64_t ncb_oub : 1; /**< BiST result of the NCB_OUB memories */ uint64_t ncb_inb : 2; /**< BiST result of the NCB_INB memories */ uint64_t dat : 2; /**< BiST result of the DAT memories */ #else uint64_t dat : 2; uint64_t ncb_inb : 2; uint64_t ncb_oub : 1; uint64_t sta : 1; uint64_t reserved_6_63 : 58; #endif } s; struct cvmx_rad_reg_bist_result_s cn52xx; struct cvmx_rad_reg_bist_result_s cn52xxp1; struct cvmx_rad_reg_bist_result_s cn56xx; struct cvmx_rad_reg_bist_result_s cn56xxp1; } cvmx_rad_reg_bist_result_t; /** * cvmx_rad_reg_cmd_buf * * Notes: * Sets the command buffer parameters * The size of the command buffer segments is measured in uint64s. The pool specifies 1 of 8 free * lists to be used when freeing command buffer segments. The PTR field is overwritten with the next * pointer each time that the command buffer segment is exhausted. */ typedef union { uint64_t u64; struct cvmx_rad_reg_cmd_buf_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_58_63 : 6; uint64_t dwb : 9; /**< Number of DontWriteBacks */ uint64_t pool : 3; /**< Free list used to free command buffer segments */ uint64_t size : 13; /**< Number of uint64s per command buffer segment */ uint64_t ptr : 33; /**< Initial command buffer pointer[39:7] (128B-aligned) */ #else uint64_t ptr : 33; uint64_t size : 13; uint64_t pool : 3; uint64_t dwb : 9; uint64_t reserved_58_63 : 6; #endif } s; struct cvmx_rad_reg_cmd_buf_s cn52xx; struct cvmx_rad_reg_cmd_buf_s cn52xxp1; struct cvmx_rad_reg_cmd_buf_s cn56xx; struct cvmx_rad_reg_cmd_buf_s cn56xxp1; } cvmx_rad_reg_cmd_buf_t; /** * cvmx_rad_reg_ctl * * Notes: * MAX_READ is a throttle to control NCB usage. Values >8 are illegal. 
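 *
 * A conservative read-modify-write sketch (illustrative only; the
 * accessors and the CVMX_RAD_REG_CTL address macro come from the
 * companion cvmx headers):
 *
 *   cvmx_rad_reg_ctl_t ctl;
 *   ctl.u64 = cvmx_read_csr(CVMX_RAD_REG_CTL);
 *   ctl.s.max_read = 4;   // must stay <= 8; values > 8 are illegal
 *   cvmx_write_csr(CVMX_RAD_REG_CTL, ctl.u64);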
* */ typedef union { uint64_t u64; struct cvmx_rad_reg_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_6_63 : 58; uint64_t max_read : 4; /**< Maximum number of outstanding data read commands */ uint64_t store_le : 1; /**< Force STORE0 byte write address to little endian */ uint64_t reset : 1; /**< Reset oneshot pulse (lasts for 4 cycles) */ #else uint64_t reset : 1; uint64_t store_le : 1; uint64_t max_read : 4; uint64_t reserved_6_63 : 58; #endif } s; struct cvmx_rad_reg_ctl_s cn52xx; struct cvmx_rad_reg_ctl_s cn52xxp1; struct cvmx_rad_reg_ctl_s cn56xx; struct cvmx_rad_reg_ctl_s cn56xxp1; } cvmx_rad_reg_ctl_t; /** * cvmx_rad_reg_debug0 */ typedef union { uint64_t u64; struct cvmx_rad_reg_debug0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_57_63 : 7; uint64_t loop : 25; /**< Loop offset */ uint64_t reserved_22_31 : 10; uint64_t iridx : 6; /**< IWords read index */ uint64_t reserved_14_15 : 2; uint64_t iwidx : 6; /**< IWords write index */ uint64_t owordqv : 1; /**< Valid for OWORDQ */ uint64_t owordpv : 1; /**< Valid for OWORDP */ uint64_t commit : 1; /**< Waiting for write commit */ uint64_t state : 5; /**< Main state */ #else uint64_t state : 5; uint64_t commit : 1; uint64_t owordpv : 1; uint64_t owordqv : 1; uint64_t iwidx : 6; uint64_t reserved_14_15 : 2; uint64_t iridx : 6; uint64_t reserved_22_31 : 10; uint64_t loop : 25; uint64_t reserved_57_63 : 7; #endif } s; struct cvmx_rad_reg_debug0_s cn52xx; struct cvmx_rad_reg_debug0_s cn52xxp1; struct cvmx_rad_reg_debug0_s cn56xx; struct cvmx_rad_reg_debug0_s cn56xxp1; } cvmx_rad_reg_debug0_t; /** * cvmx_rad_reg_debug1 */ typedef union { uint64_t u64; struct cvmx_rad_reg_debug1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t cword : 64; /**< CWord */ #else uint64_t cword : 64; #endif } s; struct cvmx_rad_reg_debug1_s cn52xx; struct cvmx_rad_reg_debug1_s cn52xxp1; struct cvmx_rad_reg_debug1_s cn56xx; struct cvmx_rad_reg_debug1_s cn56xxp1; } cvmx_rad_reg_debug1_t; /** * cvmx_rad_reg_debug10 */ typedef union { uint64_t u64; struct cvmx_rad_reg_debug10_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t flags : 8; /**< OCTL flags */ uint64_t size : 16; /**< OCTL size (bytes) */ uint64_t ptr : 40; /**< OCTL pointer */ #else uint64_t ptr : 40; uint64_t size : 16; uint64_t flags : 8; #endif } s; struct cvmx_rad_reg_debug10_s cn52xx; struct cvmx_rad_reg_debug10_s cn52xxp1; struct cvmx_rad_reg_debug10_s cn56xx; struct cvmx_rad_reg_debug10_s cn56xxp1; } cvmx_rad_reg_debug10_t; /** * cvmx_rad_reg_debug11 */ typedef union { uint64_t u64; struct cvmx_rad_reg_debug11_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_13_63 : 51; uint64_t q : 1; /**< OCTL q flag */ uint64_t p : 1; /**< OCTL p flag */ uint64_t wc : 1; /**< OCTL write commit flag */ uint64_t eod : 1; /**< OCTL eod flag */ uint64_t sod : 1; /**< OCTL sod flag */ uint64_t index : 8; /**< OCTL index */ #else uint64_t index : 8; uint64_t sod : 1; uint64_t eod : 1; uint64_t wc : 1; uint64_t p : 1; uint64_t q : 1; uint64_t reserved_13_63 : 51; #endif } s; struct cvmx_rad_reg_debug11_s cn52xx; struct cvmx_rad_reg_debug11_s cn52xxp1; struct cvmx_rad_reg_debug11_s cn56xx; struct cvmx_rad_reg_debug11_s cn56xxp1; } cvmx_rad_reg_debug11_t; /** * cvmx_rad_reg_debug12 */ typedef union { uint64_t u64; struct cvmx_rad_reg_debug12_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_15_63 : 49; uint64_t asserts : 15; /**< Various assertion checks */ #else uint64_t asserts : 15; uint64_t reserved_15_63 : 49; #endif } s; struct cvmx_rad_reg_debug12_s cn52xx; struct cvmx_rad_reg_debug12_s 
cn52xxp1; struct cvmx_rad_reg_debug12_s cn56xx; struct cvmx_rad_reg_debug12_s cn56xxp1; } cvmx_rad_reg_debug12_t; /** * cvmx_rad_reg_debug2 */ typedef union { uint64_t u64; struct cvmx_rad_reg_debug2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t owordp : 64; /**< OWordP */ #else uint64_t owordp : 64; #endif } s; struct cvmx_rad_reg_debug2_s cn52xx; struct cvmx_rad_reg_debug2_s cn52xxp1; struct cvmx_rad_reg_debug2_s cn56xx; struct cvmx_rad_reg_debug2_s cn56xxp1; } cvmx_rad_reg_debug2_t; /** * cvmx_rad_reg_debug3 */ typedef union { uint64_t u64; struct cvmx_rad_reg_debug3_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t owordq : 64; /**< OWordQ */ #else uint64_t owordq : 64; #endif } s; struct cvmx_rad_reg_debug3_s cn52xx; struct cvmx_rad_reg_debug3_s cn52xxp1; struct cvmx_rad_reg_debug3_s cn56xx; struct cvmx_rad_reg_debug3_s cn56xxp1; } cvmx_rad_reg_debug3_t; /** * cvmx_rad_reg_debug4 */ typedef union { uint64_t u64; struct cvmx_rad_reg_debug4_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t rword : 64; /**< RWord */ #else uint64_t rword : 64; #endif } s; struct cvmx_rad_reg_debug4_s cn52xx; struct cvmx_rad_reg_debug4_s cn52xxp1; struct cvmx_rad_reg_debug4_s cn56xx; struct cvmx_rad_reg_debug4_s cn56xxp1; } cvmx_rad_reg_debug4_t; /** * cvmx_rad_reg_debug5 */ typedef union { uint64_t u64; struct cvmx_rad_reg_debug5_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_53_63 : 11; uint64_t niropc7 : 3; /**< NCBI ropc (stage7 grant) */ uint64_t nirque7 : 2; /**< NCBI rque (stage7 grant) */ uint64_t nirval7 : 5; /**< NCBI rval (stage7 grant) */ uint64_t niropc6 : 3; /**< NCBI ropc (stage6 arb) */ uint64_t nirque6 : 2; /**< NCBI rque (stage6 arb) */ uint64_t nirarb6 : 1; /**< NCBI rarb (stage6 arb) */ uint64_t nirval6 : 5; /**< NCBI rval (stage6 arb) */ uint64_t niridx1 : 4; /**< NCBI ridx1 */ uint64_t niwidx1 : 4; /**< NCBI widx1 */ uint64_t niridx0 : 4; /**< NCBI ridx0 */ uint64_t niwidx0 : 4; /**< NCBI widx0 */ uint64_t wccreds : 2; /**< WC credits */ uint64_t fpacreds : 2; /**< FPA credits */ uint64_t reserved_10_11 : 2; uint64_t powcreds : 2; /**< POW credits */ uint64_t n1creds : 4; /**< NCBI1 credits */ uint64_t n0creds : 4; /**< NCBI0 credits */ #else uint64_t n0creds : 4; uint64_t n1creds : 4; uint64_t powcreds : 2; uint64_t reserved_10_11 : 2; uint64_t fpacreds : 2; uint64_t wccreds : 2; uint64_t niwidx0 : 4; uint64_t niridx0 : 4; uint64_t niwidx1 : 4; uint64_t niridx1 : 4; uint64_t nirval6 : 5; uint64_t nirarb6 : 1; uint64_t nirque6 : 2; uint64_t niropc6 : 3; uint64_t nirval7 : 5; uint64_t nirque7 : 2; uint64_t niropc7 : 3; uint64_t reserved_53_63 : 11; #endif } s; struct cvmx_rad_reg_debug5_s cn52xx; struct cvmx_rad_reg_debug5_s cn52xxp1; struct cvmx_rad_reg_debug5_s cn56xx; struct cvmx_rad_reg_debug5_s cn56xxp1; } cvmx_rad_reg_debug5_t; /** * cvmx_rad_reg_debug6 */ typedef union { uint64_t u64; struct cvmx_rad_reg_debug6_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t cnt : 8; /**< CCTL count[7:0] (bytes) */ uint64_t size : 16; /**< CCTL size (bytes) */ uint64_t ptr : 40; /**< CCTL pointer */ #else uint64_t ptr : 40; uint64_t size : 16; uint64_t cnt : 8; #endif } s; struct cvmx_rad_reg_debug6_s cn52xx; struct cvmx_rad_reg_debug6_s cn52xxp1; struct cvmx_rad_reg_debug6_s cn56xx; struct cvmx_rad_reg_debug6_s cn56xxp1; } cvmx_rad_reg_debug6_t; /** * cvmx_rad_reg_debug7 */ typedef union { uint64_t u64; struct cvmx_rad_reg_debug7_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_15_63 : 49; uint64_t cnt : 15; /**< CCTL count[22:8] (bytes) */ #else uint64_t cnt : 15; uint64_t reserved_15_63 :
49; #endif } s; struct cvmx_rad_reg_debug7_s cn52xx; struct cvmx_rad_reg_debug7_s cn52xxp1; struct cvmx_rad_reg_debug7_s cn56xx; struct cvmx_rad_reg_debug7_s cn56xxp1; } cvmx_rad_reg_debug7_t; /** * cvmx_rad_reg_debug8 */ typedef union { uint64_t u64; struct cvmx_rad_reg_debug8_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t flags : 8; /**< ICTL flags */ uint64_t size : 16; /**< ICTL size (bytes) */ uint64_t ptr : 40; /**< ICTL pointer */ #else uint64_t ptr : 40; uint64_t size : 16; uint64_t flags : 8; #endif } s; struct cvmx_rad_reg_debug8_s cn52xx; struct cvmx_rad_reg_debug8_s cn52xxp1; struct cvmx_rad_reg_debug8_s cn56xx; struct cvmx_rad_reg_debug8_s cn56xxp1; } cvmx_rad_reg_debug8_t; /** * cvmx_rad_reg_debug9 */ typedef union { uint64_t u64; struct cvmx_rad_reg_debug9_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t eod : 1; /**< ICTL eod flag */ uint64_t ini : 1; /**< ICTL init flag */ uint64_t q : 1; /**< ICTL q enable */ uint64_t p : 1; /**< ICTL p enable */ uint64_t mul : 8; /**< ICTL multiplier */ uint64_t index : 8; /**< ICTL index */ #else uint64_t index : 8; uint64_t mul : 8; uint64_t p : 1; uint64_t q : 1; uint64_t ini : 1; uint64_t eod : 1; uint64_t reserved_20_63 : 44; #endif } s; struct cvmx_rad_reg_debug9_s cn52xx; struct cvmx_rad_reg_debug9_s cn52xxp1; struct cvmx_rad_reg_debug9_s cn56xx; struct cvmx_rad_reg_debug9_s cn56xxp1; } cvmx_rad_reg_debug9_t; /** * cvmx_rad_reg_error */ typedef union { uint64_t u64; struct cvmx_rad_reg_error_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t doorbell : 1; /**< A doorbell count has overflowed */ #else uint64_t doorbell : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_rad_reg_error_s cn52xx; struct cvmx_rad_reg_error_s cn52xxp1; struct cvmx_rad_reg_error_s cn56xx; struct cvmx_rad_reg_error_s cn56xxp1; } cvmx_rad_reg_error_t; /** * cvmx_rad_reg_int_mask * * Notes: * When a mask bit is set, the corresponding interrupt is enabled. * */ typedef union { uint64_t u64; struct cvmx_rad_reg_int_mask_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t doorbell : 1; /**< Bit mask corresponding to RAD_REG_ERROR[0] above */ #else uint64_t doorbell : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_rad_reg_int_mask_s cn52xx; struct cvmx_rad_reg_int_mask_s cn52xxp1; struct cvmx_rad_reg_int_mask_s cn56xx; struct cvmx_rad_reg_int_mask_s cn56xxp1; } cvmx_rad_reg_int_mask_t; /** * cvmx_rad_reg_polynomial * * Notes: * The polynomial is x^8 + C7*x^7 + C6*x^6 + C5*x^5 + C4*x^4 + C3*x^3 + C2*x^2 + C1*x^1 + C0. * */ typedef union { uint64_t u64; struct cvmx_rad_reg_polynomial_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t coeffs : 8; /**< coefficients of GF(2^8) irreducible polynomial */ #else uint64_t coeffs : 8; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_rad_reg_polynomial_s cn52xx; struct cvmx_rad_reg_polynomial_s cn52xxp1; struct cvmx_rad_reg_polynomial_s cn56xx; struct cvmx_rad_reg_polynomial_s cn56xxp1; } cvmx_rad_reg_polynomial_t; /** * cvmx_rad_reg_read_idx * * Notes: * Provides the read index during a CSR read operation to any of the CSRs that are physically stored * as memories. The names of these CSRs begin with the prefix "RAD_MEM_". * IDX[15:0] is the read index. INC[15:0] is an increment that is added to IDX[15:0] after any CSR read. * The intended use is to initially write this CSR such that IDX=0 and INC=1. Then, the entire * contents of a CSR memory can be read with consecutive CSR read commands. 
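 *
 * For instance, dumping all 32 RAD_MEM_DEBUG0 entries could look like
 * this sketch (illustrative only; the accessors and address macros come
 * from the companion cvmx headers; only previously written entries
 * should be read):
 *
 *   cvmx_rad_reg_read_idx_t idx;
 *   int i;
 *   idx.u64 = 0;
 *   idx.s.index = 0;
 *   idx.s.inc = 1;   // auto-increment the index after every CSR read
 *   cvmx_write_csr(CVMX_RAD_REG_READ_IDX, idx.u64);
 *   for (i = 0; i < 32; i++)
 *       (void)cvmx_read_csr(CVMX_RAD_MEM_DEBUG0);   // returns entry i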
*/ typedef union { uint64_t u64; struct cvmx_rad_reg_read_idx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t inc : 16; /**< Increment to add to current index for next index */ uint64_t index : 16; /**< Index to use for next memory CSR read */ #else uint64_t index : 16; uint64_t inc : 16; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_rad_reg_read_idx_s cn52xx; struct cvmx_rad_reg_read_idx_s cn52xxp1; struct cvmx_rad_reg_read_idx_s cn56xx; struct cvmx_rad_reg_read_idx_s cn56xxp1; } cvmx_rad_reg_read_idx_t; /** * cvmx_rnm_bist_status * * RNM_BIST_STATUS = RNM's BIST Status Register * * The RNM's Memory Bist Status register. */ typedef union { uint64_t u64; struct cvmx_rnm_bist_status_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_2_63 : 62; uint64_t rrc : 1; /**< Status of RRC block bist. */ uint64_t mem : 1; /**< Status of MEM block bist. */ #else uint64_t mem : 1; uint64_t rrc : 1; uint64_t reserved_2_63 : 62; #endif } s; struct cvmx_rnm_bist_status_s cn30xx; struct cvmx_rnm_bist_status_s cn31xx; struct cvmx_rnm_bist_status_s cn38xx; struct cvmx_rnm_bist_status_s cn38xxp2; struct cvmx_rnm_bist_status_s cn50xx; struct cvmx_rnm_bist_status_s cn52xx; struct cvmx_rnm_bist_status_s cn52xxp1; struct cvmx_rnm_bist_status_s cn56xx; struct cvmx_rnm_bist_status_s cn56xxp1; struct cvmx_rnm_bist_status_s cn58xx; struct cvmx_rnm_bist_status_s cn58xxp1; } cvmx_rnm_bist_status_t; /** * cvmx_rnm_ctl_status * * RNM_CTL_STATUS = RNM's Control/Status Register * * The RNM's interrupt enable register. */ typedef union { uint64_t u64; struct cvmx_rnm_ctl_status_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_9_63 : 55; uint64_t ent_sel : 4; /**< ? */ uint64_t exp_ent : 1; /**< Exported entropy enable for random number generator */ uint64_t rng_rst : 1; /**< Reset RNG as core reset. */ uint64_t rnm_rst : 1; /**< Reset the RNM as core reset except for register logic. */ uint64_t rng_en : 1; /**< Enable the output of the RNG. */ uint64_t ent_en : 1; /**< Entropy enable for random number generator. */ #else uint64_t ent_en : 1; uint64_t rng_en : 1; uint64_t rnm_rst : 1; uint64_t rng_rst : 1; uint64_t exp_ent : 1; uint64_t ent_sel : 4; uint64_t reserved_9_63 : 55; #endif } s; struct cvmx_rnm_ctl_status_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t rng_rst : 1; /**< Reset RNG as core reset. */ uint64_t rnm_rst : 1; /**< Reset the RNM as core reset except for register logic. */ uint64_t rng_en : 1; /**< Enable the output of the RNG. */ uint64_t ent_en : 1; /**< Entropy enable for random number generator. 
*/ #else uint64_t ent_en : 1; uint64_t rng_en : 1; uint64_t rnm_rst : 1; uint64_t rng_rst : 1; uint64_t reserved_4_63 : 60; #endif } cn30xx; struct cvmx_rnm_ctl_status_cn30xx cn31xx; struct cvmx_rnm_ctl_status_cn30xx cn38xx; struct cvmx_rnm_ctl_status_cn30xx cn38xxp2; struct cvmx_rnm_ctl_status_s cn50xx; struct cvmx_rnm_ctl_status_s cn52xx; struct cvmx_rnm_ctl_status_s cn52xxp1; struct cvmx_rnm_ctl_status_s cn56xx; struct cvmx_rnm_ctl_status_s cn56xxp1; struct cvmx_rnm_ctl_status_s cn58xx; struct cvmx_rnm_ctl_status_s cn58xxp1; } cvmx_rnm_ctl_status_t; /** * cvmx_smi#_clk * * SMI_CLK = Clock Control Register * */ typedef union { uint64_t u64; struct cvmx_smix_clk_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_25_63 : 39; uint64_t mode : 1; /**< IEEE operating mode 0=Clause 22 compliant 1=Clause 45 compliant */ uint64_t reserved_21_23 : 3; uint64_t sample_hi : 5; /**< When to sample read data (extended bits) */ uint64_t sample_mode : 1; /**< Read Data sampling mode According to the 802.3 spec, on reads, the STA transitions MDC and the PHY drives MDIO with some delay relative to that edge. This is edge1. The STA then samples MDIO on the next rising edge of MDC. This is edge2. Octeon can sample the read data relative to either edge. 0=[SAMPLE_HI,SAMPLE] specify the sample time relative to edge2 1=[SAMPLE_HI,SAMPLE] specify the sample time relative to edge1 */ uint64_t reserved_14_14 : 1; uint64_t clk_idle : 1; /**< Do not toggle MDC on idle cycles */ uint64_t preamble : 1; /**< Send PREAMBLE on SMI transaction */ uint64_t sample : 4; /**< When to sample read data (number of eclks after the rising edge of mdc) ( [SAMPLE_HI,SAMPLE] > 1 ) ( [SAMPLE_HI, SAMPLE] + 3 <= 2*PHASE ) */ uint64_t phase : 8; /**< MDC Clock Phase (number of eclks that make up an mdc phase) (PHASE > 2) */ #else uint64_t phase : 8; uint64_t sample : 4; uint64_t preamble : 1; uint64_t clk_idle : 1; uint64_t reserved_14_14 : 1; uint64_t sample_mode : 1; uint64_t sample_hi : 5; uint64_t reserved_21_23 : 3; uint64_t mode : 1; uint64_t reserved_25_63 : 39; #endif } s; struct cvmx_smix_clk_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_21_63 : 43; uint64_t sample_hi : 5; /**< When to sample read data (extended bits) */ uint64_t sample_mode : 1; /**< Read Data sampling mode According to the 802.3 spec, on reads, the STA transitions MDC and the PHY drives MDIO with some delay relative to that edge. This is edge1. The STA then samples MDIO on the next rising edge of MDC. This is edge2. Octeon can sample the read data relative to either edge.
0=[SAMPLE_HI,SAMPLE] specify the sample time relative to edge2 1=[SAMPLE_HI,SAMPLE] specify the sample time relative to edge1 */ uint64_t reserved_14_14 : 1; uint64_t clk_idle : 1; /**< Do not toggle MDC on idle cycles */ uint64_t preamble : 1; /**< Send PREAMBLE on SMI transaction */ uint64_t sample : 4; /**< When to sample read data (number of eclks after the rising edge of mdc) ( [SAMPLE_HI,SAMPLE] > 1 ) ( [SAMPLE_HI, SAMPLE] + 3 <= 2*PHASE ) */ uint64_t phase : 8; /**< MDC Clock Phase (number of eclks that make up an mdc phase) (PHASE > 2) */ #else uint64_t phase : 8; uint64_t sample : 4; uint64_t preamble : 1; uint64_t clk_idle : 1; uint64_t reserved_14_14 : 1; uint64_t sample_mode : 1; uint64_t sample_hi : 5; uint64_t reserved_21_63 : 43; #endif } cn30xx; struct cvmx_smix_clk_cn30xx cn31xx; struct cvmx_smix_clk_cn30xx cn38xx; struct cvmx_smix_clk_cn30xx cn38xxp2; struct cvmx_smix_clk_s cn50xx; struct cvmx_smix_clk_s cn52xx; struct cvmx_smix_clk_s cn52xxp1; struct cvmx_smix_clk_s cn56xx; struct cvmx_smix_clk_s cn56xxp1; struct cvmx_smix_clk_cn30xx cn58xx; struct cvmx_smix_clk_cn30xx cn58xxp1; } cvmx_smix_clk_t; /** * cvmx_smi#_cmd * * SMI_CMD = Force a Read/Write command to the PHY * * * Notes: * Writes to this register will create SMI xactions. Software will then poll SMI_RD_DAT or SMI_WR_DAT for completion (depending on the xaction type). * */ typedef union { uint64_t u64; struct cvmx_smix_cmd_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_18_63 : 46; uint64_t phy_op : 2; /**< PHY Opcode 0=write 1=read */ uint64_t reserved_13_15 : 3; uint64_t phy_adr : 5; /**< PHY Address */ uint64_t reserved_5_7 : 3; uint64_t reg_adr : 5; /**< PHY Register Offset */ #else uint64_t reg_adr : 5; uint64_t reserved_5_7 : 3; uint64_t phy_adr : 5; uint64_t reserved_13_15 : 3; uint64_t phy_op : 2; uint64_t reserved_18_63 : 46; #endif } s; struct cvmx_smix_cmd_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_17_63 : 47; uint64_t phy_op : 1; /**< PHY Opcode 0=write 1=read */ uint64_t reserved_13_15 : 3; uint64_t phy_adr : 5; /**< PHY Address */ uint64_t reserved_5_7 : 3; uint64_t reg_adr : 5; /**< PHY Register Offset */ #else uint64_t reg_adr : 5; uint64_t reserved_5_7 : 3; uint64_t phy_adr : 5; uint64_t reserved_13_15 : 3; uint64_t phy_op : 1; uint64_t reserved_17_63 : 47; #endif } cn30xx; struct cvmx_smix_cmd_cn30xx cn31xx; struct cvmx_smix_cmd_cn30xx cn38xx; struct cvmx_smix_cmd_cn30xx cn38xxp2; struct cvmx_smix_cmd_s cn50xx; struct cvmx_smix_cmd_s cn52xx; struct cvmx_smix_cmd_s cn52xxp1; struct cvmx_smix_cmd_s cn56xx; struct cvmx_smix_cmd_s cn56xxp1; struct cvmx_smix_cmd_cn30xx cn58xx; struct cvmx_smix_cmd_cn30xx cn58xxp1; } cvmx_smix_cmd_t; /** * cvmx_smi#_en * * SMI_EN = Enable the SMI interface * */ typedef union { uint64_t u64; struct cvmx_smix_en_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_1_63 : 63; uint64_t en : 1; /**< Interface enable 0=SMI Interface is down / no transactions, no MDC 1=SMI Interface is up */ #else uint64_t en : 1; uint64_t reserved_1_63 : 63; #endif } s; struct cvmx_smix_en_s cn30xx; struct cvmx_smix_en_s cn31xx; struct cvmx_smix_en_s cn38xx; struct cvmx_smix_en_s cn38xxp2; struct cvmx_smix_en_s cn50xx; struct cvmx_smix_en_s cn52xx; struct cvmx_smix_en_s cn52xxp1; struct cvmx_smix_en_s cn56xx; struct cvmx_smix_en_s cn56xxp1; struct cvmx_smix_en_s cn58xx; struct cvmx_smix_en_s cn58xxp1; } cvmx_smix_en_t; /** * cvmx_smi#_rd_dat * * SMI_RD_DAT = SMI Read Data * * * Notes: * VAL will assert when the read xaction completes. A read to this register * will clear VAL. PENDING indicates that an SMI RD transaction is in flight.
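 *
 * A read xaction sketch (illustrative only; cvmx_read_csr(),
 * cvmx_write_csr() and the CVMX_SMIX_* address macros come from the
 * companion cvmx headers; <phy> and <reg> are hypothetical values):
 *
 *   cvmx_smix_cmd_t cmd;
 *   cvmx_smix_rd_dat_t rd;
 *   cmd.u64 = 0;
 *   cmd.s.phy_op = 1;      // 1 = read
 *   cmd.s.phy_adr = phy;
 *   cmd.s.reg_adr = reg;
 *   cvmx_write_csr(CVMX_SMIX_CMD(0), cmd.u64);
 *   do {
 *       rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(0));
 *   } while (rd.s.pending);
 *   // on success rd.s.val is set and rd.s.dat holds the 16-bit data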
*/ typedef union { uint64_t u64; struct cvmx_smix_rd_dat_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_18_63 : 46; uint64_t pending : 1; /**< Read Xaction Pending */ uint64_t val : 1; /**< Read Data Valid */ uint64_t dat : 16; /**< Read Data */ #else uint64_t dat : 16; uint64_t val : 1; uint64_t pending : 1; uint64_t reserved_18_63 : 46; #endif } s; struct cvmx_smix_rd_dat_s cn30xx; struct cvmx_smix_rd_dat_s cn31xx; struct cvmx_smix_rd_dat_s cn38xx; struct cvmx_smix_rd_dat_s cn38xxp2; struct cvmx_smix_rd_dat_s cn50xx; struct cvmx_smix_rd_dat_s cn52xx; struct cvmx_smix_rd_dat_s cn52xxp1; struct cvmx_smix_rd_dat_s cn56xx; struct cvmx_smix_rd_dat_s cn56xxp1; struct cvmx_smix_rd_dat_s cn58xx; struct cvmx_smix_rd_dat_s cn58xxp1; } cvmx_smix_rd_dat_t; /** * cvmx_smi#_wr_dat * * SMI_WR_DAT = SMI Write Data * * * Notes: * VAL will assert when the write xaction completes. A read to this register * will clear VAL. PENDING indicates that an SMI WR transaction is in flight. */ typedef union { uint64_t u64; struct cvmx_smix_wr_dat_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_18_63 : 46; uint64_t pending : 1; /**< Write Xaction Pending */ uint64_t val : 1; /**< Write Data Valid */ uint64_t dat : 16; /**< Write Data */ #else uint64_t dat : 16; uint64_t val : 1; uint64_t pending : 1; uint64_t reserved_18_63 : 46; #endif } s; struct cvmx_smix_wr_dat_s cn30xx; struct cvmx_smix_wr_dat_s cn31xx; struct cvmx_smix_wr_dat_s cn38xx; struct cvmx_smix_wr_dat_s cn38xxp2; struct cvmx_smix_wr_dat_s cn50xx; struct cvmx_smix_wr_dat_s cn52xx; struct cvmx_smix_wr_dat_s cn52xxp1; struct cvmx_smix_wr_dat_s cn56xx; struct cvmx_smix_wr_dat_s cn56xxp1; struct cvmx_smix_wr_dat_s cn58xx; struct cvmx_smix_wr_dat_s cn58xxp1; } cvmx_smix_wr_dat_t; /** * cvmx_spx#_bckprs_cnt * * Notes: * The back pressure watcher counts the number of cycles in which the spi * receiver receives data once the TPA for a particular port has been * deasserted. The desired port to watch can be selected with the * SPX_TPA_SEL[PRTSEL] field. * * This register can be cleared by simply writing all 1's to it. */ typedef union { uint64_t u64; struct cvmx_spxx_bckprs_cnt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t cnt : 32; /**< Number of cycles when back-pressure is received */ #else uint64_t cnt : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_spxx_bckprs_cnt_s cn38xx; struct cvmx_spxx_bckprs_cnt_s cn38xxp2; struct cvmx_spxx_bckprs_cnt_s cn58xx; struct cvmx_spxx_bckprs_cnt_s cn58xxp1; } cvmx_spxx_bckprs_cnt_t; /** * cvmx_spx#_bist_stat * * Notes: * Bist results encoding * - 0: good (or bist in progress/never run) * - 1: bad */ typedef union { uint64_t u64; struct cvmx_spxx_bist_stat_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_3_63 : 61; uint64_t stat2 : 1; /**< Bist Results/No Repair (Tx calendar table) (spx.stx.cal.calendar) */ uint64_t stat1 : 1; /**< Bist Results/No Repair (Rx calendar table) (spx.srx.spi4.cal.calendar) */ uint64_t stat0 : 1; /**< Bist Results/No Repair (Spi4 receive datapath FIFO) (spx.srx.spi4.dat.dpr) */ #else uint64_t stat0 : 1; uint64_t stat1 : 1; uint64_t stat2 : 1; uint64_t reserved_3_63 : 61; #endif } s; struct cvmx_spxx_bist_stat_s cn38xx; struct cvmx_spxx_bist_stat_s cn38xxp2; struct cvmx_spxx_bist_stat_s cn58xx; struct cvmx_spxx_bist_stat_s cn58xxp1; } cvmx_spxx_bist_stat_t; /** * cvmx_spx#_clk_ctl * * Notes: * * SRXDLCK * When asserted, this bit locks the Spi4 receive DLLs.
This bit also * acts as the Spi4 receiver reset and must be asserted before the * training sequences are used to initialize the interface. This bit * only applies to the receiver interface. * * * RCVTRN * Once the SRXDLCK bit is asserted and the DLLs have locked and the * system has been programmed, software should assert this bit in order * to start looking for a valid training sequence and synchronize the * interface. This bit only applies to the receiver interface. * * * DRPTRN * The Spi4 receiver can either convert training packets into NOPs or * drop them entirely. Dropping ticks allows the interface to deskew * periodically if the dclk and eclk ratios are close. This bit only * applies to the receiver interface. * * * SNDTRN * When software sets this bit, it indicates that the Spi4 transmit * interface has been setup and has seen the calendar status. Once the * transmitter begins sending training data, the receiving device is free * to start traversing the calendar table to sync the link. * * * STATRCV * This bit determines which status clock edge to sample the status * channel in Spi4 mode. Since the status channel is in the opposite * direction to the datapath, the STATRCV actually affects the * transmitter/TX block. * * * STATDRV * This bit determines which status clock edge to drive the status * channel in Spi4 mode. Since the status channel is in the opposite * direction to the datapath, the STATDRV actually affects the * receiver/RX block. * * * RUNBIST * RUNBIST will begin BIST/BISR in all the SPX compiled memories. * These memories are... * * * spx.srx.spi4.dat.dpr // FIFO Spi4 to IMX * * spx.stx.cal.calendar // Spi4 TX calendar table * * spx.srx.spi4.cal.calendar // Spi4 RX calendar table * * RUNBIST must never be asserted when the interface is enabled. * Furthermore, setting RUNBIST at any other time is destructive and can * cause data and configuration corruption. The entire interface must be * reconfigured when this bit is set. * * * CLKDLY * Static clock positioning mostly intended for use in quarter clocking * schemes. The delay window is not large enough for slow clock freq, * therefore clock and data must be statically positioned with CSRs. By * changing the clock position relative to the data bits, we give the * system a wider window. * * * SEETRN * In systems in which no training data is sent to N2 or N2 cannot * correctly sample the training data, software may pulse this bit by * writing a '1' followed by a '0' in order to correctly set the * receiver's state. The receive data bus should be idle at this time * (only NOPs on the bus). If N2 cannot see at least one training * sequence, the data bus will not send any data to the core. The * interface will hang.
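 *
 * Putting the receive-side bits together, a bring-up sketch for one
 * Spi4 RX interface (illustrative only; the accessors and the
 * CVMX_SPXX_CLK_CTL address macro come from the companion cvmx headers,
 * and <iface> is a hypothetical interface number):
 *
 *   cvmx_spxx_clk_ctl_t clk_ctl;
 *   clk_ctl.u64 = cvmx_read_csr(CVMX_SPXX_CLK_CTL(iface));
 *   clk_ctl.s.srxdlck = 1;   // lock the receive DLLs first
 *   cvmx_write_csr(CVMX_SPXX_CLK_CTL(iface), clk_ctl.u64);
 *   // ... wait for the DLLs to lock, then ...
 *   clk_ctl.s.rcvtrn = 1;    // start hunting for training sequences
 *   cvmx_write_csr(CVMX_SPXX_CLK_CTL(iface), clk_ctl.u64);
 *   // SPX_CLK_STAT[SRXTRN] sets once a good training sequence is seen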
*/ typedef union { uint64_t u64; struct cvmx_spxx_clk_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_17_63 : 47; uint64_t seetrn : 1; /**< Force the Spi4 receive into seeing a training sequence */ uint64_t reserved_12_15 : 4; uint64_t clkdly : 5; /**< Set the spx__clkdly lines to this value to control the delay on the incoming dclk (spx__clkdly) */ uint64_t runbist : 1; /**< Write this bit to begin BIST testing in SPX */ uint64_t statdrv : 1; /**< Spi4 status channel drive mode - 1: Drive STAT on posedge of SCLK - 0: Drive STAT on negedge of SCLK */ uint64_t statrcv : 1; /**< Spi4 status channel sample mode - 1: Sample STAT on posedge of SCLK - 0: Sample STAT on negedge of SCLK */ uint64_t sndtrn : 1; /**< Start sending training patterns on the Spi4 Tx Interface */ uint64_t drptrn : 1; /**< Drop blocks of training packets */ uint64_t rcvtrn : 1; /**< Write this bit once the DLL is locked to sync on the training sequence */ uint64_t srxdlck : 1; /**< Write this bit to lock the Spi4 receive DLL */ #else uint64_t srxdlck : 1; uint64_t rcvtrn : 1; uint64_t drptrn : 1; uint64_t sndtrn : 1; uint64_t statrcv : 1; uint64_t statdrv : 1; uint64_t runbist : 1; uint64_t clkdly : 5; uint64_t reserved_12_15 : 4; uint64_t seetrn : 1; uint64_t reserved_17_63 : 47; #endif } s; struct cvmx_spxx_clk_ctl_s cn38xx; struct cvmx_spxx_clk_ctl_s cn38xxp2; struct cvmx_spxx_clk_ctl_s cn58xx; struct cvmx_spxx_clk_ctl_s cn58xxp1; } cvmx_spxx_clk_ctl_t; /** * cvmx_spx#_clk_stat */ typedef union { uint64_t u64; struct cvmx_spxx_clk_stat_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_11_63 : 53; uint64_t stxcal : 1; /**< The transition from Sync to Calendar on status channel */ uint64_t reserved_9_9 : 1; uint64_t srxtrn : 1; /**< Saw a good data training sequence */ uint64_t s4clk1 : 1; /**< Saw '1' on Spi4 transmit status forward clk input */ uint64_t s4clk0 : 1; /**< Saw '0' on Spi4 transmit status forward clk input */ uint64_t d4clk1 : 1; /**< Saw '1' on Spi4 receive data forward clk input */ uint64_t d4clk0 : 1; /**< Saw '0' on Spi4 receive data forward clk input */ uint64_t reserved_0_3 : 4; #else uint64_t reserved_0_3 : 4; uint64_t d4clk0 : 1; uint64_t d4clk1 : 1; uint64_t s4clk0 : 1; uint64_t s4clk1 : 1; uint64_t srxtrn : 1; uint64_t reserved_9_9 : 1; uint64_t stxcal : 1; uint64_t reserved_11_63 : 53; #endif } s; struct cvmx_spxx_clk_stat_s cn38xx; struct cvmx_spxx_clk_stat_s cn38xxp2; struct cvmx_spxx_clk_stat_s cn58xx; struct cvmx_spxx_clk_stat_s cn58xxp1; } cvmx_spxx_clk_stat_t; /** * cvmx_spx#_dbg_deskew_ctl * * Notes: * These bits are meant as a backdoor to control Spi4 per-bit deskew. See * that Spec for more details. * * The basic idea is to allow software to disable the auto-deskew widgets * and make any adjustments by hand. These steps should only be taken * once the RCVTRN bit is set and before any real traffic is sent on the * Spi4 bus. Great care should be taken when messing with these bits as * improper programming can cause catastrophic or intermittent problems. * * The params we have to test are the MUX tap selects and the XCV delay * tap selects. * * For the muxes, we can set each tap to a random value and then read * back the taps. To write... * * SPXX_DBG_DESKEW_CTL[BITSEL] = bit to set * SPXX_DBG_DESKEW_CTL[OFFSET] = mux tap value (2-bits) * SPXX_DBG_DESKEW_CTL[MUX] = go bit * * Notice this can all happen with a single CSR write. To read, first * set the bit you want to look at with the SPXX_DBG_DESKEW_CTL[BITSEL], then * simply read SPXX_DBG_DESKEW_STATE[MUXSEL]...
* * SPXX_DBG_DESKEW_CTL[BITSEL] = bit to set * SPXX_DBG_DESKEW_STATE[MUXSEL] = 2-bit value * * For the xcv delay taps, the CSR controls increment and decrement the * 5-bit count value in the XCV. This is a saturating counter, so it * will not wrap when decrementing below zero or incrementing above 31. * * To write... * * SPXX_DBG_DESKEW_CTL[BITSEL] = bit to set * SPXX_DBG_DESKEW_CTL[OFFSET] = tap value increment or decrement amount (5-bits) * SPXX_DBG_DESKEW_CTL[INC|DEC] = go bit * * These values are copied in SPX, so that they can be read back by * software by a similar mechanism to the MUX selects... * * SPXX_DBG_DESKEW_CTL[BITSEL] = bit to set * SPXX_DBG_DESKEW_STATE[OFFSET] = 5-bit value * * In addition, there is a reset bit that sets all the state back to the * default/starting value of 0x10. * * SPXX_DBG_DESKEW_CTL[CLRDLY] = 1 * * SINGLE STEP TRAINING MODE (WILMA) * Debug feature that will enable the user to single-step the debug * logic to watch initial movement and trends by putting the training * machine in single step mode. * * * SPX*_DBG_DESKEW_CTL[SSTEP] * This will put the training control logic into single step mode. We * will not deskew in this scenario and will require the TX device to * send continuous training sequences. * * It is required that SRX*_COM_CTL[INF_EN] be clear so that suspect * data does not flow into the chip. * * Deasserting SPX*_DBG_DESKEW_CTL[SSTEP] will attempt to deskew as per * the normal definition. Single step mode is for debug only. Special * care must be given to correctly deskew the interface if normal * operation is desired. * * * SPX*_DBG_DESKEW_CTL[SSTEP_GO] * Each write of '1' to SSTEP_GO will go through a single training * iteration and will perform... * * - DLL update, if SPX*_DBG_DESKEW_CTL[DLLDIS] is clear * - coarse update, if SPX*_TRN4_CTL[MUX_EN] is set * - single fine update, if SPX*_TRN4_CTL[MACRO_EN] is set and an edge * was detected after walking +/- SPX*_TRN4_CTL[MAXDIST] taps. * * Writes to this register have no effect if the interface is not in * SSTEP mode (SPX*_DBG_DESKEW_CTL[SSTEP]). * * The WILMA mode will be cleared at the final state transition, so * that software can set SPX*_DBG_DESKEW_CTL[SSTEP] and * SPX*_DBG_DESKEW_CTL[SSTEP_GO] before setting SPX*_CLK_CTL[RCVTRN] * and the machine will go through the initial iteration and stop - * waiting for another SPX*_DBG_DESKEW_CTL[SSTEP_GO] or an interface * enable. * * * SPX*_DBG_DESKEW_CTL[FALL8] * Determines how many pattern matches are required during training * operations to fall out of training and begin processing the normal data * stream. The default value is 10 pattern matches. The pattern that is * used is dependent on the SPX*_DBG_DESKEW_CTL[FALLNOP] CSR which * determines between non-training packets (the default) and NOPs. * * * SPX*_DBG_DESKEW_CTL[FALLNOP] * Determines the pattern that is required during training operations to * fall out of training and begin processing the normal data stream. The * default value is to match against non-training data. Setting this * bit changes the behavior to watch for NOP packets instead. * * This bit should not be changed dynamically while the link is * operational. */ typedef union { uint64_t u64; struct cvmx_spxx_dbg_deskew_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_30_63 : 34; uint64_t fallnop : 1; /**< Training fallout on NOP matches instead of non-training matches.
(spx_csr__spi4_fallout_nop) */ uint64_t fall8 : 1; /**< Training fallout at 8 pattern matches instead of 10 (spx_csr__spi4_fallout_8_match) */ uint64_t reserved_26_27 : 2; uint64_t sstep_go : 1; /**< Single Step Training Sequence (spx_csr__spi4_single_step_go) */ uint64_t sstep : 1; /**< Single Step Training Mode (spx_csr__spi4_single_step_mode) */ uint64_t reserved_22_23 : 2; uint64_t clrdly : 1; /**< Resets the offset control in the XCV (spx_csr__spi4_dll_clr_dly) */ uint64_t dec : 1; /**< Decrement the offset by OFFSET for the Spi4 bit selected by BITSEL (spx_csr__spi4_dbg_trn_dec) */ uint64_t inc : 1; /**< Increment the offset by OFFSET for the Spi4 bit selected by BITSEL (spx_csr__spi4_dbg_trn_inc) */ uint64_t mux : 1; /**< Set the mux select tap for the Spi4 bit selected by BITSEL (spx_csr__spi4_dbg_trn_mux) */ uint64_t offset : 5; /**< Adds or subtracts (Based on INC or DEC) the offset to Spi4 bit BITSEL. (spx_csr__spi4_dbg_trn_offset) */ uint64_t bitsel : 5; /**< Select the Spi4 CTL or DAT bit 15-0 : Spi4 DAT[15:0] 16 : Spi4 CTL - 31-17: Invalid (spx_csr__spi4_dbg_trn_bitsel) */ uint64_t offdly : 6; /**< Set the spx__offset lines to this value when not in macro sequence (spx_csr__spi4_mac_offdly) */ uint64_t dllfrc : 1; /**< Force the Spi4 RX DLL to update (spx_csr__spi4_dll_force) */ uint64_t dlldis : 1; /**< Disable sending the update signal to the Spi4 RX DLL when set (spx_csr__spi4_dll_trn_en) */ #else uint64_t dlldis : 1; uint64_t dllfrc : 1; uint64_t offdly : 6; uint64_t bitsel : 5; uint64_t offset : 5; uint64_t mux : 1; uint64_t inc : 1; uint64_t dec : 1; uint64_t clrdly : 1; uint64_t reserved_22_23 : 2; uint64_t sstep : 1; uint64_t sstep_go : 1; uint64_t reserved_26_27 : 2; uint64_t fall8 : 1; uint64_t fallnop : 1; uint64_t reserved_30_63 : 34; #endif } s; struct cvmx_spxx_dbg_deskew_ctl_s cn38xx; struct cvmx_spxx_dbg_deskew_ctl_s cn38xxp2; struct cvmx_spxx_dbg_deskew_ctl_s cn58xx; struct cvmx_spxx_dbg_deskew_ctl_s cn58xxp1; } cvmx_spxx_dbg_deskew_ctl_t; /** * cvmx_spx#_dbg_deskew_state * * Notes: * These bits are meant as a backdoor to control Spi4 per-bit deskew. See * that Spec for more details. */ typedef union { uint64_t u64; struct cvmx_spxx_dbg_deskew_state_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_9_63 : 55; uint64_t testres : 1; /**< Training Test Mode Result (srx_spi4__test_mode_result) */ uint64_t unxterm : 1; /**< Unexpected training termination (srx_spi4__top_unxexp_trn_term) */ uint64_t muxsel : 2; /**< The mux select value of the bit selected by SPX_DBG_DESKEW_CTL[BITSEL] (srx_spi4__trn_mux_sel) */ uint64_t offset : 5; /**< The counter value of the bit selected by SPX_DBG_DESKEW_CTL[BITSEL] (srx_spi4__xcv_tap_select) */ #else uint64_t offset : 5; uint64_t muxsel : 2; uint64_t unxterm : 1; uint64_t testres : 1; uint64_t reserved_9_63 : 55; #endif } s; struct cvmx_spxx_dbg_deskew_state_s cn38xx; struct cvmx_spxx_dbg_deskew_state_s cn38xxp2; struct cvmx_spxx_dbg_deskew_state_s cn58xx; struct cvmx_spxx_dbg_deskew_state_s cn58xxp1; } cvmx_spxx_dbg_deskew_state_t; /** * cvmx_spx#_drv_ctl * * Notes: * These bits all come from Duke - he will provide documentation and * explanation. I'll just butcher it.
*/ typedef union { uint64_t u64; struct cvmx_spxx_drv_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_0_63 : 64; #else uint64_t reserved_0_63 : 64; #endif } s; struct cvmx_spxx_drv_ctl_cn38xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t stx4ncmp : 4; /**< Duke (spx__spi4_tx_nctl_comp) */ uint64_t stx4pcmp : 4; /**< Duke (spx__spi4_tx_pctl_comp) */ uint64_t srx4cmp : 8; /**< Duke (spx__spi4_rx_rctl_comp) */ #else uint64_t srx4cmp : 8; uint64_t stx4pcmp : 4; uint64_t stx4ncmp : 4; uint64_t reserved_16_63 : 48; #endif } cn38xx; struct cvmx_spxx_drv_ctl_cn38xx cn38xxp2; struct cvmx_spxx_drv_ctl_cn58xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_24_63 : 40; uint64_t stx4ncmp : 4; /**< Duke (spx__spi4_tx_nctl_comp) */ uint64_t stx4pcmp : 4; /**< Duke (spx__spi4_tx_pctl_comp) */ uint64_t reserved_10_15 : 6; uint64_t srx4cmp : 10; /**< Duke (spx__spi4_rx_rctl_comp) */ #else uint64_t srx4cmp : 10; uint64_t reserved_10_15 : 6; uint64_t stx4pcmp : 4; uint64_t stx4ncmp : 4; uint64_t reserved_24_63 : 40; #endif } cn58xx; struct cvmx_spxx_drv_ctl_cn58xx cn58xxp1; } cvmx_spxx_drv_ctl_t; /** * cvmx_spx#_err_ctl * * SPX_ERR_CTL - Spi error control register * * * Notes: * * DIPPAY, DIPCLS, PRTNXA * These bits control whether or not the packet's ERR bit is set when any of * these errors is detected. If the corresponding error's bit is clear, * the packet ERR will be set. If the error bit is set, the SPX will simply * pass through the ERR bit without modifying it in any way - the error bit * may or may not have been set by the transmitter device. */ typedef union { uint64_t u64; struct cvmx_spxx_err_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_9_63 : 55; uint64_t prtnxa : 1; /**< Spi4 - set the ERR bit on packets in which the port is out-of-range */ uint64_t dipcls : 1; /**< Spi4 DIPERR on closing control words cause the ERR bit to be set */ uint64_t dippay : 1; /**< Spi4 DIPERR on payload control words cause the ERR bit to be set */ uint64_t reserved_4_5 : 2; uint64_t errcnt : 4; /**< Number of Dip4 errors before bringing down the interface */ #else uint64_t errcnt : 4; uint64_t reserved_4_5 : 2; uint64_t dippay : 1; uint64_t dipcls : 1; uint64_t prtnxa : 1; uint64_t reserved_9_63 : 55; #endif } s; struct cvmx_spxx_err_ctl_s cn38xx; struct cvmx_spxx_err_ctl_s cn38xxp2; struct cvmx_spxx_err_ctl_s cn58xx; struct cvmx_spxx_err_ctl_s cn58xxp1; } cvmx_spxx_err_ctl_t; /** * cvmx_spx#_int_dat * * SPX_INT_DAT - Interrupt Data Register * * * Notes: * Note: The SPX_INT_DAT[MUL] bit is set when multiple errors have been * detected that would set any of the data fields: PRT, RSVOP, and CALBNK. * * The following errors will cause MUL to assert for PRT conflicts. * - ABNORM * - APERR * - DPERR * * The following errors will cause MUL to assert for RSVOP conflicts. * - RSVERR * * The following errors will cause MUL to assert for CALBNK conflicts. * - CALERR * * The following errors will cause MUL to assert if multiple interrupts are * asserted. * - TPAOVR * * The MUL bit will be cleared once all outstanding errors have been * cleared by software (not just MUL errors - all errors).
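 *
 * A decode sketch for an error handler (illustrative only; the accessors
 * and the CVMX_SPXX_INT_DAT address macro come from the companion cvmx
 * headers, and <iface> is a hypothetical interface number):
 *
 *   cvmx_spxx_int_dat_t dat;
 *   dat.u64 = cvmx_read_csr(CVMX_SPXX_INT_DAT(iface));
 *   if (dat.s.mul)
 *       ;   // several errors raced; the fields reflect the first one
 *   // dat.s.prt, dat.s.rsvop and dat.s.calbnk qualify whichever error
 *   // bits are currently set in SPX_INT_REG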
*/ typedef union { uint64_t u64; struct cvmx_spxx_int_dat_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t mul : 1; /**< Multiple errors have occurred */ uint64_t reserved_14_30 : 17; uint64_t calbnk : 2; /**< Spi4 Calendar table parity error bank */ uint64_t rsvop : 4; /**< Spi4 reserved control word */ uint64_t prt : 8; /**< Port associated with error */ #else uint64_t prt : 8; uint64_t rsvop : 4; uint64_t calbnk : 2; uint64_t reserved_14_30 : 17; uint64_t mul : 1; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_spxx_int_dat_s cn38xx; struct cvmx_spxx_int_dat_s cn38xxp2; struct cvmx_spxx_int_dat_s cn58xx; struct cvmx_spxx_int_dat_s cn58xxp1; } cvmx_spxx_int_dat_t; /** * cvmx_spx#_int_msk * * SPX_INT_MSK - Interrupt Mask Register * */ typedef union { uint64_t u64; struct cvmx_spxx_int_msk_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t calerr : 1; /**< Spi4 Calendar table parity error */ uint64_t syncerr : 1; /**< Consecutive Spi4 DIP4 errors have exceeded SPX_ERR_CTL[ERRCNT] */ uint64_t diperr : 1; /**< Spi4 DIP4 error */ uint64_t tpaovr : 1; /**< Selected port has hit TPA overflow */ uint64_t rsverr : 1; /**< Spi4 reserved control word detected */ uint64_t drwnng : 1; /**< Spi4 receive FIFO drowning/overflow */ uint64_t clserr : 1; /**< Spi4 packet closed on non-16B alignment without EOP */ uint64_t spiovr : 1; /**< Spi async FIFO overflow (Spi3 or Spi4) */ uint64_t reserved_2_3 : 2; uint64_t abnorm : 1; /**< Abnormal packet termination (ERR bit) */ uint64_t prtnxa : 1; /**< Port out of range */ #else uint64_t prtnxa : 1; uint64_t abnorm : 1; uint64_t reserved_2_3 : 2; uint64_t spiovr : 1; uint64_t clserr : 1; uint64_t drwnng : 1; uint64_t rsverr : 1; uint64_t tpaovr : 1; uint64_t diperr : 1; uint64_t syncerr : 1; uint64_t calerr : 1; uint64_t reserved_12_63 : 52; #endif } s; struct cvmx_spxx_int_msk_s cn38xx; struct cvmx_spxx_int_msk_s cn38xxp2; struct cvmx_spxx_int_msk_s cn58xx; struct cvmx_spxx_int_msk_s cn58xxp1; } cvmx_spxx_int_msk_t; /** * cvmx_spx#_int_reg * * SPX_INT_REG - Interrupt Register * * * Notes: * * PRTNXA * This error indicates that the port on the Spi bus was not a valid port * for the system. Spi4 accesses occur on payload control bit-times. The * SRX can be configured with the exact number of ports available (by the * SRX_COM_CTL[PRTS] register). Any Spi access to anything outside the range * of 0 .. (SRX_COM_CTL[PRTS] - 1) is considered an error. The offending * port is logged in SPX_INT_DAT[PRT] if there are no pending interrupts in * SPX_INT_REG that require SPX_INT_DAT[PRT]. * * SRX will not drop the packet with the bogus port address. Instead, the * port will be mapped into the supported port range. The remapped address * is simply... * * Address = [ interfaceId, ADR[3:0] ] * * If the SPX detects that a PRTNXA error has occurred, the packet will * have its ERR bit set (or'ed in with the ERR bit from the transmitter) * if the SPX_ERR_CTL[PRTNXA] bit is clear. * * In Spi4 mode, SPX will generate an interrupt for every 8B data burst * associated with the invalid address. The SPX_INT_DAT[MUL] bit will never * be set. * * * ABNORM * This bit simply indicates that a given packet had abnormal termination. * In Spi4 mode, this means that the packet completed with an EOPS[1:0] code of * 2'b01. This error can also be thought of as the application specific * error (as mentioned in the Spi4 spec).
The offending port is logged in * SPX_INT_DAT[PRT] if there are no pending interrupts in SPX_INT_REG that * require SPX_INT_DAT[PRT]. * * The ABNORM error is only raised when the ERR bit that comes from the * Spi interface is set. It will never assert if any internal condition * causes the ERR bit to assert (e.g. PRTNXA or DPERR). * * * SPIOVR * This error indicates that the FIFOs that manage the async crossing from * the Spi clocks to the core clock domains have overflowed. This is a * fatal error and can cause much data/control corruption since ticks will * be dropped and reordered. This is purely a function of clock ratios and * correct system ratios should make this an impossible condition. * * * CLSERR * This is a Spi4 error that indicates that a given data transfer burst * that did not terminate with an EOP, did not end with the 16B alignment * as per the Spi4 spec. The offending port cannot be logged since the * block does not know the stream terminated until the port switches. * At that time, that packet has already been pushed down the pipe. * * The CLSERR bit does not actually check the Spi4 burst - just how data * is accumulated for the downstream logic. Bursts that are separated by * idles or training will still be merged into accumulated transfers and * will not fire the CLSERR condition. The checker is really checking * non-8B aligned, non-EOP data ticks that are sent downstream. These * ticks are what will really mess up the core. * * This is an expensive fix, so we'll probably let it ride. We never * claim to check Spi4 protocol anyway. * * * DRWNNG * This error indicates that the Spi4 FIFO that services the GMX has * overflowed. Like the SPIOVR error condition, correct system ratios * should make this an impossible condition. * * * RSVERR * This Spi4 error indicates that the Spi4 receiver has seen a reserve * control packet. A reserve control packet is an invalid combination * of bits on DAT[15:12]. Basically this is DAT[15] == 1'b0 and DAT[12] * == 1'b1 (an SOP without a payload command). The RSVERR indicates an * error has occurred and SPX_INT_DAT[RSVOP] holds the first reserved * opcode and will be set if there are no pending interrupts in * SPX_INT_REG that require SPX_INT_DAT[RSVOP]. * * * TPAOVR * This bit indicates that the TPA Watcher has flagged an event. See the * TPA Watcher for a more detailed discussion. * * * DIPERR * This bit indicates that the Spi4 receiver has encountered a DIP4 * miscompare on the datapath. A DIPERR can occur in an IDLE or a * control word that frames a data burst. If the DIPERR occurs on a * framing word there are three cases. * * 1) DIPERR occurs at the end of a data burst. The previous packet is * marked with the ERR bit to be processed later if * SPX_ERR_CTL[DIPCLS] is clear. * 2) DIPERR occurs on a payload word. The subsequent packet is marked * with the ERR bit to be processed later if SPX_ERR_CTL[DIPPAY] is * clear. * 3) DIPERR occurs on a control word that closes one packet and is a * payload for another packet. In this case, both packets will have * their ERR bit marked depending on the respective values of * SPX_ERR_CTL[DIPCLS] and SPX_ERR_CTL[DIPPAY] as discussed above. * * * SYNCERR * This bit indicates that the Spi4 receiver has encountered * SPX_ERR_CTL[ERRCNT] consecutive Spi4 DIP4 errors and the interface * should be synched. * * * CALERR * This bit indicates that the Spi4 calendar table encountered a parity * error.
This error bit is associated with the calendar table on the RX * interface - the interface that receives the Spi databus. Parity errors * can occur during normal operation when the calendar table is constantly * being read for the port information, or during initialization time, when * the user has access. Since the calendar table is split into two banks, * SPX_INT_DAT[CALBNK] indicates which banks have taken a parity error. * CALBNK[1] indicates the error occurred in the upper bank, while CALBNK[0] * indicates that the error occurred in the lower bank. SPX_INT_DAT[CALBNK] * will be set if there are no pending interrupts in SPX_INT_REG that * require SPX_INT_DAT[CALBNK]. * * * SPF * This bit indicates that a Spi fatal error has occurred. A fatal error * is defined as any error condition for which the corresponding * SPX_INT_SYNC bit is set. Therefore, conservative systems can halt the * interface on any error condition although this is not strictly * necessary. Some errors are much more fatal in nature than others. * * PRTNXA, SPIOVR, CLSERR, DRWNNG, DIPERR, CALERR, and SYNCERR are examples * of fatal errors for different reasons - usually because multiple port * streams could be affected. ABNORM, RSVERR, and TPAOVR are conditions * that are contained to a single packet which allows the interface to drop * a single packet and remain up and stable. */ typedef union { uint64_t u64; struct cvmx_spxx_int_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t spf : 1; /**< Spi interface down */ uint64_t reserved_12_30 : 19; uint64_t calerr : 1; /**< Spi4 Calendar table parity error */ uint64_t syncerr : 1; /**< Consecutive Spi4 DIP4 errors have exceeded SPX_ERR_CTL[ERRCNT] */ uint64_t diperr : 1; /**< Spi4 DIP4 error */ uint64_t tpaovr : 1; /**< Selected port has hit TPA overflow */ uint64_t rsverr : 1; /**< Spi4 reserved control word detected */ uint64_t drwnng : 1; /**< Spi4 receive FIFO drowning/overflow */ uint64_t clserr : 1; /**< Spi4 packet closed on non-16B alignment without EOP */ uint64_t spiovr : 1; /**< Spi async FIFO overflow */ uint64_t reserved_2_3 : 2; uint64_t abnorm : 1; /**< Abnormal packet termination (ERR bit) */ uint64_t prtnxa : 1; /**< Port out of range */ #else uint64_t prtnxa : 1; uint64_t abnorm : 1; uint64_t reserved_2_3 : 2; uint64_t spiovr : 1; uint64_t clserr : 1; uint64_t drwnng : 1; uint64_t rsverr : 1; uint64_t tpaovr : 1; uint64_t diperr : 1; uint64_t syncerr : 1; uint64_t calerr : 1; uint64_t reserved_12_30 : 19; uint64_t spf : 1; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_spxx_int_reg_s cn38xx; struct cvmx_spxx_int_reg_s cn38xxp2; struct cvmx_spxx_int_reg_s cn58xx; struct cvmx_spxx_int_reg_s cn58xxp1; } cvmx_spxx_int_reg_t; /** * cvmx_spx#_int_sync * * SPX_INT_SYNC - Interrupt Sync Register * * * Notes: * This mask set indicates which exception conditions should cause the * SPX_INT_REG[SPF] bit to assert. * * It is recommended that software set the PRTNXA, SPIOVR, CLSERR, DRWNNG, * DIPERR, CALERR, and SYNCERR errors as synchronization events. Software is * free to synchronize the bus on other conditions, but this is the minimum * recommended set.
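 *
 * A minimal sketch of programming that recommended set (illustrative only;
 * it assumes the generic cvmx_write_csr() helper and a
 * CVMX_SPXX_INT_SYNC(block) address macro like those in the companion cvmx
 * address headers):
 *
 *   cvmx_spxx_int_sync_t sync;
 *   sync.u64 = 0;
 *   sync.s.prtnxa  = 1;   // minimum recommended synchronization events
 *   sync.s.spiovr  = 1;
 *   sync.s.clserr  = 1;
 *   sync.s.drwnng  = 1;
 *   sync.s.diperr  = 1;
 *   sync.s.calerr  = 1;
 *   sync.s.syncerr = 1;
 *   cvmx_write_csr(CVMX_SPXX_INT_SYNC(0), sync.u64);  // Spi block 0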
*/ typedef union { uint64_t u64; struct cvmx_spxx_int_sync_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_12_63 : 52; uint64_t calerr : 1; /**< Spi4 Calendar table parity error */ uint64_t syncerr : 1; /**< Consecutive Spi4 DIP4 errors have exceeded SPX_ERR_CTL[ERRCNT] */ uint64_t diperr : 1; /**< Spi4 DIP4 error */ uint64_t tpaovr : 1; /**< Selected port has hit TPA overflow */ uint64_t rsverr : 1; /**< Spi4 reserved control word detected */ uint64_t drwnng : 1; /**< Spi4 receive FIFO drowning/overflow */ uint64_t clserr : 1; /**< Spi4 packet closed on non-16B alignment without EOP */ uint64_t spiovr : 1; /**< Spi async FIFO overflow (Spi3 or Spi4) */ uint64_t reserved_2_3 : 2; uint64_t abnorm : 1; /**< Abnormal packet termination (ERR bit) */ uint64_t prtnxa : 1; /**< Port out of range */ #else uint64_t prtnxa : 1; uint64_t abnorm : 1; uint64_t reserved_2_3 : 2; uint64_t spiovr : 1; uint64_t clserr : 1; uint64_t drwnng : 1; uint64_t rsverr : 1; uint64_t tpaovr : 1; uint64_t diperr : 1; uint64_t syncerr : 1; uint64_t calerr : 1; uint64_t reserved_12_63 : 52; #endif } s; struct cvmx_spxx_int_sync_s cn38xx; struct cvmx_spxx_int_sync_s cn38xxp2; struct cvmx_spxx_int_sync_s cn58xx; struct cvmx_spxx_int_sync_s cn58xxp1; } cvmx_spxx_int_sync_t; /** * cvmx_spx#_tpa_acc * * SPX_TPA_ACC - TPA watcher byte accumulator * * * Notes: * This field allows the user to access the TPA watcher accumulator counter. * This register reflects the number of bytes sent to IMX once the port * specified by SPX_TPA_SEL[PRTSEL] has lost its TPA. The SPX_INT_REG[TPAOVR] * bit is asserted when CNT >= SPX_TPA_MAX[MAX]. The CNT will continue to * increment until the TPA for the port is asserted. At that point the CNT * value is frozen until software clears the interrupt bit. */ typedef union { uint64_t u64; struct cvmx_spxx_tpa_acc_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t cnt : 32; /**< TPA watcher accumulate count */ #else uint64_t cnt : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_spxx_tpa_acc_s cn38xx; struct cvmx_spxx_tpa_acc_s cn38xxp2; struct cvmx_spxx_tpa_acc_s cn58xx; struct cvmx_spxx_tpa_acc_s cn58xxp1; } cvmx_spxx_tpa_acc_t; /** * cvmx_spx#_tpa_max * * SPX_TPA_MAX - TPA watcher assertion threshold * * * Notes: * The TPA watcher has the ability to notify the system with an interrupt when * too much data has been received on loss of TPA. The user sets the * SPX_TPA_MAX[MAX] register and when the watcher has accumulated that many * ticks, then the interrupt is conditionally raised (based on interrupt mask * bits). This feature will be disabled if the programmed count is zero. */ typedef union { uint64_t u64; struct cvmx_spxx_tpa_max_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t max : 32; /**< TPA watcher TPA threshold */ #else uint64_t max : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_spxx_tpa_max_s cn38xx; struct cvmx_spxx_tpa_max_s cn38xxp2; struct cvmx_spxx_tpa_max_s cn58xx; struct cvmx_spxx_tpa_max_s cn58xxp1; } cvmx_spxx_tpa_max_t; /** * cvmx_spx#_tpa_sel * * SPX_TPA_SEL - TPA watcher port selector * * * Notes: * The TPA Watcher is primarily a debug vehicle used to help initial bringup * of a system. The TPA watcher counts bytes that roll in from the Spi * interface. The user programs the Spi port to watch using * SPX_TPA_SEL[PRTSEL]. Once the TPA is deasserted for that port, the watcher * begins to count the data ticks that have been delivered to the inbound * datapath (and eventually to the IOB). 
The result is that we can derive * turn-around times of the other device by watching how much data was sent * after a loss of TPA through the SPX_TPA_ACC[CNT] register. An optional * interrupt may be raised as well. See SPX_TPA_MAX for further information. * * TPAs can be deasserted for a number of reasons... * * 1) IPD indicates backpressure * 2) The GMX inbound FIFO is filling up and should BP * 3) User has put an override on the TPA wires */ typedef union { uint64_t u64; struct cvmx_spxx_tpa_sel_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t prtsel : 4; /**< TPA watcher port select */ #else uint64_t prtsel : 4; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_spxx_tpa_sel_s cn38xx; struct cvmx_spxx_tpa_sel_s cn38xxp2; struct cvmx_spxx_tpa_sel_s cn58xx; struct cvmx_spxx_tpa_sel_s cn58xxp1; } cvmx_spxx_tpa_sel_t; /** * cvmx_spx#_trn4_ctl * * Notes: * These bits are controls for the Spi4 RX bit deskew logic. See that spec * for further details. * * * BOOT_BIT * On the initial training synchronization sequence, the hardware has the * BOOT_BIT set which means that it will continuously perform macro * operations. Once the BOOT_BIT is cleared, the macro machine will finish * the macro operation it is working on and then return to the idle state. * Subsequent training sequences will only go through a single macro * operation in order to do slight deskews. * * * JITTER * Minimum value is 1. This parameter must be set for Spi4 mode using * auto-bit deskew. Regardless of the original intent, this field must be * set non-zero for deskew to function correctly. * * The thought is the JITTER range is no longer required since the macro * machine was enhanced to understand about edge direction. Originally * these bits were intended to compensate for clock jitter. * * dly: this is the intrinsic delay of each delay element tap. * Currently, it is 70ps-110ps. * jitter: amount of jitter we expect in the system (~200ps) * j: number of taps to account for jitter * * j = ((jitter / dly) + 1) * * * TRNTEST * This mode is used to test systems to make sure that the bit deskew * parameters have been correctly set up. After configuration, software can * set the TRNTEST mode bit. This should be done before SRX_COM_CTL[ST_EN] * is set such that we can be sure that the TX device is simply sending * continuous training patterns. * * The test mode samples every incoming bit-time and makes sure that it is * either a training control or a training data packet. If any other data * is observed, then SPX_DBG_DESKEW_STATE[TESTRES] will assert signaling a * test failure. * * Software must clear TRNTEST before training is terminated. * * * Example Spi4 RX init flow... * * 1) set the CLKDLY lines (SPXX_CLK_CTL[CLKDLY]) * - these bits must be set before the DLL can successfully lock * * 2) set the SRXDLCK (SPXX_CLK_CTL[SRXDLCK]) * - this is the DLL lock bit which also acts as a block reset * * 3) wait for the DLLs to lock * * 4) set any desired fields in SPXX_DBG_DESKEW_CTL * - This register has only one field that most users will care about. * When set, DLLDIS will disable sending update pulses to the Spi4 RX * DLLs. This pulse allows the DLL to adjust to clock variations over * time. In general, it is desired behavior. * * 5) set fields in SPXX_TRN4_CTL * - These fields deal with the MUX training sequence * * MUX_EN * This is the enable bit for the mux select. The MUX select will * run in the training sequence between the DLL and the Macro * sequence when enabled.
Once the MUX selects are selected, the * entire macro sequence must be rerun. The expectation is that * this is only run at boot time and this bit is cleared at/around * step \#8. * - These fields deal with the Macro training sequence * * MACRO_EN * This is the enable bit for the macro sequence. Macro sequences * will run after the DLL and MUX training sequences. Each macro * sequence can move the offset by one value. * * MAXDIST * This is how far we will search for an edge. Example... * * dly: this is the intrinsic delay of each delay element tap. * Currently, it is 70ps-110ps. * U: bit time period in time units. * * MAXDIST = MIN(16, ((bit_time / 2) / dly)) * * Each MAXDIST iteration consists of an edge detect in the early * and late (+/-) directions in an attempt to center the data. This * requires two training transitions, the control/data and * data/control transitions which comprise a training sequence. * Therefore, the number of training sequences required for a single * macro operation is simply MAXDIST. * * 6) set the RCVTRN go bit (SPXX_CLK_CTL[RCVTRN]) * - this bit synchs on the first valid complete training cycle and * starts to process the training packets * * 6b) This is where software could manually set the controls as opposed to * letting the hardware do it. See the SPXX_DBG_DESKEW_CTL register * description for more detail. * * 7) the TX device must continue to send training packets for the initial * time period. * - this can be determined by... * * DLL: one training sequence for the DLL adjustment (regardless of enable/disable) * MUX: one training sequence for the Flop MUX taps (regardless of enable/disable) * INIT_SEQUENCES: max number of taps that we must move * * INIT_SEQUENCES = MIN(16, ((bit_time / 2) / dly)) * * INIT_TRN = DLL + MUX + ROUNDUP((INIT_SEQUENCES * (MAXDIST + 2))) * * * - software can either wait a fixed amount of time based on the clock * frequencies or poll the SPXX_CLK_STAT[SRXTRN] register. Each * assertion of SRXTRN means that at least one training sequence has * been received. Software can poll, clear, and repeat on this bit to * eventually count all required transitions. * * int cnt = 0; * while (cnt < INIT_TRN) [ * if (SPXX_CLK_STAT[SRXTRN]) [ * cnt++; * SPXX_CLK_STAT[SRXTRN] = 0; * ] * ] * * - subsequent training sequences will normally move the taps only * one position, so the ALPHA equation becomes... * * MAC = (MAXDIST == 0) ? 1 : ROUNDUP((1 * (MAXDIST + 2))) + 1 * * ALPHA = DLL + MUX + MAC * * ergo, ALPHA simplifies to... * * ALPHA = (MAXDIST == 0) ? 3 : MAXDIST + 5 * * DLL and MUX and MAC will always require at least a training sequence * each - even if disabled. If the macro sequence is enabled, an * additional training sequence at the end is necessary. The extra * sequence allows for all training state to be cleared before resuming * normal operation. * * 8) after the receiver gets enough training sequences in order to achieve * deskew lock, set SPXX_TRN4_CTL[CLR_BOOT] * - this disables the continuous macro sequences and puts it into one * macro sequence per training operation * - optionally, the machine can choose to fall out of training if * enough NOPs follow the training operation (require at least 32 NOPs * to follow the training sequence). * * There must be at least MAXDIST + 3 training sequences after the * SPXX_TRN4_CTL[CLR_BOOT] is set or sufficient NOPs from the TX device. * * 9) the TX device continues to send training sequences until the RX * device sends a calendar transition.
This is controlled by * SRXX_COM_CTL[ST_EN]. Other restrictions require other Spi parameters * (e.g. the calendar table) to be set up before this bit can be enabled. * Once the entire interface is properly programmed, software writes * SRXX_COM_CTL[INF_EN]. At this point, the Spi4 packets will begin to * be sent into the N2K core and processed by the chip. */ typedef union { uint64_t u64; struct cvmx_spxx_trn4_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_13_63 : 51; uint64_t trntest : 1; /**< Training Test Mode This bit is only for initial bringup (spx_csr__spi4_trn_test_mode) */ uint64_t jitter : 3; /**< Accounts for jitter when the macro sequence is locking. The value is how many consecutive transitions before declaring an edge. Minimum value is 1. This parameter must be set for Spi4 mode using auto-bit deskew. (spx_csr__spi4_mac_jitter) */ uint64_t clr_boot : 1; /**< Clear the macro boot sequence mode bit (spx_csr__spi4_mac_clr_boot) */ uint64_t set_boot : 1; /**< Enable the macro boot sequence mode bit (spx_csr__spi4_mac_set_boot) */ uint64_t maxdist : 5; /**< This field defines how far from center the deskew logic will search in a single macro sequence (spx_csr__spi4_mac_iters) */ uint64_t macro_en : 1; /**< Allow the macro sequence to center the sample point in the data window through hardware (spx_csr__spi4_mac_trn_en) */ uint64_t mux_en : 1; /**< Enable the hardware machine that selects the proper coarse FLOP selects (spx_csr__spi4_mux_trn_en) */ #else uint64_t mux_en : 1; uint64_t macro_en : 1; uint64_t maxdist : 5; uint64_t set_boot : 1; uint64_t clr_boot : 1; uint64_t jitter : 3; uint64_t trntest : 1; uint64_t reserved_13_63 : 51; #endif } s; struct cvmx_spxx_trn4_ctl_s cn38xx; struct cvmx_spxx_trn4_ctl_s cn38xxp2; struct cvmx_spxx_trn4_ctl_s cn58xx; struct cvmx_spxx_trn4_ctl_s cn58xxp1; } cvmx_spxx_trn4_ctl_t; /** * cvmx_spx0_pll_bw_ctl */ typedef union { uint64_t u64; struct cvmx_spx0_pll_bw_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t bw_ctl : 5; /**< Core PLL bandwidth control */ #else uint64_t bw_ctl : 5; uint64_t reserved_5_63 : 59; #endif } s; struct cvmx_spx0_pll_bw_ctl_s cn38xx; struct cvmx_spx0_pll_bw_ctl_s cn38xxp2; } cvmx_spx0_pll_bw_ctl_t; /** * cvmx_spx0_pll_setting */ typedef union { uint64_t u64; struct cvmx_spx0_pll_setting_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_17_63 : 47; uint64_t setting : 17; /**< Core PLL setting */ #else uint64_t setting : 17; uint64_t reserved_17_63 : 47; #endif } s; struct cvmx_spx0_pll_setting_s cn38xx; struct cvmx_spx0_pll_setting_s cn38xxp2; } cvmx_spx0_pll_setting_t; /** * cvmx_srx#_com_ctl * * SRX_COM_CTL - Spi receive common control * * * Notes: * Restrictions: * Both the calendar table and the LEN and M parameters must be completely * set up before the Interface enable (INF_EN) and Status channel * enable (ST_EN) bits are asserted. */ typedef union { uint64_t u64; struct cvmx_srxx_com_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t prts : 4; /**< Number of ports in the receiver (write: ports - 1) - 0: 1 port - 1: 2 ports - 2: 3 ports - ... - 15: 16 ports */ uint64_t st_en : 1; /**< Status channel enabled This is to allow configs without a status channel. This bit should not be modified once the interface is enabled. */ uint64_t reserved_1_2 : 2; uint64_t inf_en : 1; /**< Interface enable The master switch that enables the entire interface. SRX will not validate any data until this bit is set.
This bit should not be modified once the interface is enabled. */ #else uint64_t inf_en : 1; uint64_t reserved_1_2 : 2; uint64_t st_en : 1; uint64_t prts : 4; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_srxx_com_ctl_s cn38xx; struct cvmx_srxx_com_ctl_s cn38xxp2; struct cvmx_srxx_com_ctl_s cn58xx; struct cvmx_srxx_com_ctl_s cn58xxp1; } cvmx_srxx_com_ctl_t; /** * cvmx_srx#_ign_rx_full * * SRX_IGN_RX_FULL - Ignore RX FIFO backpressure * * * Notes: * * IGNORE * If a device cannot or should not assert backpressure, then setting IGNORE * will force STARVING status on the status channel for all ports. This * eliminates any back pressure from N2. * * This implies that it's OK to drop packets when the FIFOs fill up. * * A side effect of this mode is that the TPA Watcher will effectively be * disabled. Since the IGNORE mode forces all TPA lines asserted, the TPA * Watcher will never find a cycle where the TPA for the selected port is * deasserted in order to increment its count. */ typedef union { uint64_t u64; struct cvmx_srxx_ign_rx_full_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t ignore : 16; /**< This port should ignore backpressure hints from GMX when the RX FIFO fills up - 0: Use GMX backpressure - 1: Ignore GMX backpressure */ #else uint64_t ignore : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_srxx_ign_rx_full_s cn38xx; struct cvmx_srxx_ign_rx_full_s cn38xxp2; struct cvmx_srxx_ign_rx_full_s cn58xx; struct cvmx_srxx_ign_rx_full_s cn58xxp1; } cvmx_srxx_ign_rx_full_t; /** * cvmx_srx#_spi4_cal# * * specify the RSL base addresses for the block * SRX_SPI4_CAL - Spi4 Calendar table * direct_calendar_write / direct_calendar_read * * Notes: * There are 32 calendar table CSRs, each containing 4 entries for a * total of 128 entries. In the above definition... * * n = calendar table offset * 4 * * For example, offset 0x00 contains the calendar table entries 0, 1, 2, 3 * (with n == 0). Offset 0x10 is the 16th CSR in the calendar table * and would contain entries (16*4) = 64, 65, 66, and 67. * * Restrictions: * Calendar table entry accesses (read or write) can only occur * if the interface is disabled. All other accesses will be * unpredictable. * * Both the calendar table and the LEN and M parameters must be * completely set up before the Interface enable (INF_EN) and * Status channel enable (ST_EN) bits are asserted. */ typedef union { uint64_t u64; struct cvmx_srxx_spi4_calx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_17_63 : 47; uint64_t oddpar : 1; /**< Odd parity over SRX_SPI4_CAL[15:0] (^SRX_SPI4_CAL[16:0] === 1'b1) */ uint64_t prt3 : 4; /**< Status for port n+3 */ uint64_t prt2 : 4; /**< Status for port n+2 */ uint64_t prt1 : 4; /**< Status for port n+1 */ uint64_t prt0 : 4; /**< Status for port n+0 */ #else uint64_t prt0 : 4; uint64_t prt1 : 4; uint64_t prt2 : 4; uint64_t prt3 : 4; uint64_t oddpar : 1; uint64_t reserved_17_63 : 47; #endif } s; struct cvmx_srxx_spi4_calx_s cn38xx; struct cvmx_srxx_spi4_calx_s cn38xxp2; struct cvmx_srxx_spi4_calx_s cn58xx; struct cvmx_srxx_spi4_calx_s cn58xxp1; } cvmx_srxx_spi4_calx_t; /** * cvmx_srx#_spi4_stat * * SRX_SPI4_STAT - Spi4 status channel control * * * Notes: * Restrictions: * Both the calendar table and the LEN and M parameters must be * completely set up before the Interface enable (INF_EN) and * Status channel enable (ST_EN) bits are asserted.
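 *
 * A hedged sketch of filling one calendar CSR, including the ODDPAR
 * computation described above (assuming the generic cvmx_write_csr()
 * helper and a CVMX_SRXX_SPI4_CALX(offset, block) address macro like those
 * in the companion cvmx address headers; the port numbering is illustrative):
 *
 *   cvmx_srxx_spi4_calx_t cal;
 *   cal.u64 = 0;
 *   cal.s.prt0 = 0;   // entries n+0 .. n+3
 *   cal.s.prt1 = 1;
 *   cal.s.prt2 = 2;
 *   cal.s.prt3 = 3;
 *   uint64_t p = cal.u64 & 0xffff;        // parity over bits [15:0]
 *   p ^= p >> 8; p ^= p >> 4; p ^= p >> 2; p ^= p >> 1;
 *   cal.s.oddpar = !(p & 1);              // make the XOR of [16:0] equal 1
 *   cvmx_write_csr(CVMX_SRXX_SPI4_CALX(0, 0), cal.u64);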
* * Current rev only supports LVTTL status IO */ typedef union { uint64_t u64; struct cvmx_srxx_spi4_stat_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t m : 8; /**< CALENDAR_M (from spi4.2 spec) */ uint64_t reserved_7_7 : 1; uint64_t len : 7; /**< CALENDAR_LEN (from spi4.2 spec) */ #else uint64_t len : 7; uint64_t reserved_7_7 : 1; uint64_t m : 8; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_srxx_spi4_stat_s cn38xx; struct cvmx_srxx_spi4_stat_s cn38xxp2; struct cvmx_srxx_spi4_stat_s cn58xx; struct cvmx_srxx_spi4_stat_s cn58xxp1; } cvmx_srxx_spi4_stat_t; /** * cvmx_srx#_sw_tick_ctl * * SRX_SW_TICK_CTL - Create a software tick of Spi4 data. A write to this register will create a data tick. * */ typedef union { uint64_t u64; struct cvmx_srxx_sw_tick_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_14_63 : 50; uint64_t eop : 1; /**< SW Tick EOP (PASS3 only) */ uint64_t sop : 1; /**< SW Tick SOP (PASS3 only) */ uint64_t mod : 4; /**< SW Tick MOD - valid byte count (PASS3 only) */ uint64_t opc : 4; /**< SW Tick ERR - packet had an error (PASS3 only) */ uint64_t adr : 4; /**< SW Tick port address (PASS3 only) */ #else uint64_t adr : 4; uint64_t opc : 4; uint64_t mod : 4; uint64_t sop : 1; uint64_t eop : 1; uint64_t reserved_14_63 : 50; #endif } s; struct cvmx_srxx_sw_tick_ctl_s cn38xx; struct cvmx_srxx_sw_tick_ctl_s cn58xx; struct cvmx_srxx_sw_tick_ctl_s cn58xxp1; } cvmx_srxx_sw_tick_ctl_t; /** * cvmx_srx#_sw_tick_dat * * SRX_SW_TICK_DAT - Create a software tick of Spi4 data * */ typedef union { uint64_t u64; struct cvmx_srxx_sw_tick_dat_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t dat : 64; /**< Data tick when SRX_SW_TICK_CTL is written (PASS3 only) */ #else uint64_t dat : 64; #endif } s; struct cvmx_srxx_sw_tick_dat_s cn38xx; struct cvmx_srxx_sw_tick_dat_s cn58xx; struct cvmx_srxx_sw_tick_dat_s cn58xxp1; } cvmx_srxx_sw_tick_dat_t; /** * cvmx_stx#_arb_ctl * * STX_ARB_CTL - Spi transmit arbitration control * * * Notes: * If STX_ARB_CTL[MINTRN] is set in Spi4 mode, then the data_max_t * parameter will have to be adjusted. Please see the * STX_SPI4_DAT[MAX_T] section for additional information. In * addition, the min_burst can only be guaranteed on the initial data * burst of a given packet (i.e. the first data burst which contains * the SOP tick). All subsequent bursts could be truncated by training * sequences at any point during transmission and could be arbitrarily * small. This mode is only for use in Spi4 mode. */ typedef union { uint64_t u64; struct cvmx_stxx_arb_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_6_63 : 58; uint64_t mintrn : 1; /**< Hold off training cycles until STX_MIN_BST[MINB] is satisfied */ uint64_t reserved_4_4 : 1; uint64_t igntpa : 1; /**< User switch to ignore any TPA information from the Spi interface. This CSR forces all TPA terms to be masked out. It is only intended as a backdoor or debug feature. */ uint64_t reserved_0_2 : 3; #else uint64_t reserved_0_2 : 3; uint64_t igntpa : 1; uint64_t reserved_4_4 : 1; uint64_t mintrn : 1; uint64_t reserved_6_63 : 58; #endif } s; struct cvmx_stxx_arb_ctl_s cn38xx; struct cvmx_stxx_arb_ctl_s cn38xxp2; struct cvmx_stxx_arb_ctl_s cn58xx; struct cvmx_stxx_arb_ctl_s cn58xxp1; } cvmx_stxx_arb_ctl_t; /** * cvmx_stx#_bckprs_cnt * * Notes: * This register reports the total number of cycles (STX data clks - * stx_clk) in which the port defined in STX_STAT_CTL[BCKPRS] has lost TPA * or is otherwise receiving backpressure.
* * In Spi4 mode, this is defined as a loss of TPA which is indicated when * the receiving device reports SATISFIED for the given port. The calendar * status is brought into N2 on the spi4_tx*_sclk and synchronized into the * N2 Spi TX clock domain which is 1/2 the frequency of the spi4_tx*_dclk * clock (internally, this is the stx_clk). The counter will update on the * rising edge in which backpressure is reported. * * This register will be cleared when software writes all '1's to * the STX_BCKPRS_CNT. */ typedef union { uint64_t u64; struct cvmx_stxx_bckprs_cnt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t cnt : 32; /**< Number of cycles when back-pressure is received for port defined in STX_STAT_CTL[BCKPRS] */ #else uint64_t cnt : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_stxx_bckprs_cnt_s cn38xx; struct cvmx_stxx_bckprs_cnt_s cn38xxp2; struct cvmx_stxx_bckprs_cnt_s cn58xx; struct cvmx_stxx_bckprs_cnt_s cn58xxp1; } cvmx_stxx_bckprs_cnt_t; /** * cvmx_stx#_com_ctl * * STX_COM_CTL - TX Common Control Register * * * Notes: * Restrictions: * Both the calendar table and the LEN and M parameters must be * completely set up before the Interface enable (INF_EN) and * Status channel enable (ST_EN) bits are asserted. */ typedef union { uint64_t u64; struct cvmx_stxx_com_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t st_en : 1; /**< Status channel enabled */ uint64_t reserved_1_2 : 2; uint64_t inf_en : 1; /**< Interface enable */ #else uint64_t inf_en : 1; uint64_t reserved_1_2 : 2; uint64_t st_en : 1; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_stxx_com_ctl_s cn38xx; struct cvmx_stxx_com_ctl_s cn38xxp2; struct cvmx_stxx_com_ctl_s cn58xx; struct cvmx_stxx_com_ctl_s cn58xxp1; } cvmx_stxx_com_ctl_t; /** * cvmx_stx#_dip_cnt * * Notes: * * DIPMAX * This counts the number of consecutive DIP2 states in which the * received DIP2 is bad. The expected range is 1-15 cycles with the * value of 0 meaning disabled. * * * FRMMAX * This counts the number of consecutive unexpected framing pattern (11) * states. The expected range is 1-15 cycles with the value of 0 meaning * disabled. */ typedef union { uint64_t u64; struct cvmx_stxx_dip_cnt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t frmmax : 4; /**< Number of consecutive unexpected framing patterns before loss of sync */ uint64_t dipmax : 4; /**< Number of consecutive DIP2 errors before loss of sync */ #else uint64_t dipmax : 4; uint64_t frmmax : 4; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_stxx_dip_cnt_s cn38xx; struct cvmx_stxx_dip_cnt_s cn38xxp2; struct cvmx_stxx_dip_cnt_s cn58xx; struct cvmx_stxx_dip_cnt_s cn58xxp1; } cvmx_stxx_dip_cnt_t; /** * cvmx_stx#_ign_cal * * STX_IGN_CAL - Ignore Calendar Status from Spi4 Status Channel * */ typedef union { uint64_t u64; struct cvmx_stxx_ign_cal_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t igntpa : 16; /**< Ignore Calendar Status from Spi4 Status Channel per Spi4 port - 0: Use the status channel info - 1: Grant the given port MAX_BURST1 credits */ #else uint64_t igntpa : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_stxx_ign_cal_s cn38xx; struct cvmx_stxx_ign_cal_s cn38xxp2; struct cvmx_stxx_ign_cal_s cn58xx; struct cvmx_stxx_ign_cal_s cn58xxp1; } cvmx_stxx_ign_cal_t; /** * cvmx_stx#_int_msk * * Notes: * If the bit is enabled, then the corresponding exception condition will * result in an interrupt to the system.
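 *
 * Illustrative sketch of enabling the recommended TX interrupt sources
 * (assuming the generic cvmx_write_csr() helper and a CVMX_STXX_INT_MSK(block)
 * address macro like those in the companion cvmx address headers):
 *
 *   cvmx_stxx_int_msk_t msk;
 *   msk.u64 = 0;
 *   msk.s.ovrbst = 1;   // the fatal conditions discussed under STX_INT_REG
 *   msk.s.datovr = 1;
 *   msk.s.nosync = 1;
 *   msk.s.frmerr = 1;
 *   cvmx_write_csr(CVMX_STXX_INT_MSK(0), msk.u64);  // Spi block 0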
*/ typedef union { uint64_t u64; struct cvmx_stxx_int_msk_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t frmerr : 1; /**< FRMCNT has exceeded STX_DIP_CNT[FRMMAX] */ uint64_t unxfrm : 1; /**< Unexpected framing sequence */ uint64_t nosync : 1; /**< ERRCNT has exceeded STX_DIP_CNT[DIPMAX] */ uint64_t diperr : 1; /**< DIP2 error on the Spi4 Status channel */ uint64_t datovr : 1; /**< Spi4 FIFO overflow error */ uint64_t ovrbst : 1; /**< Transmit packet burst too big */ uint64_t calpar1 : 1; /**< STX Calendar Table Parity Error Bank1 */ uint64_t calpar0 : 1; /**< STX Calendar Table Parity Error Bank0 */ #else uint64_t calpar0 : 1; uint64_t calpar1 : 1; uint64_t ovrbst : 1; uint64_t datovr : 1; uint64_t diperr : 1; uint64_t nosync : 1; uint64_t unxfrm : 1; uint64_t frmerr : 1; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_stxx_int_msk_s cn38xx; struct cvmx_stxx_int_msk_s cn38xxp2; struct cvmx_stxx_int_msk_s cn58xx; struct cvmx_stxx_int_msk_s cn58xxp1; } cvmx_stxx_int_msk_t; /** * cvmx_stx#_int_reg * * Notes: * * CALPAR0 * This bit indicates that the Spi4 calendar table encountered a parity * error on bank0 of the calendar table memory. This error bit is * associated with the calendar table on the TX interface - the interface * that drives the Spi databus. The calendar table is used in Spi4 mode * when using the status channel. Parity errors can occur during normal * operation when the calendar table is constantly being read for the port * information, or during initialization time, when the user has access. * This error will force the interface to the reset state and * begin driving training sequences. The status channel will also reset. * Software must follow the init sequence to resynch the interface. This * includes toggling INF_EN which will cancel all outstanding accumulated * credits. * * * CALPAR1 * Identical to CALPAR0 except that it indicates that the error occurred * on bank1 (instead of bank0). * * * OVRBST * STX can track up to a 512KB data burst. Any packet larger than that is * illegal and will cause confusion in the STX state machine. BMI is * responsible for throwing away these out of control packets from the * input and the Execs should never generate them on the output. This is * a fatal error and should have STX_INT_SYNC[OVRBST] set. * * * DATOVR * FIFO where the Spi4 data ramps up to its transmit frequency has * overflowed. This is a fatal error and should have * STX_INT_SYNC[DATOVR] set. * * * DIPERR * This bit will fire if any DIP2 error is caught by the Spi4 status * channel. * * * NOSYNC * This bit indicates that the number of consecutive DIP2 errors exceeds * STX_DIP_CNT[DIPMAX] and that the interface should be taken down. The * datapath will be notified and send continuous training sequences until * software resynchronizes the interface. This error condition should * have STX_INT_SYNC[NOSYNC] set. * * * UNXFRM * Unexpected framing data was seen on the status channel. * * * FRMERR * This bit indicates that the number of consecutive unexpected framing * sequences exceeds STX_DIP_CNT[FRMMAX] and that the interface should be taken * down. The datapath will be notified and send continuous training * sequences until software resynchronizes the interface. This error * condition should have STX_INT_SYNC[FRMERR] set. * * * SYNCERR * Indicates that an exception marked in STX_INT_SYNC has occurred and the * TX datapath is disabled.
It is recommended that the OVRBST, DATOVR, * NOSYNC, and FRMERR error conditions all have their bits set in the * STX_INT_SYNC register. */ typedef union { uint64_t u64; struct cvmx_stxx_int_reg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_9_63 : 55; uint64_t syncerr : 1; /**< Interface encountered a fatal error */ uint64_t frmerr : 1; /**< FRMCNT has exceeded STX_DIP_CNT[FRMMAX] */ uint64_t unxfrm : 1; /**< Unexpected framing sequence */ uint64_t nosync : 1; /**< ERRCNT has exceeded STX_DIP_CNT[DIPMAX] */ uint64_t diperr : 1; /**< DIP2 error on the Spi4 Status channel */ uint64_t datovr : 1; /**< Spi4 FIFO overflow error */ uint64_t ovrbst : 1; /**< Transmit packet burst too big */ uint64_t calpar1 : 1; /**< STX Calendar Table Parity Error Bank1 */ uint64_t calpar0 : 1; /**< STX Calendar Table Parity Error Bank0 */ #else uint64_t calpar0 : 1; uint64_t calpar1 : 1; uint64_t ovrbst : 1; uint64_t datovr : 1; uint64_t diperr : 1; uint64_t nosync : 1; uint64_t unxfrm : 1; uint64_t frmerr : 1; uint64_t syncerr : 1; uint64_t reserved_9_63 : 55; #endif } s; struct cvmx_stxx_int_reg_s cn38xx; struct cvmx_stxx_int_reg_s cn38xxp2; struct cvmx_stxx_int_reg_s cn58xx; struct cvmx_stxx_int_reg_s cn58xxp1; } cvmx_stxx_int_reg_t; /** * cvmx_stx#_int_sync * * Notes: * If the bit is enabled, then the corresponding exception condition is flagged * to be fatal. In Spi4 mode, the exception condition will result in a loss * of sync condition on the Spi4 interface and the datapath will send * continuous training sequences. * * It is recommended that software set the OVRBST, DATOVR, NOSYNC, and * FRMERR errors as synchronization events. Software is free to * synchronize the bus on other conditions, but this is the minimum * recommended set. */ typedef union { uint64_t u64; struct cvmx_stxx_int_sync_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_8_63 : 56; uint64_t frmerr : 1; /**< FRMCNT has exceeded STX_DIP_CNT[FRMMAX] */ uint64_t unxfrm : 1; /**< Unexpected framing sequence */ uint64_t nosync : 1; /**< ERRCNT has exceeded STX_DIP_CNT[DIPMAX] */ uint64_t diperr : 1; /**< DIP2 error on the Spi4 Status channel */ uint64_t datovr : 1; /**< Spi4 FIFO overflow error */ uint64_t ovrbst : 1; /**< Transmit packet burst too big */ uint64_t calpar1 : 1; /**< STX Calendar Table Parity Error Bank1 */ uint64_t calpar0 : 1; /**< STX Calendar Table Parity Error Bank0 */ #else uint64_t calpar0 : 1; uint64_t calpar1 : 1; uint64_t ovrbst : 1; uint64_t datovr : 1; uint64_t diperr : 1; uint64_t nosync : 1; uint64_t unxfrm : 1; uint64_t frmerr : 1; uint64_t reserved_8_63 : 56; #endif } s; struct cvmx_stxx_int_sync_s cn38xx; struct cvmx_stxx_int_sync_s cn38xxp2; struct cvmx_stxx_int_sync_s cn58xx; struct cvmx_stxx_int_sync_s cn58xxp1; } cvmx_stxx_int_sync_t; /** * cvmx_stx#_min_bst * * STX_MIN_BST - Min Burst to enforce when inserting training sequence * */ typedef union { uint64_t u64; struct cvmx_stxx_min_bst_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_9_63 : 55; uint64_t minb : 9; /**< When STX_ARB_CTL[MINTRN] is set, MINB indicates the number of 8B blocks to send before inserting a training sequence. Normally MINB will be set to GMX_TX_SPI_THRESH[THRESH]. MINB should always be set to an even number (i.e.
multiple of 16B) */ #else uint64_t minb : 9; uint64_t reserved_9_63 : 55; #endif } s; struct cvmx_stxx_min_bst_s cn38xx; struct cvmx_stxx_min_bst_s cn38xxp2; struct cvmx_stxx_min_bst_s cn58xx; struct cvmx_stxx_min_bst_s cn58xxp1; } cvmx_stxx_min_bst_t; /** * cvmx_stx#_spi4_cal# * * specify the RSL base addresses for the block * STX_SPI4_CAL - Spi4 Calendar table * direct_calendar_write / direct_calendar_read * * Notes: * There are 32 calendar table CSRs, each containing 4 entries for a * total of 128 entries. In the above definition... * * n = calendar table offset * 4 * * For example, offset 0x00 contains the calendar table entries 0, 1, 2, 3 * (with n == 0). Offset 0x10 is the 16th CSR in the calendar table * and would contain entries (16*4) = 64, 65, 66, and 67. * * Restrictions: * Calendar table entry accesses (read or write) can only occur * if the interface is disabled. All other accesses will be * unpredictable. * * Both the calendar table and the LEN and M parameters must be * completely set up before the Interface enable (INF_EN) and * Status channel enable (ST_EN) bits are asserted. */ typedef union { uint64_t u64; struct cvmx_stxx_spi4_calx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_17_63 : 47; uint64_t oddpar : 1; /**< Odd parity over STX_SPI4_CAL[15:0] (^STX_SPI4_CAL[16:0] === 1'b1) */ uint64_t prt3 : 4; /**< Status for port n+3 */ uint64_t prt2 : 4; /**< Status for port n+2 */ uint64_t prt1 : 4; /**< Status for port n+1 */ uint64_t prt0 : 4; /**< Status for port n+0 */ #else uint64_t prt0 : 4; uint64_t prt1 : 4; uint64_t prt2 : 4; uint64_t prt3 : 4; uint64_t oddpar : 1; uint64_t reserved_17_63 : 47; #endif } s; struct cvmx_stxx_spi4_calx_s cn38xx; struct cvmx_stxx_spi4_calx_s cn38xxp2; struct cvmx_stxx_spi4_calx_s cn58xx; struct cvmx_stxx_spi4_calx_s cn58xxp1; } cvmx_stxx_spi4_calx_t; /** * cvmx_stx#_spi4_dat * * STX_SPI4_DAT - Spi4 datapath channel control register * * * Notes: * Restrictions: * * DATA_MAX_T must be in MOD 4 cycles * * * DATA_MAX_T must be at least 0x20 * * * DATA_MAX_T == 0 or ALPHA == 0 will disable the training sequence * * * If STX_ARB_CTL[MINTRN] is set, then training cycles will stall * waiting for min bursts to complete. In the worst case, this will * add the entire min burst transmission time to the interval between * training sequences. The observed MAX_T on the Spi4 bus will be... * * STX_SPI4_DAT[MAX_T] + (STX_MIN_BST[MINB] * 4) * * If STX_ARB_CTL[MINTRN] is set in Spi4 mode, then the data_max_t * parameter will have to be adjusted. Please see the * STX_SPI4_DAT[MAX_T] section for additional information. In * addition, the min_burst can only be guaranteed on the initial data * burst of a given packet (i.e. the first data burst which contains * the SOP tick). All subsequent bursts could be truncated by training * sequences at any point during transmission and could be arbitrarily * small. This mode is only for use in Spi4 mode.
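 *
 * A small illustrative sketch (the values are placeholders that merely
 * satisfy the restrictions above, not recommendations; it assumes the
 * generic cvmx_write_csr() helper and a CVMX_STXX_SPI4_DAT(block) address
 * macro like those in the companion cvmx address headers):
 *
 *   cvmx_stxx_spi4_dat_t dat;
 *   dat.u64 = 0;
 *   dat.s.max_t = 0x20;  // a multiple of 4 and at least 0x20
 *   dat.s.alpha = 2;     // 0 would disable the training sequence
 *   cvmx_write_csr(CVMX_STXX_SPI4_DAT(0), dat.u64);  // Spi block 0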
*/ typedef union { uint64_t u64; struct cvmx_stxx_spi4_dat_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t alpha : 16; /**< alpha (from spi4.2 spec) */ uint64_t max_t : 16; /**< DATA_MAX_T (from spi4.2 spec) */ #else uint64_t max_t : 16; uint64_t alpha : 16; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_stxx_spi4_dat_s cn38xx; struct cvmx_stxx_spi4_dat_s cn38xxp2; struct cvmx_stxx_spi4_dat_s cn58xx; struct cvmx_stxx_spi4_dat_s cn58xxp1; } cvmx_stxx_spi4_dat_t; /** * cvmx_stx#_spi4_stat * * STX_SPI4_STAT - Spi4 status channel control register * * * Notes: * Restrictions: * Both the calendar table and the LEN and M parameters must be * completely set up before the Interface enable (INF_EN) and * Status channel enable (ST_EN) bits are asserted. * * The calendar table will only be enabled when LEN > 0. * * Current rev will only support LVTTL status IO. */ typedef union { uint64_t u64; struct cvmx_stxx_spi4_stat_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t m : 8; /**< CALENDAR_M (from spi4.2 spec) */ uint64_t reserved_7_7 : 1; uint64_t len : 7; /**< CALENDAR_LEN (from spi4.2 spec) */ #else uint64_t len : 7; uint64_t reserved_7_7 : 1; uint64_t m : 8; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_stxx_spi4_stat_s cn38xx; struct cvmx_stxx_spi4_stat_s cn38xxp2; struct cvmx_stxx_spi4_stat_s cn58xx; struct cvmx_stxx_spi4_stat_s cn58xxp1; } cvmx_stxx_spi4_stat_t; /** * cvmx_stx#_stat_bytes_hi */ typedef union { uint64_t u64; struct cvmx_stxx_stat_bytes_hi_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t cnt : 32; /**< Number of bytes sent (CNT[63:32]) */ #else uint64_t cnt : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_stxx_stat_bytes_hi_s cn38xx; struct cvmx_stxx_stat_bytes_hi_s cn38xxp2; struct cvmx_stxx_stat_bytes_hi_s cn58xx; struct cvmx_stxx_stat_bytes_hi_s cn58xxp1; } cvmx_stxx_stat_bytes_hi_t; /** * cvmx_stx#_stat_bytes_lo */ typedef union { uint64_t u64; struct cvmx_stxx_stat_bytes_lo_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t cnt : 32; /**< Number of bytes sent (CNT[31:0]) */ #else uint64_t cnt : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_stxx_stat_bytes_lo_s cn38xx; struct cvmx_stxx_stat_bytes_lo_s cn38xxp2; struct cvmx_stxx_stat_bytes_lo_s cn58xx; struct cvmx_stxx_stat_bytes_lo_s cn58xxp1; } cvmx_stxx_stat_bytes_lo_t; /** * cvmx_stx#_stat_ctl */ typedef union { uint64_t u64; struct cvmx_stxx_stat_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_5_63 : 59; uint64_t clr : 1; /**< Clear all statistics counters - STX_STAT_PKT_XMT - STX_STAT_BYTES_HI - STX_STAT_BYTES_LO */ uint64_t bckprs : 4; /**< The selected port for STX_BCKPRS_CNT */ #else uint64_t bckprs : 4; uint64_t clr : 1; uint64_t reserved_5_63 : 59; #endif } s; struct cvmx_stxx_stat_ctl_s cn38xx; struct cvmx_stxx_stat_ctl_s cn38xxp2; struct cvmx_stxx_stat_ctl_s cn58xx; struct cvmx_stxx_stat_ctl_s cn58xxp1; } cvmx_stxx_stat_ctl_t; /** * cvmx_stx#_stat_pkt_xmt */ typedef union { uint64_t u64; struct cvmx_stxx_stat_pkt_xmt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t cnt : 32; /**< Number of packets sent */ #else uint64_t cnt : 32; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_stxx_stat_pkt_xmt_s cn38xx; struct cvmx_stxx_stat_pkt_xmt_s cn38xxp2; struct cvmx_stxx_stat_pkt_xmt_s cn58xx; struct cvmx_stxx_stat_pkt_xmt_s cn58xxp1; } cvmx_stxx_stat_pkt_xmt_t; /** * cvmx_tim_mem_debug0 * * Notes: * Internal per-ring state intended for
debug use only - tim.ctl[47:0] * This CSR is a memory of 16 entries, and thus, the TIM_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. */ typedef union { uint64_t u64; struct cvmx_tim_mem_debug0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_48_63 : 16; uint64_t ena : 1; /**< Ring timer enable */ uint64_t reserved_46_46 : 1; uint64_t count : 22; /**< Time offset for the ring Set to INTERVAL and counts down by 1 every 1024 cycles when ENA==1. The HW forces a bucket traversal (and resets COUNT to INTERVAL) whenever the decrement would cause COUNT to go negative. COUNT is unpredictable whenever ENA==0. COUNT is reset to INTERVAL whenever TIM_MEM_RING1 is written for the ring. */ uint64_t reserved_22_23 : 2; uint64_t interval : 22; /**< Timer interval - 1 */ #else uint64_t interval : 22; uint64_t reserved_22_23 : 2; uint64_t count : 22; uint64_t reserved_46_46 : 1; uint64_t ena : 1; uint64_t reserved_48_63 : 16; #endif } s; struct cvmx_tim_mem_debug0_s cn30xx; struct cvmx_tim_mem_debug0_s cn31xx; struct cvmx_tim_mem_debug0_s cn38xx; struct cvmx_tim_mem_debug0_s cn38xxp2; struct cvmx_tim_mem_debug0_s cn50xx; struct cvmx_tim_mem_debug0_s cn52xx; struct cvmx_tim_mem_debug0_s cn52xxp1; struct cvmx_tim_mem_debug0_s cn56xx; struct cvmx_tim_mem_debug0_s cn56xxp1; struct cvmx_tim_mem_debug0_s cn58xx; struct cvmx_tim_mem_debug0_s cn58xxp1; } cvmx_tim_mem_debug0_t; /** * cvmx_tim_mem_debug1 * * Notes: * Internal per-ring state intended for debug use only - tim.sta[63:0] * This CSR is a memory of 16 entries, and thus, the TIM_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. */ typedef union { uint64_t u64; struct cvmx_tim_mem_debug1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t bucket : 13; /**< Current bucket[12:0] Reset to 0 whenever TIM_MEM_RING0 is written for the ring. Incremented (modulo BSIZE) once per bucket traversal. See TIM_MEM_DEBUG2[BUCKET]. */ uint64_t base : 31; /**< Pointer[35:5] to bucket[0] */ uint64_t bsize : 20; /**< Number of buckets - 1 */ #else uint64_t bsize : 20; uint64_t base : 31; uint64_t bucket : 13; #endif } s; struct cvmx_tim_mem_debug1_s cn30xx; struct cvmx_tim_mem_debug1_s cn31xx; struct cvmx_tim_mem_debug1_s cn38xx; struct cvmx_tim_mem_debug1_s cn38xxp2; struct cvmx_tim_mem_debug1_s cn50xx; struct cvmx_tim_mem_debug1_s cn52xx; struct cvmx_tim_mem_debug1_s cn52xxp1; struct cvmx_tim_mem_debug1_s cn56xx; struct cvmx_tim_mem_debug1_s cn56xxp1; struct cvmx_tim_mem_debug1_s cn58xx; struct cvmx_tim_mem_debug1_s cn58xxp1; } cvmx_tim_mem_debug1_t; /** * cvmx_tim_mem_debug2 * * Notes: * Internal per-ring state intended for debug use only - tim.sta[95:64] * This CSR is a memory of 16 entries, and thus, the TIM_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. */ typedef union { uint64_t u64; struct cvmx_tim_mem_debug2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_24_63 : 40; uint64_t cpool : 3; /**< Free list used to free chunks */ uint64_t csize : 13; /**< Number of words per chunk */ uint64_t reserved_7_7 : 1; uint64_t bucket : 7; /**< Current bucket[19:13] See TIM_MEM_DEBUG1[BUCKET]. 
*/ #else uint64_t bucket : 7; uint64_t reserved_7_7 : 1; uint64_t csize : 13; uint64_t cpool : 3; uint64_t reserved_24_63 : 40; #endif } s; struct cvmx_tim_mem_debug2_s cn30xx; struct cvmx_tim_mem_debug2_s cn31xx; struct cvmx_tim_mem_debug2_s cn38xx; struct cvmx_tim_mem_debug2_s cn38xxp2; struct cvmx_tim_mem_debug2_s cn50xx; struct cvmx_tim_mem_debug2_s cn52xx; struct cvmx_tim_mem_debug2_s cn52xxp1; struct cvmx_tim_mem_debug2_s cn56xx; struct cvmx_tim_mem_debug2_s cn56xxp1; struct cvmx_tim_mem_debug2_s cn58xx; struct cvmx_tim_mem_debug2_s cn58xxp1; } cvmx_tim_mem_debug2_t; /** * cvmx_tim_mem_ring0 * * Notes: * TIM_MEM_RING0 must not be written for a ring when TIM_MEM_RING1[ENA] is set for the ring. * Every write to TIM_MEM_RING0 clears the current bucket for the ring. (The current bucket is * readable via TIM_MEM_DEBUG2[BUCKET],TIM_MEM_DEBUG1[BUCKET].) * BASE is a 32-byte aligned pointer[35:0]. Only pointer[35:5] are stored because pointer[4:0] = 0. * This CSR is a memory of 16 entries, and thus, the TIM_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. */ typedef union { uint64_t u64; struct cvmx_tim_mem_ring0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_55_63 : 9; uint64_t first_bucket : 31; /**< Pointer[35:5] to bucket[0] */ uint64_t num_buckets : 20; /**< Number of buckets - 1 */ uint64_t ring : 4; /**< Ring ID */ #else uint64_t ring : 4; uint64_t num_buckets : 20; uint64_t first_bucket : 31; uint64_t reserved_55_63 : 9; #endif } s; struct cvmx_tim_mem_ring0_s cn30xx; struct cvmx_tim_mem_ring0_s cn31xx; struct cvmx_tim_mem_ring0_s cn38xx; struct cvmx_tim_mem_ring0_s cn38xxp2; struct cvmx_tim_mem_ring0_s cn50xx; struct cvmx_tim_mem_ring0_s cn52xx; struct cvmx_tim_mem_ring0_s cn52xxp1; struct cvmx_tim_mem_ring0_s cn56xx; struct cvmx_tim_mem_ring0_s cn56xxp1; struct cvmx_tim_mem_ring0_s cn58xx; struct cvmx_tim_mem_ring0_s cn58xxp1; } cvmx_tim_mem_ring0_t; /** * cvmx_tim_mem_ring1 * * Notes: * After a 1->0 transition on ENA, the HW will still complete a bucket traversal for the ring * if it was pending or active prior to the transition. (SW must delay to ensure the completion * of the traversal before reprogramming the ring.) * Every write to TIM_MEM_RING1 resets the current time offset for the ring to the INTERVAL value. * (The current time offset for the ring is readable via TIM_MEM_DEBUG0[COUNT].) * CSIZE must be at least 16. It is illegal to program CSIZE to a value that is less than 16. * This CSR is a memory of 16 entries, and thus, the TIM_REG_READ_IDX CSR must be written before any * CSR read operations to this address can be performed. */ typedef union { uint64_t u64; struct cvmx_tim_mem_ring1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_43_63 : 21; uint64_t enable : 1; /**< Ring timer enable When clear, the ring is disabled and TIM will not traverse any new buckets for the ring. 
*/ uint64_t pool : 3; /**< Free list used to free chunks */ uint64_t words_per_chunk : 13; /**< Number of words per chunk */ uint64_t interval : 22; /**< Timer interval - 1, measured in 1024 cycle ticks */ uint64_t ring : 4; /**< Ring ID */ #else uint64_t ring : 4; uint64_t interval : 22; uint64_t words_per_chunk : 13; uint64_t pool : 3; uint64_t enable : 1; uint64_t reserved_43_63 : 21; #endif } s; struct cvmx_tim_mem_ring1_s cn30xx; struct cvmx_tim_mem_ring1_s cn31xx; struct cvmx_tim_mem_ring1_s cn38xx; struct cvmx_tim_mem_ring1_s cn38xxp2; struct cvmx_tim_mem_ring1_s cn50xx; struct cvmx_tim_mem_ring1_s cn52xx; struct cvmx_tim_mem_ring1_s cn52xxp1; struct cvmx_tim_mem_ring1_s cn56xx; struct cvmx_tim_mem_ring1_s cn56xxp1; struct cvmx_tim_mem_ring1_s cn58xx; struct cvmx_tim_mem_ring1_s cn58xxp1; } cvmx_tim_mem_ring1_t; /** * cvmx_tim_reg_bist_result * * Notes: * Access to the internal BiST results * Each bit is the BiST result of an individual memory (per bit, 0=pass and 1=fail). */ typedef union { uint64_t u64; struct cvmx_tim_reg_bist_result_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t sta : 2; /**< BiST result of the STA memories (0=pass, !0=fail) */ uint64_t ncb : 1; /**< BiST result of the NCB memories (0=pass, !0=fail) */ uint64_t ctl : 1; /**< BiST result of the CTL memories (0=pass, !0=fail) */ #else uint64_t ctl : 1; uint64_t ncb : 1; uint64_t sta : 2; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_tim_reg_bist_result_s cn30xx; struct cvmx_tim_reg_bist_result_s cn31xx; struct cvmx_tim_reg_bist_result_s cn38xx; struct cvmx_tim_reg_bist_result_s cn38xxp2; struct cvmx_tim_reg_bist_result_s cn50xx; struct cvmx_tim_reg_bist_result_s cn52xx; struct cvmx_tim_reg_bist_result_s cn52xxp1; struct cvmx_tim_reg_bist_result_s cn56xx; struct cvmx_tim_reg_bist_result_s cn56xxp1; struct cvmx_tim_reg_bist_result_s cn58xx; struct cvmx_tim_reg_bist_result_s cn58xxp1; } cvmx_tim_reg_bist_result_t; /** * cvmx_tim_reg_error * * Notes: * A ring is in error if its interval has elapsed more than once without having been serviced. * During a CSR write to this register, the write data is used as a mask to clear the selected mask * bits (mask'[15:0] = mask[15:0] & ~write_data[15:0]). */ typedef union { uint64_t u64; struct cvmx_tim_reg_error_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t mask : 16; /**< Bit mask indicating the rings in error */ #else uint64_t mask : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_tim_reg_error_s cn30xx; struct cvmx_tim_reg_error_s cn31xx; struct cvmx_tim_reg_error_s cn38xx; struct cvmx_tim_reg_error_s cn38xxp2; struct cvmx_tim_reg_error_s cn50xx; struct cvmx_tim_reg_error_s cn52xx; struct cvmx_tim_reg_error_s cn52xxp1; struct cvmx_tim_reg_error_s cn56xx; struct cvmx_tim_reg_error_s cn56xxp1; struct cvmx_tim_reg_error_s cn58xx; struct cvmx_tim_reg_error_s cn58xxp1; } cvmx_tim_reg_error_t; /** * cvmx_tim_reg_flags * * Notes: * TIM has a counter that causes a periodic tick every 1024 cycles. This counter is shared by all * rings. (Each tick causes the HW to decrement the time offset (i.e. COUNT) for all enabled rings.) * When ENA_TIM==0, the HW stops this shared periodic counter, so there are no more ticks, and there * are no more new bucket traversals (for any ring). * * If ENA_TIM transitions 1->0, TIM will no longer create new bucket traversals, but there may * have been previous ones. If there are ring bucket traversals that were already pending but * not currently active (i.e. 
bucket traversals that need to be done by the HW, but haven't been yet) * during this ENA_TIM 1->0 transition, then these bucket traversals will remain pending until * ENA_TIM is later set to one. Bucket traversals that were already in progress will complete * after the 1->0 ENA_TIM transition, though. */ typedef union { uint64_t u64; struct cvmx_tim_reg_flags_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_3_63 : 61; uint64_t reset : 1; /**< Reset oneshot pulse for free-running structures */ uint64_t enable_dwb : 1; /**< Enables non-zero DontWriteBacks when set. When set, enables the use of DontWriteBacks during the buffer freeing operations. */ uint64_t enable_timers : 1; /**< Enables the TIM section when set When set, TIM is in normal operation. When clear, time is effectively stopped for all rings in TIM. */ #else uint64_t enable_timers : 1; uint64_t enable_dwb : 1; uint64_t reset : 1; uint64_t reserved_3_63 : 61; #endif } s; struct cvmx_tim_reg_flags_s cn30xx; struct cvmx_tim_reg_flags_s cn31xx; struct cvmx_tim_reg_flags_s cn38xx; struct cvmx_tim_reg_flags_s cn38xxp2; struct cvmx_tim_reg_flags_s cn50xx; struct cvmx_tim_reg_flags_s cn52xx; struct cvmx_tim_reg_flags_s cn52xxp1; struct cvmx_tim_reg_flags_s cn56xx; struct cvmx_tim_reg_flags_s cn56xxp1; struct cvmx_tim_reg_flags_s cn58xx; struct cvmx_tim_reg_flags_s cn58xxp1; } cvmx_tim_reg_flags_t; /** * cvmx_tim_reg_int_mask * * Notes: * Note that this CSR is present only in chip revisions beginning with pass2. * When a mask bit is set, the interrupt is enabled. */ typedef union { uint64_t u64; struct cvmx_tim_reg_int_mask_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t mask : 16; /**< Bit mask corresponding to TIM_REG_ERROR.MASK above */ #else uint64_t mask : 16; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_tim_reg_int_mask_s cn30xx; struct cvmx_tim_reg_int_mask_s cn31xx; struct cvmx_tim_reg_int_mask_s cn38xx; struct cvmx_tim_reg_int_mask_s cn38xxp2; struct cvmx_tim_reg_int_mask_s cn50xx; struct cvmx_tim_reg_int_mask_s cn52xx; struct cvmx_tim_reg_int_mask_s cn52xxp1; struct cvmx_tim_reg_int_mask_s cn56xx; struct cvmx_tim_reg_int_mask_s cn56xxp1; struct cvmx_tim_reg_int_mask_s cn58xx; struct cvmx_tim_reg_int_mask_s cn58xxp1; } cvmx_tim_reg_int_mask_t; /** * cvmx_tim_reg_read_idx * * Notes: * Provides the read index during a CSR read operation to any of the CSRs that are physically stored * as memories. The names of these CSRs begin with the prefix "TIM_MEM_". * IDX[7:0] is the read index. INC[7:0] is an increment that is added to IDX[7:0] after any CSR read. * The intended use is to initially write this CSR such that IDX=0 and INC=1. Then, the entire * contents of a CSR memory can be read with consecutive CSR read commands.
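 *
 * A sketch of that intended use (assuming the generic cvmx_read_csr() and
 * cvmx_write_csr() helpers and the CVMX_TIM_REG_READ_IDX/CVMX_TIM_MEM_DEBUG0
 * address macros like those in the companion cvmx address headers):
 *
 *   cvmx_tim_reg_read_idx_t idx;
 *   idx.u64 = 0;
 *   idx.s.index = 0;   // start at entry 0
 *   idx.s.inc = 1;     // auto-advance after every read
 *   cvmx_write_csr(CVMX_TIM_REG_READ_IDX, idx.u64);
 *   for (int ring = 0; ring < 16; ring++)
 *   {
 *       cvmx_tim_mem_debug0_t dbg;
 *       dbg.u64 = cvmx_read_csr(CVMX_TIM_MEM_DEBUG0);  // reads the entry for "ring"
 *   }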
*/ typedef union { uint64_t u64; struct cvmx_tim_reg_read_idx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t inc : 8; /**< Increment to add to current index for next index */ uint64_t index : 8; /**< Index to use for next memory CSR read */ #else uint64_t index : 8; uint64_t inc : 8; uint64_t reserved_16_63 : 48; #endif } s; struct cvmx_tim_reg_read_idx_s cn30xx; struct cvmx_tim_reg_read_idx_s cn31xx; struct cvmx_tim_reg_read_idx_s cn38xx; struct cvmx_tim_reg_read_idx_s cn38xxp2; struct cvmx_tim_reg_read_idx_s cn50xx; struct cvmx_tim_reg_read_idx_s cn52xx; struct cvmx_tim_reg_read_idx_s cn52xxp1; struct cvmx_tim_reg_read_idx_s cn56xx; struct cvmx_tim_reg_read_idx_s cn56xxp1; struct cvmx_tim_reg_read_idx_s cn58xx; struct cvmx_tim_reg_read_idx_s cn58xxp1; } cvmx_tim_reg_read_idx_t; /** * cvmx_tra_bist_status * * TRA_BIST_STATUS = Trace Buffer BiST Status * * Description: */ typedef union { uint64_t u64; struct cvmx_tra_bist_status_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_3_63 : 61; uint64_t tcf : 1; /**< Bist Results for TCF memory - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t tdf1 : 1; /**< Bist Results for TDF memory 1 - 0: GOOD (or bist in progress/never run) - 1: BAD */ uint64_t tdf0 : 1; /**< Bist Results for TDF memory 0 - 0: GOOD (or bist in progress/never run) - 1: BAD */ #else uint64_t tdf0 : 1; uint64_t tdf1 : 1; uint64_t tcf : 1; uint64_t reserved_3_63 : 61; #endif } s; struct cvmx_tra_bist_status_s cn31xx; struct cvmx_tra_bist_status_s cn38xx; struct cvmx_tra_bist_status_s cn38xxp2; struct cvmx_tra_bist_status_s cn52xx; struct cvmx_tra_bist_status_s cn52xxp1; struct cvmx_tra_bist_status_s cn56xx; struct cvmx_tra_bist_status_s cn56xxp1; struct cvmx_tra_bist_status_s cn58xx; struct cvmx_tra_bist_status_s cn58xxp1; } cvmx_tra_bist_status_t; /** * cvmx_tra_ctl * * TRA_CTL = Trace Buffer Control * * Description: * * Notes: * It is illegal to change the values of WRAP, TRIG_CTL, IGNORE_O while tracing (i.e. when ENA=1). * Note that the following fields are present only in chip revisions beginning with pass2: IGNORE_O */ typedef union { uint64_t u64; struct cvmx_tra_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_15_63 : 49; uint64_t ignore_o : 1; /**< Ignore overflow during wrap mode If set and wrapping mode is enabled, then tracing will not stop at the overflow condition. Each write during an overflow will overwrite the oldest, unread entry and the read pointer is incremented by one entry. This bit has no effect if WRAP=0. */ uint64_t mcd0_ena : 1; /**< MCD0 enable If set and any PP sends the MCD0 signal, the tracing is disabled. */ uint64_t mcd0_thr : 1; /**< MCD0_threshold At a fill threshold event, sends an MCD0 wire pulse that can cause cores to enter debug mode, if enabled. This MCD0 wire pulse will not occur while (TRA_INT_STATUS.MCD0_THR == 1). */ uint64_t mcd0_trg : 1; /**< MCD0_trigger At an end trigger event, sends an MCD0 wire pulse that can cause cores to enter debug mode, if enabled. This MCD0 wire pulse will not occur while (TRA_INT_STATUS.MCD0_TRG == 1). */ uint64_t ciu_thr : 1; /**< CIU_threshold When set during a fill threshold event, TRA_INT_STATUS[CIU_THR] is set, which can cause core interrupts, if enabled. */ uint64_t ciu_trg : 1; /**< CIU_trigger When set during an end trigger event, TRA_INT_STATUS[CIU_TRG] is set, which can cause core interrupts, if enabled.
*/ uint64_t full_thr : 2; /**< Full Threshold 0=none 1=1/2 full 2=3/4 full 3=4/4 full */ uint64_t time_grn : 3; /**< Timestamp granularity granularity=8^n cycles, n=0,1,2,3,4,5,6,7 */ uint64_t trig_ctl : 2; /**< Trigger Control Note: trigger events are written to the trace 0=no triggers 1=trigger0=start trigger, trigger1=stop trigger 2=(trigger0 || trigger1)=start trigger 3=(trigger0 || trigger1)=stop trigger */ uint64_t wrap : 1; /**< Wrap mode When WRAP=0, the trace buffer will disable itself after having logged 1024 entries. When WRAP=1, the trace buffer will never disable itself. In this case, tracing may or may not be temporarily suspended during the overflow condition (see IGNORE_O above). 0=do not wrap 1=wrap */ uint64_t ena : 1; /**< Enable Trace Master enable. Tracing only happens when ENA=1. When ENA changes from 0 to 1, the read and write pointers are reset to 0x00 to begin a new trace. The MCD0 event may set ENA=0 (see MCD0_ENA above). When using triggers, tracing occurs only between start and stop triggers (including the triggers themselves). 0=disable 1=enable */ #else uint64_t ena : 1; uint64_t wrap : 1; uint64_t trig_ctl : 2; uint64_t time_grn : 3; uint64_t full_thr : 2; uint64_t ciu_trg : 1; uint64_t ciu_thr : 1; uint64_t mcd0_trg : 1; uint64_t mcd0_thr : 1; uint64_t mcd0_ena : 1; uint64_t ignore_o : 1; uint64_t reserved_15_63 : 49; #endif } s; struct cvmx_tra_ctl_s cn31xx; struct cvmx_tra_ctl_s cn38xx; struct cvmx_tra_ctl_s cn38xxp2; struct cvmx_tra_ctl_s cn52xx; struct cvmx_tra_ctl_s cn52xxp1; struct cvmx_tra_ctl_s cn56xx; struct cvmx_tra_ctl_s cn56xxp1; struct cvmx_tra_ctl_s cn58xx; struct cvmx_tra_ctl_s cn58xxp1; } cvmx_tra_ctl_t; /** * cvmx_tra_cycles_since * * TRA_CYCLES_SINCE = Trace Buffer Cycles Since Last Write, Read/Write pointers * * Description: * * Notes: * This CSR is obsolete. Use TRA_CYCLES_SINCE1 instead.
* */ typedef union { uint64_t u64; struct cvmx_tra_cycles_since_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t cycles : 48; /**< Cycles since the last entry was written */ uint64_t rptr : 8; /**< Read pointer */ uint64_t wptr : 8; /**< Write pointer */ #else uint64_t wptr : 8; uint64_t rptr : 8; uint64_t cycles : 48; #endif } s; struct cvmx_tra_cycles_since_s cn31xx; struct cvmx_tra_cycles_since_s cn38xx; struct cvmx_tra_cycles_since_s cn38xxp2; struct cvmx_tra_cycles_since_s cn52xx; struct cvmx_tra_cycles_since_s cn52xxp1; struct cvmx_tra_cycles_since_s cn56xx; struct cvmx_tra_cycles_since_s cn56xxp1; struct cvmx_tra_cycles_since_s cn58xx; struct cvmx_tra_cycles_since_s cn58xxp1; } cvmx_tra_cycles_since_t; /** * cvmx_tra_cycles_since1 * * TRA_CYCLES_SINCE1 = Trace Buffer Cycles Since Last Write, Read/Write pointers * * Description: */ typedef union { uint64_t u64; struct cvmx_tra_cycles_since1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t cycles : 40; /**< Cycles since the last entry was written */ uint64_t reserved_22_23 : 2; uint64_t rptr : 10; /**< Read pointer */ uint64_t reserved_10_11 : 2; uint64_t wptr : 10; /**< Write pointer */ #else uint64_t wptr : 10; uint64_t reserved_10_11 : 2; uint64_t rptr : 10; uint64_t reserved_22_23 : 2; uint64_t cycles : 40; #endif } s; struct cvmx_tra_cycles_since1_s cn52xx; struct cvmx_tra_cycles_since1_s cn52xxp1; struct cvmx_tra_cycles_since1_s cn56xx; struct cvmx_tra_cycles_since1_s cn56xxp1; struct cvmx_tra_cycles_since1_s cn58xx; struct cvmx_tra_cycles_since1_s cn58xxp1; } cvmx_tra_cycles_since1_t; /** * cvmx_tra_filt_adr_adr * * TRA_FILT_ADR_ADR = Trace Buffer Filter Address Address * * Description: */ typedef union { uint64_t u64; struct cvmx_tra_filt_adr_adr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t adr : 36; /**< Unmasked Address The combination of TRA_FILT_ADR_ADR and TRA_FILT_ADR_MSK is a masked address to enable tracing of only those commands whose masked address matches */ #else uint64_t adr : 36; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_tra_filt_adr_adr_s cn31xx; struct cvmx_tra_filt_adr_adr_s cn38xx; struct cvmx_tra_filt_adr_adr_s cn38xxp2; struct cvmx_tra_filt_adr_adr_s cn52xx; struct cvmx_tra_filt_adr_adr_s cn52xxp1; struct cvmx_tra_filt_adr_adr_s cn56xx; struct cvmx_tra_filt_adr_adr_s cn56xxp1; struct cvmx_tra_filt_adr_adr_s cn58xx; struct cvmx_tra_filt_adr_adr_s cn58xxp1; } cvmx_tra_filt_adr_adr_t; /** * cvmx_tra_filt_adr_msk * * TRA_FILT_ADR_MSK = Trace Buffer Filter Address Mask * * Description: */ typedef union { uint64_t u64; struct cvmx_tra_filt_adr_msk_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t adr : 36; /**< Address Mask The combination of TRA_FILT_ADR_ADR and TRA_FILT_ADR_MSK is a masked address to enable tracing of only those commands whose masked address matches. When a mask bit is not set, the corresponding address bits are assumed to match. Also, note that IOBDMAs do not have proper addresses, so when TRA_FILT_CMD[IOBDMA] is set, TRA_FILT_ADR_MSK must be zero to guarantee that any IOBDMAs enter the trace. 
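 *
 * Equivalently, in C (illustrative only; filt_adr and filt_msk stand for
 * the current TRA_FILT_ADR_ADR and TRA_FILT_ADR_MSK values, cmd_adr for a
 * command's address):
 *
 *   int match = ((cmd_adr ^ filt_adr.s.adr) & filt_msk.s.adr) == 0;
 *
 * Only address bits whose mask bit is set participate in the compare.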
*/ #else uint64_t adr : 36; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_tra_filt_adr_msk_s cn31xx; struct cvmx_tra_filt_adr_msk_s cn38xx; struct cvmx_tra_filt_adr_msk_s cn38xxp2; struct cvmx_tra_filt_adr_msk_s cn52xx; struct cvmx_tra_filt_adr_msk_s cn52xxp1; struct cvmx_tra_filt_adr_msk_s cn56xx; struct cvmx_tra_filt_adr_msk_s cn56xxp1; struct cvmx_tra_filt_adr_msk_s cn58xx; struct cvmx_tra_filt_adr_msk_s cn58xxp1; } cvmx_tra_filt_adr_msk_t; /** * cvmx_tra_filt_cmd * * TRA_FILT_CMD = Trace Buffer Filter Command Mask * * Description: * * Notes: * Note that the trace buffer does not do proper IOBDMA address compares. Thus, if IOBDMA is set, then * the address compare must be disabled (i.e. TRA_FILT_ADR_MSK set to zero) to guarantee that IOBDMAs * enter the trace. */ typedef union { uint64_t u64; struct cvmx_tra_filt_cmd_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_17_63 : 47; uint64_t saa : 1; /**< Enable SAA tracing 0=disable, 1=enable */ uint64_t iobdma : 1; /**< Enable IOBDMA tracing 0=disable, 1=enable */ uint64_t iobst : 1; /**< Enable IOBST tracing 0=disable, 1=enable */ uint64_t iobld64 : 1; /**< Enable IOBLD64 tracing 0=disable, 1=enable */ uint64_t iobld32 : 1; /**< Enable IOBLD32 tracing 0=disable, 1=enable */ uint64_t iobld16 : 1; /**< Enable IOBLD16 tracing 0=disable, 1=enable */ uint64_t iobld8 : 1; /**< Enable IOBLD8 tracing 0=disable, 1=enable */ uint64_t stt : 1; /**< Enable STT tracing 0=disable, 1=enable */ uint64_t stp : 1; /**< Enable STP tracing 0=disable, 1=enable */ uint64_t stc : 1; /**< Enable STC tracing 0=disable, 1=enable */ uint64_t stf : 1; /**< Enable STF tracing 0=disable, 1=enable */ uint64_t ldt : 1; /**< Enable LDT tracing 0=disable, 1=enable */ uint64_t ldi : 1; /**< Enable LDI tracing 0=disable, 1=enable */ uint64_t ldd : 1; /**< Enable LDD tracing 0=disable, 1=enable */ uint64_t psl1 : 1; /**< Enable PSL1 tracing 0=disable, 1=enable */ uint64_t pl2 : 1; /**< Enable PL2 tracing 0=disable, 1=enable */ uint64_t dwb : 1; /**< Enable DWB tracing 0=disable, 1=enable */ #else uint64_t dwb : 1; uint64_t pl2 : 1; uint64_t psl1 : 1; uint64_t ldd : 1; uint64_t ldi : 1; uint64_t ldt : 1; uint64_t stf : 1; uint64_t stc : 1; uint64_t stp : 1; uint64_t stt : 1; uint64_t iobld8 : 1; uint64_t iobld16 : 1; uint64_t iobld32 : 1; uint64_t iobld64 : 1; uint64_t iobst : 1; uint64_t iobdma : 1; uint64_t saa : 1; uint64_t reserved_17_63 : 47; #endif } s; struct cvmx_tra_filt_cmd_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t iobdma : 1; /**< Enable IOBDMA tracing 0=disable, 1=enable */ uint64_t iobst : 1; /**< Enable IOBST tracing 0=disable, 1=enable */ uint64_t iobld64 : 1; /**< Enable IOBLD64 tracing 0=disable, 1=enable */ uint64_t iobld32 : 1; /**< Enable IOBLD32 tracing 0=disable, 1=enable */ uint64_t iobld16 : 1; /**< Enable IOBLD16 tracing 0=disable, 1=enable */ uint64_t iobld8 : 1; /**< Enable IOBLD8 tracing 0=disable, 1=enable */ uint64_t stt : 1; /**< Enable STT tracing 0=disable, 1=enable */ uint64_t stp : 1; /**< Enable STP tracing 0=disable, 1=enable */ uint64_t stc : 1; /**< Enable STC tracing 0=disable, 1=enable */ uint64_t stf : 1; /**< Enable STF tracing 0=disable, 1=enable */ uint64_t ldt : 1; /**< Enable LDT tracing 0=disable, 1=enable */ uint64_t ldi : 1; /**< Enable LDI tracing 0=disable, 1=enable */ uint64_t ldd : 1; /**< Enable LDD tracing 0=disable, 1=enable */ uint64_t psl1 : 1; /**< Enable PSL1 tracing 0=disable, 1=enable */ uint64_t pl2 : 1; /**< Enable PL2 tracing 0=disable, 1=enable */ uint64_t 
dwb : 1; /**< Enable DWB tracing 0=disable, 1=enable */ #else uint64_t dwb : 1; uint64_t pl2 : 1; uint64_t psl1 : 1; uint64_t ldd : 1; uint64_t ldi : 1; uint64_t ldt : 1; uint64_t stf : 1; uint64_t stc : 1; uint64_t stp : 1; uint64_t stt : 1; uint64_t iobld8 : 1; uint64_t iobld16 : 1; uint64_t iobld32 : 1; uint64_t iobld64 : 1; uint64_t iobst : 1; uint64_t iobdma : 1; uint64_t reserved_16_63 : 48; #endif } cn31xx; struct cvmx_tra_filt_cmd_cn31xx cn38xx; struct cvmx_tra_filt_cmd_cn31xx cn38xxp2; struct cvmx_tra_filt_cmd_s cn52xx; struct cvmx_tra_filt_cmd_s cn52xxp1; struct cvmx_tra_filt_cmd_s cn56xx; struct cvmx_tra_filt_cmd_s cn56xxp1; struct cvmx_tra_filt_cmd_s cn58xx; struct cvmx_tra_filt_cmd_s cn58xxp1; } cvmx_tra_filt_cmd_t; /** * cvmx_tra_filt_did * * TRA_FILT_DID = Trace Buffer Filter DestinationId Mask * * Description: */ typedef union { uint64_t u64; struct cvmx_tra_filt_did_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t illegal : 19; /**< Illegal destinations */ uint64_t pow : 1; /**< Enable tracing of requests to POW (get work, add work, status/memory/index loads, NULLRd loads, CSR's) */ uint64_t illegal2 : 3; /**< Illegal destinations */ uint64_t rng : 1; /**< Enable tracing of requests to RNG (loads/IOBDMA's are legal) */ uint64_t zip : 1; /**< Enable tracing of requests to ZIP (doorbell stores are legal) */ uint64_t dfa : 1; /**< Enable tracing of requests to DFA (CSR's and operations are legal) */ uint64_t fpa : 1; /**< Enable tracing of requests to FPA (alloc's (loads/IOBDMA's), frees (stores) are legal) */ uint64_t key : 1; /**< Enable tracing of requests to KEY memory (loads/IOBDMA's/stores are legal) */ uint64_t pci : 1; /**< Enable tracing of requests to PCI and RSL-type CSR's (RSL CSR's, PCI bus operations, PCI CSR's) */ uint64_t illegal3 : 2; /**< Illegal destinations */ uint64_t mio : 1; /**< Enable tracing of CIU and GPIO CSR's */ #else uint64_t mio : 1; uint64_t illegal3 : 2; uint64_t pci : 1; uint64_t key : 1; uint64_t fpa : 1; uint64_t dfa : 1; uint64_t zip : 1; uint64_t rng : 1; uint64_t illegal2 : 3; uint64_t pow : 1; uint64_t illegal : 19; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_tra_filt_did_s cn31xx; struct cvmx_tra_filt_did_s cn38xx; struct cvmx_tra_filt_did_s cn38xxp2; struct cvmx_tra_filt_did_s cn52xx; struct cvmx_tra_filt_did_s cn52xxp1; struct cvmx_tra_filt_did_s cn56xx; struct cvmx_tra_filt_did_s cn56xxp1; struct cvmx_tra_filt_did_s cn58xx; struct cvmx_tra_filt_did_s cn58xxp1; } cvmx_tra_filt_did_t; /** * cvmx_tra_filt_sid * * TRA_FILT_SID = Trace Buffer Filter SourceId Mask * * Description: */ typedef union { uint64_t u64; struct cvmx_tra_filt_sid_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t dwb : 1; /**< Enable tracing of requests from the IOB DWB engine */ uint64_t iobreq : 1; /**< Enable tracing of requests from FPA,TIM,DFA, PCI,ZIP,POW, and PKO (writes) */ uint64_t pko : 1; /**< Enable tracing of read requests from PKO */ uint64_t pki : 1; /**< Enable tracing of write requests from PIP/IPD */ uint64_t pp : 16; /**< Enable tracing from PP[N] with matching SourceID 0=disable, 1=enable per bit N where 0<=N<=15 */ #else uint64_t pp : 16; uint64_t pki : 1; uint64_t pko : 1; uint64_t iobreq : 1; uint64_t dwb : 1; uint64_t reserved_20_63 : 44; #endif } s; struct cvmx_tra_filt_sid_s cn31xx; struct cvmx_tra_filt_sid_s cn38xx; struct cvmx_tra_filt_sid_s cn38xxp2; struct cvmx_tra_filt_sid_s cn52xx; struct cvmx_tra_filt_sid_s cn52xxp1; struct cvmx_tra_filt_sid_s cn56xx; struct
cvmx_tra_filt_sid_s cn56xxp1; struct cvmx_tra_filt_sid_s cn58xx; struct cvmx_tra_filt_sid_s cn58xxp1; } cvmx_tra_filt_sid_t; /** * cvmx_tra_int_status * * TRA_INT_STATUS = Trace Buffer Interrupt Status * * Description: * * Notes: * During a CSR write to this register, the write data is used as a mask to clear the selected status * bits (status'[3:0] = status[3:0] & ~write_data[3:0]). */ typedef union { uint64_t u64; struct cvmx_tra_int_status_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_4_63 : 60; uint64_t mcd0_thr : 1; /**< MCD0 full threshold interrupt status 0=trace buffer did not generate MCD0 wire pulse 1=trace buffer did generate MCD0 wire pulse and prevents additional MCD0_THR MCD0 wire pulses */ uint64_t mcd0_trg : 1; /**< MCD0 end trigger interrupt status 0=trace buffer did not generate interrupt 1=trace buffer did generate interrupt and prevents additional MCD0_TRG MCD0 wire pulses */ uint64_t ciu_thr : 1; /**< CIU full threshold interrupt status 0=trace buffer did not generate interrupt 1=trace buffer did generate interrupt */ uint64_t ciu_trg : 1; /**< CIU end trigger interrupt status 0=trace buffer did not generate interrupt 1=trace buffer did generate interrupt */ #else uint64_t ciu_trg : 1; uint64_t ciu_thr : 1; uint64_t mcd0_trg : 1; uint64_t mcd0_thr : 1; uint64_t reserved_4_63 : 60; #endif } s; struct cvmx_tra_int_status_s cn31xx; struct cvmx_tra_int_status_s cn38xx; struct cvmx_tra_int_status_s cn38xxp2; struct cvmx_tra_int_status_s cn52xx; struct cvmx_tra_int_status_s cn52xxp1; struct cvmx_tra_int_status_s cn56xx; struct cvmx_tra_int_status_s cn56xxp1; struct cvmx_tra_int_status_s cn58xx; struct cvmx_tra_int_status_s cn58xxp1; } cvmx_tra_int_status_t; /** * cvmx_tra_read_dat * * TRA_READ_DAT = Trace Buffer Read Data * * Description: * * Notes: * This CSR is a memory of 1024 entries. When the trace was enabled, the read pointer was set to entry * 0 by hardware. Each read to this address increments the read pointer. 
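 *
 * A minimal drain loop (assuming the cvmx_read_csr() accessor and the
 * CVMX_TRA_READ_DAT and CVMX_TRA_CYCLES_SINCE1 address macros from the
 * companion cvmx headers; wrap/overflow handling omitted):
 *
 *   cvmx_tra_cycles_since1_t cyc;
 *   cyc.u64 = cvmx_read_csr(CVMX_TRA_CYCLES_SINCE1);
 *   pending = (cyc.s.wptr - cyc.s.rptr) & 0x3ff;        // 10-bit pointers
 *   while (pending--)
 *       process_entry(cvmx_read_csr(CVMX_TRA_READ_DAT)); // process_entry(): user code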
*/ typedef union { uint64_t u64; struct cvmx_tra_read_dat_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t data : 64; /**< Trace buffer data for current entry */ #else uint64_t data : 64; #endif } s; struct cvmx_tra_read_dat_s cn31xx; struct cvmx_tra_read_dat_s cn38xx; struct cvmx_tra_read_dat_s cn38xxp2; struct cvmx_tra_read_dat_s cn52xx; struct cvmx_tra_read_dat_s cn52xxp1; struct cvmx_tra_read_dat_s cn56xx; struct cvmx_tra_read_dat_s cn56xxp1; struct cvmx_tra_read_dat_s cn58xx; struct cvmx_tra_read_dat_s cn58xxp1; } cvmx_tra_read_dat_t; /** * cvmx_tra_trig0_adr_adr * * TRA_TRIG0_ADR_ADR = Trace Buffer Filter Address Address * * Description: */ typedef union { uint64_t u64; struct cvmx_tra_trig0_adr_adr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t adr : 36; /**< Unmasked Address The combination of TRA_TRIG0_ADR_ADR and TRA_TRIG0_ADR_MSK is a masked address to enable tracing of only those commands whose masked address matches */ #else uint64_t adr : 36; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_tra_trig0_adr_adr_s cn31xx; struct cvmx_tra_trig0_adr_adr_s cn38xx; struct cvmx_tra_trig0_adr_adr_s cn38xxp2; struct cvmx_tra_trig0_adr_adr_s cn52xx; struct cvmx_tra_trig0_adr_adr_s cn52xxp1; struct cvmx_tra_trig0_adr_adr_s cn56xx; struct cvmx_tra_trig0_adr_adr_s cn56xxp1; struct cvmx_tra_trig0_adr_adr_s cn58xx; struct cvmx_tra_trig0_adr_adr_s cn58xxp1; } cvmx_tra_trig0_adr_adr_t; /** * cvmx_tra_trig0_adr_msk * * TRA_TRIG0_ADR_MSK = Trace Buffer Filter Address Mask * * Description: */ typedef union { uint64_t u64; struct cvmx_tra_trig0_adr_msk_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t adr : 36; /**< Address Mask The combination of TRA_TRIG0_ADR_ADR and TRA_TRIG0_ADR_MSK is a masked address to enable tracing of only those commands whose masked address matches. When a mask bit is not set, the corresponding address bits are assumed to match. Also, note that IOBDMAs do not have proper addresses, so when TRA_TRIG0_CMD[IOBDMA] is set, TRA_TRIG0_ADR_MSK must be zero to guarantee that any IOBDMAs are recognized as triggers. */ #else uint64_t adr : 36; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_tra_trig0_adr_msk_s cn31xx; struct cvmx_tra_trig0_adr_msk_s cn38xx; struct cvmx_tra_trig0_adr_msk_s cn38xxp2; struct cvmx_tra_trig0_adr_msk_s cn52xx; struct cvmx_tra_trig0_adr_msk_s cn52xxp1; struct cvmx_tra_trig0_adr_msk_s cn56xx; struct cvmx_tra_trig0_adr_msk_s cn56xxp1; struct cvmx_tra_trig0_adr_msk_s cn58xx; struct cvmx_tra_trig0_adr_msk_s cn58xxp1; } cvmx_tra_trig0_adr_msk_t; /** * cvmx_tra_trig0_cmd * * TRA_TRIG0_CMD = Trace Buffer Filter Command Mask * * Description: * * Notes: * Note that the trace buffer does not do proper IOBDMA address compares. Thus, if IOBDMA is set, then * the address compare must be disabled (i.e. TRA_TRIG0_ADR_MSK set to zero) to guarantee that IOBDMAs * are recognized as triggers.
*/ typedef union { uint64_t u64; struct cvmx_tra_trig0_cmd_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_17_63 : 47; uint64_t saa : 1; /**< Enable SAA tracing 0=disable, 1=enable */ uint64_t iobdma : 1; /**< Enable IOBDMA tracing 0=disable, 1=enable */ uint64_t iobst : 1; /**< Enable IOBST tracing 0=disable, 1=enable */ uint64_t iobld64 : 1; /**< Enable IOBLD64 tracing 0=disable, 1=enable */ uint64_t iobld32 : 1; /**< Enable IOBLD32 tracing 0=disable, 1=enable */ uint64_t iobld16 : 1; /**< Enable IOBLD16 tracing 0=disable, 1=enable */ uint64_t iobld8 : 1; /**< Enable IOBLD8 tracing 0=disable, 1=enable */ uint64_t stt : 1; /**< Enable STT tracing 0=disable, 1=enable */ uint64_t stp : 1; /**< Enable STP tracing 0=disable, 1=enable */ uint64_t stc : 1; /**< Enable STC tracing 0=disable, 1=enable */ uint64_t stf : 1; /**< Enable STF tracing 0=disable, 1=enable */ uint64_t ldt : 1; /**< Enable LDT tracing 0=disable, 1=enable */ uint64_t ldi : 1; /**< Enable LDI tracing 0=disable, 1=enable */ uint64_t ldd : 1; /**< Enable LDD tracing 0=disable, 1=enable */ uint64_t psl1 : 1; /**< Enable PSL1 tracing 0=disable, 1=enable */ uint64_t pl2 : 1; /**< Enable PL2 tracing 0=disable, 1=enable */ uint64_t dwb : 1; /**< Enable DWB tracing 0=disable, 1=enable */ #else uint64_t dwb : 1; uint64_t pl2 : 1; uint64_t psl1 : 1; uint64_t ldd : 1; uint64_t ldi : 1; uint64_t ldt : 1; uint64_t stf : 1; uint64_t stc : 1; uint64_t stp : 1; uint64_t stt : 1; uint64_t iobld8 : 1; uint64_t iobld16 : 1; uint64_t iobld32 : 1; uint64_t iobld64 : 1; uint64_t iobst : 1; uint64_t iobdma : 1; uint64_t saa : 1; uint64_t reserved_17_63 : 47; #endif } s; struct cvmx_tra_trig0_cmd_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t iobdma : 1; /**< Enable IOBDMA tracing 0=disable, 1=enable */ uint64_t iobst : 1; /**< Enable IOBST tracing 0=disable, 1=enable */ uint64_t iobld64 : 1; /**< Enable IOBLD64 tracing 0=disable, 1=enable */ uint64_t iobld32 : 1; /**< Enable IOBLD32 tracing 0=disable, 1=enable */ uint64_t iobld16 : 1; /**< Enable IOBLD16 tracing 0=disable, 1=enable */ uint64_t iobld8 : 1; /**< Enable IOBLD8 tracing 0=disable, 1=enable */ uint64_t stt : 1; /**< Enable STT tracing 0=disable, 1=enable */ uint64_t stp : 1; /**< Enable STP tracing 0=disable, 1=enable */ uint64_t stc : 1; /**< Enable STC tracing 0=disable, 1=enable */ uint64_t stf : 1; /**< Enable STF tracing 0=disable, 1=enable */ uint64_t ldt : 1; /**< Enable LDT tracing 0=disable, 1=enable */ uint64_t ldi : 1; /**< Enable LDI tracing 0=disable, 1=enable */ uint64_t ldd : 1; /**< Enable LDD tracing 0=disable, 1=enable */ uint64_t psl1 : 1; /**< Enable PSL1 tracing 0=disable, 1=enable */ uint64_t pl2 : 1; /**< Enable PL2 tracing 0=disable, 1=enable */ uint64_t dwb : 1; /**< Enable DWB tracing 0=disable, 1=enable */ #else uint64_t dwb : 1; uint64_t pl2 : 1; uint64_t psl1 : 1; uint64_t ldd : 1; uint64_t ldi : 1; uint64_t ldt : 1; uint64_t stf : 1; uint64_t stc : 1; uint64_t stp : 1; uint64_t stt : 1; uint64_t iobld8 : 1; uint64_t iobld16 : 1; uint64_t iobld32 : 1; uint64_t iobld64 : 1; uint64_t iobst : 1; uint64_t iobdma : 1; uint64_t reserved_16_63 : 48; #endif } cn31xx; struct cvmx_tra_trig0_cmd_cn31xx cn38xx; struct cvmx_tra_trig0_cmd_cn31xx cn38xxp2; struct cvmx_tra_trig0_cmd_s cn52xx; struct cvmx_tra_trig0_cmd_s cn52xxp1; struct cvmx_tra_trig0_cmd_s cn56xx; struct cvmx_tra_trig0_cmd_s cn56xxp1; struct cvmx_tra_trig0_cmd_s cn58xx; struct cvmx_tra_trig0_cmd_s cn58xxp1; } cvmx_tra_trig0_cmd_t; /** * cvmx_tra_trig0_did * * 
TRA_TRIG0_DID = Trace Buffer Filter DestinationId Mask * * Description: */ typedef union { uint64_t u64; struct cvmx_tra_trig0_did_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t illegal : 19; /**< Illegal destinations */ uint64_t pow : 1; /**< Enable triggering on requests to POW (get work, add work, status/memory/index loads, NULLRd loads, CSR's) */ uint64_t illegal2 : 3; /**< Illegal destinations */ uint64_t rng : 1; /**< Enable triggering on requests to RNG (loads/IOBDMA's are legal) */ uint64_t zip : 1; /**< Enable triggering on requests to ZIP (doorbell stores are legal) */ uint64_t dfa : 1; /**< Enable triggering on requests to DFA (CSR's and operations are legal) */ uint64_t fpa : 1; /**< Enable triggering on requests to FPA (alloc's (loads/IOBDMA's), frees (stores) are legal) */ uint64_t key : 1; /**< Enable triggering on requests to KEY memory (loads/IOBDMA's/stores are legal) */ uint64_t pci : 1; /**< Enable triggering on requests to PCI and RSL-type CSR's (RSL CSR's, PCI bus operations, PCI CSR's) */ uint64_t illegal3 : 2; /**< Illegal destinations */ uint64_t mio : 1; /**< Enable triggering on CIU and GPIO CSR's */ #else uint64_t mio : 1; uint64_t illegal3 : 2; uint64_t pci : 1; uint64_t key : 1; uint64_t fpa : 1; uint64_t dfa : 1; uint64_t zip : 1; uint64_t rng : 1; uint64_t illegal2 : 3; uint64_t pow : 1; uint64_t illegal : 19; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_tra_trig0_did_s cn31xx; struct cvmx_tra_trig0_did_s cn38xx; struct cvmx_tra_trig0_did_s cn38xxp2; struct cvmx_tra_trig0_did_s cn52xx; struct cvmx_tra_trig0_did_s cn52xxp1; struct cvmx_tra_trig0_did_s cn56xx; struct cvmx_tra_trig0_did_s cn56xxp1; struct cvmx_tra_trig0_did_s cn58xx; struct cvmx_tra_trig0_did_s cn58xxp1; } cvmx_tra_trig0_did_t; /** * cvmx_tra_trig0_sid * * TRA_TRIG0_SID = Trace Buffer Filter SourceId Mask * * Description: */ typedef union { uint64_t u64; struct cvmx_tra_trig0_sid_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t dwb : 1; /**< Enable triggering on requests from the IOB DWB engine */ uint64_t iobreq : 1; /**< Enable triggering on requests from FPA,TIM,DFA, PCI,ZIP,POW, and PKO (writes) */ uint64_t pko : 1; /**< Enable triggering on read requests from PKO */ uint64_t pki : 1; /**< Enable triggering on write requests from PIP/IPD */ uint64_t pp : 16; /**< Enable triggering from PP[N] with matching SourceID 0=disable, 1=enable per bit N where 0<=N<=15 */ #else uint64_t pp : 16; uint64_t pki : 1; uint64_t pko : 1; uint64_t iobreq : 1; uint64_t dwb : 1; uint64_t reserved_20_63 : 44; #endif } s; struct cvmx_tra_trig0_sid_s cn31xx; struct cvmx_tra_trig0_sid_s cn38xx; struct cvmx_tra_trig0_sid_s cn38xxp2; struct cvmx_tra_trig0_sid_s cn52xx; struct cvmx_tra_trig0_sid_s cn52xxp1; struct cvmx_tra_trig0_sid_s cn56xx; struct cvmx_tra_trig0_sid_s cn56xxp1; struct cvmx_tra_trig0_sid_s cn58xx; struct cvmx_tra_trig0_sid_s cn58xxp1; } cvmx_tra_trig0_sid_t; /** * cvmx_tra_trig1_adr_adr * * TRA_TRIG1_ADR_ADR = Trace Buffer Filter Address Address * * Description: */ typedef union { uint64_t u64; struct cvmx_tra_trig1_adr_adr_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t adr : 36; /**< Unmasked Address The combination of TRA_TRIG1_ADR_ADR and TRA_TRIG1_ADR_MSK is a masked address to enable tracing of only those commands whose masked address matches */ #else uint64_t adr : 36; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_tra_trig1_adr_adr_s cn31xx; struct cvmx_tra_trig1_adr_adr_s cn38xx; struct
cvmx_tra_trig1_adr_adr_s cn38xxp2; struct cvmx_tra_trig1_adr_adr_s cn52xx; struct cvmx_tra_trig1_adr_adr_s cn52xxp1; struct cvmx_tra_trig1_adr_adr_s cn56xx; struct cvmx_tra_trig1_adr_adr_s cn56xxp1; struct cvmx_tra_trig1_adr_adr_s cn58xx; struct cvmx_tra_trig1_adr_adr_s cn58xxp1; } cvmx_tra_trig1_adr_adr_t; /** * cvmx_tra_trig1_adr_msk * * TRA_TRIG1_ADR_MSK = Trace Buffer Filter Address Mask * * Description: */ typedef union { uint64_t u64; struct cvmx_tra_trig1_adr_msk_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t adr : 36; /**< Address Mask The combination of TRA_TRIG1_ADR_ADR and TRA_TRIG1_ADR_MSK is a masked address to enable tracing of only those commands whose masked address matches. When a mask bit is not set, the corresponding address bits are assumed to match. Also, note that IOBDMAs do not have proper addresses, so when TRA_TRIG1_CMD[IOBDMA] is set, TRA_TRIG1_ADR_MSK must be zero to guarantee that any IOBDMAs are recognized as triggers. */ #else uint64_t adr : 36; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_tra_trig1_adr_msk_s cn31xx; struct cvmx_tra_trig1_adr_msk_s cn38xx; struct cvmx_tra_trig1_adr_msk_s cn38xxp2; struct cvmx_tra_trig1_adr_msk_s cn52xx; struct cvmx_tra_trig1_adr_msk_s cn52xxp1; struct cvmx_tra_trig1_adr_msk_s cn56xx; struct cvmx_tra_trig1_adr_msk_s cn56xxp1; struct cvmx_tra_trig1_adr_msk_s cn58xx; struct cvmx_tra_trig1_adr_msk_s cn58xxp1; } cvmx_tra_trig1_adr_msk_t; /** * cvmx_tra_trig1_cmd * * TRA_TRIG1_CMD = Trace Buffer Filter Command Mask * * Description: * * Notes: * Note that the trace buffer does not do proper IOBDMA address compares. Thus, if IOBDMA is set, then * the address compare must be disabled (i.e. TRA_TRIG1_ADR_MSK set to zero) to guarantee that IOBDMAs * are recognized as triggers.
*/ typedef union { uint64_t u64; struct cvmx_tra_trig1_cmd_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_17_63 : 47; uint64_t saa : 1; /**< Enable SAA tracing 0=disable, 1=enable */ uint64_t iobdma : 1; /**< Enable IOBDMA tracing 0=disable, 1=enable */ uint64_t iobst : 1; /**< Enable IOBST tracing 0=disable, 1=enable */ uint64_t iobld64 : 1; /**< Enable IOBLD64 tracing 0=disable, 1=enable */ uint64_t iobld32 : 1; /**< Enable IOBLD32 tracing 0=disable, 1=enable */ uint64_t iobld16 : 1; /**< Enable IOBLD16 tracing 0=disable, 1=enable */ uint64_t iobld8 : 1; /**< Enable IOBLD8 tracing 0=disable, 1=enable */ uint64_t stt : 1; /**< Enable STT tracing 0=disable, 1=enable */ uint64_t stp : 1; /**< Enable STP tracing 0=disable, 1=enable */ uint64_t stc : 1; /**< Enable STC tracing 0=disable, 1=enable */ uint64_t stf : 1; /**< Enable STF tracing 0=disable, 1=enable */ uint64_t ldt : 1; /**< Enable LDT tracing 0=disable, 1=enable */ uint64_t ldi : 1; /**< Enable LDI tracing 0=disable, 1=enable */ uint64_t ldd : 1; /**< Enable LDD tracing 0=disable, 1=enable */ uint64_t psl1 : 1; /**< Enable PSL1 tracing 0=disable, 1=enable */ uint64_t pl2 : 1; /**< Enable PL2 tracing 0=disable, 1=enable */ uint64_t dwb : 1; /**< Enable DWB tracing 0=disable, 1=enable */ #else uint64_t dwb : 1; uint64_t pl2 : 1; uint64_t psl1 : 1; uint64_t ldd : 1; uint64_t ldi : 1; uint64_t ldt : 1; uint64_t stf : 1; uint64_t stc : 1; uint64_t stp : 1; uint64_t stt : 1; uint64_t iobld8 : 1; uint64_t iobld16 : 1; uint64_t iobld32 : 1; uint64_t iobld64 : 1; uint64_t iobst : 1; uint64_t iobdma : 1; uint64_t saa : 1; uint64_t reserved_17_63 : 47; #endif } s; struct cvmx_tra_trig1_cmd_cn31xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_16_63 : 48; uint64_t iobdma : 1; /**< Enable IOBDMA tracing 0=disable, 1=enable */ uint64_t iobst : 1; /**< Enable IOBST tracing 0=disable, 1=enable */ uint64_t iobld64 : 1; /**< Enable IOBLD64 tracing 0=disable, 1=enable */ uint64_t iobld32 : 1; /**< Enable IOBLD32 tracing 0=disable, 1=enable */ uint64_t iobld16 : 1; /**< Enable IOBLD16 tracing 0=disable, 1=enable */ uint64_t iobld8 : 1; /**< Enable IOBLD8 tracing 0=disable, 1=enable */ uint64_t stt : 1; /**< Enable STT tracing 0=disable, 1=enable */ uint64_t stp : 1; /**< Enable STP tracing 0=disable, 1=enable */ uint64_t stc : 1; /**< Enable STC tracing 0=disable, 1=enable */ uint64_t stf : 1; /**< Enable STF tracing 0=disable, 1=enable */ uint64_t ldt : 1; /**< Enable LDT tracing 0=disable, 1=enable */ uint64_t ldi : 1; /**< Enable LDI tracing 0=disable, 1=enable */ uint64_t ldd : 1; /**< Enable LDD tracing 0=disable, 1=enable */ uint64_t psl1 : 1; /**< Enable PSL1 tracing 0=disable, 1=enable */ uint64_t pl2 : 1; /**< Enable PL2 tracing 0=disable, 1=enable */ uint64_t dwb : 1; /**< Enable DWB tracing 0=disable, 1=enable */ #else uint64_t dwb : 1; uint64_t pl2 : 1; uint64_t psl1 : 1; uint64_t ldd : 1; uint64_t ldi : 1; uint64_t ldt : 1; uint64_t stf : 1; uint64_t stc : 1; uint64_t stp : 1; uint64_t stt : 1; uint64_t iobld8 : 1; uint64_t iobld16 : 1; uint64_t iobld32 : 1; uint64_t iobld64 : 1; uint64_t iobst : 1; uint64_t iobdma : 1; uint64_t reserved_16_63 : 48; #endif } cn31xx; struct cvmx_tra_trig1_cmd_cn31xx cn38xx; struct cvmx_tra_trig1_cmd_cn31xx cn38xxp2; struct cvmx_tra_trig1_cmd_s cn52xx; struct cvmx_tra_trig1_cmd_s cn52xxp1; struct cvmx_tra_trig1_cmd_s cn56xx; struct cvmx_tra_trig1_cmd_s cn56xxp1; struct cvmx_tra_trig1_cmd_s cn58xx; struct cvmx_tra_trig1_cmd_s cn58xxp1; } cvmx_tra_trig1_cmd_t; /** * cvmx_tra_trig1_did * * 
TRA_TRIG1_DID = Trace Buffer Filter DestinationId Mask * * Description: */ typedef union { uint64_t u64; struct cvmx_tra_trig1_did_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_32_63 : 32; uint64_t illegal : 19; /**< Illegal destinations */ uint64_t pow : 1; /**< Enable triggering on requests to POW (get work, add work, status/memory/index loads, NULLRd loads, CSR's) */ uint64_t illegal2 : 3; /**< Illegal destinations */ uint64_t rng : 1; /**< Enable triggering on requests to RNG (loads/IOBDMA's are legal) */ uint64_t zip : 1; /**< Enable triggering on requests to ZIP (doorbell stores are legal) */ uint64_t dfa : 1; /**< Enable triggering on requests to DFA (CSR's and operations are legal) */ uint64_t fpa : 1; /**< Enable triggering on requests to FPA (alloc's (loads/IOBDMA's), frees (stores) are legal) */ uint64_t key : 1; /**< Enable triggering on requests to KEY memory (loads/IOBDMA's/stores are legal) */ uint64_t pci : 1; /**< Enable triggering on requests to PCI and RSL-type CSR's (RSL CSR's, PCI bus operations, PCI CSR's) */ uint64_t illegal3 : 2; /**< Illegal destinations */ uint64_t mio : 1; /**< Enable triggering on CIU and GPIO CSR's */ #else uint64_t mio : 1; uint64_t illegal3 : 2; uint64_t pci : 1; uint64_t key : 1; uint64_t fpa : 1; uint64_t dfa : 1; uint64_t zip : 1; uint64_t rng : 1; uint64_t illegal2 : 3; uint64_t pow : 1; uint64_t illegal : 19; uint64_t reserved_32_63 : 32; #endif } s; struct cvmx_tra_trig1_did_s cn31xx; struct cvmx_tra_trig1_did_s cn38xx; struct cvmx_tra_trig1_did_s cn38xxp2; struct cvmx_tra_trig1_did_s cn52xx; struct cvmx_tra_trig1_did_s cn52xxp1; struct cvmx_tra_trig1_did_s cn56xx; struct cvmx_tra_trig1_did_s cn56xxp1; struct cvmx_tra_trig1_did_s cn58xx; struct cvmx_tra_trig1_did_s cn58xxp1; } cvmx_tra_trig1_did_t; /** * cvmx_tra_trig1_sid * * TRA_TRIG1_SID = Trace Buffer Filter SourceId Mask * * Description: */ typedef union { uint64_t u64; struct cvmx_tra_trig1_sid_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t dwb : 1; /**< Enable triggering on requests from the IOB DWB engine */ uint64_t iobreq : 1; /**< Enable triggering on requests from FPA,TIM,DFA, PCI,ZIP,POW, and PKO (writes) */ uint64_t pko : 1; /**< Enable triggering on read requests from PKO */ uint64_t pki : 1; /**< Enable triggering on write requests from PIP/IPD */ uint64_t pp : 16; /**< Enable triggering from PP[N] with matching SourceID 0=disable, 1=enable per bit N where 0<=N<=15 */ #else uint64_t pp : 16; uint64_t pki : 1; uint64_t pko : 1; uint64_t iobreq : 1; uint64_t dwb : 1; uint64_t reserved_20_63 : 44; #endif } s; struct cvmx_tra_trig1_sid_s cn31xx; struct cvmx_tra_trig1_sid_s cn38xx; struct cvmx_tra_trig1_sid_s cn38xxp2; struct cvmx_tra_trig1_sid_s cn52xx; struct cvmx_tra_trig1_sid_s cn52xxp1; struct cvmx_tra_trig1_sid_s cn56xx; struct cvmx_tra_trig1_sid_s cn56xxp1; struct cvmx_tra_trig1_sid_s cn58xx; struct cvmx_tra_trig1_sid_s cn58xxp1; } cvmx_tra_trig1_sid_t; /** * cvmx_usbc#_daint * * Device All Endpoints Interrupt Register (DAINT) * * When a significant event occurs on an endpoint, a Device All Endpoints Interrupt register * interrupts the application using the Device OUT Endpoints Interrupt bit or Device IN Endpoints * Interrupt bit of the Core Interrupt register (GINTSTS.OEPInt or GINTSTS.IEPInt, respectively). * There is one interrupt bit per endpoint, up to a maximum of 16 bits for OUT endpoints and 16 * bits for IN endpoints. For a bidirectional endpoint, the corresponding IN and OUT interrupt * bits are used.
Bits in this register are set and cleared when the application sets and clears * bits in the corresponding Device Endpoint-n Interrupt register (DIEPINTn/DOEPINTn). */ typedef union { uint32_t u32; struct cvmx_usbcx_daint_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t outepint : 16; /**< OUT Endpoint Interrupt Bits (OutEPInt) One bit per OUT endpoint: Bit 16 for OUT endpoint 0, bit 31 for OUT endpoint 15 */ uint32_t inepint : 16; /**< IN Endpoint Interrupt Bits (InEpInt) One bit per IN Endpoint: Bit 0 for IN endpoint 0, bit 15 for IN endpoint 15 */ #else uint32_t inepint : 16; uint32_t outepint : 16; #endif } s; struct cvmx_usbcx_daint_s cn30xx; struct cvmx_usbcx_daint_s cn31xx; struct cvmx_usbcx_daint_s cn50xx; struct cvmx_usbcx_daint_s cn52xx; struct cvmx_usbcx_daint_s cn52xxp1; struct cvmx_usbcx_daint_s cn56xx; struct cvmx_usbcx_daint_s cn56xxp1; } cvmx_usbcx_daint_t; /** * cvmx_usbc#_daintmsk * * Device All Endpoints Interrupt Mask Register (DAINTMSK) * * The Device Endpoint Interrupt Mask register works with the Device Endpoint Interrupt register * to interrupt the application when an event occurs on a device endpoint. Even when an interrupt * is masked, the Device All Endpoints Interrupt (DAINT) register bit corresponding to that * interrupt will still be set. * Mask Interrupt: 1'b0 Unmask Interrupt: 1'b1 */ typedef union { uint32_t u32; struct cvmx_usbcx_daintmsk_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t outepmsk : 16; /**< OUT EP Interrupt Mask Bits (OutEpMsk) One per OUT Endpoint: Bit 16 for OUT EP 0, bit 31 for OUT EP 15 */ uint32_t inepmsk : 16; /**< IN EP Interrupt Mask Bits (InEpMsk) One bit per IN Endpoint: Bit 0 for IN EP 0, bit 15 for IN EP 15 */ #else uint32_t inepmsk : 16; uint32_t outepmsk : 16; #endif } s; struct cvmx_usbcx_daintmsk_s cn30xx; struct cvmx_usbcx_daintmsk_s cn31xx; struct cvmx_usbcx_daintmsk_s cn50xx; struct cvmx_usbcx_daintmsk_s cn52xx; struct cvmx_usbcx_daintmsk_s cn52xxp1; struct cvmx_usbcx_daintmsk_s cn56xx; struct cvmx_usbcx_daintmsk_s cn56xxp1; } cvmx_usbcx_daintmsk_t; /** * cvmx_usbc#_dcfg * * Device Configuration Register (DCFG) * * This register configures the core in Device mode after power-on or after certain control * commands or enumeration. Do not make changes to this register after initial programming. */ typedef union { uint32_t u32; struct cvmx_usbcx_dcfg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_23_31 : 9; uint32_t epmiscnt : 5; /**< IN Endpoint Mismatch Count (EPMisCnt) The application programs this field with a count that determines when the core generates an Endpoint Mismatch interrupt (GINTSTS.EPMis). The core loads this value into an internal counter and decrements it. The counter is reloaded whenever there is a match or when the counter expires. The width of this counter depends on the depth of the Token Queue. */ uint32_t reserved_13_17 : 5; uint32_t perfrint : 2; /**< Periodic Frame Interval (PerFrInt) Indicates the time within a (micro)frame at which the application must be notified using the End Of Periodic Frame Interrupt. This can be used to determine if all the isochronous traffic for that (micro)frame is complete. * 2'b00: 80% of the (micro)frame interval * 2'b01: 85% * 2'b10: 90% * 2'b11: 95% */ uint32_t devaddr : 7; /**< Device Address (DevAddr) The application must program this field after every SetAddress control command.
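 *
 * For example (illustrative read-modify-write; usbc_read32()/
 * usbc_write32() are hypothetical 32-bit CSR accessors, and
 * CVMX_USBCX_DCFG(block) names the per-block register address as in the
 * companion cvmx headers):
 *
 *   cvmx_usbcx_dcfg_t dcfg;
 *   dcfg.u32 = usbc_read32(CVMX_USBCX_DCFG(0));
 *   dcfg.s.devaddr = new_address;   // address from the SetAddress request
 *   usbc_write32(CVMX_USBCX_DCFG(0), dcfg.u32);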
*/ uint32_t reserved_3_3 : 1; uint32_t nzstsouthshk : 1; /**< Non-Zero-Length Status OUT Handshake (NZStsOUTHShk) The application can use this field to select the handshake the core sends on receiving a nonzero-length data packet during the OUT transaction of a control transfer's Status stage. * 1'b1: Send a STALL handshake on a nonzero-length status OUT transaction and do not send the received OUT packet to the application. * 1'b0: Send the received OUT packet to the application (zero- length or nonzero-length) and send a handshake based on the NAK and STALL bits for the endpoint in the Device Endpoint Control register. */ uint32_t devspd : 2; /**< Device Speed (DevSpd) Indicates the speed at which the application requires the core to enumerate, or the maximum speed the application can support. However, the actual bus speed is determined only after the chirp sequence is completed, and is based on the speed of the USB host to which the core is connected. See "Device Initialization" on page 249 for details. * 2'b00: High speed (USB 2.0 PHY clock is 30 MHz or 60 MHz) * 2'b01: Full speed (USB 2.0 PHY clock is 30 MHz or 60 MHz) * 2'b10: Low speed (USB 1.1 transceiver clock is 6 MHz). If you select 6 MHz LS mode, you must do a soft reset. * 2'b11: Full speed (USB 1.1 transceiver clock is 48 MHz) */ #else uint32_t devspd : 2; uint32_t nzstsouthshk : 1; uint32_t reserved_3_3 : 1; uint32_t devaddr : 7; uint32_t perfrint : 2; uint32_t reserved_13_17 : 5; uint32_t epmiscnt : 5; uint32_t reserved_23_31 : 9; #endif } s; struct cvmx_usbcx_dcfg_s cn30xx; struct cvmx_usbcx_dcfg_s cn31xx; struct cvmx_usbcx_dcfg_s cn50xx; struct cvmx_usbcx_dcfg_s cn52xx; struct cvmx_usbcx_dcfg_s cn52xxp1; struct cvmx_usbcx_dcfg_s cn56xx; struct cvmx_usbcx_dcfg_s cn56xxp1; } cvmx_usbcx_dcfg_t; /** * cvmx_usbc#_dctl * * Device Control Register (DCTL) * */ typedef union { uint32_t u32; struct cvmx_usbcx_dctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_12_31 : 20; uint32_t pwronprgdone : 1; /**< Power-On Programming Done (PWROnPrgDone) The application uses this bit to indicate that register programming is completed after a wake-up from Power Down mode. For more information, see "Device Mode Suspend and Resume With Partial Power-Down" on page 357. */ uint32_t cgoutnak : 1; /**< Clear Global OUT NAK (CGOUTNak) A write to this field clears the Global OUT NAK. */ uint32_t sgoutnak : 1; /**< Set Global OUT NAK (SGOUTNak) A write to this field sets the Global OUT NAK. The application uses this bit to send a NAK handshake on all OUT endpoints. The application should set this bit only after making sure that the Global OUT NAK Effective bit in the Core Interrupt Register (GINTSTS.GOUTNakEff) is cleared. */ uint32_t cgnpinnak : 1; /**< Clear Global Non-Periodic IN NAK (CGNPInNak) A write to this field clears the Global Non-Periodic IN NAK. */ uint32_t sgnpinnak : 1; /**< Set Global Non-Periodic IN NAK (SGNPInNak) A write to this field sets the Global Non-Periodic IN NAK. The application uses this bit to send a NAK handshake on all non- periodic IN endpoints. The core can also set this bit when a timeout condition is detected on a non-periodic endpoint. The application should set this bit only after making sure that the Global IN NAK Effective bit in the Core Interrupt Register (GINTSTS.GINNakEff) is cleared.
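 *
 * Typical sequence, sketched with the same hypothetical usbc_read32()/
 * usbc_write32() accessors as above (polling of GINTSTS.GINNakEff
 * omitted):
 *
 *   cvmx_usbcx_dctl_t dctl;
 *   dctl.u32 = usbc_read32(CVMX_USBCX_DCTL(0));
 *   dctl.s.sgnpinnak = 1;            // request global non-periodic IN NAK
 *   usbc_write32(CVMX_USBCX_DCTL(0), dctl.u32);
 *   // ... wait for GINTSTS.GINNakEff, service the endpoints, then:
 *   dctl.u32 = usbc_read32(CVMX_USBCX_DCTL(0));
 *   dctl.s.cgnpinnak = 1;            // release the NAK
 *   usbc_write32(CVMX_USBCX_DCTL(0), dctl.u32);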
*/ uint32_t tstctl : 3; /**< Test Control (TstCtl) * 3'b000: Test mode disabled * 3'b001: Test_J mode * 3'b010: Test_K mode * 3'b011: Test_SE0_NAK mode * 3'b100: Test_Packet mode * 3'b101: Test_Force_Enable * Others: Reserved */ uint32_t goutnaksts : 1; /**< Global OUT NAK Status (GOUTNakSts) * 1'b0: A handshake is sent based on the FIFO Status and the NAK and STALL bit settings. * 1'b1: No data is written to the RxFIFO, irrespective of space availability. Sends a NAK handshake on all packets, except on SETUP transactions. All isochronous OUT packets are dropped. */ uint32_t gnpinnaksts : 1; /**< Global Non-Periodic IN NAK Status (GNPINNakSts) * 1'b0: A handshake is sent out based on the data availability in the transmit FIFO. * 1'b1: A NAK handshake is sent out on all non-periodic IN endpoints, irrespective of the data availability in the transmit FIFO. */ uint32_t sftdiscon : 1; /**< Soft Disconnect (SftDiscon) The application uses this bit to signal the O2P USB core to do a soft disconnect. As long as this bit is set, the host will not see that the device is connected, and the device will not receive signals on the USB. The core stays in the disconnected state until the application clears this bit. The minimum duration for which the core must keep this bit set is specified in Minimum Duration for Soft Disconnect. * 1'b0: Normal operation. When this bit is cleared after a soft disconnect, the core drives the phy_opmode_o signal on the UTMI+ to 2'b00, which generates a device connect event to the USB host. When the device is reconnected, the USB host restarts device enumeration. * 1'b1: The core drives the phy_opmode_o signal on the UTMI+ to 2'b01, which generates a device disconnect event to the USB host. */ uint32_t rmtwkupsig : 1; /**< Remote Wakeup Signaling (RmtWkUpSig) When the application sets this bit, the core initiates remote signaling to wake up the USB host. The application must set this bit to get the core out of Suspended state and must clear this bit after the core comes out of Suspended state. */ #else uint32_t rmtwkupsig : 1; uint32_t sftdiscon : 1; uint32_t gnpinnaksts : 1; uint32_t goutnaksts : 1; uint32_t tstctl : 3; uint32_t sgnpinnak : 1; uint32_t cgnpinnak : 1; uint32_t sgoutnak : 1; uint32_t cgoutnak : 1; uint32_t pwronprgdone : 1; uint32_t reserved_12_31 : 20; #endif } s; struct cvmx_usbcx_dctl_s cn30xx; struct cvmx_usbcx_dctl_s cn31xx; struct cvmx_usbcx_dctl_s cn50xx; struct cvmx_usbcx_dctl_s cn52xx; struct cvmx_usbcx_dctl_s cn52xxp1; struct cvmx_usbcx_dctl_s cn56xx; struct cvmx_usbcx_dctl_s cn56xxp1; } cvmx_usbcx_dctl_t; /** * cvmx_usbc#_diepctl# * * Device IN Endpoint-n Control Register (DIEPCTLn) * * The application uses this register to control the behaviour of each logical endpoint other than endpoint 0. */ typedef union { uint32_t u32; struct cvmx_usbcx_diepctlx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t epena : 1; /**< Endpoint Enable (EPEna) Indicates that data is ready to be transmitted on the endpoint. The core clears this bit before setting any of the following interrupts on this endpoint: * Endpoint Disabled * Transfer Completed */ uint32_t epdis : 1; /**< Endpoint Disable (EPDis) The application sets this bit to stop transmitting data on an endpoint, even before the transfer for that endpoint is complete. The application must wait for the Endpoint Disabled interrupt before treating the endpoint as disabled. The core clears this bit before setting the Endpoint Disabled Interrupt.
The application should set this bit only if Endpoint Enable is already set for this endpoint. */ uint32_t setd1pid : 1; /**< For Interrupt/BULK endpoints: Set DATA1 PID (SetD1PID) Writing to this field sets the Endpoint Data Pid (DPID) field in this register to DATA1. For Isochronous endpoints: Set Odd (micro)frame (SetOddFr) Writing to this field sets the Even/Odd (micro)frame (EO_FrNum) field to odd (micro)frame. */ uint32_t setd0pid : 1; /**< For Interrupt/BULK endpoints: Set DATA0 PID (SetD0PID) Writing to this field sets the Endpoint Data Pid (DPID) field in this register to DATA0. For Isochronous endpoints: Set Even (micro)frame (SetEvenFr) Writing to this field sets the Even/Odd (micro)frame (EO_FrNum) field to even (micro)frame. */ uint32_t snak : 1; /**< Set NAK (SNAK) A write to this bit sets the NAK bit for the endpoint. Using this bit, the application can control the transmission of NAK handshakes on an endpoint. The core can also set this bit for an endpoint after a SETUP packet is received on the endpoint. */ uint32_t cnak : 1; /**< Clear NAK (CNAK) A write to this bit clears the NAK bit for the endpoint. */ uint32_t txfnum : 4; /**< TxFIFO Number (TxFNum) Non-periodic endpoints must set this bit to zero. Periodic endpoints must map this to the corresponding Periodic TxFIFO number. * 4'h0: Non-Periodic TxFIFO * Others: Specified Periodic TxFIFO number */ uint32_t stall : 1; /**< STALL Handshake (Stall) For non-control, non-isochronous endpoints: The application sets this bit to stall all tokens from the USB host to this endpoint. If a NAK bit, Global Non-Periodic IN NAK, or Global OUT NAK is set along with this bit, the STALL bit takes priority. Only the application can clear this bit, never the core. For control endpoints: The application can only set this bit, and the core clears it, when a SETUP token is received for this endpoint. If a NAK bit, Global Non-Periodic IN NAK, or Global OUT NAK is set along with this bit, the STALL bit takes priority. Irrespective of this bit's setting, the core always responds to SETUP data packets with an ACK handshake. */ uint32_t reserved_20_20 : 1; uint32_t eptype : 2; /**< Endpoint Type (EPType) This is the transfer type supported by this logical endpoint. * 2'b00: Control * 2'b01: Isochronous * 2'b10: Bulk * 2'b11: Interrupt */ uint32_t naksts : 1; /**< NAK Status (NAKSts) Indicates the following: * 1'b0: The core is transmitting non-NAK handshakes based on the FIFO status * 1'b1: The core is transmitting NAK handshakes on this endpoint. When either the application or the core sets this bit: * For non-isochronous IN endpoints: The core stops transmitting any data on an IN endpoint, even if data is available in the TxFIFO. * For isochronous IN endpoints: The core sends out a zero- length data packet, even if data is available in the TxFIFO. Irrespective of this bit's setting, the core always responds to SETUP data packets with an ACK handshake. */ uint32_t dpid : 1; /**< For interrupt/bulk IN and OUT endpoints: Endpoint Data PID (DPID) Contains the PID of the packet to be received or transmitted on this endpoint. The application should program the PID of the first packet to be received or transmitted on this endpoint, after the endpoint is activated. Applications use the SetD1PID and SetD0PID fields of this register to program either DATA0 or DATA1 PID. * 1'b0: DATA0 * 1'b1: DATA1 For isochronous IN and OUT endpoints: Even/Odd (Micro)Frame (EO_FrNum) Indicates the (micro)frame number in which the core transmits/ receives isochronous data for this endpoint.
The application should program the even/odd (micro) frame number in which it intends to transmit/receive isochronous data for this endpoint using the SetEvenFr and SetOddFr fields in this register. * 1'b0: Even (micro)frame * 1'b1: Odd (micro)frame */ uint32_t usbactep : 1; /**< USB Active Endpoint (USBActEP) Indicates whether this endpoint is active in the current configuration and interface. The core clears this bit for all endpoints (other than EP 0) after detecting a USB reset. After receiving the SetConfiguration and SetInterface commands, the application must program endpoint registers accordingly and set this bit. */ uint32_t nextep : 4; /**< Next Endpoint (NextEp) Applies to non-periodic IN endpoints only. Indicates the endpoint number to be fetched after the data for the current endpoint is fetched. The core can access this field, even when the Endpoint Enable (EPEna) bit is not set. This field is not valid in Slave mode. */ uint32_t mps : 11; /**< Maximum Packet Size (MPS) Applies to IN and OUT endpoints. The application must program this field with the maximum packet size for the current logical endpoint. This value is in bytes. */ #else uint32_t mps : 11; uint32_t nextep : 4; uint32_t usbactep : 1; uint32_t dpid : 1; uint32_t naksts : 1; uint32_t eptype : 2; uint32_t reserved_20_20 : 1; uint32_t stall : 1; uint32_t txfnum : 4; uint32_t cnak : 1; uint32_t snak : 1; uint32_t setd0pid : 1; uint32_t setd1pid : 1; uint32_t epdis : 1; uint32_t epena : 1; #endif } s; struct cvmx_usbcx_diepctlx_s cn30xx; struct cvmx_usbcx_diepctlx_s cn31xx; struct cvmx_usbcx_diepctlx_s cn50xx; struct cvmx_usbcx_diepctlx_s cn52xx; struct cvmx_usbcx_diepctlx_s cn52xxp1; struct cvmx_usbcx_diepctlx_s cn56xx; struct cvmx_usbcx_diepctlx_s cn56xxp1; } cvmx_usbcx_diepctlx_t; /** * cvmx_usbc#_diepint# * * Device Endpoint-n Interrupt Register (DIEPINTn) * * This register indicates the status of an endpoint with respect to * USB- and AHB-related events. The application must read this register * when the OUT Endpoints Interrupt bit or IN Endpoints Interrupt bit of * the Core Interrupt register (GINTSTS.OEPInt or GINTSTS.IEPInt, * respectively) is set. Before the application can read this register, * it must first read the Device All Endpoints Interrupt (DAINT) register * to get the exact endpoint number for the Device Endpoint-n Interrupt * register. The application must clear the appropriate bit in this register * to clear the corresponding bits in the DAINT and GINTSTS registers. */ typedef union { uint32_t u32; struct cvmx_usbcx_diepintx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_7_31 : 25; uint32_t inepnakeff : 1; /**< IN Endpoint NAK Effective (INEPNakEff) Applies to periodic IN endpoints only. Indicates that the IN endpoint NAK bit set by the application has taken effect in the core. This bit can be cleared when the application clears the IN endpoint NAK by writing to DIEPCTLn.CNAK. This interrupt indicates that the core has sampled the NAK bit set (either by the application or by the core). This interrupt does not necessarily mean that a NAK handshake is sent on the USB. A STALL bit takes priority over a NAK bit. */ uint32_t intknepmis : 1; /**< IN Token Received with EP Mismatch (INTknEPMis) Applies to non-periodic IN endpoints only. Indicates that the data in the top of the non-periodic TxFIFO belongs to an endpoint other than the one for which the IN token was received. This interrupt is asserted on the endpoint for which the IN token was received.
*/ uint32_t intkntxfemp : 1; /**< IN Token Received When TxFIFO is Empty (INTknTXFEmp) Applies only to non-periodic IN endpoints. Indicates that an IN token was received when the associated TxFIFO (periodic/non-periodic) was empty. This interrupt is asserted on the endpoint for which the IN token was received. */ uint32_t timeout : 1; /**< Timeout Condition (TimeOUT) Applies to non-isochronous IN endpoints only. Indicates that the core has detected a timeout condition on the USB for the last IN token on this endpoint. */ uint32_t ahberr : 1; /**< AHB Error (AHBErr) This is generated only in Internal DMA mode when there is an AHB error during an AHB read/write. The application can read the corresponding endpoint DMA address register to get the error address. */ uint32_t epdisbld : 1; /**< Endpoint Disabled Interrupt (EPDisbld) This bit indicates that the endpoint is disabled per the application's request. */ uint32_t xfercompl : 1; /**< Transfer Completed Interrupt (XferCompl) Indicates that the programmed transfer is complete on the AHB as well as on the USB, for this endpoint. */ #else uint32_t xfercompl : 1; uint32_t epdisbld : 1; uint32_t ahberr : 1; uint32_t timeout : 1; uint32_t intkntxfemp : 1; uint32_t intknepmis : 1; uint32_t inepnakeff : 1; uint32_t reserved_7_31 : 25; #endif } s; struct cvmx_usbcx_diepintx_s cn30xx; struct cvmx_usbcx_diepintx_s cn31xx; struct cvmx_usbcx_diepintx_s cn50xx; struct cvmx_usbcx_diepintx_s cn52xx; struct cvmx_usbcx_diepintx_s cn52xxp1; struct cvmx_usbcx_diepintx_s cn56xx; struct cvmx_usbcx_diepintx_s cn56xxp1; } cvmx_usbcx_diepintx_t; /** * cvmx_usbc#_diepmsk * * Device IN Endpoint Common Interrupt Mask Register (DIEPMSK) * * This register works with each of the Device IN Endpoint Interrupt (DIEPINTn) registers * for all endpoints to generate an interrupt per IN endpoint. The IN endpoint interrupt * for a specific status in the DIEPINTn register can be masked by writing to the corresponding * bit in this register. Status bits are masked by default. * Mask interrupt: 1'b0 Unmask interrupt: 1'b1 */ typedef union { uint32_t u32; struct cvmx_usbcx_diepmsk_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_7_31 : 25; uint32_t inepnakeffmsk : 1; /**< IN Endpoint NAK Effective Mask (INEPNakEffMsk) */ uint32_t intknepmismsk : 1; /**< IN Token received with EP Mismatch Mask (INTknEPMisMsk) */ uint32_t intkntxfempmsk : 1; /**< IN Token Received When TxFIFO Empty Mask (INTknTXFEmpMsk) */ uint32_t timeoutmsk : 1; /**< Timeout Condition Mask (TimeOUTMsk) (Non-isochronous endpoints) */ uint32_t ahberrmsk : 1; /**< AHB Error Mask (AHBErrMsk) */ uint32_t epdisbldmsk : 1; /**< Endpoint Disabled Interrupt Mask (EPDisbldMsk) */ uint32_t xfercomplmsk : 1; /**< Transfer Completed Interrupt Mask (XferComplMsk) */ #else uint32_t xfercomplmsk : 1; uint32_t epdisbldmsk : 1; uint32_t ahberrmsk : 1; uint32_t timeoutmsk : 1; uint32_t intkntxfempmsk : 1; uint32_t intknepmismsk : 1; uint32_t inepnakeffmsk : 1; uint32_t reserved_7_31 : 25; #endif } s; struct cvmx_usbcx_diepmsk_s cn30xx; struct cvmx_usbcx_diepmsk_s cn31xx; struct cvmx_usbcx_diepmsk_s cn50xx; struct cvmx_usbcx_diepmsk_s cn52xx; struct cvmx_usbcx_diepmsk_s cn52xxp1; struct cvmx_usbcx_diepmsk_s cn56xx; struct cvmx_usbcx_diepmsk_s cn56xxp1; } cvmx_usbcx_diepmsk_t; /** * cvmx_usbc#_dieptsiz# * * Device Endpoint-n Transfer Size Register (DIEPTSIZn) * * The application must modify this register before enabling the endpoint. 
* Once the endpoint is enabled using the Endpoint Enable bit of the Device Endpoint-n Control registers (DIEPCTLn.EPEna/DOEPCTLn.EPEna), * the core modifies this register. The application can only read this register once the core has cleared the Endpoint Enable bit. * This register is used only for endpoints other than Endpoint 0. */ typedef union { uint32_t u32; struct cvmx_usbcx_dieptsizx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_31_31 : 1; uint32_t mc : 2; /**< Multi Count (MC) Applies to IN endpoints only. For periodic IN endpoints, this field indicates the number of packets that must be transmitted per microframe on the USB. The core uses this field to calculate the data PID for isochronous IN endpoints. * 2'b01: 1 packet * 2'b10: 2 packets * 2'b11: 3 packets For non-periodic IN endpoints, this field is valid only in Internal DMA mode. It specifies the number of packets the core should fetch for an IN endpoint before it switches to the endpoint pointed to by the Next Endpoint field of the Device Endpoint-n Control register (DIEPCTLn.NextEp) */ uint32_t pktcnt : 10; /**< Packet Count (PktCnt) Indicates the total number of USB packets that constitute the Transfer Size amount of data for this endpoint. IN Endpoints: This field is decremented every time a packet (maximum size or short packet) is read from the TxFIFO. */ uint32_t xfersize : 19; /**< Transfer Size (XferSize) This field contains the transfer size in bytes for the current endpoint. The core only interrupts the application after it has exhausted the transfer size amount of data. The transfer size can be set to the maximum packet size of the endpoint, to be interrupted at the end of each packet. IN Endpoints: The core decrements this field every time a packet from the external memory is written to the TxFIFO. */ #else uint32_t xfersize : 19; uint32_t pktcnt : 10; uint32_t mc : 2; uint32_t reserved_31_31 : 1; #endif } s; struct cvmx_usbcx_dieptsizx_s cn30xx; struct cvmx_usbcx_dieptsizx_s cn31xx; struct cvmx_usbcx_dieptsizx_s cn50xx; struct cvmx_usbcx_dieptsizx_s cn52xx; struct cvmx_usbcx_dieptsizx_s cn52xxp1; struct cvmx_usbcx_dieptsizx_s cn56xx; struct cvmx_usbcx_dieptsizx_s cn56xxp1; } cvmx_usbcx_dieptsizx_t; /** * cvmx_usbc#_doepctl# * * Device OUT Endpoint-n Control Register (DOEPCTLn) * * The application uses this register to control the behaviour of each logical endpoint other than endpoint 0. */ typedef union { uint32_t u32; struct cvmx_usbcx_doepctlx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t epena : 1; /**< Endpoint Enable (EPEna) Indicates that the application has allocated the memory to start receiving data from the USB. The core clears this bit before setting any of the following interrupts on this endpoint: * SETUP Phase Done * Endpoint Disabled * Transfer Completed For control OUT endpoints in DMA mode, this bit must be set to be able to transfer SETUP data packets in memory. */ uint32_t epdis : 1; /**< Endpoint Disable (EPDis) The application sets this bit to stop transmitting data on an endpoint, even before the transfer for that endpoint is complete. The application must wait for the Endpoint Disabled interrupt before treating the endpoint as disabled. The core clears this bit before setting the Endpoint Disabled Interrupt. The application should set this bit only if Endpoint Enable is already set for this endpoint. */ uint32_t setd1pid : 1; /**< For Interrupt/BULK endpoints: Set DATA1 PID (SetD1PID) Writing to this field sets the Endpoint Data Pid (DPID) field in this register to DATA1.
For Isochronous endpoints: Set Odd (micro)frame (SetOddFr) Writing to this field sets the Even/Odd (micro)frame (EO_FrNum) field to odd (micro)frame. */ uint32_t setd0pid : 1; /**< For Interrupt/BULK enpoints: Writing to this field sets the Endpoint Data Pid (DPID) field in this register to DATA0. For Isochronous endpoints: Set Odd (micro)frame (SetEvenFr) Writing to this field sets the Even/Odd (micro)frame (EO_FrNum) field to even (micro)frame. */ uint32_t snak : 1; /**< Set NAK (SNAK) A write to this bit sets the NAK bit for the endpoint. Using this bit, the application can control the transmission of NAK handshakes on an endpoint. The core can also set this bit for an endpoint after a SETUP packet is received on the endpoint. */ uint32_t cnak : 1; /**< Clear NAK (CNAK) A write to this bit clears the NAK bit for the endpoint. */ uint32_t reserved_22_25 : 4; uint32_t stall : 1; /**< STALL Handshake (Stall) For non-control, non-isochronous endpoints: The application sets this bit to stall all tokens from the USB host to this endpoint. If a NAK bit, Global Non-Periodic IN NAK, or Global OUT NAK is set along with this bit, the STALL bit takes priority. Only the application can clear this bit, never the core. For control endpoints: The application can only set this bit, and the core clears it, when a SETUP token i received for this endpoint. If a NAK bit, Global Non-Periodic IN NAK, or Global OUT NAK is set along with this bit, the STALL bit takes priority. Irrespective of this bit's setting, the core always responds to SETUP data packets with an ACK handshake. */ uint32_t snp : 1; /**< Snoop Mode (Snp) This bit configures the endpoint to Snoop mode. In Snoop mode, the core does not check the correctness of OUT packets before transferring them to application memory. */ uint32_t eptype : 2; /**< Endpoint Type (EPType) This is the transfer type supported by this logical endpoint. * 2'b00: Control * 2'b01: Isochronous * 2'b10: Bulk * 2'b11: Interrupt */ uint32_t naksts : 1; /**< NAK Status (NAKSts) Indicates the following: * 1'b0: The core is transmitting non-NAK handshakes based on the FIFO status * 1'b1: The core is transmitting NAK handshakes on this endpoint. When either the application or the core sets this bit: * The core stops receiving any data on an OUT endpoint, even if there is space in the RxFIFO to accomodate the incoming packet. */ uint32_t dpid : 1; /**< For interrupt/bulk IN and OUT endpoints: Endpoint Data PID (DPID) Contains the PID of the packet to be received or transmitted on this endpoint. The application should program the PID of the first packet to be received or transmitted on this endpoint, after the endpoint is activated. Applications use the SetD1PID and SetD0PID fields of this register to program either DATA0 or DATA1 PID. * 1'b0: DATA0 * 1'b1: DATA1 For isochronous IN and OUT endpoints: Even/Odd (Micro)Frame (EO_FrNum) Indicates the (micro)frame number in which the core transmits/ receives isochronous data for this endpoint. The application should program the even/odd (micro) frame number in which it intends to transmit/receive isochronous data for this endpoint using the SetEvnFr and SetOddFr fields in this register. * 1'b0: Even (micro)frame * 1'b1: Odd (micro)frame */ uint32_t usbactep : 1; /**< USB Active Endpoint (USBActEP) Indicates whether this endpoint is active in the current configuration and interface. The core clears this bit for all endpoints (other than EP 0) after detecting a USB reset. 
After receiving the SetConfiguration and SetInterface commands, the application must program endpoint registers accordingly and set this bit. */ uint32_t reserved_11_14 : 4; uint32_t mps : 11; /**< Maximum Packet Size (MPS) Applies to IN and OUT endpoints. The application must program this field with the maximum packet size for the current logical endpoint. This value is in bytes. */ #else uint32_t mps : 11; uint32_t reserved_11_14 : 4; uint32_t usbactep : 1; uint32_t dpid : 1; uint32_t naksts : 1; uint32_t eptype : 2; uint32_t snp : 1; uint32_t stall : 1; uint32_t reserved_22_25 : 4; uint32_t cnak : 1; uint32_t snak : 1; uint32_t setd0pid : 1; uint32_t setd1pid : 1; uint32_t epdis : 1; uint32_t epena : 1; #endif } s; struct cvmx_usbcx_doepctlx_s cn30xx; struct cvmx_usbcx_doepctlx_s cn31xx; struct cvmx_usbcx_doepctlx_s cn50xx; struct cvmx_usbcx_doepctlx_s cn52xx; struct cvmx_usbcx_doepctlx_s cn52xxp1; struct cvmx_usbcx_doepctlx_s cn56xx; struct cvmx_usbcx_doepctlx_s cn56xxp1; } cvmx_usbcx_doepctlx_t; /** * cvmx_usbc#_doepint# * * Device Endpoint-n Interrupt Register (DOEPINTn) * * This register indicates the status of an endpoint with respect to USB- and AHB-related events. * The application must read this register when the OUT Endpoints Interrupt bit or IN Endpoints * Interrupt bit of the Core Interrupt register (GINTSTS.OEPInt or GINTSTS.IEPInt, respectively) * is set. Before the application can read this register, it must first read the Device All * Endpoints Interrupt (DAINT) register to get the exact endpoint number for the Device Endpoint-n * Interrupt register. The application must clear the appropriate bit in this register to clear the * corresponding bits in the DAINT and GINTSTS registers. */ typedef union { uint32_t u32; struct cvmx_usbcx_doepintx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_5_31 : 27; uint32_t outtknepdis : 1; /**< OUT Token Received When Endpoint Disabled (OUTTknEPdis) Applies only to control OUT endpoints. Indicates that an OUT token was received when the endpoint was not yet enabled. This interrupt is asserted on the endpoint for which the OUT token was received. */ uint32_t setup : 1; /**< SETUP Phase Done (SetUp) Applies to control OUT endpoints only. Indicates that the SETUP phase for the control endpoint is complete and no more back-to-back SETUP packets were received for the current control transfer. On this interrupt, the application can decode the received SETUP data packet. */ uint32_t ahberr : 1; /**< AHB Error (AHBErr) This is generated only in Internal DMA mode when there is an AHB error during an AHB read/write. The application can read the corresponding endpoint DMA address register to get the error address. */ uint32_t epdisbld : 1; /**< Endpoint Disabled Interrupt (EPDisbld) This bit indicates that the endpoint is disabled per the application's request. */ uint32_t xfercompl : 1; /**< Transfer Completed Interrupt (XferCompl) Indicates that the programmed transfer is complete on the AHB as well as on the USB, for this endpoint. 
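      A hedged servicing sketch (the read-DAINT-then-DOEPINTn flow from the
      register overview above; the CSR read/write accessors are assumed and
      are not defined in this file):
        cvmx_usbcx_doepintx_t st;
        st.u32 = 0;          // read USBC_DOEPINTn for the endpoint flagged in DAINT
        if (st.s.setup) {
            // decode the received SETUP packet, then write 1 to clear;
            // this also clears the corresponding DAINT/GINTSTS bits
            cvmx_usbcx_doepintx_t clr;
            clr.u32 = 0;
            clr.s.setup = 1;
            // write clr.u32 back to USBC_DOEPINTn
        }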
*/
#else
uint32_t xfercompl : 1;
uint32_t epdisbld : 1;
uint32_t ahberr : 1;
uint32_t setup : 1;
uint32_t outtknepdis : 1;
uint32_t reserved_5_31 : 27;
#endif
} s;
struct cvmx_usbcx_doepintx_s cn30xx;
struct cvmx_usbcx_doepintx_s cn31xx;
struct cvmx_usbcx_doepintx_s cn50xx;
struct cvmx_usbcx_doepintx_s cn52xx;
struct cvmx_usbcx_doepintx_s cn52xxp1;
struct cvmx_usbcx_doepintx_s cn56xx;
struct cvmx_usbcx_doepintx_s cn56xxp1;
} cvmx_usbcx_doepintx_t;

/**
 * cvmx_usbc#_doepmsk
 *
 * Device OUT Endpoint Common Interrupt Mask Register (DOEPMSK)
 *
 * This register works with each of the Device OUT Endpoint Interrupt (DOEPINTn) registers
 * for all endpoints to generate an interrupt per OUT endpoint. The OUT endpoint interrupt
 * for a specific status in the DOEPINTn register can be masked by writing to the
 * corresponding bit in this register. Status bits are masked by default.
 * Mask interrupt: 1'b0 Unmask interrupt: 1'b1
 */
typedef union { uint32_t u32;
struct cvmx_usbcx_doepmsk_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t reserved_5_31 : 27;
uint32_t outtknepdismsk : 1; /**< OUT Token Received when Endpoint Disabled Mask (OUTTknEPdisMsk) Applies to control OUT endpoints only. */
uint32_t setupmsk : 1; /**< SETUP Phase Done Mask (SetUPMsk) Applies to control endpoints only. */
uint32_t ahberrmsk : 1; /**< AHB Error (AHBErrMsk) */
uint32_t epdisbldmsk : 1; /**< Endpoint Disabled Interrupt Mask (EPDisbldMsk) */
uint32_t xfercomplmsk : 1; /**< Transfer Completed Interrupt Mask (XferComplMsk) */
#else
uint32_t xfercomplmsk : 1;
uint32_t epdisbldmsk : 1;
uint32_t ahberrmsk : 1;
uint32_t setupmsk : 1;
uint32_t outtknepdismsk : 1;
uint32_t reserved_5_31 : 27;
#endif
} s;
struct cvmx_usbcx_doepmsk_s cn30xx;
struct cvmx_usbcx_doepmsk_s cn31xx;
struct cvmx_usbcx_doepmsk_s cn50xx;
struct cvmx_usbcx_doepmsk_s cn52xx;
struct cvmx_usbcx_doepmsk_s cn52xxp1;
struct cvmx_usbcx_doepmsk_s cn56xx;
struct cvmx_usbcx_doepmsk_s cn56xxp1;
} cvmx_usbcx_doepmsk_t;

/**
 * cvmx_usbc#_doeptsiz#
 *
 * Device Endpoint-n Transfer Size Register (DOEPTSIZn)
 *
 * The application must modify this register before enabling the endpoint.
 * Once the endpoint is enabled using the Endpoint Enable bit of the Device Endpoint-n Control
 * registers (DOEPCTLn.EPEna), the core modifies this register. The application
 * can only read this register once the core has cleared the Endpoint Enable bit.
 * This register is used only for endpoints other than Endpoint 0.
 */
typedef union { uint32_t u32;
struct cvmx_usbcx_doeptsizx_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t reserved_31_31 : 1;
uint32_t mc : 2; /**< Multi Count (MC) Received Data PID (RxDPID) Applies to isochronous OUT endpoints only. This is the data PID received in the last packet for this endpoint. 2'b00: DATA0 2'b01: DATA1 2'b10: DATA2 2'b11: MDATA SETUP Packet Count (SUPCnt) Applies to control OUT Endpoints only. This field specifies the number of back-to-back SETUP data packets the endpoint can receive. 2'b01: 1 packet 2'b10: 2 packets 2'b11: 3 packets */
uint32_t pktcnt : 10; /**< Packet Count (PktCnt) Indicates the total number of USB packets that constitute the Transfer Size amount of data for this endpoint. OUT Endpoints: This field is decremented every time a packet (maximum size or short packet) is written to the RxFIFO. */
uint32_t xfersize : 19; /**< Transfer Size (XferSize) This field contains the transfer size in bytes for the current endpoint. The core only interrupts the application after it has exhausted the transfer size amount of data. The transfer size can be set to the maximum packet size of the endpoint, to be interrupted at the end of each packet. OUT Endpoints: The core decrements this field every time a packet is read from the RxFIFO and written to the external memory. */
#else
uint32_t xfersize : 19;
uint32_t pktcnt : 10;
uint32_t mc : 2;
uint32_t reserved_31_31 : 1;
#endif
} s;
struct cvmx_usbcx_doeptsizx_s cn30xx;
struct cvmx_usbcx_doeptsizx_s cn31xx;
struct cvmx_usbcx_doeptsizx_s cn50xx;
struct cvmx_usbcx_doeptsizx_s cn52xx;
struct cvmx_usbcx_doeptsizx_s cn52xxp1;
struct cvmx_usbcx_doeptsizx_s cn56xx;
struct cvmx_usbcx_doeptsizx_s cn56xxp1;
} cvmx_usbcx_doeptsizx_t;

/**
 * cvmx_usbc#_dptxfsiz#
 *
 * Device Periodic Transmit FIFO-n Size Register (DPTXFSIZ)
 *
 * This register holds the memory start address of each periodic TxFIFO implemented
 * in Device mode. Each periodic FIFO holds the data for one periodic IN endpoint.
 * This register is repeated for each periodic FIFO instantiated.
 */
typedef union { uint32_t u32;
struct cvmx_usbcx_dptxfsizx_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t dptxfsize : 16; /**< Device Periodic TxFIFO Size (DPTxFSize) This value is in terms of 32-bit words. * Minimum value is 4 * Maximum value is 768 */
uint32_t dptxfstaddr : 16; /**< Device Periodic TxFIFO RAM Start Address (DPTxFStAddr) Holds the start address in the RAM for this periodic FIFO. */
#else
uint32_t dptxfstaddr : 16;
uint32_t dptxfsize : 16;
#endif
} s;
struct cvmx_usbcx_dptxfsizx_s cn30xx;
struct cvmx_usbcx_dptxfsizx_s cn31xx;
struct cvmx_usbcx_dptxfsizx_s cn50xx;
struct cvmx_usbcx_dptxfsizx_s cn52xx;
struct cvmx_usbcx_dptxfsizx_s cn52xxp1;
struct cvmx_usbcx_dptxfsizx_s cn56xx;
struct cvmx_usbcx_dptxfsizx_s cn56xxp1;
} cvmx_usbcx_dptxfsizx_t;

/**
 * cvmx_usbc#_dsts
 *
 * Device Status Register (DSTS)
 *
 * This register indicates the status of the core with respect to USB-related events.
 * It must be read on interrupts from the Device All Interrupts (DAINT) register.
 */
typedef union { uint32_t u32;
struct cvmx_usbcx_dsts_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t reserved_22_31 : 10;
uint32_t soffn : 14; /**< Frame or Microframe Number of the Received SOF (SOFFN) When the core is operating at high speed, this field contains a microframe number. When the core is operating at full or low speed, this field contains a frame number. */
uint32_t reserved_4_7 : 4;
uint32_t errticerr : 1; /**< Erratic Error (ErrticErr) The core sets this bit to report any erratic errors (phy_rxvalid_i/phy_rxvldh_i or phy_rxactive_i is asserted for at least 2 ms, due to PHY error) seen on the UTMI+. Due to erratic errors, the O2P USB core goes into Suspended state and an interrupt is generated to the application with the Early Suspend bit of the Core Interrupt register (GINTSTS.ErlySusp). If the early suspend is asserted due to an erratic error, the application can only perform a soft disconnect recovery. */
uint32_t enumspd : 2; /**< Enumerated Speed (EnumSpd) Indicates the speed at which the O2P USB core has come up after speed detection through a chirp sequence. * 2'b00: High speed (PHY clock is running at 30 or 60 MHz) * 2'b01: Full speed (PHY clock is running at 30 or 60 MHz) * 2'b10: Low speed (PHY clock is running at 6 MHz) * 2'b11: Full speed (PHY clock is running at 48 MHz) Low speed is not supported for devices using a UTMI+ PHY. */
uint32_t suspsts : 1; /**< Suspend Status (SuspSts) In Device mode, this bit is set as long as a Suspend condition is detected on the USB.
The core enters the Suspended state when there is no activity on the phy_line_state_i signal for an extended period of time. The core comes out of suspend: * When there is any activity on the phy_line_state_i signal * When the application writes to the Remote Wakeup Signaling bit in the Device Control register (DCTL.RmtWkUpSig). */
#else
uint32_t suspsts : 1;
uint32_t enumspd : 2;
uint32_t errticerr : 1;
uint32_t reserved_4_7 : 4;
uint32_t soffn : 14;
uint32_t reserved_22_31 : 10;
#endif
} s;
struct cvmx_usbcx_dsts_s cn30xx;
struct cvmx_usbcx_dsts_s cn31xx;
struct cvmx_usbcx_dsts_s cn50xx;
struct cvmx_usbcx_dsts_s cn52xx;
struct cvmx_usbcx_dsts_s cn52xxp1;
struct cvmx_usbcx_dsts_s cn56xx;
struct cvmx_usbcx_dsts_s cn56xxp1;
} cvmx_usbcx_dsts_t;

/**
 * cvmx_usbc#_dtknqr1
 *
 * Device IN Token Sequence Learning Queue Read Register 1 (DTKNQR1)
 *
 * The depth of the IN Token Sequence Learning Queue is specified by the Device Mode IN Token
 * Sequence Learning Queue Depth parameter (GHWCFG2.TknQDepth). The queue is 4 bits wide
 * to store the endpoint number. A read from this register returns the first 6 endpoint
 * entries of the IN Token Sequence Learning Queue. When the queue is full, the new token is
 * pushed into the queue and the oldest token is discarded.
 */
typedef union { uint32_t u32;
struct cvmx_usbcx_dtknqr1_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t eptkn : 24; /**< Endpoint Token (EPTkn) Four bits per token represent the endpoint number of the token: * Bits [31:28]: Endpoint number of Token 5 * Bits [27:24]: Endpoint number of Token 4 - ....... * Bits [15:12]: Endpoint number of Token 1 * Bits [11:8]: Endpoint number of Token 0 */
uint32_t wrapbit : 1; /**< Wrap Bit (WrapBit) This bit is set when the write pointer wraps. It is cleared when the learning queue is cleared. */
uint32_t reserved_5_6 : 2;
uint32_t intknwptr : 5; /**< IN Token Queue Write Pointer (INTknWPtr) */
#else
uint32_t intknwptr : 5;
uint32_t reserved_5_6 : 2;
uint32_t wrapbit : 1;
uint32_t eptkn : 24;
#endif
} s;
struct cvmx_usbcx_dtknqr1_s cn30xx;
struct cvmx_usbcx_dtknqr1_s cn31xx;
struct cvmx_usbcx_dtknqr1_s cn50xx;
struct cvmx_usbcx_dtknqr1_s cn52xx;
struct cvmx_usbcx_dtknqr1_s cn52xxp1;
struct cvmx_usbcx_dtknqr1_s cn56xx;
struct cvmx_usbcx_dtknqr1_s cn56xxp1;
} cvmx_usbcx_dtknqr1_t;

/**
 * cvmx_usbc#_dtknqr2
 *
 * Device IN Token Sequence Learning Queue Read Register 2 (DTKNQR2)
 *
 * A read from this register returns the next 8 endpoint entries of the learning queue.
 */
typedef union { uint32_t u32;
struct cvmx_usbcx_dtknqr2_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t eptkn : 32; /**< Endpoint Token (EPTkn) Four bits per token represent the endpoint number of the token: * Bits [31:28]: Endpoint number of Token 13 * Bits [27:24]: Endpoint number of Token 12 - ....... * Bits [7:4]: Endpoint number of Token 7 * Bits [3:0]: Endpoint number of Token 6 */
#else
uint32_t eptkn : 32;
#endif
} s;
struct cvmx_usbcx_dtknqr2_s cn30xx;
struct cvmx_usbcx_dtknqr2_s cn31xx;
struct cvmx_usbcx_dtknqr2_s cn50xx;
struct cvmx_usbcx_dtknqr2_s cn52xx;
struct cvmx_usbcx_dtknqr2_s cn52xxp1;
struct cvmx_usbcx_dtknqr2_s cn56xx;
struct cvmx_usbcx_dtknqr2_s cn56xxp1;
} cvmx_usbcx_dtknqr2_t;

/**
 * cvmx_usbc#_dtknqr3
 *
 * Device IN Token Sequence Learning Queue Read Register 3 (DTKNQR3)
 *
 * A read from this register returns the next 8 endpoint entries of the learning queue.
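 *
 * A hedged decoding sketch for the EPTkn fields of DTKNQR1..DTKNQR4 (4 bits
 * per token, as laid out in the field descriptions; the helper name is
 * illustrative only and is not part of this file):
 *
 *   static inline int usbcx_eptkn_entry(uint32_t eptkn, int idx)
 *   {
 *       // idx selects one 4-bit endpoint number within the register's EPTkn field
 *       return (eptkn >> (idx * 4)) & 0xF;
 *   }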
 */
typedef union { uint32_t u32;
struct cvmx_usbcx_dtknqr3_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t eptkn : 32; /**< Endpoint Token (EPTkn) Four bits per token represent the endpoint number of the token: * Bits [31:28]: Endpoint number of Token 21 * Bits [27:24]: Endpoint number of Token 20 - ....... * Bits [7:4]: Endpoint number of Token 15 * Bits [3:0]: Endpoint number of Token 14 */
#else
uint32_t eptkn : 32;
#endif
} s;
struct cvmx_usbcx_dtknqr3_s cn30xx;
struct cvmx_usbcx_dtknqr3_s cn31xx;
struct cvmx_usbcx_dtknqr3_s cn50xx;
struct cvmx_usbcx_dtknqr3_s cn52xx;
struct cvmx_usbcx_dtknqr3_s cn52xxp1;
struct cvmx_usbcx_dtknqr3_s cn56xx;
struct cvmx_usbcx_dtknqr3_s cn56xxp1;
} cvmx_usbcx_dtknqr3_t;

/**
 * cvmx_usbc#_dtknqr4
 *
 * Device IN Token Sequence Learning Queue Read Register 4 (DTKNQR4)
 *
 * A read from this register returns the last 8 endpoint entries of the learning queue.
 */
typedef union { uint32_t u32;
struct cvmx_usbcx_dtknqr4_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t eptkn : 32; /**< Endpoint Token (EPTkn) Four bits per token represent the endpoint number of the token: * Bits [31:28]: Endpoint number of Token 29 * Bits [27:24]: Endpoint number of Token 28 - ....... * Bits [7:4]: Endpoint number of Token 23 * Bits [3:0]: Endpoint number of Token 22 */
#else
uint32_t eptkn : 32;
#endif
} s;
struct cvmx_usbcx_dtknqr4_s cn30xx;
struct cvmx_usbcx_dtknqr4_s cn31xx;
struct cvmx_usbcx_dtknqr4_s cn50xx;
struct cvmx_usbcx_dtknqr4_s cn52xx;
struct cvmx_usbcx_dtknqr4_s cn52xxp1;
struct cvmx_usbcx_dtknqr4_s cn56xx;
struct cvmx_usbcx_dtknqr4_s cn56xxp1;
} cvmx_usbcx_dtknqr4_t;

/**
 * cvmx_usbc#_gahbcfg
 *
 * Core AHB Configuration Register (GAHBCFG)
 *
 * This register can be used to configure the core after power-on or a change in mode of operation.
 * This register mainly contains AHB system-related configuration parameters. The AHB is the processor
 * interface to the O2P USB core. In general, software need not know about this interface except to
 * program the values as specified.
 *
 * The application must program this register as part of the O2P USB core initialization.
 * Do not change this register after the initial programming.
 */
typedef union { uint32_t u32;
struct cvmx_usbcx_gahbcfg_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t reserved_9_31 : 23;
uint32_t ptxfemplvl : 1; /**< Periodic TxFIFO Empty Level (PTxFEmpLvl) Software should set this bit to 0x1. Indicates when the Periodic TxFIFO Empty Interrupt bit in the Core Interrupt register (GINTSTS.PTxFEmp) is triggered. This bit is used only in Slave mode. * 1'b0: GINTSTS.PTxFEmp interrupt indicates that the Periodic TxFIFO is half empty * 1'b1: GINTSTS.PTxFEmp interrupt indicates that the Periodic TxFIFO is completely empty */
uint32_t nptxfemplvl : 1; /**< Non-Periodic TxFIFO Empty Level (NPTxFEmpLvl) Software should set this bit to 0x1. Indicates when the Non-Periodic TxFIFO Empty Interrupt bit in the Core Interrupt register (GINTSTS.NPTxFEmp) is triggered. This bit is used only in Slave mode. * 1'b0: GINTSTS.NPTxFEmp interrupt indicates that the Non- Periodic TxFIFO is half empty * 1'b1: GINTSTS.NPTxFEmp interrupt indicates that the Non- Periodic TxFIFO is completely empty */
uint32_t reserved_6_6 : 1;
uint32_t dmaen : 1; /**< DMA Enable (DMAEn) * 1'b0: Core operates in Slave mode * 1'b1: Core operates in a DMA mode */
uint32_t hbstlen : 4; /**< Burst Length/Type (HBstLen) This field has no effect and should be left as 0x0.
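      A minimal whole-register initialization sketch (hedged; the values follow
      the recommendations in this register's field descriptions, and the CSR
      write accessor is assumed rather than defined here):
        cvmx_usbcx_gahbcfg_t cfg;
        cfg.u32 = 0;              // leaves hbstlen at the required 0x0
        cfg.s.dmaen       = 1;    // internal DMA mode
        cfg.s.nptxfemplvl = 1;    // "completely empty" level, per NPTxFEmpLvl
        cfg.s.ptxfemplvl  = 1;    // "completely empty" level, per PTxFEmpLvl
        cfg.s.glblintrmsk = 1;    // unmask the interrupt line to the application
        // write cfg.u32 to USBC_GAHBCFG once, during core initialization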
*/ uint32_t glblintrmsk : 1; /**< Global Interrupt Mask (GlblIntrMsk) Software should set this field to 0x1. The application uses this bit to mask or unmask the interrupt line assertion to itself. Irrespective of this bit's setting, the interrupt status registers are updated by the core. * 1'b0: Mask the interrupt assertion to the application. * 1'b1: Unmask the interrupt assertion to the application. */ #else uint32_t glblintrmsk : 1; uint32_t hbstlen : 4; uint32_t dmaen : 1; uint32_t reserved_6_6 : 1; uint32_t nptxfemplvl : 1; uint32_t ptxfemplvl : 1; uint32_t reserved_9_31 : 23; #endif } s; struct cvmx_usbcx_gahbcfg_s cn30xx; struct cvmx_usbcx_gahbcfg_s cn31xx; struct cvmx_usbcx_gahbcfg_s cn50xx; struct cvmx_usbcx_gahbcfg_s cn52xx; struct cvmx_usbcx_gahbcfg_s cn52xxp1; struct cvmx_usbcx_gahbcfg_s cn56xx; struct cvmx_usbcx_gahbcfg_s cn56xxp1; } cvmx_usbcx_gahbcfg_t; /** * cvmx_usbc#_ghwcfg1 * * User HW Config1 Register (GHWCFG1) * * This register contains the logical endpoint direction(s) of the O2P USB core. */ typedef union { uint32_t u32; struct cvmx_usbcx_ghwcfg1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t epdir : 32; /**< Endpoint Direction (epdir) Two bits per endpoint represent the direction. * 2'b00: BIDIR (IN and OUT) endpoint * 2'b01: IN endpoint * 2'b10: OUT endpoint * 2'b11: Reserved Bits [31:30]: Endpoint 15 direction Bits [29:28]: Endpoint 14 direction - ... Bits [3:2]: Endpoint 1 direction Bits[1:0]: Endpoint 0 direction (always BIDIR) */ #else uint32_t epdir : 32; #endif } s; struct cvmx_usbcx_ghwcfg1_s cn30xx; struct cvmx_usbcx_ghwcfg1_s cn31xx; struct cvmx_usbcx_ghwcfg1_s cn50xx; struct cvmx_usbcx_ghwcfg1_s cn52xx; struct cvmx_usbcx_ghwcfg1_s cn52xxp1; struct cvmx_usbcx_ghwcfg1_s cn56xx; struct cvmx_usbcx_ghwcfg1_s cn56xxp1; } cvmx_usbcx_ghwcfg1_t; /** * cvmx_usbc#_ghwcfg2 * * User HW Config2 Register (GHWCFG2) * * This register contains configuration options of the O2P USB core. */ typedef union { uint32_t u32; struct cvmx_usbcx_ghwcfg2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_31_31 : 1; uint32_t tknqdepth : 5; /**< Device Mode IN Token Sequence Learning Queue Depth (TknQDepth) Range: 0-30 */ uint32_t ptxqdepth : 2; /**< Host Mode Periodic Request Queue Depth (PTxQDepth) * 2'b00: 2 * 2'b01: 4 * 2'b10: 8 * Others: Reserved */ uint32_t nptxqdepth : 2; /**< Non-Periodic Request Queue Depth (NPTxQDepth) * 2'b00: 2 * 2'b01: 4 * 2'b10: 8 * Others: Reserved */ uint32_t reserved_20_21 : 2; uint32_t dynfifosizing : 1; /**< Dynamic FIFO Sizing Enabled (DynFifoSizing) * 1'b0: No * 1'b1: Yes */ uint32_t periosupport : 1; /**< Periodic OUT Channels Supported in Host Mode (PerioSupport) * 1'b0: No * 1'b1: Yes */ uint32_t numhstchnl : 4; /**< Number of Host Channels (NumHstChnl) Indicates the number of host channels supported by the core in Host mode. The range of this field is 0-15: 0 specifies 1 channel, 15 specifies 16 channels. */ uint32_t numdeveps : 4; /**< Number of Device Endpoints (NumDevEps) Indicates the number of device endpoints supported by the core in Device mode in addition to control endpoint 0. The range of this field is 1-15. 
*/ uint32_t fsphytype : 2; /**< Full-Speed PHY Interface Type (FSPhyType) * 2'b00: Full-speed interface not supported * 2'b01: Dedicated full-speed interface * 2'b10: FS pins shared with UTMI+ pins * 2'b11: FS pins shared with ULPI pins */ uint32_t hsphytype : 2; /**< High-Speed PHY Interface Type (HSPhyType) * 2'b00: High-Speed interface not supported * 2'b01: UTMI+ * 2'b10: ULPI * 2'b11: UTMI+ and ULPI */ uint32_t singpnt : 1; /**< Point-to-Point (SingPnt) * 1'b0: Multi-point application * 1'b1: Single-point application */ uint32_t otgarch : 2; /**< Architecture (OtgArch) * 2'b00: Slave-Only * 2'b01: External DMA * 2'b10: Internal DMA * Others: Reserved */ uint32_t otgmode : 3; /**< Mode of Operation (OtgMode) * 3'b000: HNP- and SRP-Capable OTG (Host & Device) * 3'b001: SRP-Capable OTG (Host & Device) * 3'b010: Non-HNP and Non-SRP Capable OTG (Host & Device) * 3'b011: SRP-Capable Device * 3'b100: Non-OTG Device * 3'b101: SRP-Capable Host * 3'b110: Non-OTG Host * Others: Reserved */ #else uint32_t otgmode : 3; uint32_t otgarch : 2; uint32_t singpnt : 1; uint32_t hsphytype : 2; uint32_t fsphytype : 2; uint32_t numdeveps : 4; uint32_t numhstchnl : 4; uint32_t periosupport : 1; uint32_t dynfifosizing : 1; uint32_t reserved_20_21 : 2; uint32_t nptxqdepth : 2; uint32_t ptxqdepth : 2; uint32_t tknqdepth : 5; uint32_t reserved_31_31 : 1; #endif } s; struct cvmx_usbcx_ghwcfg2_s cn30xx; struct cvmx_usbcx_ghwcfg2_s cn31xx; struct cvmx_usbcx_ghwcfg2_s cn50xx; struct cvmx_usbcx_ghwcfg2_s cn52xx; struct cvmx_usbcx_ghwcfg2_s cn52xxp1; struct cvmx_usbcx_ghwcfg2_s cn56xx; struct cvmx_usbcx_ghwcfg2_s cn56xxp1; } cvmx_usbcx_ghwcfg2_t; /** * cvmx_usbc#_ghwcfg3 * * User HW Config3 Register (GHWCFG3) * * This register contains the configuration options of the O2P USB core. */ typedef union { uint32_t u32; struct cvmx_usbcx_ghwcfg3_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t dfifodepth : 16; /**< DFIFO Depth (DfifoDepth) This value is in terms of 32-bit words. * Minimum value is 32 * Maximum value is 32768 */ uint32_t reserved_13_15 : 3; uint32_t ahbphysync : 1; /**< AHB and PHY Synchronous (AhbPhySync) Indicates whether AHB and PHY clocks are synchronous to each other. * 1'b0: No * 1'b1: Yes This bit is tied to 1. */ uint32_t rsttype : 1; /**< Reset Style for Clocked always Blocks in RTL (RstType) * 1'b0: Asynchronous reset is used in the core * 1'b1: Synchronous reset is used in the core */ uint32_t optfeature : 1; /**< Optional Features Removed (OptFeature) Indicates whether the User ID register, GPIO interface ports, and SOF toggle and counter ports were removed for gate count optimization. */ uint32_t vendor_control_interface_support : 1;/**< Vendor Control Interface Support * 1'b0: Vendor Control Interface is not available on the core. * 1'b1: Vendor Control Interface is available. */ uint32_t i2c_selection : 1; /**< I2C Selection * 1'b0: I2C Interface is not available on the core. * 1'b1: I2C Interface is available on the core. */ uint32_t otgen : 1; /**< OTG Function Enabled (OtgEn) The application uses this bit to indicate the O2P USB core's OTG capabilities. * 1'b0: Not OTG capable * 1'b1: OTG Capable */ uint32_t pktsizewidth : 3; /**< Width of Packet Size Counters (PktSizeWidth) * 3'b000: 4 bits * 3'b001: 5 bits * 3'b010: 6 bits * 3'b011: 7 bits * 3'b100: 8 bits * 3'b101: 9 bits * 3'b110: 10 bits * Others: Reserved */ uint32_t xfersizewidth : 4; /**< Width of Transfer Size Counters (XferSizeWidth) * 4'b0000: 11 bits * 4'b0001: 12 bits - ... 
* 4'b1000: 19 bits * Others: Reserved */
#else
uint32_t xfersizewidth : 4;
uint32_t pktsizewidth : 3;
uint32_t otgen : 1;
uint32_t i2c_selection : 1;
uint32_t vendor_control_interface_support : 1;
uint32_t optfeature : 1;
uint32_t rsttype : 1;
uint32_t ahbphysync : 1;
uint32_t reserved_13_15 : 3;
uint32_t dfifodepth : 16;
#endif
} s;
struct cvmx_usbcx_ghwcfg3_s cn30xx;
struct cvmx_usbcx_ghwcfg3_s cn31xx;
struct cvmx_usbcx_ghwcfg3_s cn50xx;
struct cvmx_usbcx_ghwcfg3_s cn52xx;
struct cvmx_usbcx_ghwcfg3_s cn52xxp1;
struct cvmx_usbcx_ghwcfg3_s cn56xx;
struct cvmx_usbcx_ghwcfg3_s cn56xxp1;
} cvmx_usbcx_ghwcfg3_t;

/**
 * cvmx_usbc#_ghwcfg4
 *
 * User HW Config4 Register (GHWCFG4)
 *
 * This register contains the configuration options of the O2P USB core.
 */
typedef union { uint32_t u32;
struct cvmx_usbcx_ghwcfg4_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t reserved_30_31 : 2;
uint32_t numdevmodinend : 4; /**< Number of device mode IN endpoints, including control endpoints. */
uint32_t endedtrfifo : 1; /**< Enable dedicated transmit FIFO for device IN endpoints. */
uint32_t sessendfltr : 1; /**< "session_end" Filter Enabled (SessEndFltr) * 1'b0: No filter * 1'b1: Filter */
uint32_t bvalidfltr : 1; /**< "b_valid" Filter Enabled (BValidFltr) * 1'b0: No filter * 1'b1: Filter */
uint32_t avalidfltr : 1; /**< "a_valid" Filter Enabled (AValidFltr) * 1'b0: No filter * 1'b1: Filter */
uint32_t vbusvalidfltr : 1; /**< "vbus_valid" Filter Enabled (VBusValidFltr) * 1'b0: No filter * 1'b1: Filter */
uint32_t iddgfltr : 1; /**< "iddig" Filter Enable (IddgFltr) * 1'b0: No filter * 1'b1: Filter */
uint32_t numctleps : 4; /**< Number of Device Mode Control Endpoints in Addition to Endpoint 0 (NumCtlEps) Range: 1-15 */
uint32_t phydatawidth : 2; /**< UTMI+ PHY/ULPI-to-Internal UTMI+ Wrapper Data Width (PhyDataWidth) When a ULPI PHY is used, an internal wrapper converts ULPI to UTMI+. * 2'b00: 8 bits * 2'b01: 16 bits * 2'b10: 8/16 bits, software selectable * Others: Reserved */
uint32_t reserved_6_13 : 8;
uint32_t ahbfreq : 1; /**< Minimum AHB Frequency Less Than 60 MHz (AhbFreq) * 1'b0: No * 1'b1: Yes */
uint32_t enablepwropt : 1; /**< Enable Power Optimization?
(EnablePwrOpt) * 1'b0: No * 1'b1: Yes */ uint32_t numdevperioeps : 4; /**< Number of Device Mode Periodic IN Endpoints (NumDevPerioEps) Range: 0-15 */ #else uint32_t numdevperioeps : 4; uint32_t enablepwropt : 1; uint32_t ahbfreq : 1; uint32_t reserved_6_13 : 8; uint32_t phydatawidth : 2; uint32_t numctleps : 4; uint32_t iddgfltr : 1; uint32_t vbusvalidfltr : 1; uint32_t avalidfltr : 1; uint32_t bvalidfltr : 1; uint32_t sessendfltr : 1; uint32_t endedtrfifo : 1; uint32_t numdevmodinend : 4; uint32_t reserved_30_31 : 2; #endif } s; struct cvmx_usbcx_ghwcfg4_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_25_31 : 7; uint32_t sessendfltr : 1; /**< "session_end" Filter Enabled (SessEndFltr) * 1'b0: No filter * 1'b1: Filter */ uint32_t bvalidfltr : 1; /**< "b_valid" Filter Enabled (BValidFltr) * 1'b0: No filter * 1'b1: Filter */ uint32_t avalidfltr : 1; /**< "a_valid" Filter Enabled (AValidFltr) * 1'b0: No filter * 1'b1: Filter */ uint32_t vbusvalidfltr : 1; /**< "vbus_valid" Filter Enabled (VBusValidFltr) * 1'b0: No filter * 1'b1: Filter */ uint32_t iddgfltr : 1; /**< "iddig" Filter Enable (IddgFltr) * 1'b0: No filter * 1'b1: Filter */ uint32_t numctleps : 4; /**< Number of Device Mode Control Endpoints in Addition to Endpoint 0 (NumCtlEps) Range: 1-15 */ uint32_t phydatawidth : 2; /**< UTMI+ PHY/ULPI-to-Internal UTMI+ Wrapper Data Width (PhyDataWidth) When a ULPI PHY is used, an internal wrapper converts ULPI to UTMI+. * 2'b00: 8 bits * 2'b01: 16 bits * 2'b10: 8/16 bits, software selectable * Others: Reserved */ uint32_t reserved_6_13 : 8; uint32_t ahbfreq : 1; /**< Minimum AHB Frequency Less Than 60 MHz (AhbFreq) * 1'b0: No * 1'b1: Yes */ uint32_t enablepwropt : 1; /**< Enable Power Optimization? (EnablePwrOpt) * 1'b0: No * 1'b1: Yes */ uint32_t numdevperioeps : 4; /**< Number of Device Mode Periodic IN Endpoints (NumDevPerioEps) Range: 0-15 */ #else uint32_t numdevperioeps : 4; uint32_t enablepwropt : 1; uint32_t ahbfreq : 1; uint32_t reserved_6_13 : 8; uint32_t phydatawidth : 2; uint32_t numctleps : 4; uint32_t iddgfltr : 1; uint32_t vbusvalidfltr : 1; uint32_t avalidfltr : 1; uint32_t bvalidfltr : 1; uint32_t sessendfltr : 1; uint32_t reserved_25_31 : 7; #endif } cn30xx; struct cvmx_usbcx_ghwcfg4_cn30xx cn31xx; struct cvmx_usbcx_ghwcfg4_s cn50xx; struct cvmx_usbcx_ghwcfg4_s cn52xx; struct cvmx_usbcx_ghwcfg4_s cn52xxp1; struct cvmx_usbcx_ghwcfg4_s cn56xx; struct cvmx_usbcx_ghwcfg4_s cn56xxp1; } cvmx_usbcx_ghwcfg4_t; /** * cvmx_usbc#_gintmsk * * Core Interrupt Mask Register (GINTMSK) * * This register works with the Core Interrupt register to interrupt the application. * When an interrupt bit is masked, the interrupt associated with that bit will not be generated. * However, the Core Interrupt (GINTSTS) register bit corresponding to that interrupt will still be set. 
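 *
 * A hedged unmasking sketch using the union below (the CSR read/write
 * accessors are assumed, not defined in this file):
 *
 *   cvmx_usbcx_gintmsk_t msk;
 *   msk.u32 = 0;              // or read-modify-write the current USBC_GINTMSK value
 *   msk.s.usbrstmsk   = 1;    // unmask USB Reset
 *   msk.s.enumdonemsk = 1;    // unmask Enumeration Done
 *   // write msk.u32 to USBC_GINTMSK
 *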
* Mask interrupt: 1'b0, Unmask interrupt: 1'b1 */ typedef union { uint32_t u32; struct cvmx_usbcx_gintmsk_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t wkupintmsk : 1; /**< Resume/Remote Wakeup Detected Interrupt Mask (WkUpIntMsk) */ uint32_t sessreqintmsk : 1; /**< Session Request/New Session Detected Interrupt Mask (SessReqIntMsk) */ uint32_t disconnintmsk : 1; /**< Disconnect Detected Interrupt Mask (DisconnIntMsk) */ uint32_t conidstschngmsk : 1; /**< Connector ID Status Change Mask (ConIDStsChngMsk) */ uint32_t reserved_27_27 : 1; uint32_t ptxfempmsk : 1; /**< Periodic TxFIFO Empty Mask (PTxFEmpMsk) */ uint32_t hchintmsk : 1; /**< Host Channels Interrupt Mask (HChIntMsk) */ uint32_t prtintmsk : 1; /**< Host Port Interrupt Mask (PrtIntMsk) */ uint32_t reserved_23_23 : 1; uint32_t fetsuspmsk : 1; /**< Data Fetch Suspended Mask (FetSuspMsk) */ uint32_t incomplpmsk : 1; /**< Incomplete Periodic Transfer Mask (incomplPMsk) Incomplete Isochronous OUT Transfer Mask (incompISOOUTMsk) */ uint32_t incompisoinmsk : 1; /**< Incomplete Isochronous IN Transfer Mask (incompISOINMsk) */ uint32_t oepintmsk : 1; /**< OUT Endpoints Interrupt Mask (OEPIntMsk) */ uint32_t inepintmsk : 1; /**< IN Endpoints Interrupt Mask (INEPIntMsk) */ uint32_t epmismsk : 1; /**< Endpoint Mismatch Interrupt Mask (EPMisMsk) */ uint32_t reserved_16_16 : 1; uint32_t eopfmsk : 1; /**< End of Periodic Frame Interrupt Mask (EOPFMsk) */ uint32_t isooutdropmsk : 1; /**< Isochronous OUT Packet Dropped Interrupt Mask (ISOOutDropMsk) */ uint32_t enumdonemsk : 1; /**< Enumeration Done Mask (EnumDoneMsk) */ uint32_t usbrstmsk : 1; /**< USB Reset Mask (USBRstMsk) */ uint32_t usbsuspmsk : 1; /**< USB Suspend Mask (USBSuspMsk) */ uint32_t erlysuspmsk : 1; /**< Early Suspend Mask (ErlySuspMsk) */ uint32_t i2cint : 1; /**< I2C Interrupt Mask (I2CINT) */ uint32_t ulpickintmsk : 1; /**< ULPI Carkit Interrupt Mask (ULPICKINTMsk) I2C Carkit Interrupt Mask (I2CCKINTMsk) */ uint32_t goutnakeffmsk : 1; /**< Global OUT NAK Effective Mask (GOUTNakEffMsk) */ uint32_t ginnakeffmsk : 1; /**< Global Non-Periodic IN NAK Effective Mask (GINNakEffMsk) */ uint32_t nptxfempmsk : 1; /**< Non-Periodic TxFIFO Empty Mask (NPTxFEmpMsk) */ uint32_t rxflvlmsk : 1; /**< Receive FIFO Non-Empty Mask (RxFLvlMsk) */ uint32_t sofmsk : 1; /**< Start of (micro)Frame Mask (SofMsk) */ uint32_t otgintmsk : 1; /**< OTG Interrupt Mask (OTGIntMsk) */ uint32_t modemismsk : 1; /**< Mode Mismatch Interrupt Mask (ModeMisMsk) */ uint32_t reserved_0_0 : 1; #else uint32_t reserved_0_0 : 1; uint32_t modemismsk : 1; uint32_t otgintmsk : 1; uint32_t sofmsk : 1; uint32_t rxflvlmsk : 1; uint32_t nptxfempmsk : 1; uint32_t ginnakeffmsk : 1; uint32_t goutnakeffmsk : 1; uint32_t ulpickintmsk : 1; uint32_t i2cint : 1; uint32_t erlysuspmsk : 1; uint32_t usbsuspmsk : 1; uint32_t usbrstmsk : 1; uint32_t enumdonemsk : 1; uint32_t isooutdropmsk : 1; uint32_t eopfmsk : 1; uint32_t reserved_16_16 : 1; uint32_t epmismsk : 1; uint32_t inepintmsk : 1; uint32_t oepintmsk : 1; uint32_t incompisoinmsk : 1; uint32_t incomplpmsk : 1; uint32_t fetsuspmsk : 1; uint32_t reserved_23_23 : 1; uint32_t prtintmsk : 1; uint32_t hchintmsk : 1; uint32_t ptxfempmsk : 1; uint32_t reserved_27_27 : 1; uint32_t conidstschngmsk : 1; uint32_t disconnintmsk : 1; uint32_t sessreqintmsk : 1; uint32_t wkupintmsk : 1; #endif } s; struct cvmx_usbcx_gintmsk_s cn30xx; struct cvmx_usbcx_gintmsk_s cn31xx; struct cvmx_usbcx_gintmsk_s cn50xx; struct cvmx_usbcx_gintmsk_s cn52xx; struct cvmx_usbcx_gintmsk_s cn52xxp1; struct 
cvmx_usbcx_gintmsk_s cn56xx;
struct cvmx_usbcx_gintmsk_s cn56xxp1;
} cvmx_usbcx_gintmsk_t;

/**
 * cvmx_usbc#_gintsts
 *
 * Core Interrupt Register (GINTSTS)
 *
 * This register interrupts the application for system-level events in the current mode of operation
 * (Device mode or Host mode). Some of the bits in this register are valid only in Host mode,
 * while others are valid in Device mode only. This register also indicates the current mode of operation.
 * In order to clear the interrupt status bits of type R_SS_WC, the application must write 1'b1 into the bit.
 * The FIFO status interrupts are read only; once software reads from or writes to the FIFO while servicing these
 * interrupts, FIFO interrupt conditions are cleared automatically.
 */
typedef union { uint32_t u32;
struct cvmx_usbcx_gintsts_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t wkupint : 1; /**< Resume/Remote Wakeup Detected Interrupt (WkUpInt) In Device mode, this interrupt is asserted when a resume is detected on the USB. In Host mode, this interrupt is asserted when a remote wakeup is detected on the USB. For more information on how to use this interrupt, see "Partial Power-Down and Clock Gating Programming Model". */
uint32_t sessreqint : 1; /**< Session Request/New Session Detected Interrupt (SessReqInt) In Host mode, this interrupt is asserted when a session request is detected from the device. In Device mode, this interrupt is asserted when the utmiotg_bvalid signal goes high. For more information on how to use this interrupt, see "Partial Power-Down and Clock Gating Programming Model". */
uint32_t disconnint : 1; /**< Disconnect Detected Interrupt (DisconnInt) Asserted when a device disconnect is detected. */
uint32_t conidstschng : 1; /**< Connector ID Status Change (ConIDStsChng) The core sets this bit when there is a change in connector ID status. */
uint32_t reserved_27_27 : 1;
uint32_t ptxfemp : 1; /**< Periodic TxFIFO Empty (PTxFEmp) Asserted when the Periodic Transmit FIFO is either half or completely empty and there is space for at least one entry to be written in the Periodic Request Queue. The half or completely empty status is determined by the Periodic TxFIFO Empty Level bit in the Core AHB Configuration register (GAHBCFG.PTxFEmpLvl). */
uint32_t hchint : 1; /**< Host Channels Interrupt (HChInt) The core sets this bit to indicate that an interrupt is pending on one of the channels of the core (in Host mode). The application must read the Host All Channels Interrupt (HAINT) register to determine the exact number of the channel on which the interrupt occurred, and then read the corresponding Host Channel-n Interrupt (HCINTn) register to determine the exact cause of the interrupt. The application must clear the appropriate status bit in the HCINTn register to clear this bit. */
uint32_t prtint : 1; /**< Host Port Interrupt (PrtInt) The core sets this bit to indicate a change in port status of one of the O2P USB core ports in Host mode. The application must read the Host Port Control and Status (HPRT) register to determine the exact event that caused this interrupt. The application must clear the appropriate status bit in the Host Port Control and Status register to clear this bit. */
uint32_t reserved_23_23 : 1;
uint32_t fetsusp : 1; /**< Data Fetch Suspended (FetSusp) This interrupt is valid only in DMA mode. This interrupt indicates that the core has stopped fetching data for IN endpoints due to the unavailability of TxFIFO space or Request Queue space. This interrupt is used by the application for an endpoint mismatch algorithm. */
uint32_t incomplp : 1; /**< Incomplete Periodic Transfer (incomplP) In Host mode, the core sets this interrupt bit when there are incomplete periodic transactions still pending which are scheduled for the current microframe. Incomplete Isochronous OUT Transfer (incompISOOUT) In Device mode, the core sets this interrupt to indicate that there is at least one isochronous OUT endpoint on which the transfer is not completed in the current microframe. This interrupt is asserted along with the End of Periodic Frame Interrupt (EOPF) bit in this register. */
uint32_t incompisoin : 1; /**< Incomplete Isochronous IN Transfer (incompISOIN) The core sets this interrupt to indicate that there is at least one isochronous IN endpoint on which the transfer is not completed in the current microframe. This interrupt is asserted along with the End of Periodic Frame Interrupt (EOPF) bit in this register. */
uint32_t oepint : 1; /**< OUT Endpoints Interrupt (OEPInt) The core sets this bit to indicate that an interrupt is pending on one of the OUT endpoints of the core (in Device mode). The application must read the Device All Endpoints Interrupt (DAINT) register to determine the exact number of the OUT endpoint on which the interrupt occurred, and then read the corresponding Device OUT Endpoint-n Interrupt (DOEPINTn) register to determine the exact cause of the interrupt. The application must clear the appropriate status bit in the corresponding DOEPINTn register to clear this bit. */
uint32_t iepint : 1; /**< IN Endpoints Interrupt (IEPInt) The core sets this bit to indicate that an interrupt is pending on one of the IN endpoints of the core (in Device mode). The application must read the Device All Endpoints Interrupt (DAINT) register to determine the exact number of the IN endpoint on which the interrupt occurred, and then read the corresponding Device IN Endpoint-n Interrupt (DIEPINTn) register to determine the exact cause of the interrupt. The application must clear the appropriate status bit in the corresponding DIEPINTn register to clear this bit. */
uint32_t epmis : 1; /**< Endpoint Mismatch Interrupt (EPMis) Indicates that an IN token has been received for a non-periodic endpoint, but the data for another endpoint is present in the top of the Non-Periodic Transmit FIFO and the IN endpoint mismatch count programmed by the application has expired. */
uint32_t reserved_16_16 : 1;
uint32_t eopf : 1; /**< End of Periodic Frame Interrupt (EOPF) Indicates that the period specified in the Periodic Frame Interval field of the Device Configuration register (DCFG.PerFrInt) has been reached in the current microframe. */
uint32_t isooutdrop : 1; /**< Isochronous OUT Packet Dropped Interrupt (ISOOutDrop) The core sets this bit when it fails to write an isochronous OUT packet into the RxFIFO because the RxFIFO doesn't have enough space to accommodate a maximum packet size packet for the isochronous OUT endpoint. */
uint32_t enumdone : 1; /**< Enumeration Done (EnumDone) The core sets this bit to indicate that speed enumeration is complete. The application must read the Device Status (DSTS) register to obtain the enumerated speed. */
uint32_t usbrst : 1; /**< USB Reset (USBRst) The core sets this bit to indicate that a reset is detected on the USB. */
uint32_t usbsusp : 1; /**< USB Suspend (USBSusp) The core sets this bit to indicate that a suspend was detected on the USB. The core enters the Suspended state when there is no activity on the phy_line_state_i signal for an extended period of time. */
uint32_t erlysusp : 1; /**< Early Suspend (ErlySusp) The core sets this bit to indicate that an Idle state has been detected on the USB for 3 ms. */
uint32_t i2cint : 1; /**< I2C Interrupt (I2CINT) This bit is always 0x0. */
uint32_t ulpickint : 1; /**< ULPI Carkit Interrupt (ULPICKINT) This bit is always 0x0. */
uint32_t goutnakeff : 1; /**< Global OUT NAK Effective (GOUTNakEff) Indicates that the Set Global OUT NAK bit in the Device Control register (DCTL.SGOUTNak), set by the application, has taken effect in the core. This bit can be cleared by writing the Clear Global OUT NAK bit in the Device Control register (DCTL.CGOUTNak). */
uint32_t ginnakeff : 1; /**< Global IN Non-Periodic NAK Effective (GINNakEff) Indicates that the Set Global Non-Periodic IN NAK bit in the Device Control register (DCTL.SGNPInNak), set by the application, has taken effect in the core. That is, the core has sampled the Global IN NAK bit set by the application. This bit can be cleared by writing the Clear Global Non-Periodic IN NAK bit in the Device Control register (DCTL.CGNPInNak). This interrupt does not necessarily mean that a NAK handshake is sent out on the USB. The STALL bit takes precedence over the NAK bit. */
uint32_t nptxfemp : 1; /**< Non-Periodic TxFIFO Empty (NPTxFEmp) This interrupt is asserted when the Non-Periodic TxFIFO is either half or completely empty, and there is space for at least one entry to be written to the Non-Periodic Transmit Request Queue. The half or completely empty status is determined by the Non-Periodic TxFIFO Empty Level bit in the Core AHB Configuration register (GAHBCFG.NPTxFEmpLvl). */
uint32_t rxflvl : 1; /**< RxFIFO Non-Empty (RxFLvl) Indicates that there is at least one packet pending to be read from the RxFIFO. */
uint32_t sof : 1; /**< Start of (micro)Frame (Sof) In Host mode, the core sets this bit to indicate that an SOF (FS), micro-SOF (HS), or Keep-Alive (LS) is transmitted on the USB. The application must write a 1 to this bit to clear the interrupt. In Device mode, the core sets this bit to indicate that an SOF token has been received on the USB. The application can read the Device Status register to get the current (micro)frame number. This interrupt is seen only when the core is operating at either HS or FS. */
uint32_t otgint : 1; /**< OTG Interrupt (OTGInt) The core sets this bit to indicate an OTG protocol event. The application must read the OTG Interrupt Status (GOTGINT) register to determine the exact event that caused this interrupt. The application must clear the appropriate status bit in the GOTGINT register to clear this bit. */
uint32_t modemis : 1; /**< Mode Mismatch Interrupt (ModeMis) The core sets this bit when the application is trying to access: * A Host mode register, when the core is operating in Device mode * A Device mode register, when the core is operating in Host mode The register access is completed on the AHB with an OKAY response, but is ignored by the core internally and doesn't affect the operation of the core. */
uint32_t curmod : 1; /**< Current Mode of Operation (CurMod) Indicates the current mode of operation.
* 1'b0: Device mode * 1'b1: Host mode */ #else uint32_t curmod : 1; uint32_t modemis : 1; uint32_t otgint : 1; uint32_t sof : 1; uint32_t rxflvl : 1; uint32_t nptxfemp : 1; uint32_t ginnakeff : 1; uint32_t goutnakeff : 1; uint32_t ulpickint : 1; uint32_t i2cint : 1; uint32_t erlysusp : 1; uint32_t usbsusp : 1; uint32_t usbrst : 1; uint32_t enumdone : 1; uint32_t isooutdrop : 1; uint32_t eopf : 1; uint32_t reserved_16_16 : 1; uint32_t epmis : 1; uint32_t iepint : 1; uint32_t oepint : 1; uint32_t incompisoin : 1; uint32_t incomplp : 1; uint32_t fetsusp : 1; uint32_t reserved_23_23 : 1; uint32_t prtint : 1; uint32_t hchint : 1; uint32_t ptxfemp : 1; uint32_t reserved_27_27 : 1; uint32_t conidstschng : 1; uint32_t disconnint : 1; uint32_t sessreqint : 1; uint32_t wkupint : 1; #endif } s; struct cvmx_usbcx_gintsts_s cn30xx; struct cvmx_usbcx_gintsts_s cn31xx; struct cvmx_usbcx_gintsts_s cn50xx; struct cvmx_usbcx_gintsts_s cn52xx; struct cvmx_usbcx_gintsts_s cn52xxp1; struct cvmx_usbcx_gintsts_s cn56xx; struct cvmx_usbcx_gintsts_s cn56xxp1; } cvmx_usbcx_gintsts_t; /** * cvmx_usbc#_gnptxfsiz * * Non-Periodic Transmit FIFO Size Register (GNPTXFSIZ) * * The application can program the RAM size and the memory start address for the Non-Periodic TxFIFO. */ typedef union { uint32_t u32; struct cvmx_usbcx_gnptxfsiz_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t nptxfdep : 16; /**< Non-Periodic TxFIFO Depth (NPTxFDep) This value is in terms of 32-bit words. Minimum value is 16 Maximum value is 32768 */ uint32_t nptxfstaddr : 16; /**< Non-Periodic Transmit RAM Start Address (NPTxFStAddr) This field contains the memory start address for Non-Periodic Transmit FIFO RAM. */ #else uint32_t nptxfstaddr : 16; uint32_t nptxfdep : 16; #endif } s; struct cvmx_usbcx_gnptxfsiz_s cn30xx; struct cvmx_usbcx_gnptxfsiz_s cn31xx; struct cvmx_usbcx_gnptxfsiz_s cn50xx; struct cvmx_usbcx_gnptxfsiz_s cn52xx; struct cvmx_usbcx_gnptxfsiz_s cn52xxp1; struct cvmx_usbcx_gnptxfsiz_s cn56xx; struct cvmx_usbcx_gnptxfsiz_s cn56xxp1; } cvmx_usbcx_gnptxfsiz_t; /** * cvmx_usbc#_gnptxsts * * Non-Periodic Transmit FIFO/Queue Status Register (GNPTXSTS) * * This read-only register contains the free space information for the Non-Periodic TxFIFO and * the Non-Periodic Transmit Request Queue */ typedef union { uint32_t u32; struct cvmx_usbcx_gnptxsts_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_31_31 : 1; uint32_t nptxqtop : 7; /**< Top of the Non-Periodic Transmit Request Queue (NPTxQTop) Entry in the Non-Periodic Tx Request Queue that is currently being processed by the MAC. * Bits [30:27]: Channel/endpoint number * Bits [26:25]: - 2'b00: IN/OUT token - 2'b01: Zero-length transmit packet (device IN/host OUT) - 2'b10: PING/CSPLIT token - 2'b11: Channel halt command * Bit [24]: Terminate (last entry for selected channel/endpoint) */ uint32_t nptxqspcavail : 8; /**< Non-Periodic Transmit Request Queue Space Available (NPTxQSpcAvail) Indicates the amount of free space available in the Non- Periodic Transmit Request Queue. This queue holds both IN and OUT requests in Host mode. Device mode has only IN requests. * 8'h0: Non-Periodic Transmit Request Queue is full * 8'h1: 1 location available * 8'h2: 2 locations available * n: n locations available (0..8) * Others: Reserved */ uint32_t nptxfspcavail : 16; /**< Non-Periodic TxFIFO Space Avail (NPTxFSpcAvail) Indicates the amount of free space available in the Non- Periodic TxFIFO. Values are in terms of 32-bit words. 
* 16'h0: Non-Periodic TxFIFO is full * 16'h1: 1 word available * 16'h2: 2 words available * 16'hn: n words available (where 0..32768) * 16'h8000: 32768 words available * Others: Reserved */
#else
uint32_t nptxfspcavail : 16;
uint32_t nptxqspcavail : 8;
uint32_t nptxqtop : 7;
uint32_t reserved_31_31 : 1;
#endif
} s;
struct cvmx_usbcx_gnptxsts_s cn30xx;
struct cvmx_usbcx_gnptxsts_s cn31xx;
struct cvmx_usbcx_gnptxsts_s cn50xx;
struct cvmx_usbcx_gnptxsts_s cn52xx;
struct cvmx_usbcx_gnptxsts_s cn52xxp1;
struct cvmx_usbcx_gnptxsts_s cn56xx;
struct cvmx_usbcx_gnptxsts_s cn56xxp1;
} cvmx_usbcx_gnptxsts_t;

/**
 * cvmx_usbc#_gotgctl
 *
 * OTG Control and Status Register (GOTGCTL)
 *
 * The OTG Control and Status register controls the behavior and reflects the status of the OTG function of the core.
 */
typedef union { uint32_t u32;
struct cvmx_usbcx_gotgctl_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t reserved_20_31 : 12;
uint32_t bsesvld : 1; /**< B-Session Valid (BSesVld) Valid only when O2P USB core is configured as a USB device. Indicates the Device mode transceiver status. * 1'b0: B-session is not valid. * 1'b1: B-session is valid. */
uint32_t asesvld : 1; /**< A-Session Valid (ASesVld) Valid only when O2P USB core is configured as a USB host. Indicates the Host mode transceiver status. * 1'b0: A-session is not valid * 1'b1: A-session is valid */
uint32_t dbnctime : 1; /**< Long/Short Debounce Time (DbncTime) In the present version of the core this bit will only read as '0'. */
uint32_t conidsts : 1; /**< Connector ID Status (ConIDSts) Indicates the connector ID status on a connect event. * 1'b0: The O2P USB core is in A-device mode * 1'b1: The O2P USB core is in B-device mode */
uint32_t reserved_12_15 : 4;
uint32_t devhnpen : 1; /**< Device HNP Enabled (DevHNPEn) Since O2P USB core is not HNP capable this bit is 0x0. */
uint32_t hstsethnpen : 1; /**< Host Set HNP Enable (HstSetHNPEn) Since O2P USB core is not HNP capable this bit is 0x0. */
uint32_t hnpreq : 1; /**< HNP Request (HNPReq) Since O2P USB core is not HNP capable this bit is 0x0. */
uint32_t hstnegscs : 1; /**< Host Negotiation Success (HstNegScs) Since O2P USB core is not HNP capable this bit is 0x0. */
uint32_t reserved_2_7 : 6;
uint32_t sesreq : 1; /**< Session Request (SesReq) Since O2P USB core is not SRP capable this bit is 0x0. */
uint32_t sesreqscs : 1; /**< Session Request Success (SesReqScs) Since O2P USB core is not SRP capable this bit is 0x0. */
#else
uint32_t sesreqscs : 1;
uint32_t sesreq : 1;
uint32_t reserved_2_7 : 6;
uint32_t hstnegscs : 1;
uint32_t hnpreq : 1;
uint32_t hstsethnpen : 1;
uint32_t devhnpen : 1;
uint32_t reserved_12_15 : 4;
uint32_t conidsts : 1;
uint32_t dbnctime : 1;
uint32_t asesvld : 1;
uint32_t bsesvld : 1;
uint32_t reserved_20_31 : 12;
#endif
} s;
struct cvmx_usbcx_gotgctl_s cn30xx;
struct cvmx_usbcx_gotgctl_s cn31xx;
struct cvmx_usbcx_gotgctl_s cn50xx;
struct cvmx_usbcx_gotgctl_s cn52xx;
struct cvmx_usbcx_gotgctl_s cn52xxp1;
struct cvmx_usbcx_gotgctl_s cn56xx;
struct cvmx_usbcx_gotgctl_s cn56xxp1;
} cvmx_usbcx_gotgctl_t;

/**
 * cvmx_usbc#_gotgint
 *
 * OTG Interrupt Register (GOTGINT)
 *
 * The application reads this register whenever there is an OTG interrupt and clears the bits in this register
 * to clear the OTG interrupt.
 */
typedef union { uint32_t u32;
struct cvmx_usbcx_gotgint_s {
#if __BYTE_ORDER == __BIG_ENDIAN
uint32_t reserved_20_31 : 12;
uint32_t dbncedone : 1; /**< Debounce Done (DbnceDone) In the present version of the core this bit is tied to '0'.
*/ uint32_t adevtoutchg : 1; /**< A-Device Timeout Change (ADevTOUTChg) Since O2P USB core is not HNP or SRP capable this bit is always 0x0. */ uint32_t hstnegdet : 1; /**< Host Negotiation Detected (HstNegDet) Since O2P USB core is not HNP or SRP capable this bit is always 0x0. */ uint32_t reserved_10_16 : 7; uint32_t hstnegsucstschng : 1; /**< Host Negotiation Success Status Change (HstNegSucStsChng) Since O2P USB core is not HNP or SRP capable this bit is always 0x0. */ uint32_t sesreqsucstschng : 1; /**< Session Request Success Status Change Since O2P USB core is not HNP or SRP capable this bit is always 0x0. */ uint32_t reserved_3_7 : 5; uint32_t sesenddet : 1; /**< Session End Detected (SesEndDet) Since O2P USB core is not HNP or SRP capable this bit is always 0x0. */ uint32_t reserved_0_1 : 2; #else uint32_t reserved_0_1 : 2; uint32_t sesenddet : 1; uint32_t reserved_3_7 : 5; uint32_t sesreqsucstschng : 1; uint32_t hstnegsucstschng : 1; uint32_t reserved_10_16 : 7; uint32_t hstnegdet : 1; uint32_t adevtoutchg : 1; uint32_t dbncedone : 1; uint32_t reserved_20_31 : 12; #endif } s; struct cvmx_usbcx_gotgint_s cn30xx; struct cvmx_usbcx_gotgint_s cn31xx; struct cvmx_usbcx_gotgint_s cn50xx; struct cvmx_usbcx_gotgint_s cn52xx; struct cvmx_usbcx_gotgint_s cn52xxp1; struct cvmx_usbcx_gotgint_s cn56xx; struct cvmx_usbcx_gotgint_s cn56xxp1; } cvmx_usbcx_gotgint_t; /** * cvmx_usbc#_grstctl * * Core Reset Register (GRSTCTL) * * The application uses this register to reset various hardware features inside the core. */ typedef union { uint32_t u32; struct cvmx_usbcx_grstctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t ahbidle : 1; /**< AHB Master Idle (AHBIdle) Indicates that the AHB Master State Machine is in the IDLE condition. */ uint32_t dmareq : 1; /**< DMA Request Signal (DMAReq) Indicates that the DMA request is in progress. Used for debug. */ uint32_t reserved_11_29 : 19; uint32_t txfnum : 5; /**< TxFIFO Number (TxFNum) This is the FIFO number that must be flushed using the TxFIFO Flush bit. This field must not be changed until the core clears the TxFIFO Flush bit. * 5'h0: Non-Periodic TxFIFO flush * 5'h1: Periodic TxFIFO 1 flush in Device mode or Periodic TxFIFO flush in Host mode * 5'h2: Periodic TxFIFO 2 flush in Device mode - ... * 5'hF: Periodic TxFIFO 15 flush in Device mode * 5'h10: Flush all the Periodic and Non-Periodic TxFIFOs in the core */ uint32_t txfflsh : 1; /**< TxFIFO Flush (TxFFlsh) This bit selectively flushes a single or all transmit FIFOs, but cannot do so if the core is in the midst of a transaction. The application must only write this bit after checking that the core is neither writing to the TxFIFO nor reading from the TxFIFO. The application must wait until the core clears this bit before performing any operations. This bit takes 8 clocks (of phy_clk or hclk, whichever is slower) to clear. */ uint32_t rxfflsh : 1; /**< RxFIFO Flush (RxFFlsh) The application can flush the entire RxFIFO using this bit, but must first ensure that the core is not in the middle of a transaction. The application must only write to this bit after checking that the core is neither reading from the RxFIFO nor writing to the RxFIFO. The application must wait until the bit is cleared before performing any other operations. This bit will take 8 clocks (slowest of PHY or AHB clock) to clear. */ uint32_t intknqflsh : 1; /**< IN Token Sequence Learning Queue Flush (INTknQFlsh) The application writes this bit to flush the IN Token Sequence Learning Queue. 
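      A hedged TxFIFO-flush sketch for this register (per the TxFFlsh and
      TxFNum descriptions above; the CSR accessors are assumed):
        cvmx_usbcx_grstctl_t rst;
        rst.u32 = 0;
        rst.s.txfnum  = 0x10;   // 5'h10: flush all periodic and non-periodic TxFIFOs
        rst.s.txfflsh = 1;
        // write rst.u32 to USBC_GRSTCTL, then poll until txfflsh reads back 0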
*/
uint32_t frmcntrrst : 1; /**< Host Frame Counter Reset (FrmCntrRst) The application writes this bit to reset the (micro)frame number counter inside the core. When the (micro)frame counter is reset, the subsequent SOF sent out by the core will have a (micro)frame number of 0. */
uint32_t hsftrst : 1; /**< HClk Soft Reset (HSftRst) The application uses this bit to flush the control logic in the AHB Clock domain. Only AHB Clock Domain pipelines are reset. * FIFOs are not flushed with this bit. * All state machines in the AHB clock domain are reset to the Idle state after terminating the transactions on the AHB, following the protocol. * CSR control bits used by the AHB clock domain state machines are cleared. * To clear this interrupt, status mask bits that control the interrupt status and are generated by the AHB clock domain state machine are cleared. * Because interrupt status bits are not cleared, the application can get the status of any core events that occurred after it set this bit. This is a self-clearing bit that the core clears after all necessary logic is reset in the core. This may take several clocks, depending on the core's current state. */
uint32_t csftrst : 1; /**< Core Soft Reset (CSftRst) Resets the hclk and phy_clock domains as follows: * Clears the interrupts and all the CSR registers except the following register bits: - PCGCCTL.RstPdwnModule - PCGCCTL.GateHclk - PCGCCTL.PwrClmp - PCGCCTL.StopPclk - GUSBCFG.PhyLPwrClkSel - GUSBCFG.DDRSel - GUSBCFG.PHYSel - GUSBCFG.FSIntf - GUSBCFG.ULPI_UTMI_Sel - GUSBCFG.PHYIf - HCFG.FSLSPclkSel - DCFG.DevSpd * All module state machines (except the AHB Slave Unit) are reset to the IDLE state, and all the transmit FIFOs and the receive FIFO are flushed. * Any transactions on the AHB Master are terminated as soon as possible, after gracefully completing the last data phase of an AHB transfer. Any transactions on the USB are terminated immediately. The application can write to this bit any time it wants to reset the core. This is a self-clearing bit and the core clears this bit after all the necessary logic is reset in the core, which may take several clocks, depending on the current state of the core. Once this bit is cleared, software should wait at least 3 PHY clocks before doing any access to the PHY domain (synchronization delay). Software should also check that bit 31 of this register is 1 (AHB Master is IDLE) before starting any operation. Typically software reset is used during software development and also when you dynamically change the PHY selection bits in the USB configuration registers listed above. When you change the PHY, the corresponding clock for the PHY is selected and used in the PHY domain. Once a new clock is selected, the PHY domain has to be reset for proper operation. */
#else
uint32_t csftrst : 1;
uint32_t hsftrst : 1;
uint32_t frmcntrrst : 1;
uint32_t intknqflsh : 1;
uint32_t rxfflsh : 1;
uint32_t txfflsh : 1;
uint32_t txfnum : 5;
uint32_t reserved_11_29 : 19;
uint32_t dmareq : 1;
uint32_t ahbidle : 1;
#endif
} s;
struct cvmx_usbcx_grstctl_s cn30xx;
struct cvmx_usbcx_grstctl_s cn31xx;
struct cvmx_usbcx_grstctl_s cn50xx;
struct cvmx_usbcx_grstctl_s cn52xx;
struct cvmx_usbcx_grstctl_s cn52xxp1;
struct cvmx_usbcx_grstctl_s cn56xx;
struct cvmx_usbcx_grstctl_s cn56xxp1;
} cvmx_usbcx_grstctl_t;

/**
 * cvmx_usbc#_grxfsiz
 *
 * Receive FIFO Size Register (GRXFSIZ)
 *
 * The application can program the RAM size that must be allocated to the RxFIFO.
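 *
 * A hedged programming sketch (the depth value is illustrative only):
 *
 *   cvmx_usbcx_grxfsiz_t fsz;
 *   fsz.u32 = 0;
 *   fsz.s.rxfdep = 64;   // RxFIFO depth in 32-bit words (16..32768)
 *   // write fsz.u32 to USBC_GRXFSIZ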
*/ typedef union { uint32_t u32; struct cvmx_usbcx_grxfsiz_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_16_31 : 16; uint32_t rxfdep : 16; /**< RxFIFO Depth (RxFDep) This value is in terms of 32-bit words. * Minimum value is 16 * Maximum value is 32768 */ #else uint32_t rxfdep : 16; uint32_t reserved_16_31 : 16; #endif } s; struct cvmx_usbcx_grxfsiz_s cn30xx; struct cvmx_usbcx_grxfsiz_s cn31xx; struct cvmx_usbcx_grxfsiz_s cn50xx; struct cvmx_usbcx_grxfsiz_s cn52xx; struct cvmx_usbcx_grxfsiz_s cn52xxp1; struct cvmx_usbcx_grxfsiz_s cn56xx; struct cvmx_usbcx_grxfsiz_s cn56xxp1; } cvmx_usbcx_grxfsiz_t; /** * cvmx_usbc#_grxstspd * * Receive Status Read and Pop Register, Device Mode (GRXSTSPD) * * A read to the Receive Status Read and Pop register returns the top data entry of the RxFIFO and additionally pops that entry out of the RxFIFO. * This description is only valid when the core is in Device Mode. For Host Mode use USBC_GRXSTSPH instead. * NOTE: GRXSTSPH and GRXSTSPD are physically the same register and share the same offset in the O2P USB core. * The offset difference shown in this document is for software clarity and is actually ignored by the * hardware. */ typedef union { uint32_t u32; struct cvmx_usbcx_grxstspd_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_25_31 : 7; uint32_t fn : 4; /**< Frame Number (FN) This is the least significant 4 bits of the (micro)frame number in which the packet is received on the USB. This field is supported only when the isochronous OUT endpoints are supported. */ uint32_t pktsts : 4; /**< Packet Status (PktSts) Indicates the status of the received packet * 4'b0001: Global OUT NAK (triggers an interrupt) * 4'b0010: OUT data packet received * 4'b0100: SETUP transaction completed (triggers an interrupt) * 4'b0110: SETUP data packet received * Others: Reserved */ uint32_t dpid : 2; /**< Data PID (DPID) * 2'b00: DATA0 * 2'b10: DATA1 * 2'b01: DATA2 * 2'b11: MDATA */ uint32_t bcnt : 11; /**< Byte Count (BCnt) Indicates the byte count of the received data packet */ uint32_t epnum : 4; /**< Endpoint Number (EPNum) Indicates the endpoint number to which the current received packet belongs. */ #else uint32_t epnum : 4; uint32_t bcnt : 11; uint32_t dpid : 2; uint32_t pktsts : 4; uint32_t fn : 4; uint32_t reserved_25_31 : 7; #endif } s; struct cvmx_usbcx_grxstspd_s cn30xx; struct cvmx_usbcx_grxstspd_s cn31xx; struct cvmx_usbcx_grxstspd_s cn50xx; struct cvmx_usbcx_grxstspd_s cn52xx; struct cvmx_usbcx_grxstspd_s cn52xxp1; struct cvmx_usbcx_grxstspd_s cn56xx; struct cvmx_usbcx_grxstspd_s cn56xxp1; } cvmx_usbcx_grxstspd_t; /** * cvmx_usbc#_grxstsph * * Receive Status Read and Pop Register, Host Mode (GRXSTSPH) * * A read to the Receive Status Read and Pop register returns the top data entry of the RxFIFO and additionally pops that entry out of the RxFIFO. * This description is only valid when the core is in Host Mode. For Device Mode use USBC_GRXSTSPD instead. * NOTE: GRXSTSPH and GRXSTSPD are physically the same register and share the same offset in the O2P USB core. * The offset difference shown in this document is for software clarity and is actually ignored by the * hardware.
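*
* A host-mode sketch of popping and decoding one status entry (hypothetical
* usb_read32() helper and handle_in_data() handler; CVMX_USBCX_GRXSTSPH(block)
* is assumed to come from the companion cvmx-csr-addresses.h):
*
*   cvmx_usbcx_grxstsph_t sts;
*   sts.u32 = usb_read32(CVMX_USBCX_GRXSTSPH(0));  -- the read pops the entry
*   if (sts.s.pktsts == 0x2)                       -- IN data packet received
*       handle_in_data(sts.s.chnum, sts.s.bcnt);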
*/ typedef union { uint32_t u32; struct cvmx_usbcx_grxstsph_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_21_31 : 11; uint32_t pktsts : 4; /**< Packet Status (PktSts) Indicates the status of the received packet * 4'b0010: IN data packet received * 4'b0011: IN transfer completed (triggers an interrupt) * 4'b0101: Data toggle error (triggers an interrupt) * 4'b0111: Channel halted (triggers an interrupt) * Others: Reserved */ uint32_t dpid : 2; /**< Data PID (DPID) * 2'b00: DATA0 * 2'b10: DATA1 * 2'b01: DATA2 * 2'b11: MDATA */ uint32_t bcnt : 11; /**< Byte Count (BCnt) Indicates the byte count of the received IN data packet */ uint32_t chnum : 4; /**< Channel Number (ChNum) Indicates the channel number to which the current received packet belongs. */ #else uint32_t chnum : 4; uint32_t bcnt : 11; uint32_t dpid : 2; uint32_t pktsts : 4; uint32_t reserved_21_31 : 11; #endif } s; struct cvmx_usbcx_grxstsph_s cn30xx; struct cvmx_usbcx_grxstsph_s cn31xx; struct cvmx_usbcx_grxstsph_s cn50xx; struct cvmx_usbcx_grxstsph_s cn52xx; struct cvmx_usbcx_grxstsph_s cn52xxp1; struct cvmx_usbcx_grxstsph_s cn56xx; struct cvmx_usbcx_grxstsph_s cn56xxp1; } cvmx_usbcx_grxstsph_t; /** * cvmx_usbc#_grxstsrd * * Receive Status Debug Read Register, Device Mode (GRXSTSRD) * * A read to the Receive Status Debug Read register returns the contents of the top of the Receive FIFO. * This description is only valid when the core is in Device Mode. For Host Mode use USBC_GRXSTSRH instead. * NOTE: GRXSTSRH and GRXSTSRD are physically the same register and share the same offset in the O2P USB core. * The offset difference shown in this document is for software clarity and is actually ignored by the * hardware. */ typedef union { uint32_t u32; struct cvmx_usbcx_grxstsrd_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_25_31 : 7; uint32_t fn : 4; /**< Frame Number (FN) This is the least significant 4 bits of the (micro)frame number in which the packet is received on the USB. This field is supported only when the isochronous OUT endpoints are supported. */ uint32_t pktsts : 4; /**< Packet Status (PktSts) Indicates the status of the received packet * 4'b0001: Global OUT NAK (triggers an interrupt) * 4'b0010: OUT data packet received * 4'b0100: SETUP transaction completed (triggers an interrupt) * 4'b0110: SETUP data packet received * Others: Reserved */ uint32_t dpid : 2; /**< Data PID (DPID) * 2'b00: DATA0 * 2'b10: DATA1 * 2'b01: DATA2 * 2'b11: MDATA */ uint32_t bcnt : 11; /**< Byte Count (BCnt) Indicates the byte count of the received data packet */ uint32_t epnum : 4; /**< Endpoint Number (EPNum) Indicates the endpoint number to which the current received packet belongs. */ #else uint32_t epnum : 4; uint32_t bcnt : 11; uint32_t dpid : 2; uint32_t pktsts : 4; uint32_t fn : 4; uint32_t reserved_25_31 : 7; #endif } s; struct cvmx_usbcx_grxstsrd_s cn30xx; struct cvmx_usbcx_grxstsrd_s cn31xx; struct cvmx_usbcx_grxstsrd_s cn50xx; struct cvmx_usbcx_grxstsrd_s cn52xx; struct cvmx_usbcx_grxstsrd_s cn52xxp1; struct cvmx_usbcx_grxstsrd_s cn56xx; struct cvmx_usbcx_grxstsrd_s cn56xxp1; } cvmx_usbcx_grxstsrd_t; /** * cvmx_usbc#_grxstsrh * * Receive Status Debug Read Register, Host Mode (GRXSTSRH) * * A read to the Receive Status Debug Read register returns the contents of the top of the Receive FIFO. * This description is only valid when the core is in Host Mode. For Device Mode use USBC_GRXSTSRD instead. * NOTE: GRXSTSRH and GRXSTSRD are physically the same register and share the same offset in the O2P USB core.
* The offset difference shown in this document is for software clarity and is actually ignored by the * hardware. */ typedef union { uint32_t u32; struct cvmx_usbcx_grxstsrh_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_21_31 : 11; uint32_t pktsts : 4; /**< Packet Status (PktSts) Indicates the status of the received packet * 4'b0010: IN data packet received * 4'b0011: IN transfer completed (triggers an interrupt) * 4'b0101: Data toggle error (triggers an interrupt) * 4'b0111: Channel halted (triggers an interrupt) * Others: Reserved */ uint32_t dpid : 2; /**< Data PID (DPID) * 2'b00: DATA0 * 2'b10: DATA1 * 2'b01: DATA2 * 2'b11: MDATA */ uint32_t bcnt : 11; /**< Byte Count (BCnt) Indicates the byte count of the received IN data packet */ uint32_t chnum : 4; /**< Channel Number (ChNum) Indicates the channel number to which the current received packet belongs. */ #else uint32_t chnum : 4; uint32_t bcnt : 11; uint32_t dpid : 2; uint32_t pktsts : 4; uint32_t reserved_21_31 : 11; #endif } s; struct cvmx_usbcx_grxstsrh_s cn30xx; struct cvmx_usbcx_grxstsrh_s cn31xx; struct cvmx_usbcx_grxstsrh_s cn50xx; struct cvmx_usbcx_grxstsrh_s cn52xx; struct cvmx_usbcx_grxstsrh_s cn52xxp1; struct cvmx_usbcx_grxstsrh_s cn56xx; struct cvmx_usbcx_grxstsrh_s cn56xxp1; } cvmx_usbcx_grxstsrh_t; /** * cvmx_usbc#_gsnpsid * * Synopsys ID Register (GSNPSID) * * This is a read-only register that contains the release number of the core being used. */ typedef union { uint32_t u32; struct cvmx_usbcx_gsnpsid_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t synopsysid : 32; /**< 0x4F54xxxA, release number of the core being used. 0x4F54220A => pass1.x, 0x4F54240A => pass2.x */ #else uint32_t synopsysid : 32; #endif } s; struct cvmx_usbcx_gsnpsid_s cn30xx; struct cvmx_usbcx_gsnpsid_s cn31xx; struct cvmx_usbcx_gsnpsid_s cn50xx; struct cvmx_usbcx_gsnpsid_s cn52xx; struct cvmx_usbcx_gsnpsid_s cn52xxp1; struct cvmx_usbcx_gsnpsid_s cn56xx; struct cvmx_usbcx_gsnpsid_s cn56xxp1; } cvmx_usbcx_gsnpsid_t; /** * cvmx_usbc#_gusbcfg * * Core USB Configuration Register (GUSBCFG) * * This register can be used to configure the core after power-on or after changing to Host mode or Device mode. * It contains USB and USB-PHY related configuration parameters. The application must program this register * before starting any transactions on either the AHB or the USB. * Do not make changes to this register after the initial programming. */ typedef union { uint32_t u32; struct cvmx_usbcx_gusbcfg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_17_31 : 15; uint32_t otgi2csel : 1; /**< UTMIFS or I2C Interface Select (OtgI2CSel) This bit is always 0x0. */ uint32_t phylpwrclksel : 1; /**< PHY Low-Power Clock Select (PhyLPwrClkSel) Software should set this bit to 0x0. Selects either 480-MHz or 48-MHz (low-power) PHY mode. In FS and LS modes, the PHY can usually operate on a 48-MHz clock to save power. * 1'b0: 480-MHz Internal PLL clock * 1'b1: 48-MHz External Clock In 480 MHz mode, the UTMI interface operates at either 60 MHz or 30 MHz, depending upon whether 8- or 16-bit data width is selected. In 48-MHz mode, the UTMI interface operates at 48 MHz in FS mode and at either 48 or 6 MHz in LS mode (depending on the PHY vendor). This bit drives the utmi_fsls_low_power core output signal, and is valid only for UTMI+ PHYs. */ uint32_t reserved_14_14 : 1; uint32_t usbtrdtim : 4; /**< USB Turnaround Time (USBTrdTim) Sets the turnaround time in PHY clocks.
Specifies the response time for a MAC request to the Packet FIFO Controller (PFC) to fetch data from the DFIFO (SPRAM). This must be programmed to 0x5. */ uint32_t hnpcap : 1; /**< HNP-Capable (HNPCap) This bit is always 0x0. */ uint32_t srpcap : 1; /**< SRP-Capable (SRPCap) This bit is always 0x0. */ uint32_t ddrsel : 1; /**< ULPI DDR Select (DDRSel) Software should set this bit to 0x0. */ uint32_t physel : 1; /**< USB 2.0 High-Speed PHY or USB 1.1 Full-Speed Serial Transceiver Select (PHYSel) Software should set this bit to 0x0. */ uint32_t fsintf : 1; /**< Full-Speed Serial Interface Select (FSIntf) Software should set this bit to 0x0. */ uint32_t ulpi_utmi_sel : 1; /**< ULPI or UTMI+ Select (ULPI_UTMI_Sel) This bit is always 0x0. */ uint32_t phyif : 1; /**< PHY Interface (PHYIf) This bit is always 0x1. */ uint32_t toutcal : 3; /**< HS/FS Timeout Calibration (TOutCal) The number of PHY clocks that the application programs in this field is added to the high-speed/full-speed interpacket timeout duration in the core to account for any additional delays introduced by the PHY. This may be required, since the delay introduced by the PHY in generating the linestate condition may vary from one PHY to another. The USB standard timeout value for high-speed operation is 736 to 816 (inclusive) bit times. The USB standard timeout value for full-speed operation is 16 to 18 (inclusive) bit times. The application must program this field based on the speed of enumeration. The number of bit times added per PHY clock are: High-speed operation: * One 30-MHz PHY clock = 16 bit times * One 60-MHz PHY clock = 8 bit times Full-speed operation: * One 30-MHz PHY clock = 0.4 bit times * One 60-MHz PHY clock = 0.2 bit times * One 48-MHz PHY clock = 0.25 bit times */ #else uint32_t toutcal : 3; uint32_t phyif : 1; uint32_t ulpi_utmi_sel : 1; uint32_t fsintf : 1; uint32_t physel : 1; uint32_t ddrsel : 1; uint32_t srpcap : 1; uint32_t hnpcap : 1; uint32_t usbtrdtim : 4; uint32_t reserved_14_14 : 1; uint32_t phylpwrclksel : 1; uint32_t otgi2csel : 1; uint32_t reserved_17_31 : 15; #endif } s; struct cvmx_usbcx_gusbcfg_s cn30xx; struct cvmx_usbcx_gusbcfg_s cn31xx; struct cvmx_usbcx_gusbcfg_s cn50xx; struct cvmx_usbcx_gusbcfg_s cn52xx; struct cvmx_usbcx_gusbcfg_s cn52xxp1; struct cvmx_usbcx_gusbcfg_s cn56xx; struct cvmx_usbcx_gusbcfg_s cn56xxp1; } cvmx_usbcx_gusbcfg_t; /** * cvmx_usbc#_haint * * Host All Channels Interrupt Register (HAINT) * * When a significant event occurs on a channel, the Host All Channels Interrupt register * interrupts the application using the Host Channels Interrupt bit of the Core Interrupt * register (GINTSTS.HChInt). There is one interrupt bit per * channel, up to a maximum of 16 bits. Bits in this register are set and cleared when the * application sets and clears bits in the corresponding Host Channel-n Interrupt register.
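*
* A dispatch sketch (hypothetical usb_read32() helper and service_channel()
* handler; the CVMX_USBCX_HAINT(block) address macro is assumed to come from
* the companion cvmx-csr-addresses.h):
*
*   cvmx_usbcx_haint_t haint;
*   int ch;
*   haint.u32 = usb_read32(CVMX_USBCX_HAINT(0));
*   for (ch = 0; ch < 16; ch++)
*       if (haint.s.haint & (1u << ch))
*           service_channel(ch);  -- reads and clears HCINT for that channel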
*/ typedef union { uint32_t u32; struct cvmx_usbcx_haint_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_16_31 : 16; uint32_t haint : 16; /**< Channel Interrupts (HAINT) One bit per channel: Bit 0 for Channel 0, bit 15 for Channel 15 */ #else uint32_t haint : 16; uint32_t reserved_16_31 : 16; #endif } s; struct cvmx_usbcx_haint_s cn30xx; struct cvmx_usbcx_haint_s cn31xx; struct cvmx_usbcx_haint_s cn50xx; struct cvmx_usbcx_haint_s cn52xx; struct cvmx_usbcx_haint_s cn52xxp1; struct cvmx_usbcx_haint_s cn56xx; struct cvmx_usbcx_haint_s cn56xxp1; } cvmx_usbcx_haint_t; /** * cvmx_usbc#_haintmsk * * Host All Channels Interrupt Mask Register (HAINTMSK) * * The Host All Channel Interrupt Mask register works with the Host All Channel Interrupt * register to interrupt the application when an event occurs on a channel. There is one * interrupt mask bit per channel, up to a maximum of 16 bits. * Mask interrupt: 1'b0 Unmask interrupt: 1'b1 */ typedef union { uint32_t u32; struct cvmx_usbcx_haintmsk_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_16_31 : 16; uint32_t haintmsk : 16; /**< Channel Interrupt Mask (HAINTMsk) One bit per channel: Bit 0 for channel 0, bit 15 for channel 15 */ #else uint32_t haintmsk : 16; uint32_t reserved_16_31 : 16; #endif } s; struct cvmx_usbcx_haintmsk_s cn30xx; struct cvmx_usbcx_haintmsk_s cn31xx; struct cvmx_usbcx_haintmsk_s cn50xx; struct cvmx_usbcx_haintmsk_s cn52xx; struct cvmx_usbcx_haintmsk_s cn52xxp1; struct cvmx_usbcx_haintmsk_s cn56xx; struct cvmx_usbcx_haintmsk_s cn56xxp1; } cvmx_usbcx_haintmsk_t; /** * cvmx_usbc#_hcchar# * * Host Channel-n Characteristics Register (HCCHAR) * */ typedef union { uint32_t u32; struct cvmx_usbcx_hccharx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t chena : 1; /**< Channel Enable (ChEna) This field is set by the application and cleared by the OTG host. * 1'b0: Channel disabled * 1'b1: Channel enabled */ uint32_t chdis : 1; /**< Channel Disable (ChDis) The application sets this bit to stop transmitting/receiving data on a channel, even before the transfer for that channel is complete. The application must wait for the Channel Disabled interrupt before treating the channel as disabled. */ uint32_t oddfrm : 1; /**< Odd Frame (OddFrm) This field is set or cleared by the application to indicate whether the OTG host must perform a transfer in an odd (micro)frame. This field is applicable only for periodic (isochronous and interrupt) transactions. * 1'b0: Even (micro)frame * 1'b1: Odd (micro)frame */ uint32_t devaddr : 7; /**< Device Address (DevAddr) This field selects the specific device serving as the data source or sink. */ uint32_t ec : 2; /**< Multi Count (MC) / Error Count (EC) When the Split Enable bit of the Host Channel-n Split Control register (HCSPLTn.SpltEna) is reset (1'b0), this field indicates to the host the number of transactions that should be executed per microframe for this endpoint. * 2'b00: Reserved. This field yields undefined results. * 2'b01: 1 transaction * 2'b10: 2 transactions to be issued for this endpoint per microframe * 2'b11: 3 transactions to be issued for this endpoint per microframe When HCSPLTn.SpltEna is set (1'b1), this field indicates the number of immediate retries to be performed for periodic split transactions on transaction errors. This field must be set to at least 2'b01. */ uint32_t eptype : 2; /**< Endpoint Type (EPType) Indicates the transfer type selected.
* 2'b00: Control * 2'b01: Isochronous * 2'b10: Bulk * 2'b11: Interrupt */ uint32_t lspddev : 1; /**< Low-Speed Device (LSpdDev) This field is set by the application to indicate that this channel is communicating to a low-speed device. */ uint32_t reserved_16_16 : 1; uint32_t epdir : 1; /**< Endpoint Direction (EPDir) Indicates whether the transaction is IN or OUT. * 1'b0: OUT * 1'b1: IN */ uint32_t epnum : 4; /**< Endpoint Number (EPNum) Indicates the endpoint number on the device serving as the data source or sink. */ uint32_t mps : 11; /**< Maximum Packet Size (MPS) Indicates the maximum packet size of the associated endpoint. */ #else uint32_t mps : 11; uint32_t epnum : 4; uint32_t epdir : 1; uint32_t reserved_16_16 : 1; uint32_t lspddev : 1; uint32_t eptype : 2; uint32_t ec : 2; uint32_t devaddr : 7; uint32_t oddfrm : 1; uint32_t chdis : 1; uint32_t chena : 1; #endif } s; struct cvmx_usbcx_hccharx_s cn30xx; struct cvmx_usbcx_hccharx_s cn31xx; struct cvmx_usbcx_hccharx_s cn50xx; struct cvmx_usbcx_hccharx_s cn52xx; struct cvmx_usbcx_hccharx_s cn52xxp1; struct cvmx_usbcx_hccharx_s cn56xx; struct cvmx_usbcx_hccharx_s cn56xxp1; } cvmx_usbcx_hccharx_t; /** * cvmx_usbc#_hcfg * * Host Configuration Register (HCFG) * * This register configures the core after power-on. Do not make changes to this register after initializing the host. */ typedef union { uint32_t u32; struct cvmx_usbcx_hcfg_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_3_31 : 29; uint32_t fslssupp : 1; /**< FS- and LS-Only Support (FSLSSupp) The application uses this bit to control the core's enumeration speed. Using this bit, the application can make the core enumerate as a FS host, even if the connected device supports HS traffic. Do not make changes to this field after initial programming. * 1'b0: HS/FS/LS, based on the maximum speed supported by the connected device * 1'b1: FS/LS-only, even if the connected device can support HS */ uint32_t fslspclksel : 2; /**< FS/LS PHY Clock Select (FSLSPclkSel) When the core is in FS Host mode * 2'b00: PHY clock is running at 30/60 MHz * 2'b01: PHY clock is running at 48 MHz * Others: Reserved When the core is in LS Host mode * 2'b00: PHY clock is running at 30/60 MHz. When the UTMI+/ULPI PHY Low Power mode is not selected, use 30/60 MHz. * 2'b01: PHY clock is running at 48 MHz. When the UTMI+ PHY Low Power mode is selected, use 48MHz if the PHY supplies a 48 MHz clock during LS mode. * 2'b10: PHY clock is running at 6 MHz. In USB 1.1 FS mode, use 6 MHz when the UTMI+ PHY Low Power mode is selected and the PHY supplies a 6 MHz clock during LS mode. If you select a 6 MHz clock during LS mode, you must do a soft reset. * 2'b11: Reserved */ #else uint32_t fslspclksel : 2; uint32_t fslssupp : 1; uint32_t reserved_3_31 : 29; #endif } s; struct cvmx_usbcx_hcfg_s cn30xx; struct cvmx_usbcx_hcfg_s cn31xx; struct cvmx_usbcx_hcfg_s cn50xx; struct cvmx_usbcx_hcfg_s cn52xx; struct cvmx_usbcx_hcfg_s cn52xxp1; struct cvmx_usbcx_hcfg_s cn56xx; struct cvmx_usbcx_hcfg_s cn56xxp1; } cvmx_usbcx_hcfg_t; /** * cvmx_usbc#_hcint# * * Host Channel-n Interrupt Register (HCINT) * * This register indicates the status of a channel with respect to USB- and AHB-related events. * The application must read this register when the Host Channels Interrupt bit of the Core Interrupt * register (GINTSTS.HChInt) is set. Before the application can read this register, it must first read * the Host All Channels Interrupt (HAINT) register to get the exact channel number for the Host Channel-n * Interrupt register. 
The application must clear the appropriate bit in this register to clear the * corresponding bits in the HAINT and GINTSTS registers. */ typedef union { uint32_t u32; struct cvmx_usbcx_hcintx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_11_31 : 21; uint32_t datatglerr : 1; /**< Data Toggle Error (DataTglErr) */ uint32_t frmovrun : 1; /**< Frame Overrun (FrmOvrun) */ uint32_t bblerr : 1; /**< Babble Error (BblErr) */ uint32_t xacterr : 1; /**< Transaction Error (XactErr) */ uint32_t nyet : 1; /**< NYET Response Received Interrupt (NYET) */ uint32_t ack : 1; /**< ACK Response Received Interrupt (ACK) */ uint32_t nak : 1; /**< NAK Response Received Interrupt (NAK) */ uint32_t stall : 1; /**< STALL Response Received Interrupt (STALL) */ uint32_t ahberr : 1; /**< This bit is always 0x0. */ uint32_t chhltd : 1; /**< Channel Halted (ChHltd) Indicates the transfer completed abnormally either because of any USB transaction error or in response to a disable request by the application. */ uint32_t xfercompl : 1; /**< Transfer Completed (XferCompl) Transfer completed normally without any errors. */ #else uint32_t xfercompl : 1; uint32_t chhltd : 1; uint32_t ahberr : 1; uint32_t stall : 1; uint32_t nak : 1; uint32_t ack : 1; uint32_t nyet : 1; uint32_t xacterr : 1; uint32_t bblerr : 1; uint32_t frmovrun : 1; uint32_t datatglerr : 1; uint32_t reserved_11_31 : 21; #endif } s; struct cvmx_usbcx_hcintx_s cn30xx; struct cvmx_usbcx_hcintx_s cn31xx; struct cvmx_usbcx_hcintx_s cn50xx; struct cvmx_usbcx_hcintx_s cn52xx; struct cvmx_usbcx_hcintx_s cn52xxp1; struct cvmx_usbcx_hcintx_s cn56xx; struct cvmx_usbcx_hcintx_s cn56xxp1; } cvmx_usbcx_hcintx_t; /** * cvmx_usbc#_hcintmsk# * * Host Channel-n Interrupt Mask Register (HCINTMSKn) * * This register reflects the mask for each channel status described in the previous section.
* Mask interrupt: 1'b0 Unmask interrupt: 1'b1 */ typedef union { uint32_t u32; struct cvmx_usbcx_hcintmskx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_11_31 : 21; uint32_t datatglerrmsk : 1; /**< Data Toggle Error Mask (DataTglErrMsk) */ uint32_t frmovrunmsk : 1; /**< Frame Overrun Mask (FrmOvrunMsk) */ uint32_t bblerrmsk : 1; /**< Babble Error Mask (BblErrMsk) */ uint32_t xacterrmsk : 1; /**< Transaction Error Mask (XactErrMsk) */ uint32_t nyetmsk : 1; /**< NYET Response Received Interrupt Mask (NyetMsk) */ uint32_t ackmsk : 1; /**< ACK Response Received Interrupt Mask (AckMsk) */ uint32_t nakmsk : 1; /**< NAK Response Received Interrupt Mask (NakMsk) */ uint32_t stallmsk : 1; /**< STALL Response Received Interrupt Mask (StallMsk) */ uint32_t ahberrmsk : 1; /**< AHB Error Mask (AHBErrMsk) */ uint32_t chhltdmsk : 1; /**< Channel Halted Mask (ChHltdMsk) */ uint32_t xfercomplmsk : 1; /**< Transfer Completed Mask (XferComplMsk) */ #else uint32_t xfercomplmsk : 1; uint32_t chhltdmsk : 1; uint32_t ahberrmsk : 1; uint32_t stallmsk : 1; uint32_t nakmsk : 1; uint32_t ackmsk : 1; uint32_t nyetmsk : 1; uint32_t xacterrmsk : 1; uint32_t bblerrmsk : 1; uint32_t frmovrunmsk : 1; uint32_t datatglerrmsk : 1; uint32_t reserved_11_31 : 21; #endif } s; struct cvmx_usbcx_hcintmskx_s cn30xx; struct cvmx_usbcx_hcintmskx_s cn31xx; struct cvmx_usbcx_hcintmskx_s cn50xx; struct cvmx_usbcx_hcintmskx_s cn52xx; struct cvmx_usbcx_hcintmskx_s cn52xxp1; struct cvmx_usbcx_hcintmskx_s cn56xx; struct cvmx_usbcx_hcintmskx_s cn56xxp1; } cvmx_usbcx_hcintmskx_t; /** * cvmx_usbc#_hcsplt# * * Host Channel-n Split Control Register (HCSPLT) * */ typedef union { uint32_t u32; struct cvmx_usbcx_hcspltx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t spltena : 1; /**< Split Enable (SpltEna) The application sets this field to indicate that this channel is enabled to perform split transactions. */ uint32_t reserved_17_30 : 14; uint32_t compsplt : 1; /**< Do Complete Split (CompSplt) The application sets this field to request the OTG host to perform a complete split transaction. */ uint32_t xactpos : 2; /**< Transaction Position (XactPos) This field is used to determine whether to send all, first, middle, or last payloads with each OUT transaction. * 2'b11: All. This is the entire data payload of this transaction (which is less than or equal to 188 bytes). * 2'b10: Begin. This is the first data payload of this transaction (which is larger than 188 bytes). * 2'b00: Mid. This is the middle payload of this transaction (which is larger than 188 bytes). * 2'b01: End. This is the last payload of this transaction (which is larger than 188 bytes). */ uint32_t hubaddr : 7; /**< Hub Address (HubAddr) This field holds the device address of the transaction translator's hub. */ uint32_t prtaddr : 7; /**< Port Address (PrtAddr) This field is the port number of the recipient transaction translator.
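*
* A split-transaction setup sketch (hypothetical usb_write32() helper;
* CVMX_USBCX_HCSPLTX(channel, block) is assumed to come from the companion
* cvmx-csr-addresses.h; hub_addr and hub_port are placeholders):
*
*   cvmx_usbcx_hcspltx_t split;
*   split.u32 = 0;
*   split.s.hubaddr = hub_addr;   -- device address of the TT's hub
*   split.s.prtaddr = hub_port;   -- port number of the TT
*   split.s.compsplt = 1;         -- request the complete-split phase
*   split.s.spltena = 1;
*   usb_write32(CVMX_USBCX_HCSPLTX(ch, 0), split.u32);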
*/ #else uint32_t prtaddr : 7; uint32_t hubaddr : 7; uint32_t xactpos : 2; uint32_t compsplt : 1; uint32_t reserved_17_30 : 14; uint32_t spltena : 1; #endif } s; struct cvmx_usbcx_hcspltx_s cn30xx; struct cvmx_usbcx_hcspltx_s cn31xx; struct cvmx_usbcx_hcspltx_s cn50xx; struct cvmx_usbcx_hcspltx_s cn52xx; struct cvmx_usbcx_hcspltx_s cn52xxp1; struct cvmx_usbcx_hcspltx_s cn56xx; struct cvmx_usbcx_hcspltx_s cn56xxp1; } cvmx_usbcx_hcspltx_t; /** * cvmx_usbc#_hctsiz# * * Host Channel-n Transfer Size Register (HCTSIZ) * */ typedef union { uint32_t u32; struct cvmx_usbcx_hctsizx_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t dopng : 1; /**< Do Ping (DoPng) Setting this field to 1 directs the host to perform the PING protocol. */ uint32_t pid : 2; /**< PID (Pid) The application programs this field with the type of PID to use for the initial transaction. The host will maintain this field for the rest of the transfer. * 2'b00: DATA0 * 2'b01: DATA2 * 2'b10: DATA1 * 2'b11: MDATA (non-control)/SETUP (control) */ uint32_t pktcnt : 10; /**< Packet Count (PktCnt) This field is programmed by the application with the expected number of packets to be transmitted (OUT) or received (IN). The host decrements this count on every successful transmission or reception of an OUT/IN packet. Once this count reaches zero, the application is interrupted to indicate normal completion. */ uint32_t xfersize : 19; /**< Transfer Size (XferSize) For an OUT, this field is the number of data bytes the host will send during the transfer. For an IN, this field is the buffer size that the application has reserved for the transfer. The application is expected to program this field as an integer multiple of the maximum packet size for IN transactions (periodic and non-periodic). */ #else uint32_t xfersize : 19; uint32_t pktcnt : 10; uint32_t pid : 2; uint32_t dopng : 1; #endif } s; struct cvmx_usbcx_hctsizx_s cn30xx; struct cvmx_usbcx_hctsizx_s cn31xx; struct cvmx_usbcx_hctsizx_s cn50xx; struct cvmx_usbcx_hctsizx_s cn52xx; struct cvmx_usbcx_hctsizx_s cn52xxp1; struct cvmx_usbcx_hctsizx_s cn56xx; struct cvmx_usbcx_hctsizx_s cn56xxp1; } cvmx_usbcx_hctsizx_t; /** * cvmx_usbc#_hfir * * Host Frame Interval Register (HFIR) * * This register stores the frame interval information for the current speed to which the O2P USB core has enumerated. */ typedef union { uint32_t u32; struct cvmx_usbcx_hfir_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_16_31 : 16; uint32_t frint : 16; /**< Frame Interval (FrInt) The value that the application programs to this field specifies the interval between two consecutive SOFs (FS) or micro-SOFs (HS) or Keep-Alive tokens (HS). This field contains the number of PHY clocks that constitute the required frame interval. The default value set in this field is for FS operation when the PHY clock frequency is 60 MHz. The application can write a value to this register only after the Port Enable bit of the Host Port Control and Status register (HPRT.PrtEnaPort) has been set. If no value is programmed, the core calculates the value based on the PHY clock specified in the FS/LS PHY Clock Select field of the Host Configuration register (HCFG.FSLSPclkSel). Do not change the value of this field after the initial configuration.
* 125 us (PHY clock frequency for HS) * 1 ms (PHY clock frequency for FS/LS) */ #else uint32_t frint : 16; uint32_t reserved_16_31 : 16; #endif } s; struct cvmx_usbcx_hfir_s cn30xx; struct cvmx_usbcx_hfir_s cn31xx; struct cvmx_usbcx_hfir_s cn50xx; struct cvmx_usbcx_hfir_s cn52xx; struct cvmx_usbcx_hfir_s cn52xxp1; struct cvmx_usbcx_hfir_s cn56xx; struct cvmx_usbcx_hfir_s cn56xxp1; } cvmx_usbcx_hfir_t; /** * cvmx_usbc#_hfnum * * Host Frame Number/Frame Time Remaining Register (HFNUM) * * This register indicates the current frame number. * It also indicates the time remaining (in terms of the number of PHY clocks) * in the current (micro)frame. */ typedef union { uint32_t u32; struct cvmx_usbcx_hfnum_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t frrem : 16; /**< Frame Time Remaining (FrRem) Indicates the amount of time remaining in the current microframe (HS) or frame (FS/LS), in terms of PHY clocks. This field decrements on each PHY clock. When it reaches zero, this field is reloaded with the value in the Frame Interval register and a new SOF is transmitted on the USB. */ uint32_t frnum : 16; /**< Frame Number (FrNum) This field increments when a new SOF is transmitted on the USB, and is reset to 0 when it reaches 16'h3FFF. */ #else uint32_t frnum : 16; uint32_t frrem : 16; #endif } s; struct cvmx_usbcx_hfnum_s cn30xx; struct cvmx_usbcx_hfnum_s cn31xx; struct cvmx_usbcx_hfnum_s cn50xx; struct cvmx_usbcx_hfnum_s cn52xx; struct cvmx_usbcx_hfnum_s cn52xxp1; struct cvmx_usbcx_hfnum_s cn56xx; struct cvmx_usbcx_hfnum_s cn56xxp1; } cvmx_usbcx_hfnum_t; /** * cvmx_usbc#_hprt * * Host Port Control and Status Register (HPRT) * * This register is available in both Host and Device modes. * Currently, the OTG Host supports only one port. * A single register holds USB port-related information such as USB reset, enable, suspend, resume, * connect status, and test mode for each port. The R_SS_WC bits in this register can trigger an * interrupt to the application through the Host Port Interrupt bit of the Core Interrupt * register (GINTSTS.PrtInt). On a Port Interrupt, the application must read this register and clear * the bit that caused the interrupt. For the R_SS_WC bits, the application must write a 1 to the bit * to clear the interrupt. */ typedef union { uint32_t u32; struct cvmx_usbcx_hprt_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_19_31 : 13; uint32_t prtspd : 2; /**< Port Speed (PrtSpd) Indicates the speed of the device attached to this port. * 2'b00: High speed * 2'b01: Full speed * 2'b10: Low speed * 2'b11: Reserved */ uint32_t prttstctl : 4; /**< Port Test Control (PrtTstCtl) The application writes a nonzero value to this field to put the port into a Test mode, and the corresponding pattern is signaled on the port. * 4'b0000: Test mode disabled * 4'b0001: Test_J mode * 4'b0010: Test_K mode * 4'b0011: Test_SE0_NAK mode * 4'b0100: Test_Packet mode * 4'b0101: Test_Force_Enable * Others: Reserved PrtSpd must be zero (i.e. the interface must be in high-speed mode) to use the PrtTstCtl test modes. */ uint32_t prtpwr : 1; /**< Port Power (PrtPwr) The application uses this field to control power to this port, and the core clears this bit on an overcurrent condition. 
* 1'b0: Power off * 1'b1: Power on */ uint32_t prtlnsts : 2; /**< Port Line Status (PrtLnSts) Indicates the current logic level of the USB data lines * Bit [10]: Logic level of D- * Bit [11]: Logic level of D+ */ uint32_t reserved_9_9 : 1; uint32_t prtrst : 1; /**< Port Reset (PrtRst) When the application sets this bit, a reset sequence is started on this port. The application must time the reset period and clear this bit after the reset sequence is complete. * 1'b0: Port not in reset * 1'b1: Port in reset The application must leave this bit set for at least the minimum duration mentioned below to start a reset on the port. The application can leave it set for another 10 ms in addition to the required minimum duration, before clearing the bit, even though there is no maximum limit set by the USB standard. * High speed: 50 ms * Full speed/Low speed: 10 ms */ uint32_t prtsusp : 1; /**< Port Suspend (PrtSusp) The application sets this bit to put this port in Suspend mode. The core only stops sending SOFs when this is set. To stop the PHY clock, the application must set the Port Clock Stop bit, which will assert the suspend input pin of the PHY. The read value of this bit reflects the current suspend status of the port. This bit is cleared by the core after a remote wakeup signal is detected or the application sets the Port Reset bit or Port Resume bit in this register or the Resume/Remote Wakeup Detected Interrupt bit or Disconnect Detected Interrupt bit in the Core Interrupt register (GINTSTS.WkUpInt or GINTSTS.DisconnInt, respectively). * 1'b0: Port not in Suspend mode * 1'b1: Port in Suspend mode */ uint32_t prtres : 1; /**< Port Resume (PrtRes) The application sets this bit to drive resume signaling on the port. The core continues to drive the resume signal until the application clears this bit. If the core detects a USB remote wakeup sequence, as indicated by the Port Resume/Remote Wakeup Detected Interrupt bit of the Core Interrupt register (GINTSTS.WkUpInt), the core starts driving resume signaling without application intervention and clears this bit when it detects a disconnect condition. The read value of this bit indicates whether the core is currently driving resume signaling. * 1'b0: No resume driven * 1'b1: Resume driven */ uint32_t prtovrcurrchng : 1; /**< Port Overcurrent Change (PrtOvrCurrChng) The core sets this bit when the status of the Port Overcurrent Active bit (bit 4) in this register changes. */ uint32_t prtovrcurract : 1; /**< Port Overcurrent Active (PrtOvrCurrAct) Indicates the overcurrent condition of the port. * 1'b0: No overcurrent condition * 1'b1: Overcurrent condition */ uint32_t prtenchng : 1; /**< Port Enable/Disable Change (PrtEnChng) The core sets this bit when the status of the Port Enable bit [2] of this register changes. */ uint32_t prtena : 1; /**< Port Enable (PrtEna) A port is enabled only by the core after a reset sequence, and is disabled by an overcurrent condition, a disconnect condition, or by the application clearing this bit. The application cannot set this bit by a register write. It can only clear it to disable the port. This bit does not trigger any interrupt to the application. * 1'b0: Port disabled * 1'b1: Port enabled */ uint32_t prtconndet : 1; /**< Port Connect Detected (PrtConnDet) The core sets this bit when a device connection is detected to trigger an interrupt to the application using the Host Port Interrupt bit of the Core Interrupt register (GINTSTS.PrtInt). The application must write a 1 to this bit to clear the interrupt.
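*
* Because PrtEna, PrtConnDet, PrtEnChng and PrtOvrCurrChng are
* write-1-to-clear, a read-modify-write should zero the bits that must not
* be cleared before writing back. A sketch (hypothetical usb_read32() and
* usb_write32() helpers; CVMX_USBCX_HPRT(block) is assumed to come from the
* companion cvmx-csr-addresses.h):
*
*   cvmx_usbcx_hprt_t hprt;
*   hprt.u32 = usb_read32(CVMX_USBCX_HPRT(0));
*   hprt.s.prtena = 0;            -- writing 1 here would disable the port
*   hprt.s.prtenchng = 0;
*   hprt.s.prtovrcurrchng = 0;
*   hprt.s.prtconndet = 1;        -- clear only the connect-detect interrupt
*   usb_write32(CVMX_USBCX_HPRT(0), hprt.u32);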
*/ uint32_t prtconnsts : 1; /**< Port Connect Status (PrtConnSts) * 0: No device is attached to the port. * 1: A device is attached to the port. */ #else uint32_t prtconnsts : 1; uint32_t prtconndet : 1; uint32_t prtena : 1; uint32_t prtenchng : 1; uint32_t prtovrcurract : 1; uint32_t prtovrcurrchng : 1; uint32_t prtres : 1; uint32_t prtsusp : 1; uint32_t prtrst : 1; uint32_t reserved_9_9 : 1; uint32_t prtlnsts : 2; uint32_t prtpwr : 1; uint32_t prttstctl : 4; uint32_t prtspd : 2; uint32_t reserved_19_31 : 13; #endif } s; struct cvmx_usbcx_hprt_s cn30xx; struct cvmx_usbcx_hprt_s cn31xx; struct cvmx_usbcx_hprt_s cn50xx; struct cvmx_usbcx_hprt_s cn52xx; struct cvmx_usbcx_hprt_s cn52xxp1; struct cvmx_usbcx_hprt_s cn56xx; struct cvmx_usbcx_hprt_s cn56xxp1; } cvmx_usbcx_hprt_t; /** * cvmx_usbc#_hptxfsiz * * Host Periodic Transmit FIFO Size Register (HPTXFSIZ) * * This register holds the size and the memory start address of the Periodic TxFIFO. */ typedef union { uint32_t u32; struct cvmx_usbcx_hptxfsiz_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t ptxfsize : 16; /**< Host Periodic TxFIFO Depth (PTxFSize) This value is in terms of 32-bit words. * Minimum value is 16 * Maximum value is 32768 */ uint32_t ptxfstaddr : 16; /**< Host Periodic TxFIFO Start Address (PTxFStAddr) */ #else uint32_t ptxfstaddr : 16; uint32_t ptxfsize : 16; #endif } s; struct cvmx_usbcx_hptxfsiz_s cn30xx; struct cvmx_usbcx_hptxfsiz_s cn31xx; struct cvmx_usbcx_hptxfsiz_s cn50xx; struct cvmx_usbcx_hptxfsiz_s cn52xx; struct cvmx_usbcx_hptxfsiz_s cn52xxp1; struct cvmx_usbcx_hptxfsiz_s cn56xx; struct cvmx_usbcx_hptxfsiz_s cn56xxp1; } cvmx_usbcx_hptxfsiz_t; /** * cvmx_usbc#_hptxsts * * Host Periodic Transmit FIFO/Queue Status Register (HPTXSTS) * * This read-only register contains the free space information for the Periodic TxFIFO and * the Periodic Transmit Request Queue. */ typedef union { uint32_t u32; struct cvmx_usbcx_hptxsts_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t ptxqtop : 8; /**< Top of the Periodic Transmit Request Queue (PTxQTop) This indicates the entry in the Periodic Tx Request Queue that is currently being processed by the MAC. This register is used for debugging. * Bit [31]: Odd/Even (micro)frame - 1'b0: send in even (micro)frame - 1'b1: send in odd (micro)frame * Bits [30:27]: Channel/endpoint number * Bits [26:25]: Type - 2'b00: IN/OUT - 2'b01: Zero-length packet - 2'b10: CSPLIT - 2'b11: Disable channel command * Bit [24]: Terminate (last entry for the selected channel/endpoint) */ uint32_t ptxqspcavail : 8; /**< Periodic Transmit Request Queue Space Available (PTxQSpcAvail) Indicates the number of free locations available to be written in the Periodic Transmit Request Queue. This queue holds both IN and OUT requests. * 8'h0: Periodic Transmit Request Queue is full * 8'h1: 1 location available * 8'h2: 2 locations available * n: n locations available (0..8) * Others: Reserved */ uint32_t ptxfspcavail : 16; /**< Periodic Transmit Data FIFO Space Available (PTxFSpcAvail) Indicates the number of free locations available to be written to in the Periodic TxFIFO.
Values are in terms of 32-bit words * 16'h0: Periodic TxFIFO is full * 16'h1: 1 word available * 16'h2: 2 words available * 16'hn: n words available (where 0..32768) * 16'h8000: 32768 words available * Others: Reserved */ #else uint32_t ptxfspcavail : 16; uint32_t ptxqspcavail : 8; uint32_t ptxqtop : 8; #endif } s; struct cvmx_usbcx_hptxsts_s cn30xx; struct cvmx_usbcx_hptxsts_s cn31xx; struct cvmx_usbcx_hptxsts_s cn50xx; struct cvmx_usbcx_hptxsts_s cn52xx; struct cvmx_usbcx_hptxsts_s cn52xxp1; struct cvmx_usbcx_hptxsts_s cn56xx; struct cvmx_usbcx_hptxsts_s cn56xxp1; } cvmx_usbcx_hptxsts_t; /** * cvmx_usbc#_nptxdfifo# * * NPTX Data Fifo (NPTXDFIFO) * * A slave mode application uses this register to access the Tx FIFO for channel n. */ typedef union { uint32_t u32; struct cvmx_usbcx_nptxdfifox_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t data : 32; /**< Reserved */ #else uint32_t data : 32; #endif } s; struct cvmx_usbcx_nptxdfifox_s cn30xx; struct cvmx_usbcx_nptxdfifox_s cn31xx; struct cvmx_usbcx_nptxdfifox_s cn50xx; struct cvmx_usbcx_nptxdfifox_s cn52xx; struct cvmx_usbcx_nptxdfifox_s cn52xxp1; struct cvmx_usbcx_nptxdfifox_s cn56xx; struct cvmx_usbcx_nptxdfifox_s cn56xxp1; } cvmx_usbcx_nptxdfifox_t; /** * cvmx_usbc#_pcgcctl * * Power and Clock Gating Control Register (PCGCCTL) * * The application can use this register to control the core's power-down and clock gating features. */ typedef union { uint32_t u32; struct cvmx_usbcx_pcgcctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint32_t reserved_5_31 : 27; uint32_t physuspended : 1; /**< PHY Suspended. (PhySuspended) Indicates that the PHY has been suspended. After the application sets the Stop Pclk bit (bit 0), this bit is updated once the PHY is suspended. Since the UTMI+ PHY suspend is controlled through a port, the UTMI+ PHY is suspended immediately after Stop Pclk is set. However, the ULPI PHY takes a few clocks to suspend, because the suspend information is conveyed through the ULPI protocol to the ULPI PHY. */ uint32_t rstpdwnmodule : 1; /**< Reset Power-Down Modules (RstPdwnModule) This bit is valid only in Partial Power-Down mode. The application sets this bit when the power is turned off. The application clears this bit after the power is turned on and the PHY clock is up. */ uint32_t pwrclmp : 1; /**< Power Clamp (PwrClmp) This bit is only valid in Partial Power-Down mode. The application sets this bit before the power is turned off to clamp the signals between the power-on modules and the power-off modules. The application clears the bit to disable the clamping before the power is turned on. */ uint32_t gatehclk : 1; /**< Gate Hclk (GateHclk) The application sets this bit to gate hclk to modules other than the AHB Slave and Master and wakeup logic when the USB is suspended or the session is not valid. The application clears this bit when the USB is resumed or a new session starts. */ uint32_t stoppclk : 1; /**< Stop Pclk (StopPclk) The application sets this bit to stop the PHY clock (phy_clk) when the USB is suspended, the session is not valid, or the device is disconnected. The application clears this bit when the USB is resumed or a new session starts. 
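*
* A suspend-path sketch under the assumption that phy_clk is stopped before
* hclk is gated (hypothetical usb_read32() and usb_write32() helpers;
* CVMX_USBCX_PCGCCTL(block) is assumed to come from the companion
* cvmx-csr-addresses.h):
*
*   cvmx_usbcx_pcgcctl_t pcgc;
*   pcgc.u32 = usb_read32(CVMX_USBCX_PCGCCTL(0));
*   pcgc.s.stoppclk = 1;          -- stop the PHY clock first
*   usb_write32(CVMX_USBCX_PCGCCTL(0), pcgc.u32);
*   pcgc.s.gatehclk = 1;          -- then gate hclk
*   usb_write32(CVMX_USBCX_PCGCCTL(0), pcgc.u32);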
*/ #else uint32_t stoppclk : 1; uint32_t gatehclk : 1; uint32_t pwrclmp : 1; uint32_t rstpdwnmodule : 1; uint32_t physuspended : 1; uint32_t reserved_5_31 : 27; #endif } s; struct cvmx_usbcx_pcgcctl_s cn30xx; struct cvmx_usbcx_pcgcctl_s cn31xx; struct cvmx_usbcx_pcgcctl_s cn50xx; struct cvmx_usbcx_pcgcctl_s cn52xx; struct cvmx_usbcx_pcgcctl_s cn52xxp1; struct cvmx_usbcx_pcgcctl_s cn56xx; struct cvmx_usbcx_pcgcctl_s cn56xxp1; } cvmx_usbcx_pcgcctl_t; /** * cvmx_usbn#_bist_status * * USBN_BIST_STATUS = USBN's Control and Status * * Contains general control bits and status information for the USBN. */ typedef union { uint64_t u64; struct cvmx_usbnx_bist_status_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_7_63 : 57; uint64_t u2nc_bis : 1; /**< Bist status U2N CTL FIFO Memory. */ uint64_t u2nf_bis : 1; /**< Bist status U2N FIFO Memory. */ uint64_t e2hc_bis : 1; /**< Bist status E2H CTL FIFO Memory. */ uint64_t n2uf_bis : 1; /**< Bist status N2U FIFO Memory. */ uint64_t usbc_bis : 1; /**< Bist status USBC FIFO Memory. */ uint64_t nif_bis : 1; /**< Bist status for Inbound Memory. */ uint64_t nof_bis : 1; /**< Bist status for Outbound Memory. */ #else uint64_t nof_bis : 1; uint64_t nif_bis : 1; uint64_t usbc_bis : 1; uint64_t n2uf_bis : 1; uint64_t e2hc_bis : 1; uint64_t u2nf_bis : 1; uint64_t u2nc_bis : 1; uint64_t reserved_7_63 : 57; #endif } s; struct cvmx_usbnx_bist_status_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_3_63 : 61; uint64_t usbc_bis : 1; /**< Bist status USBC FIFO Memory. */ uint64_t nif_bis : 1; /**< Bist status for Inbound Memory. */ uint64_t nof_bis : 1; /**< Bist status for Outbound Memory. */ #else uint64_t nof_bis : 1; uint64_t nif_bis : 1; uint64_t usbc_bis : 1; uint64_t reserved_3_63 : 61; #endif } cn30xx; struct cvmx_usbnx_bist_status_cn30xx cn31xx; struct cvmx_usbnx_bist_status_s cn50xx; struct cvmx_usbnx_bist_status_s cn52xx; struct cvmx_usbnx_bist_status_s cn52xxp1; struct cvmx_usbnx_bist_status_s cn56xx; struct cvmx_usbnx_bist_status_s cn56xxp1; } cvmx_usbnx_bist_status_t; /** * cvmx_usbn#_clk_ctl * * USBN_CLK_CTL = USBN's Clock Control * * This register is used to control the frequency of the hclk and the hreset and phy_rst signals. */ typedef union { uint64_t u64; struct cvmx_usbnx_clk_ctl_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t divide2 : 2; /**< The 'hclk' used by the USB subsystem is derived from the eclk. Also see the field DIVIDE. DIVIDE2<1> must currently be zero because it is not implemented, so the maximum ratio of eclk/hclk is currently 16. The actual divide number for hclk is: (DIVIDE2 + 1) * (DIVIDE + 1) */ uint64_t hclk_rst : 1; /**< When this field is '0' the HCLK-DIVIDER used to generate the hclk in the USB Subsystem is held in reset. This bit must be set to '0' before changing the value of DIVIDE in this register. The reset to the HCLK_DIVIDER is also asserted when core reset is asserted. */ uint64_t p_x_on : 1; /**< Force USB-PHY on during suspend. '1' USB-PHY XO block is powered-down during suspend. '0' USB-PHY XO block is powered-up during suspend. The value of this field must be set while POR is active. */ uint64_t reserved_14_15 : 2; uint64_t p_com_on : 1; /**< '0' Force USB-PHY XO Bias, Bandgap and PLL to remain powered in Suspend Mode. '1' The USB-PHY XO Bias, Bandgap and PLL are powered down in suspend mode. The value of this field must be set while POR is active. */ uint64_t p_c_sel : 2; /**< Phy clock speed select. Selects the reference clock / crystal frequency.
'11': Reserved '10': 48 MHz (reserved when a crystal is used) '01': 24 MHz (reserved when a crystal is used) '00': 12 MHz The value of this field must be set while POR is active. NOTE: if a crystal is used as a reference clock, this field must be set to 12 MHz. */ uint64_t cdiv_byp : 1; /**< Used to enable the bypass input to the USB_CLK_DIV. */ uint64_t sd_mode : 2; /**< Scaledown mode for the USBC. Control timing events in the USBC, for normal operation this must be '0'. */ uint64_t s_bist : 1; /**< Starts bist on the hclk memories, during the '0' to '1' transition. */ uint64_t por : 1; /**< Power On Reset for the PHY. Resets all the PHYS registers and state machines. */ uint64_t enable : 1; /**< When '1' allows the generation of the hclk. When '0' the hclk will not be generated. SEE DIVIDE field of this register. */ uint64_t prst : 1; /**< When this field is '0' the reset associated with the phy_clk functionality in the USB Subsystem is held in reset. This bit should not be set to '1' until the time it takes 6 clocks (hclk or phy_clk, whichever is slower) has passed. Under normal operation once this bit is set to '1' it should not be set to '0'. */ uint64_t hrst : 1; /**< When this field is '0' the reset associated with the hclk functionality in the USB Subsystem is held in reset. This bit should not be set to '1' until 12ms after phy_clk is stable. Under normal operation, once this bit is set to '1' it should not be set to '0'. */ uint64_t divide : 3; /**< The frequency of 'hclk' used by the USB subsystem is the eclk frequency divided by the value of (DIVIDE2 + 1) * (DIVIDE + 1), also see the field DIVIDE2 of this register. The hclk frequency should be less than 125 MHz. After writing a value to this field the SW should read the field for the value written. The ENABLE field of this register should not be set until AFTER this field is set and then read. */ #else uint64_t divide : 3; uint64_t hrst : 1; uint64_t prst : 1; uint64_t enable : 1; uint64_t por : 1; uint64_t s_bist : 1; uint64_t sd_mode : 2; uint64_t cdiv_byp : 1; uint64_t p_c_sel : 2; uint64_t p_com_on : 1; uint64_t reserved_14_15 : 2; uint64_t p_x_on : 1; uint64_t hclk_rst : 1; uint64_t divide2 : 2; uint64_t reserved_20_63 : 44; #endif } s; struct cvmx_usbnx_clk_ctl_cn30xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_18_63 : 46; uint64_t hclk_rst : 1; /**< When this field is '0' the HCLK-DIVIDER used to generate the hclk in the USB Subsystem is held in reset. This bit must be set to '0' before changing the value of DIVIDE in this register. The reset to the HCLK_DIVIDER is also asserted when core reset is asserted. */ uint64_t p_x_on : 1; /**< Force USB-PHY on during suspend. '1' USB-PHY XO block is powered-down during suspend. '0' USB-PHY XO block is powered-up during suspend. The value of this field must be set while POR is active. */ uint64_t p_rclk : 1; /**< Phy reference clock enable. '1' The PHY PLL uses the XO block output as a reference. '0' Reserved. */ uint64_t p_xenbn : 1; /**< Phy external clock enable. '1' The XO block uses the clock from a crystal. '0' The XO block uses an external clock supplied on the XO pin. USB_XI should be tied to ground for this usage. */ uint64_t p_com_on : 1; /**< '0' Force USB-PHY XO Bias, Bandgap and PLL to remain powered in Suspend Mode. '1' The USB-PHY XO Bias, Bandgap and PLL are powered down in suspend mode. The value of this field must be set while POR is active. */ uint64_t p_c_sel : 2; /**< Phy clock speed select. Selects the reference clock / crystal frequency.
'11': Reserved '10': 48 MHz '01': 24 MHz '00': 12 MHz The value of this field must be set while POR is active. */ uint64_t cdiv_byp : 1; /**< Used to enable the bypass input to the USB_CLK_DIV. */ uint64_t sd_mode : 2; /**< Scaledown mode for the USBC. Control timing events in the USBC, for normal operation this must be '0'. */ uint64_t s_bist : 1; /**< Starts bist on the hclk memories, during the '0' to '1' transition. */ uint64_t por : 1; /**< Power On Reset for the PHY. Resets all the PHYS registers and state machines. */ uint64_t enable : 1; /**< When '1' allows the generation of the hclk. When '0' the hclk will not be generated. */ uint64_t prst : 1; /**< When this field is '0' the reset associated with the phy_clk functionality in the USB Subsystem is held in reset. This bit should not be set to '1' until the time it takes 6 clocks (hclk or phy_clk, whichever is slower) has passed. Under normal operation once this bit is set to '1' it should not be set to '0'. */ uint64_t hrst : 1; /**< When this field is '0' the reset associated with the hclk functionality in the USB Subsystem is held in reset. This bit should not be set to '1' until 12ms after phy_clk is stable. Under normal operation, once this bit is set to '1' it should not be set to '0'. */ uint64_t divide : 3; /**< The 'hclk' used by the USB subsystem is derived from the eclk. The eclk will be divided by the value of this field +1 to determine the hclk frequency. (Also see HRST of this register). The hclk frequency must be less than 125 MHz. */ #else uint64_t divide : 3; uint64_t hrst : 1; uint64_t prst : 1; uint64_t enable : 1; uint64_t por : 1; uint64_t s_bist : 1; uint64_t sd_mode : 2; uint64_t cdiv_byp : 1; uint64_t p_c_sel : 2; uint64_t p_com_on : 1; uint64_t p_xenbn : 1; uint64_t p_rclk : 1; uint64_t p_x_on : 1; uint64_t hclk_rst : 1; uint64_t reserved_18_63 : 46; #endif } cn30xx; struct cvmx_usbnx_clk_ctl_cn30xx cn31xx; struct cvmx_usbnx_clk_ctl_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_20_63 : 44; uint64_t divide2 : 2; /**< The 'hclk' used by the USB subsystem is derived from the eclk. Also see the field DIVIDE. DIVIDE2<1> must currently be zero because it is not implemented, so the maximum ratio of eclk/hclk is currently 16. The actual divide number for hclk is: (DIVIDE2 + 1) * (DIVIDE + 1) */ uint64_t hclk_rst : 1; /**< When this field is '0' the HCLK-DIVIDER used to generate the hclk in the USB Subsystem is held in reset. This bit must be set to '0' before changing the value of DIVIDE in this register. The reset to the HCLK_DIVIDER is also asserted when core reset is asserted. */ uint64_t reserved_16_16 : 1; uint64_t p_rtype : 2; /**< PHY reference clock type '0' The USB-PHY uses a 12MHz crystal as a clock source at the USB_XO and USB_XI pins '1' Reserved '2' The USB_PHY uses 12/24/48MHz 2.5V board clock at the USB_XO pin. USB_XI should be tied to ground in this case. '3' Reserved (bit 14 was P_XENBN on 3xxx) (bit 15 was P_RCLK on 3xxx) */ uint64_t p_com_on : 1; /**< '0' Force USB-PHY XO Bias, Bandgap and PLL to remain powered in Suspend Mode. '1' The USB-PHY XO Bias, Bandgap and PLL are powered down in suspend mode. The value of this field must be set while POR is active. */ uint64_t p_c_sel : 2; /**< Phy clock speed select. Selects the reference clock / crystal frequency. '11': Reserved '10': 48 MHz (reserved when a crystal is used) '01': 24 MHz (reserved when a crystal is used) '00': 12 MHz The value of this field must be set while POR is active.
NOTE: if a crystal is used as a reference clock, this field must be set to 12 MHz. */ uint64_t cdiv_byp : 1; /**< Used to enable the bypass input to the USB_CLK_DIV. */ uint64_t sd_mode : 2; /**< Scaledown mode for the USBC. Control timing events in the USBC, for normal operation this must be '0'. */ uint64_t s_bist : 1; /**< Starts bist on the hclk memories, during the '0' to '1' transition. */ uint64_t por : 1; /**< Power On Reset for the PHY. Resets all the PHYS registers and state machines. */ uint64_t enable : 1; /**< When '1' allows the generation of the hclk. When '0' the hclk will not be generated. SEE DIVIDE field of this register. */ uint64_t prst : 1; /**< When this field is '0' the reset associated with the phy_clk functionality in the USB Subsystem is held in reset. This bit should not be set to '1' until the time it takes 6 clocks (hclk or phy_clk, whichever is slower) has passed. Under normal operation once this bit is set to '1' it should not be set to '0'. */ uint64_t hrst : 1; /**< When this field is '0' the reset associated with the hclk functionality in the USB Subsystem is held in reset. This bit should not be set to '1' until 12ms after phy_clk is stable. Under normal operation, once this bit is set to '1' it should not be set to '0'. */ uint64_t divide : 3; /**< The frequency of 'hclk' used by the USB subsystem is the eclk frequency divided by the value of (DIVIDE2 + 1) * (DIVIDE + 1), also see the field DIVIDE2 of this register. The hclk frequency should be less than 125 MHz. After writing a value to this field the SW should read the field for the value written. The ENABLE field of this register should not be set until AFTER this field is set and then read. */ #else uint64_t divide : 3; uint64_t hrst : 1; uint64_t prst : 1; uint64_t enable : 1; uint64_t por : 1; uint64_t s_bist : 1; uint64_t sd_mode : 2; uint64_t cdiv_byp : 1; uint64_t p_c_sel : 2; uint64_t p_com_on : 1; uint64_t p_rtype : 2; uint64_t reserved_16_16 : 1; uint64_t hclk_rst : 1; uint64_t divide2 : 2; uint64_t reserved_20_63 : 44; #endif } cn50xx; struct cvmx_usbnx_clk_ctl_cn50xx cn52xx; struct cvmx_usbnx_clk_ctl_cn50xx cn52xxp1; struct cvmx_usbnx_clk_ctl_cn50xx cn56xx; struct cvmx_usbnx_clk_ctl_cn50xx cn56xxp1; } cvmx_usbnx_clk_ctl_t; /** * cvmx_usbn#_ctl_status * * USBN_CTL_STATUS = USBN's Control And Status Register * * Contains general control and status information for the USBN block. */ typedef union { uint64_t u64; struct cvmx_usbnx_ctl_status_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_6_63 : 58; uint64_t dma_0pag : 1; /**< When '1', the DMA engine will set the zero-page bit in the L2C store operation to the IOB. */ uint64_t dma_stt : 1; /**< When '1', the DMA engine uses STT operations. */ uint64_t dma_test : 1; /**< When '1', the DMA engine is put into Test-Mode. For normal operation this bit should be '0'. */ uint64_t inv_a2 : 1; /**< When '1' causes the address[2] driven on the AHB for USB-CORE FIFO access to be inverted. Also data written to and read from the AHB will have its byte order swapped. If the original order was A-B-C-D, the new byte order will be D-C-B-A. */ uint64_t l2c_emod : 2; /**< Endian format for data from/to the L2C.
IN: A-B-C-D-E-F-G-H OUT0: A-B-C-D-E-F-G-H OUT1: H-G-F-E-D-C-B-A OUT2: D-C-B-A-H-G-F-E OUT3: E-F-G-H-A-B-C-D */ #else uint64_t l2c_emod : 2; uint64_t inv_a2 : 1; uint64_t dma_test : 1; uint64_t dma_stt : 1; uint64_t dma_0pag : 1; uint64_t reserved_6_63 : 58; #endif } s; struct cvmx_usbnx_ctl_status_s cn30xx; struct cvmx_usbnx_ctl_status_s cn31xx; struct cvmx_usbnx_ctl_status_s cn50xx; struct cvmx_usbnx_ctl_status_s cn52xx; struct cvmx_usbnx_ctl_status_s cn52xxp1; struct cvmx_usbnx_ctl_status_s cn56xx; struct cvmx_usbnx_ctl_status_s cn56xxp1; } cvmx_usbnx_ctl_status_t; /** * cvmx_usbn#_dma0_inb_chn0 * * USBN_DMA0_INB_CHN0 = USBN's Inbound DMA for USB0 Channel0 * * Contains the starting address for use when USB0 writes to L2C via Channel0. * Writing of this register sets the base address. */ typedef union { uint64_t u64; struct cvmx_usbnx_dma0_inb_chn0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t addr : 36; /**< Base address for DMA Write to L2C. */ #else uint64_t addr : 36; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_usbnx_dma0_inb_chn0_s cn30xx; struct cvmx_usbnx_dma0_inb_chn0_s cn31xx; struct cvmx_usbnx_dma0_inb_chn0_s cn50xx; struct cvmx_usbnx_dma0_inb_chn0_s cn52xx; struct cvmx_usbnx_dma0_inb_chn0_s cn52xxp1; struct cvmx_usbnx_dma0_inb_chn0_s cn56xx; struct cvmx_usbnx_dma0_inb_chn0_s cn56xxp1; } cvmx_usbnx_dma0_inb_chn0_t; /** * cvmx_usbn#_dma0_inb_chn1 * * USBN_DMA0_INB_CHN1 = USBN's Inbound DMA for USB0 Channel1 * * Contains the starting address for use when USB0 writes to L2C via Channel1. * Writing of this register sets the base address. */ typedef union { uint64_t u64; struct cvmx_usbnx_dma0_inb_chn1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t addr : 36; /**< Base address for DMA Write to L2C. */ #else uint64_t addr : 36; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_usbnx_dma0_inb_chn1_s cn30xx; struct cvmx_usbnx_dma0_inb_chn1_s cn31xx; struct cvmx_usbnx_dma0_inb_chn1_s cn50xx; struct cvmx_usbnx_dma0_inb_chn1_s cn52xx; struct cvmx_usbnx_dma0_inb_chn1_s cn52xxp1; struct cvmx_usbnx_dma0_inb_chn1_s cn56xx; struct cvmx_usbnx_dma0_inb_chn1_s cn56xxp1; } cvmx_usbnx_dma0_inb_chn1_t; /** * cvmx_usbn#_dma0_inb_chn2 * * USBN_DMA0_INB_CHN2 = USBN's Inbound DMA for USB0 Channel2 * * Contains the starting address for use when USB0 writes to L2C via Channel2. * Writing of this register sets the base address. */ typedef union { uint64_t u64; struct cvmx_usbnx_dma0_inb_chn2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t addr : 36; /**< Base address for DMA Write to L2C. */ #else uint64_t addr : 36; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_usbnx_dma0_inb_chn2_s cn30xx; struct cvmx_usbnx_dma0_inb_chn2_s cn31xx; struct cvmx_usbnx_dma0_inb_chn2_s cn50xx; struct cvmx_usbnx_dma0_inb_chn2_s cn52xx; struct cvmx_usbnx_dma0_inb_chn2_s cn52xxp1; struct cvmx_usbnx_dma0_inb_chn2_s cn56xx; struct cvmx_usbnx_dma0_inb_chn2_s cn56xxp1; } cvmx_usbnx_dma0_inb_chn2_t; /** * cvmx_usbn#_dma0_inb_chn3 * * USBN_DMA0_INB_CHN3 = USBN's Inbound DMA for USB0 Channel3 * * Contains the starting address for use when USB0 writes to L2C via Channel3. * Writing of this register sets the base address. */ typedef union { uint64_t u64; struct cvmx_usbnx_dma0_inb_chn3_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t addr : 36; /**< Base address for DMA Write to L2C. 
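*
* A programming sketch (the USBN registers are 64-bit CSRs; cvmx_write_csr()
* and the CVMX_USBNX_DMA0_INB_CHN3(block) address macro are assumed to come
* from the companion headers; buf_phys is a placeholder physical address):
*
*   cvmx_usbnx_dma0_inb_chn3_t chn;
*   chn.u64 = 0;
*   chn.s.addr = buf_phys;        -- 36-bit base address for DMA writes to L2C
*   cvmx_write_csr(CVMX_USBNX_DMA0_INB_CHN3(0), chn.u64);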
*/ #else uint64_t addr : 36; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_usbnx_dma0_inb_chn3_s cn30xx; struct cvmx_usbnx_dma0_inb_chn3_s cn31xx; struct cvmx_usbnx_dma0_inb_chn3_s cn50xx; struct cvmx_usbnx_dma0_inb_chn3_s cn52xx; struct cvmx_usbnx_dma0_inb_chn3_s cn52xxp1; struct cvmx_usbnx_dma0_inb_chn3_s cn56xx; struct cvmx_usbnx_dma0_inb_chn3_s cn56xxp1; } cvmx_usbnx_dma0_inb_chn3_t; /** * cvmx_usbn#_dma0_inb_chn4 * * USBN_DMA0_INB_CHN4 = USBN's Inbound DMA for USB0 Channel4 * * Contains the starting address for use when USB0 writes to L2C via Channel4. * Writing of this register sets the base address. */ typedef union { uint64_t u64; struct cvmx_usbnx_dma0_inb_chn4_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t addr : 36; /**< Base address for DMA Write to L2C. */ #else uint64_t addr : 36; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_usbnx_dma0_inb_chn4_s cn30xx; struct cvmx_usbnx_dma0_inb_chn4_s cn31xx; struct cvmx_usbnx_dma0_inb_chn4_s cn50xx; struct cvmx_usbnx_dma0_inb_chn4_s cn52xx; struct cvmx_usbnx_dma0_inb_chn4_s cn52xxp1; struct cvmx_usbnx_dma0_inb_chn4_s cn56xx; struct cvmx_usbnx_dma0_inb_chn4_s cn56xxp1; } cvmx_usbnx_dma0_inb_chn4_t; /** * cvmx_usbn#_dma0_inb_chn5 * * USBN_DMA0_INB_CHN5 = USBN's Inbound DMA for USB0 Channel5 * * Contains the starting address for use when USB0 writes to L2C via Channel5. * Writing of this register sets the base address. */ typedef union { uint64_t u64; struct cvmx_usbnx_dma0_inb_chn5_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t addr : 36; /**< Base address for DMA Write to L2C. */ #else uint64_t addr : 36; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_usbnx_dma0_inb_chn5_s cn30xx; struct cvmx_usbnx_dma0_inb_chn5_s cn31xx; struct cvmx_usbnx_dma0_inb_chn5_s cn50xx; struct cvmx_usbnx_dma0_inb_chn5_s cn52xx; struct cvmx_usbnx_dma0_inb_chn5_s cn52xxp1; struct cvmx_usbnx_dma0_inb_chn5_s cn56xx; struct cvmx_usbnx_dma0_inb_chn5_s cn56xxp1; } cvmx_usbnx_dma0_inb_chn5_t; /** * cvmx_usbn#_dma0_inb_chn6 * * USBN_DMA0_INB_CHN6 = USBN's Inbound DMA for USB0 Channel6 * * Contains the starting address for use when USB0 writes to L2C via Channel6. * Writing of this register sets the base address. */ typedef union { uint64_t u64; struct cvmx_usbnx_dma0_inb_chn6_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t addr : 36; /**< Base address for DMA Write to L2C. */ #else uint64_t addr : 36; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_usbnx_dma0_inb_chn6_s cn30xx; struct cvmx_usbnx_dma0_inb_chn6_s cn31xx; struct cvmx_usbnx_dma0_inb_chn6_s cn50xx; struct cvmx_usbnx_dma0_inb_chn6_s cn52xx; struct cvmx_usbnx_dma0_inb_chn6_s cn52xxp1; struct cvmx_usbnx_dma0_inb_chn6_s cn56xx; struct cvmx_usbnx_dma0_inb_chn6_s cn56xxp1; } cvmx_usbnx_dma0_inb_chn6_t; /** * cvmx_usbn#_dma0_inb_chn7 * * USBN_DMA0_INB_CHN7 = USBN's Inbound DMA for USB0 Channel7 * * Contains the starting address for use when USB0 writes to L2C via Channel7. * Writing of this register sets the base address. */ typedef union { uint64_t u64; struct cvmx_usbnx_dma0_inb_chn7_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t addr : 36; /**< Base address for DMA Write to L2C. 
*/ #else uint64_t addr : 36; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_usbnx_dma0_inb_chn7_s cn30xx; struct cvmx_usbnx_dma0_inb_chn7_s cn31xx; struct cvmx_usbnx_dma0_inb_chn7_s cn50xx; struct cvmx_usbnx_dma0_inb_chn7_s cn52xx; struct cvmx_usbnx_dma0_inb_chn7_s cn52xxp1; struct cvmx_usbnx_dma0_inb_chn7_s cn56xx; struct cvmx_usbnx_dma0_inb_chn7_s cn56xxp1; } cvmx_usbnx_dma0_inb_chn7_t; /** * cvmx_usbn#_dma0_outb_chn0 * * USBN_DMA0_OUTB_CHN0 = USBN's Outbound DMA for USB0 Channel0 * * Contains the starting address for use when USB0 reads from L2C via Channel0. * Writing of this register sets the base address. */ typedef union { uint64_t u64; struct cvmx_usbnx_dma0_outb_chn0_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t addr : 36; /**< Base address for DMA Read from L2C. */ #else uint64_t addr : 36; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_usbnx_dma0_outb_chn0_s cn30xx; struct cvmx_usbnx_dma0_outb_chn0_s cn31xx; struct cvmx_usbnx_dma0_outb_chn0_s cn50xx; struct cvmx_usbnx_dma0_outb_chn0_s cn52xx; struct cvmx_usbnx_dma0_outb_chn0_s cn52xxp1; struct cvmx_usbnx_dma0_outb_chn0_s cn56xx; struct cvmx_usbnx_dma0_outb_chn0_s cn56xxp1; } cvmx_usbnx_dma0_outb_chn0_t; /** * cvmx_usbn#_dma0_outb_chn1 * * USBN_DMA0_OUTB_CHN1 = USBN's Outbound DMA for USB0 Channel1 * * Contains the starting address for use when USB0 reads from L2C via Channel1. * Writing of this register sets the base address. */ typedef union { uint64_t u64; struct cvmx_usbnx_dma0_outb_chn1_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t addr : 36; /**< Base address for DMA Read from L2C. */ #else uint64_t addr : 36; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_usbnx_dma0_outb_chn1_s cn30xx; struct cvmx_usbnx_dma0_outb_chn1_s cn31xx; struct cvmx_usbnx_dma0_outb_chn1_s cn50xx; struct cvmx_usbnx_dma0_outb_chn1_s cn52xx; struct cvmx_usbnx_dma0_outb_chn1_s cn52xxp1; struct cvmx_usbnx_dma0_outb_chn1_s cn56xx; struct cvmx_usbnx_dma0_outb_chn1_s cn56xxp1; } cvmx_usbnx_dma0_outb_chn1_t; /** * cvmx_usbn#_dma0_outb_chn2 * * USBN_DMA0_OUTB_CHN2 = USBN's Outbound DMA for USB0 Channel2 * * Contains the starting address for use when USB0 reads from L2C via Channel2. * Writing of this register sets the base address. */ typedef union { uint64_t u64; struct cvmx_usbnx_dma0_outb_chn2_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t addr : 36; /**< Base address for DMA Read from L2C. */ #else uint64_t addr : 36; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_usbnx_dma0_outb_chn2_s cn30xx; struct cvmx_usbnx_dma0_outb_chn2_s cn31xx; struct cvmx_usbnx_dma0_outb_chn2_s cn50xx; struct cvmx_usbnx_dma0_outb_chn2_s cn52xx; struct cvmx_usbnx_dma0_outb_chn2_s cn52xxp1; struct cvmx_usbnx_dma0_outb_chn2_s cn56xx; struct cvmx_usbnx_dma0_outb_chn2_s cn56xxp1; } cvmx_usbnx_dma0_outb_chn2_t; /** * cvmx_usbn#_dma0_outb_chn3 * * USBN_DMA0_OUTB_CHN3 = USBN's Outbound DMA for USB0 Channel3 * * Contains the starting address for use when USB0 reads from L2C via Channel3. * Writing of this register sets the base address. */ typedef union { uint64_t u64; struct cvmx_usbnx_dma0_outb_chn3_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t addr : 36; /**< Base address for DMA Read from L2C. 
*/ #else uint64_t addr : 36; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_usbnx_dma0_outb_chn3_s cn30xx; struct cvmx_usbnx_dma0_outb_chn3_s cn31xx; struct cvmx_usbnx_dma0_outb_chn3_s cn50xx; struct cvmx_usbnx_dma0_outb_chn3_s cn52xx; struct cvmx_usbnx_dma0_outb_chn3_s cn52xxp1; struct cvmx_usbnx_dma0_outb_chn3_s cn56xx; struct cvmx_usbnx_dma0_outb_chn3_s cn56xxp1; } cvmx_usbnx_dma0_outb_chn3_t; /** * cvmx_usbn#_dma0_outb_chn4 * * USBN_DMA0_OUTB_CHN4 = USBN's Outbound DMA for USB0 Channel4 * * Contains the starting address for use when USB0 reads from L2C via Channel4. * Writing of this register sets the base address. */ typedef union { uint64_t u64; struct cvmx_usbnx_dma0_outb_chn4_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t addr : 36; /**< Base address for DMA Read from L2C. */ #else uint64_t addr : 36; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_usbnx_dma0_outb_chn4_s cn30xx; struct cvmx_usbnx_dma0_outb_chn4_s cn31xx; struct cvmx_usbnx_dma0_outb_chn4_s cn50xx; struct cvmx_usbnx_dma0_outb_chn4_s cn52xx; struct cvmx_usbnx_dma0_outb_chn4_s cn52xxp1; struct cvmx_usbnx_dma0_outb_chn4_s cn56xx; struct cvmx_usbnx_dma0_outb_chn4_s cn56xxp1; } cvmx_usbnx_dma0_outb_chn4_t; /** * cvmx_usbn#_dma0_outb_chn5 * * USBN_DMA0_OUTB_CHN5 = USBN's Outbound DMA for USB0 Channel5 * * Contains the starting address for use when USB0 reads from L2C via Channel5. * Writing of this register sets the base address. */ typedef union { uint64_t u64; struct cvmx_usbnx_dma0_outb_chn5_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t addr : 36; /**< Base address for DMA Read from L2C. */ #else uint64_t addr : 36; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_usbnx_dma0_outb_chn5_s cn30xx; struct cvmx_usbnx_dma0_outb_chn5_s cn31xx; struct cvmx_usbnx_dma0_outb_chn5_s cn50xx; struct cvmx_usbnx_dma0_outb_chn5_s cn52xx; struct cvmx_usbnx_dma0_outb_chn5_s cn52xxp1; struct cvmx_usbnx_dma0_outb_chn5_s cn56xx; struct cvmx_usbnx_dma0_outb_chn5_s cn56xxp1; } cvmx_usbnx_dma0_outb_chn5_t; /** * cvmx_usbn#_dma0_outb_chn6 * * USBN_DMA0_OUTB_CHN6 = USBN's Outbound DMA for USB0 Channel6 * * Contains the starting address for use when USB0 reads from L2C via Channel6. * Writing of this register sets the base address. */ typedef union { uint64_t u64; struct cvmx_usbnx_dma0_outb_chn6_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t addr : 36; /**< Base address for DMA Read from L2C. */ #else uint64_t addr : 36; uint64_t reserved_36_63 : 28; #endif } s; struct cvmx_usbnx_dma0_outb_chn6_s cn30xx; struct cvmx_usbnx_dma0_outb_chn6_s cn31xx; struct cvmx_usbnx_dma0_outb_chn6_s cn50xx; struct cvmx_usbnx_dma0_outb_chn6_s cn52xx; struct cvmx_usbnx_dma0_outb_chn6_s cn52xxp1; struct cvmx_usbnx_dma0_outb_chn6_s cn56xx; struct cvmx_usbnx_dma0_outb_chn6_s cn56xxp1; } cvmx_usbnx_dma0_outb_chn6_t; /** * cvmx_usbn#_dma0_outb_chn7 * * USBN_DMA0_OUTB_CHN7 = USBN's Outbound DMA for USB0 Channel7 * * Contains the starting address for use when USB0 reads from L2C via Channel7. * Writing of this register sets the base address. */ typedef union { uint64_t u64; struct cvmx_usbnx_dma0_outb_chn7_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_36_63 : 28; uint64_t addr : 36; /**< Base address for DMA Read from L2C. 
 */
#else
        uint64_t addr : 36;
        uint64_t reserved_36_63 : 28;
#endif
    } s;
    struct cvmx_usbnx_dma0_outb_chn7_s cn30xx;
    struct cvmx_usbnx_dma0_outb_chn7_s cn31xx;
    struct cvmx_usbnx_dma0_outb_chn7_s cn50xx;
    struct cvmx_usbnx_dma0_outb_chn7_s cn52xx;
    struct cvmx_usbnx_dma0_outb_chn7_s cn52xxp1;
    struct cvmx_usbnx_dma0_outb_chn7_s cn56xx;
    struct cvmx_usbnx_dma0_outb_chn7_s cn56xxp1;
} cvmx_usbnx_dma0_outb_chn7_t;


/**
 * cvmx_usbn#_dma_test
 *
 * USBN_DMA_TEST = USBN's DMA Test Register
 *
 * This register can cause the DMA engine external to the USB-Core to make
 * transfers from/to the L2C/USB-FIFOs.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_usbnx_dma_test_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_40_63 : 24;
        uint64_t done : 1;        /**< This field is set when a DMA completes.
                                       Writing a '1' to this field clears this
                                       bit. */
        uint64_t req : 1;         /**< DMA Request. Writing a '1' to this field will
                                       cause a DMA request, as specified in the
                                       other fields of this register, to take place.
                                       This field will always read as '0'. */
        uint64_t f_addr : 18;     /**< The address to read from in the Data-Fifo. */
        uint64_t count : 11;      /**< DMA Request Count. */
        uint64_t channel : 5;     /**< DMA Channel/Endpoint. */
        uint64_t burst : 4;       /**< DMA Burst Size. */
#else
        uint64_t burst : 4;
        uint64_t channel : 5;
        uint64_t count : 11;
        uint64_t f_addr : 18;
        uint64_t req : 1;
        uint64_t done : 1;
        uint64_t reserved_40_63 : 24;
#endif
    } s;
    struct cvmx_usbnx_dma_test_s cn30xx;
    struct cvmx_usbnx_dma_test_s cn31xx;
    struct cvmx_usbnx_dma_test_s cn50xx;
    struct cvmx_usbnx_dma_test_s cn52xx;
    struct cvmx_usbnx_dma_test_s cn52xxp1;
    struct cvmx_usbnx_dma_test_s cn56xx;
    struct cvmx_usbnx_dma_test_s cn56xxp1;
} cvmx_usbnx_dma_test_t;


/**
 * cvmx_usbn#_int_enb
 *
 * USBN_INT_ENB = USBN's Interrupt Enable
 *
 * The USBN's interrupt enable register.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_usbnx_int_enb_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_38_63 : 26;
        uint64_t nd4o_dpf : 1;    /**< When set (1) and bit 37 of the USBN_INT_SUM
                                       register is asserted the USBN will assert an
                                       interrupt. */
        uint64_t nd4o_dpe : 1;    /**< When set (1) and bit 36 of the USBN_INT_SUM
                                       register is asserted the USBN will assert an
                                       interrupt. */
        uint64_t nd4o_rpf : 1;    /**< When set (1) and bit 35 of the USBN_INT_SUM
                                       register is asserted the USBN will assert an
                                       interrupt. */
        uint64_t nd4o_rpe : 1;    /**< When set (1) and bit 34 of the USBN_INT_SUM
                                       register is asserted the USBN will assert an
                                       interrupt. */
        uint64_t ltl_f_pf : 1;    /**< When set (1) and bit 33 of the USBN_INT_SUM
                                       register is asserted the USBN will assert an
                                       interrupt. */
        uint64_t ltl_f_pe : 1;    /**< When set (1) and bit 32 of the USBN_INT_SUM
                                       register is asserted the USBN will assert an
                                       interrupt. */
        uint64_t u2n_c_pe : 1;    /**< When set (1) and bit 31 of the USBN_INT_SUM
                                       register is asserted the USBN will assert an
                                       interrupt. */
        uint64_t u2n_c_pf : 1;    /**< When set (1) and bit 30 of the USBN_INT_SUM
                                       register is asserted the USBN will assert an
                                       interrupt. */
        uint64_t u2n_d_pf : 1;    /**< When set (1) and bit 29 of the USBN_INT_SUM
                                       register is asserted the USBN will assert an
                                       interrupt. */
        uint64_t u2n_d_pe : 1;    /**< When set (1) and bit 28 of the USBN_INT_SUM
                                       register is asserted the USBN will assert an
                                       interrupt. */
        uint64_t n2u_pe : 1;      /**< When set (1) and bit 27 of the USBN_INT_SUM
                                       register is asserted the USBN will assert an
                                       interrupt. */
        uint64_t n2u_pf : 1;      /**< When set (1) and bit 26 of the USBN_INT_SUM
                                       register is asserted the USBN will assert an
                                       interrupt.
*/ uint64_t uod_pf : 1; /**< When set (1) and bit 25 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t uod_pe : 1; /**< When set (1) and bit 24 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t rq_q3_e : 1; /**< When set (1) and bit 23 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t rq_q3_f : 1; /**< When set (1) and bit 22 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t rq_q2_e : 1; /**< When set (1) and bit 21 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t rq_q2_f : 1; /**< When set (1) and bit 20 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t rg_fi_f : 1; /**< When set (1) and bit 19 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t rg_fi_e : 1; /**< When set (1) and bit 18 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t l2_fi_f : 1; /**< When set (1) and bit 17 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t l2_fi_e : 1; /**< When set (1) and bit 16 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t l2c_a_f : 1; /**< When set (1) and bit 15 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t l2c_s_e : 1; /**< When set (1) and bit 14 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t dcred_f : 1; /**< When set (1) and bit 13 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t dcred_e : 1; /**< When set (1) and bit 12 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t lt_pu_f : 1; /**< When set (1) and bit 11 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t lt_po_e : 1; /**< When set (1) and bit 10 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t nt_pu_f : 1; /**< When set (1) and bit 9 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t nt_po_e : 1; /**< When set (1) and bit 8 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t pt_pu_f : 1; /**< When set (1) and bit 7 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t pt_po_e : 1; /**< When set (1) and bit 6 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t lr_pu_f : 1; /**< When set (1) and bit 5 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t lr_po_e : 1; /**< When set (1) and bit 4 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t nr_pu_f : 1; /**< When set (1) and bit 3 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t nr_po_e : 1; /**< When set (1) and bit 2 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t pr_pu_f : 1; /**< When set (1) and bit 1 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t pr_po_e : 1; /**< When set (1) and bit 0 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. 
*/ #else uint64_t pr_po_e : 1; uint64_t pr_pu_f : 1; uint64_t nr_po_e : 1; uint64_t nr_pu_f : 1; uint64_t lr_po_e : 1; uint64_t lr_pu_f : 1; uint64_t pt_po_e : 1; uint64_t pt_pu_f : 1; uint64_t nt_po_e : 1; uint64_t nt_pu_f : 1; uint64_t lt_po_e : 1; uint64_t lt_pu_f : 1; uint64_t dcred_e : 1; uint64_t dcred_f : 1; uint64_t l2c_s_e : 1; uint64_t l2c_a_f : 1; uint64_t l2_fi_e : 1; uint64_t l2_fi_f : 1; uint64_t rg_fi_e : 1; uint64_t rg_fi_f : 1; uint64_t rq_q2_f : 1; uint64_t rq_q2_e : 1; uint64_t rq_q3_f : 1; uint64_t rq_q3_e : 1; uint64_t uod_pe : 1; uint64_t uod_pf : 1; uint64_t n2u_pf : 1; uint64_t n2u_pe : 1; uint64_t u2n_d_pe : 1; uint64_t u2n_d_pf : 1; uint64_t u2n_c_pf : 1; uint64_t u2n_c_pe : 1; uint64_t ltl_f_pe : 1; uint64_t ltl_f_pf : 1; uint64_t nd4o_rpe : 1; uint64_t nd4o_rpf : 1; uint64_t nd4o_dpe : 1; uint64_t nd4o_dpf : 1; uint64_t reserved_38_63 : 26; #endif } s; struct cvmx_usbnx_int_enb_s cn30xx; struct cvmx_usbnx_int_enb_s cn31xx; struct cvmx_usbnx_int_enb_cn50xx { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_38_63 : 26; uint64_t nd4o_dpf : 1; /**< When set (1) and bit 37 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t nd4o_dpe : 1; /**< When set (1) and bit 36 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t nd4o_rpf : 1; /**< When set (1) and bit 35 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t nd4o_rpe : 1; /**< When set (1) and bit 34 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t ltl_f_pf : 1; /**< When set (1) and bit 33 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t ltl_f_pe : 1; /**< When set (1) and bit 32 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t reserved_26_31 : 6; uint64_t uod_pf : 1; /**< When set (1) and bit 25 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t uod_pe : 1; /**< When set (1) and bit 24 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t rq_q3_e : 1; /**< When set (1) and bit 23 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t rq_q3_f : 1; /**< When set (1) and bit 22 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t rq_q2_e : 1; /**< When set (1) and bit 21 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t rq_q2_f : 1; /**< When set (1) and bit 20 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t rg_fi_f : 1; /**< When set (1) and bit 19 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t rg_fi_e : 1; /**< When set (1) and bit 18 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t l2_fi_f : 1; /**< When set (1) and bit 17 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t l2_fi_e : 1; /**< When set (1) and bit 16 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t l2c_a_f : 1; /**< When set (1) and bit 15 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. */ uint64_t l2c_s_e : 1; /**< When set (1) and bit 14 of the USBN_INT_SUM register is asserted the USBN will assert an interrupt. 
 */
        uint64_t dcred_f : 1;     /**< When set (1) and bit 13 of the USBN_INT_SUM
                                       register is asserted the USBN will assert an
                                       interrupt. */
        uint64_t dcred_e : 1;     /**< When set (1) and bit 12 of the USBN_INT_SUM
                                       register is asserted the USBN will assert an
                                       interrupt. */
        uint64_t lt_pu_f : 1;     /**< When set (1) and bit 11 of the USBN_INT_SUM
                                       register is asserted the USBN will assert an
                                       interrupt. */
        uint64_t lt_po_e : 1;     /**< When set (1) and bit 10 of the USBN_INT_SUM
                                       register is asserted the USBN will assert an
                                       interrupt. */
        uint64_t nt_pu_f : 1;     /**< When set (1) and bit 9 of the USBN_INT_SUM
                                       register is asserted the USBN will assert an
                                       interrupt. */
        uint64_t nt_po_e : 1;     /**< When set (1) and bit 8 of the USBN_INT_SUM
                                       register is asserted the USBN will assert an
                                       interrupt. */
        uint64_t pt_pu_f : 1;     /**< When set (1) and bit 7 of the USBN_INT_SUM
                                       register is asserted the USBN will assert an
                                       interrupt. */
        uint64_t pt_po_e : 1;     /**< When set (1) and bit 6 of the USBN_INT_SUM
                                       register is asserted the USBN will assert an
                                       interrupt. */
        uint64_t lr_pu_f : 1;     /**< When set (1) and bit 5 of the USBN_INT_SUM
                                       register is asserted the USBN will assert an
                                       interrupt. */
        uint64_t lr_po_e : 1;     /**< When set (1) and bit 4 of the USBN_INT_SUM
                                       register is asserted the USBN will assert an
                                       interrupt. */
        uint64_t nr_pu_f : 1;     /**< When set (1) and bit 3 of the USBN_INT_SUM
                                       register is asserted the USBN will assert an
                                       interrupt. */
        uint64_t nr_po_e : 1;     /**< When set (1) and bit 2 of the USBN_INT_SUM
                                       register is asserted the USBN will assert an
                                       interrupt. */
        uint64_t pr_pu_f : 1;     /**< When set (1) and bit 1 of the USBN_INT_SUM
                                       register is asserted the USBN will assert an
                                       interrupt. */
        uint64_t pr_po_e : 1;     /**< When set (1) and bit 0 of the USBN_INT_SUM
                                       register is asserted the USBN will assert an
                                       interrupt. */
#else
        uint64_t pr_po_e : 1;
        uint64_t pr_pu_f : 1;
        uint64_t nr_po_e : 1;
        uint64_t nr_pu_f : 1;
        uint64_t lr_po_e : 1;
        uint64_t lr_pu_f : 1;
        uint64_t pt_po_e : 1;
        uint64_t pt_pu_f : 1;
        uint64_t nt_po_e : 1;
        uint64_t nt_pu_f : 1;
        uint64_t lt_po_e : 1;
        uint64_t lt_pu_f : 1;
        uint64_t dcred_e : 1;
        uint64_t dcred_f : 1;
        uint64_t l2c_s_e : 1;
        uint64_t l2c_a_f : 1;
        uint64_t l2_fi_e : 1;
        uint64_t l2_fi_f : 1;
        uint64_t rg_fi_e : 1;
        uint64_t rg_fi_f : 1;
        uint64_t rq_q2_f : 1;
        uint64_t rq_q2_e : 1;
        uint64_t rq_q3_f : 1;
        uint64_t rq_q3_e : 1;
        uint64_t uod_pe : 1;
        uint64_t uod_pf : 1;
        uint64_t reserved_26_31 : 6;
        uint64_t ltl_f_pe : 1;
        uint64_t ltl_f_pf : 1;
        uint64_t nd4o_rpe : 1;
        uint64_t nd4o_rpf : 1;
        uint64_t nd4o_dpe : 1;
        uint64_t nd4o_dpf : 1;
        uint64_t reserved_38_63 : 26;
#endif
    } cn50xx;
    struct cvmx_usbnx_int_enb_cn50xx cn52xx;
    struct cvmx_usbnx_int_enb_cn50xx cn52xxp1;
    struct cvmx_usbnx_int_enb_cn50xx cn56xx;
    struct cvmx_usbnx_int_enb_cn50xx cn56xxp1;
} cvmx_usbnx_int_enb_t;


/**
 * cvmx_usbn#_int_sum
 *
 * USBN_INT_SUM = USBN's Interrupt Summary Register
 *
 * Contains the different interrupt summary bits of the USBN.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_usbnx_int_sum_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_38_63 : 26;
        uint64_t nd4o_dpf : 1;    /**< NCB DMA Out Data Fifo Push Full. */
        uint64_t nd4o_dpe : 1;    /**< NCB DMA Out Data Fifo Pop Empty. */
        uint64_t nd4o_rpf : 1;    /**< NCB DMA Out Request Fifo Push Full. */
        uint64_t nd4o_rpe : 1;    /**< NCB DMA Out Request Fifo Pop Empty. */
        uint64_t ltl_f_pf : 1;    /**< L2C Transfer Length Fifo Push Full. */
        uint64_t ltl_f_pe : 1;    /**< L2C Transfer Length Fifo Pop Empty. */
        uint64_t u2n_c_pe : 1;    /**< U2N Control Fifo Pop Empty. */
        uint64_t u2n_c_pf : 1;    /**< U2N Control Fifo Push Full. */
        uint64_t u2n_d_pf : 1;    /**< U2N Data Fifo Push Full. */
        uint64_t u2n_d_pe : 1;    /**< U2N Data Fifo Pop Empty. */
        uint64_t n2u_pe : 1;      /**< N2U Fifo Pop Empty. */
        uint64_t n2u_pf : 1;      /**< N2U Fifo Push Full. */
        uint64_t uod_pf : 1;      /**< UOD Fifo Push Full. */
        uint64_t uod_pe : 1;      /**< UOD Fifo Pop Empty. */
        uint64_t rq_q3_e : 1;     /**< Request Queue-3 Fifo Popped When Empty. */
        uint64_t rq_q3_f : 1;     /**< Request Queue-3 Fifo Pushed When Full. */
        uint64_t rq_q2_e : 1;     /**< Request Queue-2 Fifo Popped When Empty. */
        uint64_t rq_q2_f : 1;     /**< Request Queue-2 Fifo Pushed When Full. */
        uint64_t rg_fi_f : 1;     /**< Register Request Fifo Pushed When Full. */
        uint64_t rg_fi_e : 1;     /**< Register Request Fifo Popped When Empty. */
        uint64_t lt_fi_f : 1;     /**< L2C Request Fifo Pushed When Full. */
        uint64_t lt_fi_e : 1;     /**< L2C Request Fifo Popped When Empty. */
        uint64_t l2c_a_f : 1;     /**< L2C Credit Count Added When Full. */
        uint64_t l2c_s_e : 1;     /**< L2C Credit Count Subtracted When Empty. */
        uint64_t dcred_f : 1;     /**< Data Credit Fifo Pushed When Full. */
        uint64_t dcred_e : 1;     /**< Data Credit Fifo Popped When Empty. */
        uint64_t lt_pu_f : 1;     /**< L2C Transaction Fifo Pushed When Full. */
        uint64_t lt_po_e : 1;     /**< L2C Transaction Fifo Popped When Empty. */
        uint64_t nt_pu_f : 1;     /**< NPI Transaction Fifo Pushed When Full. */
        uint64_t nt_po_e : 1;     /**< NPI Transaction Fifo Popped When Empty. */
        uint64_t pt_pu_f : 1;     /**< PP Transaction Fifo Pushed When Full. */
        uint64_t pt_po_e : 1;     /**< PP Transaction Fifo Popped When Empty. */
        uint64_t lr_pu_f : 1;     /**< L2C Request Fifo Pushed When Full. */
        uint64_t lr_po_e : 1;     /**< L2C Request Fifo Popped When Empty. */
        uint64_t nr_pu_f : 1;     /**< NPI Request Fifo Pushed When Full. */
        uint64_t nr_po_e : 1;     /**< NPI Request Fifo Popped When Empty. */
        uint64_t pr_pu_f : 1;     /**< PP Request Fifo Pushed When Full. */
        uint64_t pr_po_e : 1;     /**< PP Request Fifo Popped When Empty. */
#else
        uint64_t pr_po_e : 1;
        uint64_t pr_pu_f : 1;
        uint64_t nr_po_e : 1;
        uint64_t nr_pu_f : 1;
        uint64_t lr_po_e : 1;
        uint64_t lr_pu_f : 1;
        uint64_t pt_po_e : 1;
        uint64_t pt_pu_f : 1;
        uint64_t nt_po_e : 1;
        uint64_t nt_pu_f : 1;
        uint64_t lt_po_e : 1;
        uint64_t lt_pu_f : 1;
        uint64_t dcred_e : 1;
        uint64_t dcred_f : 1;
        uint64_t l2c_s_e : 1;
        uint64_t l2c_a_f : 1;
        uint64_t lt_fi_e : 1;
        uint64_t lt_fi_f : 1;
        uint64_t rg_fi_e : 1;
        uint64_t rg_fi_f : 1;
        uint64_t rq_q2_f : 1;
        uint64_t rq_q2_e : 1;
        uint64_t rq_q3_f : 1;
        uint64_t rq_q3_e : 1;
        uint64_t uod_pe : 1;
        uint64_t uod_pf : 1;
        uint64_t n2u_pf : 1;
        uint64_t n2u_pe : 1;
        uint64_t u2n_d_pe : 1;
        uint64_t u2n_d_pf : 1;
        uint64_t u2n_c_pf : 1;
        uint64_t u2n_c_pe : 1;
        uint64_t ltl_f_pe : 1;
        uint64_t ltl_f_pf : 1;
        uint64_t nd4o_rpe : 1;
        uint64_t nd4o_rpf : 1;
        uint64_t nd4o_dpe : 1;
        uint64_t nd4o_dpf : 1;
        uint64_t reserved_38_63 : 26;
#endif
    } s;
    struct cvmx_usbnx_int_sum_s cn30xx;
    struct cvmx_usbnx_int_sum_s cn31xx;
    struct cvmx_usbnx_int_sum_cn50xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_38_63 : 26;
        uint64_t nd4o_dpf : 1;    /**< NCB DMA Out Data Fifo Push Full. */
        uint64_t nd4o_dpe : 1;    /**< NCB DMA Out Data Fifo Pop Empty. */
        uint64_t nd4o_rpf : 1;    /**< NCB DMA Out Request Fifo Push Full. */
        uint64_t nd4o_rpe : 1;    /**< NCB DMA Out Request Fifo Pop Empty. */
        uint64_t ltl_f_pf : 1;    /**< L2C Transfer Length Fifo Push Full. */
        uint64_t ltl_f_pe : 1;    /**< L2C Transfer Length Fifo Pop Empty. */
        uint64_t reserved_26_31 : 6;
        uint64_t uod_pf : 1;      /**< UOD Fifo Push Full. */
        uint64_t uod_pe : 1;      /**< UOD Fifo Pop Empty. */
        uint64_t rq_q3_e : 1;     /**< Request Queue-3 Fifo Popped When Empty. */
        uint64_t rq_q3_f : 1;     /**< Request Queue-3 Fifo Pushed When Full. */
        uint64_t rq_q2_e : 1;     /**< Request Queue-2 Fifo Popped When Empty. */
        uint64_t rq_q2_f : 1;     /**< Request Queue-2 Fifo Pushed When Full. */
        uint64_t rg_fi_f : 1;     /**< Register Request Fifo Pushed When Full. */
        uint64_t rg_fi_e : 1;     /**< Register Request Fifo Popped When Empty. */
        uint64_t lt_fi_f : 1;     /**< L2C Request Fifo Pushed When Full. */
        uint64_t lt_fi_e : 1;     /**< L2C Request Fifo Popped When Empty. */
        uint64_t l2c_a_f : 1;     /**< L2C Credit Count Added When Full. */
        uint64_t l2c_s_e : 1;     /**< L2C Credit Count Subtracted When Empty. */
        uint64_t dcred_f : 1;     /**< Data Credit Fifo Pushed When Full. */
        uint64_t dcred_e : 1;     /**< Data Credit Fifo Popped When Empty. */
        uint64_t lt_pu_f : 1;     /**< L2C Transaction Fifo Pushed When Full. */
        uint64_t lt_po_e : 1;     /**< L2C Transaction Fifo Popped When Empty. */
        uint64_t nt_pu_f : 1;     /**< NPI Transaction Fifo Pushed When Full. */
        uint64_t nt_po_e : 1;     /**< NPI Transaction Fifo Popped When Empty. */
        uint64_t pt_pu_f : 1;     /**< PP Transaction Fifo Pushed When Full. */
        uint64_t pt_po_e : 1;     /**< PP Transaction Fifo Popped When Empty. */
        uint64_t lr_pu_f : 1;     /**< L2C Request Fifo Pushed When Full. */
        uint64_t lr_po_e : 1;     /**< L2C Request Fifo Popped When Empty. */
        uint64_t nr_pu_f : 1;     /**< NPI Request Fifo Pushed When Full. */
        uint64_t nr_po_e : 1;     /**< NPI Request Fifo Popped When Empty. */
        uint64_t pr_pu_f : 1;     /**< PP Request Fifo Pushed When Full. */
        uint64_t pr_po_e : 1;     /**< PP Request Fifo Popped When Empty. */
#else
        uint64_t pr_po_e : 1;
        uint64_t pr_pu_f : 1;
        uint64_t nr_po_e : 1;
        uint64_t nr_pu_f : 1;
        uint64_t lr_po_e : 1;
        uint64_t lr_pu_f : 1;
        uint64_t pt_po_e : 1;
        uint64_t pt_pu_f : 1;
        uint64_t nt_po_e : 1;
        uint64_t nt_pu_f : 1;
        uint64_t lt_po_e : 1;
        uint64_t lt_pu_f : 1;
        uint64_t dcred_e : 1;
        uint64_t dcred_f : 1;
        uint64_t l2c_s_e : 1;
        uint64_t l2c_a_f : 1;
        uint64_t lt_fi_e : 1;
        uint64_t lt_fi_f : 1;
        uint64_t rg_fi_e : 1;
        uint64_t rg_fi_f : 1;
        uint64_t rq_q2_f : 1;
        uint64_t rq_q2_e : 1;
        uint64_t rq_q3_f : 1;
        uint64_t rq_q3_e : 1;
        uint64_t uod_pe : 1;
        uint64_t uod_pf : 1;
        uint64_t reserved_26_31 : 6;
        uint64_t ltl_f_pe : 1;
        uint64_t ltl_f_pf : 1;
        uint64_t nd4o_rpe : 1;
        uint64_t nd4o_rpf : 1;
        uint64_t nd4o_dpe : 1;
        uint64_t nd4o_dpf : 1;
        uint64_t reserved_38_63 : 26;
#endif
    } cn50xx;
    struct cvmx_usbnx_int_sum_cn50xx cn52xx;
    struct cvmx_usbnx_int_sum_cn50xx cn52xxp1;
    struct cvmx_usbnx_int_sum_cn50xx cn56xx;
    struct cvmx_usbnx_int_sum_cn50xx cn56xxp1;
} cvmx_usbnx_int_sum_t;


/**
 * cvmx_usbn#_usbp_ctl_status
 *
 * USBN_USBP_CTL_STATUS = USBP Control And Status Register
 *
 * Contains general control and status information for the USBN block.
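 *
 * A minimal usage sketch, shown here only as an illustration: it assumes the
 * cvmx_read_csr()/cvmx_write_csr() accessors and the
 * CVMX_USBNX_USBP_CTL_STATUS(block) address macro that normally accompany this
 * file (see cvmx-csr.h); none of those names are defined in this header.
 *
 *   cvmx_usbnx_usbp_ctl_status_t usbp;
 *   usbp.u64 = cvmx_read_csr(CVMX_USBNX_USBP_CTL_STATUS(0));
 *   usbp.s.dma_bmode = 1;   (update the L2C DMA address with byte-counts)
 *   cvmx_write_csr(CVMX_USBNX_USBP_CTL_STATUS(0), usbp.u64);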
 */
typedef union
{
    uint64_t u64;
    struct cvmx_usbnx_usbp_ctl_status_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t txrisetune : 1;  /**< HS Transmitter Rise/Fall Time Adjustment */
        uint64_t txvreftune : 4;  /**< HS DC Voltage Level Adjustment */
        uint64_t txfslstune : 4;  /**< FS/LS Source Impedance Adjustment */
        uint64_t txhsxvtune : 2;  /**< Transmitter High-Speed Crossover
                                       Adjustment */
        uint64_t sqrxtune : 3;    /**< Squelch Threshold Adjustment */
        uint64_t compdistune : 3; /**< Disconnect Threshold Adjustment */
        uint64_t otgtune : 3;     /**< VBUS Valid Threshold Adjustment */
        uint64_t otgdisable : 1;  /**< OTG Block Disable */
        uint64_t portreset : 1;   /**< Per-Port Reset */
        uint64_t drvvbus : 1;     /**< Drive VBUS */
        uint64_t lsbist : 1;      /**< Low-Speed BIST Enable. */
        uint64_t fsbist : 1;      /**< Full-Speed BIST Enable. */
        uint64_t hsbist : 1;      /**< High-Speed BIST Enable. */
        uint64_t bist_done : 1;   /**< PHY BIST Done. Asserted at the end of the
                                       PHY BIST sequence. */
        uint64_t bist_err : 1;    /**< PHY BIST Error. Indicates an internal error
                                       was detected during the BIST sequence. */
        uint64_t tdata_out : 4;   /**< PHY Test Data Out. Presents either internally
                                       generated signals or test register contents,
                                       based upon the value of test_data_out_sel. */
        uint64_t siddq : 1;       /**< Drives the USBP (USB-PHY) SIDDQ input.
                                       Normally should be set to zero. When
                                       customers have no intent to use the USB PHY
                                       interface, they should:
                                       - still provide 3.3V to USB_VDD33, and
                                       - tie USB_REXT to the 3.3V supply, and
                                       - set USBN*_USBP_CTL_STATUS[SIDDQ]=1 */
        uint64_t txpreemphasistune : 1; /**< HS Transmitter Pre-Emphasis Enable */
        uint64_t dma_bmode : 1;   /**< When set to 1 the L2C DMA address will be
                                       updated with byte-counts between packets.
                                       When set to 0 the L2C DMA address is
                                       incremented to the next 4-byte aligned
                                       address after adding the byte-count. */
        uint64_t usbc_end : 1;    /**< Big-endian input to the USB Core. This should
                                       be set to '0' for operation. */
        uint64_t usbp_bist : 1;   /**< PHY: this is cleared to '0' to run BIST on
                                       the USBP. */
        uint64_t tclk : 1;        /**< PHY Test Clock, used to load TDATA_IN to the
                                       USBP. */
        uint64_t dp_pulld : 1;    /**< PHY DP_PULLDOWN input to the USB-PHY.
                                       This signal enables the pull-down resistance
                                       on the D+ line. '1': the pull-down resistance
                                       is connected to D+. '0': the pull-down
                                       resistance is not connected to D+. When an
                                       A/B device is acting as a host
                                       (downstream-facing port), dp_pulldown and
                                       dm_pulldown are enabled. This must not toggle
                                       during normal operation. */
        uint64_t dm_pulld : 1;    /**< PHY DM_PULLDOWN input to the USB-PHY.
                                       This signal enables the pull-down resistance
                                       on the D- line. '1': the pull-down resistance
                                       is connected to D-. '0': the pull-down
                                       resistance is not connected to D-. When an
                                       A/B device is acting as a host
                                       (downstream-facing port), dp_pulldown and
                                       dm_pulldown are enabled. This must not toggle
                                       during normal operation. */
        uint64_t hst_mode : 1;    /**< When '0' the USB is acting as HOST, when '1'
                                       the USB is acting as device. This field needs
                                       to be set while the USB is in reset. */
        uint64_t tuning : 4;      /**< Transmitter Tuning for High-Speed Operation.
                                       Tunes the current supply and rise/fall output
                                       times for high-speed operation.
                                       [20:19] == 11: Current supply increased
                                                      approximately 9%
                                       [20:19] == 10: Current supply increased
                                                      approximately 4.5%
                                       [20:19] == 01: Design default.
                                       [20:19] == 00: Current supply decreased
                                                      approximately 4.5%
                                       [22:21] == 11: Rise and fall times are
                                                      increased.
                                       [22:21] == 10: Design default.
                                       [22:21] == 01: Rise and fall times are
                                                      decreased.
                                       [22:21] == 00: Rise and fall times are
                                                      decreased further as compared
                                                      to the 01 setting. */
        uint64_t tx_bs_enh : 1;   /**< Transmit Bit Stuffing on [15:8].
                                       Enables or disables bit stuffing on
                                       data[15:8] when bit-stuffing is enabled. */
        uint64_t tx_bs_en : 1;    /**< Transmit Bit Stuffing on [7:0].
                                       Enables or disables bit stuffing on data[7:0]
                                       when bit-stuffing is enabled. */
        uint64_t loop_enb : 1;    /**< PHY Loopback Test Enable.
                                       '1': During data transmission the receive
                                       path is enabled.
                                       '0': During data transmission the receive
                                       path is disabled.
                                       Must be '0' for normal operation. */
        uint64_t vtest_enb : 1;   /**< Analog Test Pin Enable.
                                       '1': The PHY's analog_test pin is enabled for
                                       the input and output of applicable analog
                                       test signals.
                                       '0': The analog_test pin is disabled. */
        uint64_t bist_enb : 1;    /**< Built-In Self Test Enable.
                                       Used to activate BIST in the PHY. */
        uint64_t tdata_sel : 1;   /**< Test Data Out Select.
                                       '1': test_data_out[3:0] (PHY) register
                                       contents are output.
                                       '0': internally generated signals are
                                       output. */
        uint64_t taddr_in : 4;    /**< Mode Address for Test Interface.
                                       Specifies the register address for writing to
                                       or reading from the PHY test interface
                                       register. */
        uint64_t tdata_in : 8;    /**< Internal Testing Register Input Data and
                                       Select. This is a test bus. Data is present
                                       on [3:0], and its corresponding select
                                       (enable) is present on bits [7:4]. */
        uint64_t ate_reset : 1;   /**< Reset input from automatic test equipment.
                                       This is a test signal. When the USB Core is
                                       powered up (not in Suspend Mode), an
                                       automatic tester can use this to disable
                                       phy_clock and free_clk, then re-enable them
                                       with an aligned phase.
                                       '1': The phy_clk and free_clk outputs are
                                       disabled.
                                       '0': The phy_clock and free_clk outputs are
                                       available within a specific period after the
                                       de-assertion. */
#else
        uint64_t ate_reset : 1;
        uint64_t tdata_in : 8;
        uint64_t taddr_in : 4;
        uint64_t tdata_sel : 1;
        uint64_t bist_enb : 1;
        uint64_t vtest_enb : 1;
        uint64_t loop_enb : 1;
        uint64_t tx_bs_en : 1;
        uint64_t tx_bs_enh : 1;
        uint64_t tuning : 4;
        uint64_t hst_mode : 1;
        uint64_t dm_pulld : 1;
        uint64_t dp_pulld : 1;
        uint64_t tclk : 1;
        uint64_t usbp_bist : 1;
        uint64_t usbc_end : 1;
        uint64_t dma_bmode : 1;
        uint64_t txpreemphasistune : 1;
        uint64_t siddq : 1;
        uint64_t tdata_out : 4;
        uint64_t bist_err : 1;
        uint64_t bist_done : 1;
        uint64_t hsbist : 1;
        uint64_t fsbist : 1;
        uint64_t lsbist : 1;
        uint64_t drvvbus : 1;
        uint64_t portreset : 1;
        uint64_t otgdisable : 1;
        uint64_t otgtune : 3;
        uint64_t compdistune : 3;
        uint64_t sqrxtune : 3;
        uint64_t txhsxvtune : 2;
        uint64_t txfslstune : 4;
        uint64_t txvreftune : 4;
        uint64_t txrisetune : 1;
#endif
    } s;
    struct cvmx_usbnx_usbp_ctl_status_cn30xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_38_63 : 26;
        uint64_t bist_done : 1;   /**< PHY BIST Done. Asserted at the end of the
                                       PHY BIST sequence. */
        uint64_t bist_err : 1;    /**< PHY BIST Error. Indicates an internal error
                                       was detected during the BIST sequence. */
        uint64_t tdata_out : 4;   /**< PHY Test Data Out. Presents either internally
                                       generated signals or test register contents,
                                       based upon the value of test_data_out_sel. */
        uint64_t reserved_30_31 : 2;
        uint64_t dma_bmode : 1;   /**< When set to 1 the L2C DMA address will be
                                       updated with byte-counts between packets.
                                       When set to 0 the L2C DMA address is
                                       incremented to the next 4-byte aligned
                                       address after adding the byte-count. */
        uint64_t usbc_end : 1;    /**< Big-endian input to the USB Core. This should
                                       be set to '0' for operation. */
        uint64_t usbp_bist : 1;   /**< PHY: this is cleared to '0' to run BIST on
                                       the USBP. */
        uint64_t tclk : 1;        /**< PHY Test Clock, used to load TDATA_IN to the
                                       USBP. */
        uint64_t dp_pulld : 1;    /**< PHY DP_PULLDOWN input to the USB-PHY.
                                       This signal enables the pull-down resistance
                                       on the D+ line. '1': the pull-down resistance
                                       is connected to D+. '0': the pull-down
                                       resistance is not connected to D+. When an
                                       A/B device is acting as a host
                                       (downstream-facing port), dp_pulldown and
                                       dm_pulldown are enabled. This must not toggle
                                       during normal operation. */
        uint64_t dm_pulld : 1;    /**< PHY DM_PULLDOWN input to the USB-PHY.
                                       This signal enables the pull-down resistance
                                       on the D- line. '1': the pull-down resistance
                                       is connected to D-. '0': the pull-down
                                       resistance is not connected to D-. When an
                                       A/B device is acting as a host
                                       (downstream-facing port), dp_pulldown and
                                       dm_pulldown are enabled. This must not toggle
                                       during normal operation. */
        uint64_t hst_mode : 1;    /**< When '0' the USB is acting as HOST, when '1'
                                       the USB is acting as device. This field needs
                                       to be set while the USB is in reset. */
        uint64_t tuning : 4;      /**< Transmitter Tuning for High-Speed Operation.
                                       Tunes the current supply and rise/fall output
                                       times for high-speed operation.
                                       [20:19] == 11: Current supply increased
                                                      approximately 9%
                                       [20:19] == 10: Current supply increased
                                                      approximately 4.5%
                                       [20:19] == 01: Design default.
                                       [20:19] == 00: Current supply decreased
                                                      approximately 4.5%
                                       [22:21] == 11: Rise and fall times are
                                                      increased.
                                       [22:21] == 10: Design default.
                                       [22:21] == 01: Rise and fall times are
                                                      decreased.
                                       [22:21] == 00: Rise and fall times are
                                                      decreased further as compared
                                                      to the 01 setting. */
        uint64_t tx_bs_enh : 1;   /**< Transmit Bit Stuffing on [15:8].
                                       Enables or disables bit stuffing on
                                       data[15:8] when bit-stuffing is enabled. */
        uint64_t tx_bs_en : 1;    /**< Transmit Bit Stuffing on [7:0].
                                       Enables or disables bit stuffing on data[7:0]
                                       when bit-stuffing is enabled. */
        uint64_t loop_enb : 1;    /**< PHY Loopback Test Enable.
                                       '1': During data transmission the receive
                                       path is enabled.
                                       '0': During data transmission the receive
                                       path is disabled.
                                       Must be '0' for normal operation. */
        uint64_t vtest_enb : 1;   /**< Analog Test Pin Enable.
                                       '1': The PHY's analog_test pin is enabled for
                                       the input and output of applicable analog
                                       test signals.
                                       '0': The analog_test pin is disabled. */
        uint64_t bist_enb : 1;    /**< Built-In Self Test Enable.
                                       Used to activate BIST in the PHY. */
        uint64_t tdata_sel : 1;   /**< Test Data Out Select.
                                       '1': test_data_out[3:0] (PHY) register
                                       contents are output.
                                       '0': internally generated signals are
                                       output. */
        uint64_t taddr_in : 4;    /**< Mode Address for Test Interface.
                                       Specifies the register address for writing to
                                       or reading from the PHY test interface
                                       register. */
        uint64_t tdata_in : 8;    /**< Internal Testing Register Input Data and
                                       Select. This is a test bus. Data is present
                                       on [3:0], and its corresponding select
                                       (enable) is present on bits [7:4]. */
        uint64_t ate_reset : 1;   /**< Reset input from automatic test equipment.
                                       This is a test signal. When the USB Core is
                                       powered up (not in Suspend Mode), an
                                       automatic tester can use this to disable
                                       phy_clock and free_clk, then re-enable them
                                       with an aligned phase.
                                       '1': The phy_clk and free_clk outputs are
                                       disabled.
                                       '0': The phy_clock and free_clk outputs are
                                       available within a specific period after the
                                       de-assertion. */
#else
        uint64_t ate_reset : 1;
        uint64_t tdata_in : 8;
        uint64_t taddr_in : 4;
        uint64_t tdata_sel : 1;
        uint64_t bist_enb : 1;
        uint64_t vtest_enb : 1;
        uint64_t loop_enb : 1;
        uint64_t tx_bs_en : 1;
        uint64_t tx_bs_enh : 1;
        uint64_t tuning : 4;
        uint64_t hst_mode : 1;
        uint64_t dm_pulld : 1;
        uint64_t dp_pulld : 1;
        uint64_t tclk : 1;
        uint64_t usbp_bist : 1;
        uint64_t usbc_end : 1;
        uint64_t dma_bmode : 1;
        uint64_t reserved_30_31 : 2;
        uint64_t tdata_out : 4;
        uint64_t bist_err : 1;
        uint64_t bist_done : 1;
        uint64_t reserved_38_63 : 26;
#endif
    } cn30xx;
    struct cvmx_usbnx_usbp_ctl_status_cn30xx cn31xx;
    struct cvmx_usbnx_usbp_ctl_status_cn50xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t txrisetune : 1;  /**< HS Transmitter Rise/Fall Time Adjustment */
        uint64_t txvreftune : 4;  /**< HS DC Voltage Level Adjustment */
        uint64_t txfslstune : 4;  /**< FS/LS Source Impedance Adjustment */
        uint64_t txhsxvtune : 2;  /**< Transmitter High-Speed Crossover
                                       Adjustment */
        uint64_t sqrxtune : 3;    /**< Squelch Threshold Adjustment */
        uint64_t compdistune : 3; /**< Disconnect Threshold Adjustment */
        uint64_t otgtune : 3;     /**< VBUS Valid Threshold Adjustment */
        uint64_t otgdisable : 1;  /**< OTG Block Disable */
        uint64_t portreset : 1;   /**< Per-Port Reset */
        uint64_t drvvbus : 1;     /**< Drive VBUS */
        uint64_t lsbist : 1;      /**< Low-Speed BIST Enable. */
        uint64_t fsbist : 1;      /**< Full-Speed BIST Enable. */
        uint64_t hsbist : 1;      /**< High-Speed BIST Enable. */
        uint64_t bist_done : 1;   /**< PHY BIST Done. Asserted at the end of the
                                       PHY BIST sequence. */
        uint64_t bist_err : 1;    /**< PHY BIST Error. Indicates an internal error
                                       was detected during the BIST sequence. */
        uint64_t tdata_out : 4;   /**< PHY Test Data Out. Presents either internally
                                       generated signals or test register contents,
                                       based upon the value of test_data_out_sel. */
        uint64_t reserved_31_31 : 1;
        uint64_t txpreemphasistune : 1; /**< HS Transmitter Pre-Emphasis Enable */
        uint64_t dma_bmode : 1;   /**< When set to 1 the L2C DMA address will be
                                       updated with byte-counts between packets.
                                       When set to 0 the L2C DMA address is
                                       incremented to the next 4-byte aligned
                                       address after adding the byte-count. */
        uint64_t usbc_end : 1;    /**< Big-endian input to the USB Core. This should
                                       be set to '0' for operation. */
        uint64_t usbp_bist : 1;   /**< PHY: this is cleared to '0' to run BIST on
                                       the USBP. */
        uint64_t tclk : 1;        /**< PHY Test Clock, used to load TDATA_IN to the
                                       USBP. */
        uint64_t dp_pulld : 1;    /**< PHY DP_PULLDOWN input to the USB-PHY.
                                       This signal enables the pull-down resistance
                                       on the D+ line. '1': the pull-down resistance
                                       is connected to D+. '0': the pull-down
                                       resistance is not connected to D+. When an
                                       A/B device is acting as a host
                                       (downstream-facing port), dp_pulldown and
                                       dm_pulldown are enabled. This must not toggle
                                       during normal operation. */
        uint64_t dm_pulld : 1;    /**< PHY DM_PULLDOWN input to the USB-PHY.
                                       This signal enables the pull-down resistance
                                       on the D- line. '1': the pull-down resistance
                                       is connected to D-. '0': the pull-down
                                       resistance is not connected to D-. When an
                                       A/B device is acting as a host
                                       (downstream-facing port), dp_pulldown and
                                       dm_pulldown are enabled. This must not toggle
                                       during normal operation. */
        uint64_t hst_mode : 1;    /**< When '0' the USB is acting as HOST, when '1'
                                       the USB is acting as device. This field needs
                                       to be set while the USB is in reset. */
        uint64_t reserved_19_22 : 4;
        uint64_t tx_bs_enh : 1;   /**< Transmit Bit Stuffing on [15:8].
                                       Enables or disables bit stuffing on
                                       data[15:8] when bit-stuffing is enabled. */
        uint64_t tx_bs_en : 1;    /**< Transmit Bit Stuffing on [7:0].
                                       Enables or disables bit stuffing on data[7:0]
                                       when bit-stuffing is enabled. */
        uint64_t loop_enb : 1;    /**< PHY Loopback Test Enable.
                                       '1': During data transmission the receive
                                       path is enabled.
                                       '0': During data transmission the receive
                                       path is disabled.
                                       Must be '0' for normal operation. */
        uint64_t vtest_enb : 1;   /**< Analog Test Pin Enable.
                                       '1': The PHY's analog_test pin is enabled for
                                       the input and output of applicable analog
                                       test signals.
                                       '0': The analog_test pin is disabled. */
        uint64_t bist_enb : 1;    /**< Built-In Self Test Enable.
                                       Used to activate BIST in the PHY. */
        uint64_t tdata_sel : 1;   /**< Test Data Out Select.
                                       '1': test_data_out[3:0] (PHY) register
                                       contents are output.
                                       '0': internally generated signals are
                                       output. */
        uint64_t taddr_in : 4;    /**< Mode Address for Test Interface.
                                       Specifies the register address for writing to
                                       or reading from the PHY test interface
                                       register. */
        uint64_t tdata_in : 8;    /**< Internal Testing Register Input Data and
                                       Select. This is a test bus. Data is present
                                       on [3:0], and its corresponding select
                                       (enable) is present on bits [7:4]. */
        uint64_t ate_reset : 1;   /**< Reset input from automatic test equipment.
                                       This is a test signal. When the USB Core is
                                       powered up (not in Suspend Mode), an
                                       automatic tester can use this to disable
                                       phy_clock and free_clk, then re-enable them
                                       with an aligned phase.
                                       '1': The phy_clk and free_clk outputs are
                                       disabled.
                                       '0': The phy_clock and free_clk outputs are
                                       available within a specific period after the
                                       de-assertion. */
#else
        uint64_t ate_reset : 1;
        uint64_t tdata_in : 8;
        uint64_t taddr_in : 4;
        uint64_t tdata_sel : 1;
        uint64_t bist_enb : 1;
        uint64_t vtest_enb : 1;
        uint64_t loop_enb : 1;
        uint64_t tx_bs_en : 1;
        uint64_t tx_bs_enh : 1;
        uint64_t reserved_19_22 : 4;
        uint64_t hst_mode : 1;
        uint64_t dm_pulld : 1;
        uint64_t dp_pulld : 1;
        uint64_t tclk : 1;
        uint64_t usbp_bist : 1;
        uint64_t usbc_end : 1;
        uint64_t dma_bmode : 1;
        uint64_t txpreemphasistune : 1;
        uint64_t reserved_31_31 : 1;
        uint64_t tdata_out : 4;
        uint64_t bist_err : 1;
        uint64_t bist_done : 1;
        uint64_t hsbist : 1;
        uint64_t fsbist : 1;
        uint64_t lsbist : 1;
        uint64_t drvvbus : 1;
        uint64_t portreset : 1;
        uint64_t otgdisable : 1;
        uint64_t otgtune : 3;
        uint64_t compdistune : 3;
        uint64_t sqrxtune : 3;
        uint64_t txhsxvtune : 2;
        uint64_t txfslstune : 4;
        uint64_t txvreftune : 4;
        uint64_t txrisetune : 1;
#endif
    } cn50xx;
    struct cvmx_usbnx_usbp_ctl_status_cn52xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t txrisetune : 1;  /**< HS Transmitter Rise/Fall Time Adjustment */
        uint64_t txvreftune : 4;  /**< HS DC Voltage Level Adjustment */
        uint64_t txfslstune : 4;  /**< FS/LS Source Impedance Adjustment */
        uint64_t txhsxvtune : 2;  /**< Transmitter High-Speed Crossover
                                       Adjustment */
        uint64_t sqrxtune : 3;    /**< Squelch Threshold Adjustment */
        uint64_t compdistune : 3; /**< Disconnect Threshold Adjustment */
        uint64_t otgtune : 3;     /**< VBUS Valid Threshold Adjustment */
        uint64_t otgdisable : 1;  /**< OTG Block Disable */
        uint64_t portreset : 1;   /**< Per-Port Reset */
        uint64_t drvvbus : 1;     /**< Drive VBUS */
        uint64_t lsbist : 1;      /**< Low-Speed BIST Enable. */
        uint64_t fsbist : 1;      /**< Full-Speed BIST Enable. */
        uint64_t hsbist : 1;      /**< High-Speed BIST Enable. */
        uint64_t bist_done : 1;   /**< PHY BIST Done. Asserted at the end of the
                                       PHY BIST sequence. */
        uint64_t bist_err : 1;    /**< PHY BIST Error. Indicates an internal error
                                       was detected during the BIST sequence. */
        uint64_t tdata_out : 4;   /**< PHY Test Data Out. Presents either internally
                                       generated signals or test register contents,
                                       based upon the value of test_data_out_sel. */
        uint64_t siddq : 1;       /**< Drives the USBP (USB-PHY) SIDDQ input.
                                       Normally should be set to zero. When
                                       customers have no intent to use the USB PHY
                                       interface, they should:
                                       - still provide 3.3V to USB_VDD33, and
                                       - tie USB_REXT to the 3.3V supply, and
                                       - set USBN*_USBP_CTL_STATUS[SIDDQ]=1 */
        uint64_t txpreemphasistune : 1; /**< HS Transmitter Pre-Emphasis Enable */
        uint64_t dma_bmode : 1;   /**< When set to 1 the L2C DMA address will be
                                       updated with byte-counts between packets.
                                       When set to 0 the L2C DMA address is
                                       incremented to the next 4-byte aligned
                                       address after adding the byte-count. */
        uint64_t usbc_end : 1;    /**< Big-endian input to the USB Core. This should
                                       be set to '0' for operation. */
        uint64_t usbp_bist : 1;   /**< PHY: this is cleared to '0' to run BIST on
                                       the USBP. */
        uint64_t tclk : 1;        /**< PHY Test Clock, used to load TDATA_IN to the
                                       USBP. */
        uint64_t dp_pulld : 1;    /**< PHY DP_PULLDOWN input to the USB-PHY.
                                       This signal enables the pull-down resistance
                                       on the D+ line. '1': the pull-down resistance
                                       is connected to D+. '0': the pull-down
                                       resistance is not connected to D+. When an
                                       A/B device is acting as a host
                                       (downstream-facing port), dp_pulldown and
                                       dm_pulldown are enabled. This must not toggle
                                       during normal operation. */
        uint64_t dm_pulld : 1;    /**< PHY DM_PULLDOWN input to the USB-PHY.
                                       This signal enables the pull-down resistance
                                       on the D- line. '1': the pull-down resistance
                                       is connected to D-. '0': the pull-down
                                       resistance is not connected to D-. When an
                                       A/B device is acting as a host
                                       (downstream-facing port), dp_pulldown and
                                       dm_pulldown are enabled. This must not toggle
                                       during normal operation. */
        uint64_t hst_mode : 1;    /**< When '0' the USB is acting as HOST, when '1'
                                       the USB is acting as device. This field needs
                                       to be set while the USB is in reset. */
        uint64_t reserved_19_22 : 4;
        uint64_t tx_bs_enh : 1;   /**< Transmit Bit Stuffing on [15:8].
                                       Enables or disables bit stuffing on
                                       data[15:8] when bit-stuffing is enabled. */
        uint64_t tx_bs_en : 1;    /**< Transmit Bit Stuffing on [7:0].
                                       Enables or disables bit stuffing on data[7:0]
                                       when bit-stuffing is enabled. */
        uint64_t loop_enb : 1;    /**< PHY Loopback Test Enable.
                                       '1': During data transmission the receive
                                       path is enabled.
                                       '0': During data transmission the receive
                                       path is disabled.
                                       Must be '0' for normal operation. */
        uint64_t vtest_enb : 1;   /**< Analog Test Pin Enable.
                                       '1': The PHY's analog_test pin is enabled for
                                       the input and output of applicable analog
                                       test signals.
                                       '0': The analog_test pin is disabled. */
        uint64_t bist_enb : 1;    /**< Built-In Self Test Enable.
                                       Used to activate BIST in the PHY. */
        uint64_t tdata_sel : 1;   /**< Test Data Out Select.
                                       '1': test_data_out[3:0] (PHY) register
                                       contents are output.
                                       '0': internally generated signals are
                                       output. */
        uint64_t taddr_in : 4;    /**< Mode Address for Test Interface.
                                       Specifies the register address for writing to
                                       or reading from the PHY test interface
                                       register. */
        uint64_t tdata_in : 8;    /**< Internal Testing Register Input Data and
                                       Select. This is a test bus. Data is present
                                       on [3:0], and its corresponding select
                                       (enable) is present on bits [7:4]. */
        uint64_t ate_reset : 1;   /**< Reset input from automatic test equipment.
                                       This is a test signal. When the USB Core is
                                       powered up (not in Suspend Mode), an
                                       automatic tester can use this to disable
                                       phy_clock and free_clk, then re-enable them
                                       with an aligned phase.
                                       '1': The phy_clk and free_clk outputs are
                                       disabled.
                                       '0': The phy_clock and free_clk outputs are
                                       available within a specific period after the
                                       de-assertion. */
"0": The phy_clock and free_clk outputs are available within a specific period after the de-assertion. */ #else uint64_t ate_reset : 1; uint64_t tdata_in : 8; uint64_t taddr_in : 4; uint64_t tdata_sel : 1; uint64_t bist_enb : 1; uint64_t vtest_enb : 1; uint64_t loop_enb : 1; uint64_t tx_bs_en : 1; uint64_t tx_bs_enh : 1; uint64_t reserved_19_22 : 4; uint64_t hst_mode : 1; uint64_t dm_pulld : 1; uint64_t dp_pulld : 1; uint64_t tclk : 1; uint64_t usbp_bist : 1; uint64_t usbc_end : 1; uint64_t dma_bmode : 1; uint64_t txpreemphasistune : 1; uint64_t siddq : 1; uint64_t tdata_out : 4; uint64_t bist_err : 1; uint64_t bist_done : 1; uint64_t hsbist : 1; uint64_t fsbist : 1; uint64_t lsbist : 1; uint64_t drvvbus : 1; uint64_t portreset : 1; uint64_t otgdisable : 1; uint64_t otgtune : 3; uint64_t compdistune : 3; uint64_t sqrxtune : 3; uint64_t txhsxvtune : 2; uint64_t txfslstune : 4; uint64_t txvreftune : 4; uint64_t txrisetune : 1; #endif } cn52xx; struct cvmx_usbnx_usbp_ctl_status_cn50xx cn52xxp1; struct cvmx_usbnx_usbp_ctl_status_cn52xx cn56xx; struct cvmx_usbnx_usbp_ctl_status_cn50xx cn56xxp1; } cvmx_usbnx_usbp_ctl_status_t; /** * cvmx_zip_cmd_bist_result * * Notes: * Access to the internal BiST results * Each bit is the BiST result of an individual memory (per bit, 0=pass and 1=fail). */ typedef union { uint64_t u64; struct cvmx_zip_cmd_bist_result_s { #if __BYTE_ORDER == __BIG_ENDIAN uint64_t reserved_31_63 : 33; uint64_t zip_core : 27; /**< BiST result of the ZIP_CORE memories */ uint64_t zip_ctl : 4; /**< BiST result of the ZIP_CTL memories */ #else uint64_t zip_ctl : 4; uint64_t zip_core : 27; uint64_t reserved_31_63 : 33; #endif } s; struct cvmx_zip_cmd_bist_result_s cn31xx; struct cvmx_zip_cmd_bist_result_s cn38xx; struct cvmx_zip_cmd_bist_result_s cn38xxp2; struct cvmx_zip_cmd_bist_result_s cn56xx; struct cvmx_zip_cmd_bist_result_s cn56xxp1; struct cvmx_zip_cmd_bist_result_s cn58xx; struct cvmx_zip_cmd_bist_result_s cn58xxp1; } cvmx_zip_cmd_bist_result_t; /** * cvmx_zip_cmd_buf * * Notes: * Sets the command buffer parameters * The size of the command buffer segments is measured in uint64s. The pool specifies (1 of 8 free * lists to be used when freeing command buffer segments. The PTR field is overwritten with the next * pointer each time that the command buffer segment is exhausted. * When quiescent (i.e. outstanding doorbell count is 0), it is safe to rewrite * this register to effectively reset the command buffer state machine. New commands will then be * read from the newly specified command buffer pointer. 
 */
typedef union
{
    uint64_t u64;
    struct cvmx_zip_cmd_buf_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_58_63 : 6;
        uint64_t dwb : 9;         /**< Number of DontWriteBacks */
        uint64_t pool : 3;        /**< Free list used to free command buffer
                                       segments */
        uint64_t size : 13;       /**< Number of uint64s per command buffer
                                       segment */
        uint64_t ptr : 33;        /**< Initial command buffer pointer[39:7]
                                       (128B-aligned) */
#else
        uint64_t ptr : 33;
        uint64_t size : 13;
        uint64_t pool : 3;
        uint64_t dwb : 9;
        uint64_t reserved_58_63 : 6;
#endif
    } s;
    struct cvmx_zip_cmd_buf_s cn31xx;
    struct cvmx_zip_cmd_buf_s cn38xx;
    struct cvmx_zip_cmd_buf_s cn38xxp2;
    struct cvmx_zip_cmd_buf_s cn56xx;
    struct cvmx_zip_cmd_buf_s cn56xxp1;
    struct cvmx_zip_cmd_buf_s cn58xx;
    struct cvmx_zip_cmd_buf_s cn58xxp1;
} cvmx_zip_cmd_buf_t;


/**
 * cvmx_zip_cmd_ctl
 */
typedef union
{
    uint64_t u64;
    struct cvmx_zip_cmd_ctl_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_2_63 : 62;
        uint64_t forceclk : 1;    /**< Force zip_ctl__clock_on_b == 1 when set */
        uint64_t reset : 1;       /**< Reset one-shot pulse for zip core */
#else
        uint64_t reset : 1;
        uint64_t forceclk : 1;
        uint64_t reserved_2_63 : 62;
#endif
    } s;
    struct cvmx_zip_cmd_ctl_s cn31xx;
    struct cvmx_zip_cmd_ctl_s cn38xx;
    struct cvmx_zip_cmd_ctl_s cn38xxp2;
    struct cvmx_zip_cmd_ctl_s cn56xx;
    struct cvmx_zip_cmd_ctl_s cn56xxp1;
    struct cvmx_zip_cmd_ctl_s cn58xx;
    struct cvmx_zip_cmd_ctl_s cn58xxp1;
} cvmx_zip_cmd_ctl_t;


/**
 * cvmx_zip_constants
 *
 * Notes:
 * Note that this CSR is present only in chip revisions beginning with pass2.
 *
 */
typedef union
{
    uint64_t u64;
    struct cvmx_zip_constants_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_48_63 : 16;
        uint64_t depth : 16;      /**< Maximum search depth for compression */
        uint64_t onfsize : 12;    /**< Output near full threshold in bytes */
        uint64_t ctxsize : 12;    /**< Context size in bytes */
        uint64_t reserved_1_7 : 7;
        uint64_t disabled : 1;    /**< 1 = zip unit is disabled, 0 = zip unit is
                                       not disabled */
#else
        uint64_t disabled : 1;
        uint64_t reserved_1_7 : 7;
        uint64_t ctxsize : 12;
        uint64_t onfsize : 12;
        uint64_t depth : 16;
        uint64_t reserved_48_63 : 16;
#endif
    } s;
    struct cvmx_zip_constants_s cn31xx;
    struct cvmx_zip_constants_s cn38xx;
    struct cvmx_zip_constants_s cn38xxp2;
    struct cvmx_zip_constants_s cn56xx;
    struct cvmx_zip_constants_s cn56xxp1;
    struct cvmx_zip_constants_s cn58xx;
    struct cvmx_zip_constants_s cn58xxp1;
} cvmx_zip_constants_t;


/**
 * cvmx_zip_debug0
 *
 * Notes:
 * Note that this CSR is present only in chip revisions beginning with pass2.
 *
 */
typedef union
{
    uint64_t u64;
    struct cvmx_zip_debug0_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_14_63 : 50;
        uint64_t asserts : 14;    /**< FIFO assertion checks */
#else
        uint64_t asserts : 14;
        uint64_t reserved_14_63 : 50;
#endif
    } s;
    struct cvmx_zip_debug0_s cn31xx;
    struct cvmx_zip_debug0_s cn38xx;
    struct cvmx_zip_debug0_s cn38xxp2;
    struct cvmx_zip_debug0_s cn56xx;
    struct cvmx_zip_debug0_s cn56xxp1;
    struct cvmx_zip_debug0_s cn58xx;
    struct cvmx_zip_debug0_s cn58xxp1;
} cvmx_zip_debug0_t;


/**
 * cvmx_zip_error
 *
 * Notes:
 * Note that this CSR is present only in chip revisions beginning with pass2.
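 *
 * A hedged handling sketch, shown only as an illustration: the CVMX_ZIP_ERROR
 * address macro and the cvmx_read_csr()/cvmx_write_csr() accessors are assumed
 * to come from the companion cvmx-csr.h, and treating DOORBELL as
 * write-one-to-clear is an assumption of this example, not something this
 * file specifies.
 *
 *   cvmx_zip_error_t err;
 *   err.u64 = cvmx_read_csr(CVMX_ZIP_ERROR);
 *   if (err.s.doorbell)
 *   {
 *       err.u64 = 0;
 *       err.s.doorbell = 1;   (attempt to clear the overflow indication)
 *       cvmx_write_csr(CVMX_ZIP_ERROR, err.u64);
 *   }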
 *
 */
typedef union
{
    uint64_t u64;
    struct cvmx_zip_error_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_1_63 : 63;
        uint64_t doorbell : 1;    /**< A doorbell count has overflowed */
#else
        uint64_t doorbell : 1;
        uint64_t reserved_1_63 : 63;
#endif
    } s;
    struct cvmx_zip_error_s cn31xx;
    struct cvmx_zip_error_s cn38xx;
    struct cvmx_zip_error_s cn38xxp2;
    struct cvmx_zip_error_s cn56xx;
    struct cvmx_zip_error_s cn56xxp1;
    struct cvmx_zip_error_s cn58xx;
    struct cvmx_zip_error_s cn58xxp1;
} cvmx_zip_error_t;


/**
 * cvmx_zip_int_mask
 *
 * Notes:
 * Note that this CSR is present only in chip revisions beginning with pass2.
 * When a mask bit is set, the corresponding interrupt is enabled.
 */
typedef union
{
    uint64_t u64;
    struct cvmx_zip_int_mask_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN
        uint64_t reserved_1_63 : 63;
        uint64_t doorbell : 1;    /**< Bit mask corresponding to
                                       ZIP_ERROR[DOORBELL] above */
#else
        uint64_t doorbell : 1;
        uint64_t reserved_1_63 : 63;
#endif
    } s;
    struct cvmx_zip_int_mask_s cn31xx;
    struct cvmx_zip_int_mask_s cn38xx;
    struct cvmx_zip_int_mask_s cn38xxp2;
    struct cvmx_zip_int_mask_s cn56xx;
    struct cvmx_zip_int_mask_s cn56xxp1;
    struct cvmx_zip_int_mask_s cn58xx;
    struct cvmx_zip_int_mask_s cn58xxp1;
} cvmx_zip_int_mask_t;

#endif /* __CVMX_CSR_TYPEDEFS_H__ */