1 /***********************license start***************
2 * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * * Neither the name of Cavium Networks nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
23 * This Software, including technical data, may be subject to U.S. export control
24 * laws, including the U.S. Export Administration Act and its associated
25 * regulations, and may be subject to export or import regulations in other
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
44 * Configuration and status register (CSR) type definitions for
47 * This file is auto generated. Do not edit.
52 #ifndef __CVMX_SMIX_TYPEDEFS_H__
53 #define __CVMX_SMIX_TYPEDEFS_H__
/*
 * CVMX_SMIX_CLK - return the address of the SMI(0..1)_CLK CSR.
 *
 * When CSR address checking is enabled, warn if @offset names an SMI bus
 * that does not exist on the running chip model (only CN52XX/CN56XX/CN63XX
 * have two buses).  The address is returned regardless, so behavior matches
 * the unchecked macro build.
 */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_SMIX_CLK(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_SMIX_CLK(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180000001818ull) + ((offset) & 1) * 256;
}
#else
#define CVMX_SMIX_CLK(offset) (CVMX_ADD_IO_SEG(0x0001180000001818ull) + ((offset) & 1) * 256)
#endif
/*
 * CVMX_SMIX_CMD - return the address of the SMI(0..1)_CMD CSR.
 *
 * When CSR address checking is enabled, warn if @offset names an SMI bus
 * that does not exist on the running chip model; the address is returned
 * regardless so behavior matches the unchecked macro build.
 */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_SMIX_CMD(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_SMIX_CMD(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180000001800ull) + ((offset) & 1) * 256;
}
#else
#define CVMX_SMIX_CMD(offset) (CVMX_ADD_IO_SEG(0x0001180000001800ull) + ((offset) & 1) * 256)
#endif
/*
 * CVMX_SMIX_EN - return the address of the SMI(0..1)_EN CSR.
 *
 * When CSR address checking is enabled, warn if @offset names an SMI bus
 * that does not exist on the running chip model; the address is returned
 * regardless so behavior matches the unchecked macro build.
 */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_SMIX_EN(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_SMIX_EN(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180000001820ull) + ((offset) & 1) * 256;
}
#else
#define CVMX_SMIX_EN(offset) (CVMX_ADD_IO_SEG(0x0001180000001820ull) + ((offset) & 1) * 256)
#endif
/*
 * CVMX_SMIX_RD_DAT - return the address of the SMI(0..1)_RD_DAT CSR.
 *
 * When CSR address checking is enabled, warn if @offset names an SMI bus
 * that does not exist on the running chip model; the address is returned
 * regardless so behavior matches the unchecked macro build.
 */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_SMIX_RD_DAT(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_SMIX_RD_DAT(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180000001810ull) + ((offset) & 1) * 256;
}
#else
#define CVMX_SMIX_RD_DAT(offset) (CVMX_ADD_IO_SEG(0x0001180000001810ull) + ((offset) & 1) * 256)
#endif
/*
 * CVMX_SMIX_WR_DAT - return the address of the SMI(0..1)_WR_DAT CSR.
 *
 * When CSR address checking is enabled, warn if @offset names an SMI bus
 * that does not exist on the running chip model; the address is returned
 * regardless so behavior matches the unchecked macro build.
 */
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
static inline uint64_t CVMX_SMIX_WR_DAT(unsigned long offset)
{
	if (!(
	      (OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 1))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset == 0))) ||
	      (OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1)))))
		cvmx_warn("CVMX_SMIX_WR_DAT(%lu) is invalid on this chip\n", offset);
	return CVMX_ADD_IO_SEG(0x0001180000001808ull) + ((offset) & 1) * 256;
}
#else
#define CVMX_SMIX_WR_DAT(offset) (CVMX_ADD_IO_SEG(0x0001180000001808ull) + ((offset) & 1) * 256)
#endif
/**
 * cvmx_smi#_clk
 *
 * SMI_CLK = Clock Control Register
 *
 * Bit positions can be read from the reserved_<lo>_<hi> member names.
 * CN30XX/CN31XX/CN38XX/CN58XX lack the Clause-45 MODE bit.
 */
union cvmx_smix_clk {
	uint64_t u64;
	struct cvmx_smix_clk_s {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_25_63               : 39;
	uint64_t mode                         : 1;  /**< IEEE operating mode
                                                         0=Clause 22 compliant
                                                         1=Clause 45 compliant */
	uint64_t reserved_21_23               : 3;
	uint64_t sample_hi                    : 5;  /**< When to sample read data (extended bits) */
	uint64_t sample_mode                  : 1;  /**< Read Data sampling mode
                                                         Per 802.3, the STA transitions MDC and the PHY
                                                         drives MDIO with some delay (edge1); the STA then
                                                         samples MDIO on the next rising MDC edge (edge2).
                                                         Octeon can sample relative to either edge.
                                                         0=[SAMPLE_HI,SAMPLE] relative to edge1
                                                         1=[SAMPLE_HI,SAMPLE] relative to edge2 */
	uint64_t reserved_14_14               : 1;
	uint64_t clk_idle                     : 1;  /**< Do not toggle MDC on idle cycles */
	uint64_t preamble                     : 1;  /**< Send PREAMBLE on SMI transaction.
                                                         Must be 1 when MODE=1 so the receiving PHY
                                                         can correctly frame the transaction. */
	uint64_t sample                       : 4;  /**< When to sample read data
                                                         (number of eclks after the rising edge of mdc)
                                                         ( [SAMPLE_HI,SAMPLE] > 1 )
                                                         ( [SAMPLE_HI,SAMPLE] + 3 <= 2*PHASE ) */
	uint64_t phase                        : 8;  /**< MDC Clock Phase
                                                         (number of eclks that make up an mdc phase) */
#else
	uint64_t phase                        : 8;
	uint64_t sample                       : 4;
	uint64_t preamble                     : 1;
	uint64_t clk_idle                     : 1;
	uint64_t reserved_14_14               : 1;
	uint64_t sample_mode                  : 1;
	uint64_t sample_hi                    : 5;
	uint64_t reserved_21_23               : 3;
	uint64_t mode                         : 1;
	uint64_t reserved_25_63               : 39;
#endif
	} s;
	struct cvmx_smix_clk_cn30xx {        /* No MODE bit: Clause 22 only */
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_21_63               : 43;
	uint64_t sample_hi                    : 5;  /**< When to sample read data (extended bits) */
	uint64_t sample_mode                  : 1;  /**< Read Data sampling mode (see s.sample_mode) */
	uint64_t reserved_14_14               : 1;
	uint64_t clk_idle                     : 1;  /**< Do not toggle MDC on idle cycles */
	uint64_t preamble                     : 1;  /**< Send PREAMBLE on SMI transaction */
	uint64_t sample                       : 4;  /**< When to sample read data
                                                         (number of eclks after the rising edge of mdc)
                                                         ( [SAMPLE_HI,SAMPLE] > 1 )
                                                         ( [SAMPLE_HI,SAMPLE] + 3 <= 2*PHASE ) */
	uint64_t phase                        : 8;  /**< MDC Clock Phase
                                                         (number of eclks that make up an mdc phase) */
#else
	uint64_t phase                        : 8;
	uint64_t sample                       : 4;
	uint64_t preamble                     : 1;
	uint64_t clk_idle                     : 1;
	uint64_t reserved_14_14               : 1;
	uint64_t sample_mode                  : 1;
	uint64_t sample_hi                    : 5;
	uint64_t reserved_21_63               : 43;
#endif
	} cn30xx;
	struct cvmx_smix_clk_cn30xx cn31xx;
	struct cvmx_smix_clk_cn30xx cn38xx;
	struct cvmx_smix_clk_cn30xx cn38xxp2;
	struct cvmx_smix_clk_s cn50xx;
	struct cvmx_smix_clk_s cn52xx;
	struct cvmx_smix_clk_s cn52xxp1;
	struct cvmx_smix_clk_s cn56xx;
	struct cvmx_smix_clk_s cn56xxp1;
	struct cvmx_smix_clk_cn30xx cn58xx;
	struct cvmx_smix_clk_cn30xx cn58xxp1;
	struct cvmx_smix_clk_s cn63xx;
	struct cvmx_smix_clk_s cn63xxp1;
};
typedef union cvmx_smix_clk cvmx_smix_clk_t;
/**
 * cvmx_smi#_cmd
 *
 * SMI_CMD = Force a Read/Write command to the PHY
 *
 * Writes to this register create SMI transactions.  Software then polls
 * SMI_RD_DAT or SMI_WR_DAT (depending on the transaction type) for
 * completion.  CN30XX-class chips have a 1-bit PHY_OP (Clause 22 only).
 */
union cvmx_smix_cmd {
	uint64_t u64;
	struct cvmx_smix_cmd_s {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_18_63               : 46;
	uint64_t phy_op                       : 2;  /**< PHY Opcode, depending on SMI_CLK[MODE]
                                                         SMI_CLK[MODE] == 0 (<=1Gbs / Clause 22)
                                                           x0=write
                                                           x1=read
                                                         SMI_CLK[MODE] == 1 (>1Gbs / Clause 45)
                                                           00=address
                                                           01=write
                                                           11=read
                                                           10=post-read-increment-address */
	uint64_t reserved_13_15               : 3;
	uint64_t phy_adr                      : 5;  /**< PHY Address */
	uint64_t reserved_5_7                 : 3;
	uint64_t reg_adr                      : 5;  /**< PHY Register Offset */
#else
	uint64_t reg_adr                      : 5;
	uint64_t reserved_5_7                 : 3;
	uint64_t phy_adr                      : 5;
	uint64_t reserved_13_15               : 3;
	uint64_t phy_op                       : 2;
	uint64_t reserved_18_63               : 46;
#endif
	} s;
	struct cvmx_smix_cmd_cn30xx {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_17_63               : 47;
	uint64_t phy_op                       : 1;  /**< PHY Opcode
                                                         0=write
                                                         1=read */
	uint64_t reserved_13_15               : 3;
	uint64_t phy_adr                      : 5;  /**< PHY Address */
	uint64_t reserved_5_7                 : 3;
	uint64_t reg_adr                      : 5;  /**< PHY Register Offset */
#else
	uint64_t reg_adr                      : 5;
	uint64_t reserved_5_7                 : 3;
	uint64_t phy_adr                      : 5;
	uint64_t reserved_13_15               : 3;
	uint64_t phy_op                       : 1;
	uint64_t reserved_17_63               : 47;
#endif
	} cn30xx;
	struct cvmx_smix_cmd_cn30xx cn31xx;
	struct cvmx_smix_cmd_cn30xx cn38xx;
	struct cvmx_smix_cmd_cn30xx cn38xxp2;
	struct cvmx_smix_cmd_s cn50xx;
	struct cvmx_smix_cmd_s cn52xx;
	struct cvmx_smix_cmd_s cn52xxp1;
	struct cvmx_smix_cmd_s cn56xx;
	struct cvmx_smix_cmd_s cn56xxp1;
	struct cvmx_smix_cmd_cn30xx cn58xx;
	struct cvmx_smix_cmd_cn30xx cn58xxp1;
	struct cvmx_smix_cmd_s cn63xx;
	struct cvmx_smix_cmd_s cn63xxp1;
};
typedef union cvmx_smix_cmd cvmx_smix_cmd_t;
/**
 * cvmx_smi#_en
 *
 * SMI_EN = Enable the SMI interface
 *
 * Single enable bit; identical layout on every supported chip model.
 */
union cvmx_smix_en {
	uint64_t u64;
	struct cvmx_smix_en_s {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_1_63                : 63;
	uint64_t en                           : 1;  /**< Interface enable
                                                         0=SMI Interface is down / no transactions, no MDC
                                                         1=SMI Interface is up */
#else
	uint64_t en                           : 1;
	uint64_t reserved_1_63                : 63;
#endif
	} s;
	struct cvmx_smix_en_s cn30xx;
	struct cvmx_smix_en_s cn31xx;
	struct cvmx_smix_en_s cn38xx;
	struct cvmx_smix_en_s cn38xxp2;
	struct cvmx_smix_en_s cn50xx;
	struct cvmx_smix_en_s cn52xx;
	struct cvmx_smix_en_s cn52xxp1;
	struct cvmx_smix_en_s cn56xx;
	struct cvmx_smix_en_s cn56xxp1;
	struct cvmx_smix_en_s cn58xx;
	struct cvmx_smix_en_s cn58xxp1;
	struct cvmx_smix_en_s cn63xx;
	struct cvmx_smix_en_s cn63xxp1;
};
typedef union cvmx_smix_en cvmx_smix_en_t;
/**
 * cvmx_smi#_rd_dat
 *
 * SMI_RD_DAT = SMI Read Data
 *
 * VAL asserts when a read transaction completes; reading this register
 * clears VAL.  PENDING indicates an SMI RD transaction is in flight.
 */
union cvmx_smix_rd_dat {
	uint64_t u64;
	struct cvmx_smix_rd_dat_s {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_18_63               : 46;
	uint64_t pending                      : 1;  /**< Read Xaction Pending */
	uint64_t val                          : 1;  /**< Read Data Valid */
	uint64_t dat                          : 16; /**< Read Data */
#else
	uint64_t dat                          : 16;
	uint64_t val                          : 1;
	uint64_t pending                      : 1;
	uint64_t reserved_18_63               : 46;
#endif
	} s;
	struct cvmx_smix_rd_dat_s cn30xx;
	struct cvmx_smix_rd_dat_s cn31xx;
	struct cvmx_smix_rd_dat_s cn38xx;
	struct cvmx_smix_rd_dat_s cn38xxp2;
	struct cvmx_smix_rd_dat_s cn50xx;
	struct cvmx_smix_rd_dat_s cn52xx;
	struct cvmx_smix_rd_dat_s cn52xxp1;
	struct cvmx_smix_rd_dat_s cn56xx;
	struct cvmx_smix_rd_dat_s cn56xxp1;
	struct cvmx_smix_rd_dat_s cn58xx;
	struct cvmx_smix_rd_dat_s cn58xxp1;
	struct cvmx_smix_rd_dat_s cn63xx;
	struct cvmx_smix_rd_dat_s cn63xxp1;
};
typedef union cvmx_smix_rd_dat cvmx_smix_rd_dat_t;
/**
 * cvmx_smi#_wr_dat
 *
 * SMI_WR_DAT = SMI Write Data
 *
 * VAL asserts when a write transaction completes; reading this register
 * clears VAL.  PENDING indicates an SMI WR transaction is in flight.
 */
union cvmx_smix_wr_dat {
	uint64_t u64;
	struct cvmx_smix_wr_dat_s {
#if __BYTE_ORDER == __BIG_ENDIAN
	uint64_t reserved_18_63               : 46;
	uint64_t pending                      : 1;  /**< Write Xaction Pending */
	uint64_t val                          : 1;  /**< Write Data Valid */
	uint64_t dat                          : 16; /**< Write Data */
#else
	uint64_t dat                          : 16;
	uint64_t val                          : 1;
	uint64_t pending                      : 1;
	uint64_t reserved_18_63               : 46;
#endif
	} s;
	struct cvmx_smix_wr_dat_s cn30xx;
	struct cvmx_smix_wr_dat_s cn31xx;
	struct cvmx_smix_wr_dat_s cn38xx;
	struct cvmx_smix_wr_dat_s cn38xxp2;
	struct cvmx_smix_wr_dat_s cn50xx;
	struct cvmx_smix_wr_dat_s cn52xx;
	struct cvmx_smix_wr_dat_s cn52xxp1;
	struct cvmx_smix_wr_dat_s cn56xx;
	struct cvmx_smix_wr_dat_s cn56xxp1;
	struct cvmx_smix_wr_dat_s cn58xx;
	struct cvmx_smix_wr_dat_s cn58xxp1;
	struct cvmx_smix_wr_dat_s cn63xx;
	struct cvmx_smix_wr_dat_s cn63xxp1;
};
typedef union cvmx_smix_wr_dat cvmx_smix_wr_dat_t;