1 /***********************license start***************
2 * Copyright (c) 2003-2012 Cavium Inc. (support@cavium.com). All rights
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * * Neither the name of Cavium Inc. nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
23 * This Software, including technical data, may be subject to U.S. export control
24 * laws, including the U.S. Export Administration Act and its associated
25 * regulations, and may be subject to export or import regulations in other
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
/**
 * @file
 *
 * Configuration and status register (CSR) type definitions for
 * Octeon SMI/MDIO (smix).
 *
 * This file is auto generated. Do not edit.
 *
 */
52 #ifndef __CVMX_SMIX_DEFS_H__
53 #define __CVMX_SMIX_DEFS_H__
55 static inline uint64_t CVMX_SMIX_CLK(unsigned long offset)
57 switch(cvmx_get_octeon_family()) {
58 case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
59 case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
60 case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
61 case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
62 case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
64 return CVMX_ADD_IO_SEG(0x0001180000001818ull) + ((offset) & 0) * 256;
66 case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
67 case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
68 case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
69 case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
70 case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
71 case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
73 return CVMX_ADD_IO_SEG(0x0001180000001818ull) + ((offset) & 1) * 256;
75 case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
77 return CVMX_ADD_IO_SEG(0x0001180000003818ull) + ((offset) & 3) * 128;
80 cvmx_warn("CVMX_SMIX_CLK (offset = %lu) not supported on this chip\n", offset);
81 return CVMX_ADD_IO_SEG(0x0001180000001818ull) + ((offset) & 1) * 256;
83 static inline uint64_t CVMX_SMIX_CMD(unsigned long offset)
85 switch(cvmx_get_octeon_family()) {
86 case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
87 case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
88 case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
89 case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
90 case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
92 return CVMX_ADD_IO_SEG(0x0001180000001800ull) + ((offset) & 0) * 256;
94 case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
95 case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
96 case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
97 case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
98 case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
99 case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
101 return CVMX_ADD_IO_SEG(0x0001180000001800ull) + ((offset) & 1) * 256;
103 case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
105 return CVMX_ADD_IO_SEG(0x0001180000003800ull) + ((offset) & 3) * 128;
108 cvmx_warn("CVMX_SMIX_CMD (offset = %lu) not supported on this chip\n", offset);
109 return CVMX_ADD_IO_SEG(0x0001180000001800ull) + ((offset) & 1) * 256;
111 static inline uint64_t CVMX_SMIX_EN(unsigned long offset)
113 switch(cvmx_get_octeon_family()) {
114 case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
115 case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
116 case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
117 case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
118 case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
120 return CVMX_ADD_IO_SEG(0x0001180000001820ull) + ((offset) & 0) * 256;
122 case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
123 case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
124 case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
125 case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
126 case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
127 case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
129 return CVMX_ADD_IO_SEG(0x0001180000001820ull) + ((offset) & 1) * 256;
131 case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
133 return CVMX_ADD_IO_SEG(0x0001180000003820ull) + ((offset) & 3) * 128;
136 cvmx_warn("CVMX_SMIX_EN (offset = %lu) not supported on this chip\n", offset);
137 return CVMX_ADD_IO_SEG(0x0001180000001820ull) + ((offset) & 1) * 256;
139 static inline uint64_t CVMX_SMIX_RD_DAT(unsigned long offset)
141 switch(cvmx_get_octeon_family()) {
142 case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
143 case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
144 case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
145 case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
146 case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
148 return CVMX_ADD_IO_SEG(0x0001180000001810ull) + ((offset) & 0) * 256;
150 case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
151 case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
152 case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
153 case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
154 case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
155 case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
157 return CVMX_ADD_IO_SEG(0x0001180000001810ull) + ((offset) & 1) * 256;
159 case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
161 return CVMX_ADD_IO_SEG(0x0001180000003810ull) + ((offset) & 3) * 128;
164 cvmx_warn("CVMX_SMIX_RD_DAT (offset = %lu) not supported on this chip\n", offset);
165 return CVMX_ADD_IO_SEG(0x0001180000001810ull) + ((offset) & 1) * 256;
167 static inline uint64_t CVMX_SMIX_WR_DAT(unsigned long offset)
169 switch(cvmx_get_octeon_family()) {
170 case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
171 case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
172 case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
173 case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
174 case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
176 return CVMX_ADD_IO_SEG(0x0001180000001808ull) + ((offset) & 0) * 256;
178 case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
179 case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
180 case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
181 case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
182 case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
183 case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
185 return CVMX_ADD_IO_SEG(0x0001180000001808ull) + ((offset) & 1) * 256;
187 case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
189 return CVMX_ADD_IO_SEG(0x0001180000003808ull) + ((offset) & 3) * 128;
192 cvmx_warn("CVMX_SMIX_WR_DAT (offset = %lu) not supported on this chip\n", offset);
193 return CVMX_ADD_IO_SEG(0x0001180000001808ull) + ((offset) & 1) * 256;
/**
 * cvmx_smi#_clk
 *
 * SMI_CLK = Clock Control Register
 *
 */
union cvmx_smix_clk {
	uint64_t u64;
	struct cvmx_smix_clk_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_25_63               : 39;
	uint64_t mode                         : 1;  /**< IEEE operating mode:
                                                         0=Clause 22 compliant, 1=Clause 45 compliant */
	uint64_t reserved_21_23               : 3;
	uint64_t sample_hi                    : 5;  /**< When to sample read data (extended bits) */
	uint64_t sample_mode                  : 1;  /**< Read data sampling mode.
                                                         Per 802.3, the PHY drives MDIO with some delay
                                                         relative to the STA's MDC edge (edge1); the STA
                                                         samples MDIO on the next rising MDC edge (edge2).
                                                         0=[SAMPLE_HI,SAMPLE] is relative to edge1,
                                                         1=[SAMPLE_HI,SAMPLE] is relative to edge2 */
	uint64_t reserved_14_14               : 1;
	uint64_t clk_idle                     : 1;  /**< Do not toggle MDC on idle cycles */
	uint64_t preamble                     : 1;  /**< Send PREAMBLE on SMI transaction.
                                                         Must be 1 when MODE=1 so the PHY frames correctly */
	uint64_t sample                       : 4;  /**< When to sample read data (eclks after rising MDC):
                                                         ([SAMPLE_HI,SAMPLE] > 1) and
                                                         ([SAMPLE_HI,SAMPLE] + 3 <= 2*PHASE) */
	uint64_t phase                        : 8;  /**< MDC clock phase (eclks per MDC phase) */
#else
	uint64_t phase                        : 8;
	uint64_t sample                       : 4;
	uint64_t preamble                     : 1;
	uint64_t clk_idle                     : 1;
	uint64_t reserved_14_14               : 1;
	uint64_t sample_mode                  : 1;
	uint64_t sample_hi                    : 5;
	uint64_t reserved_21_23               : 3;
	uint64_t mode                         : 1;
	uint64_t reserved_25_63               : 39;
#endif
	} s;
	struct cvmx_smix_clk_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_21_63               : 43;
	uint64_t sample_hi                    : 5;  /**< When to sample read data (extended bits) */
	uint64_t sample_mode                  : 1;  /**< Read data sampling mode (see cvmx_smix_clk_s) */
	uint64_t reserved_14_14               : 1;
	uint64_t clk_idle                     : 1;  /**< Do not toggle MDC on idle cycles */
	uint64_t preamble                     : 1;  /**< Send PREAMBLE on SMI transaction */
	uint64_t sample                       : 4;  /**< When to sample read data (eclks after rising MDC) */
	uint64_t phase                        : 8;  /**< MDC clock phase (eclks per MDC phase) */
#else
	uint64_t phase                        : 8;
	uint64_t sample                       : 4;
	uint64_t preamble                     : 1;
	uint64_t clk_idle                     : 1;
	uint64_t reserved_14_14               : 1;
	uint64_t sample_mode                  : 1;
	uint64_t sample_hi                    : 5;
	uint64_t reserved_21_63               : 43;
#endif
	} cn30xx;
	struct cvmx_smix_clk_cn30xx cn31xx;
	struct cvmx_smix_clk_cn30xx cn38xx;
	struct cvmx_smix_clk_cn30xx cn38xxp2;
	struct cvmx_smix_clk_s cn50xx;
	struct cvmx_smix_clk_s cn52xx;
	struct cvmx_smix_clk_s cn52xxp1;
	struct cvmx_smix_clk_s cn56xx;
	struct cvmx_smix_clk_s cn56xxp1;
	struct cvmx_smix_clk_cn30xx cn58xx;
	struct cvmx_smix_clk_cn30xx cn58xxp1;
	struct cvmx_smix_clk_s cn61xx;
	struct cvmx_smix_clk_s cn63xx;
	struct cvmx_smix_clk_s cn63xxp1;
	struct cvmx_smix_clk_s cn66xx;
	struct cvmx_smix_clk_s cn68xx;
	struct cvmx_smix_clk_s cn68xxp1;
	struct cvmx_smix_clk_s cnf71xx;
};
typedef union cvmx_smix_clk cvmx_smix_clk_t;
/**
 * cvmx_smi#_cmd
 *
 * SMI_CMD = Force a Read/Write command to the PHY
 *
 * Writes to this register create SMI transactions. Software polls
 * SMI_RD_DAT or SMI_WR_DAT (depending on the transaction type).
 */
union cvmx_smix_cmd {
	uint64_t u64;
	struct cvmx_smix_cmd_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_18_63               : 46;
	uint64_t phy_op                       : 2;  /**< PHY opcode, meaning depends on SMI_CLK[MODE].
                                                         MODE==0 (Clause 22): 0=write, 1=read.
                                                         MODE==1 (Clause 45): 0=address, 1=write,
                                                         3=read, 2=post-read-increment-address */
	uint64_t reserved_13_15               : 3;
	uint64_t phy_adr                      : 5;  /**< PHY address */
	uint64_t reserved_5_7                 : 3;
	uint64_t reg_adr                      : 5;  /**< PHY register offset */
#else
	uint64_t reg_adr                      : 5;
	uint64_t reserved_5_7                 : 3;
	uint64_t phy_adr                      : 5;
	uint64_t reserved_13_15               : 3;
	uint64_t phy_op                       : 2;
	uint64_t reserved_18_63               : 46;
#endif
	} s;
	struct cvmx_smix_cmd_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_17_63               : 47;
	uint64_t phy_op                       : 1;  /**< PHY opcode: 0=write, 1=read */
	uint64_t reserved_13_15               : 3;
	uint64_t phy_adr                      : 5;  /**< PHY address */
	uint64_t reserved_5_7                 : 3;
	uint64_t reg_adr                      : 5;  /**< PHY register offset */
#else
	uint64_t reg_adr                      : 5;
	uint64_t reserved_5_7                 : 3;
	uint64_t phy_adr                      : 5;
	uint64_t reserved_13_15               : 3;
	uint64_t phy_op                       : 1;
	uint64_t reserved_17_63               : 47;
#endif
	} cn30xx;
	struct cvmx_smix_cmd_cn30xx cn31xx;
	struct cvmx_smix_cmd_cn30xx cn38xx;
	struct cvmx_smix_cmd_cn30xx cn38xxp2;
	struct cvmx_smix_cmd_s cn50xx;
	struct cvmx_smix_cmd_s cn52xx;
	struct cvmx_smix_cmd_s cn52xxp1;
	struct cvmx_smix_cmd_s cn56xx;
	struct cvmx_smix_cmd_s cn56xxp1;
	struct cvmx_smix_cmd_cn30xx cn58xx;
	struct cvmx_smix_cmd_cn30xx cn58xxp1;
	struct cvmx_smix_cmd_s cn61xx;
	struct cvmx_smix_cmd_s cn63xx;
	struct cvmx_smix_cmd_s cn63xxp1;
	struct cvmx_smix_cmd_s cn66xx;
	struct cvmx_smix_cmd_s cn68xx;
	struct cvmx_smix_cmd_s cn68xxp1;
	struct cvmx_smix_cmd_s cnf71xx;
};
typedef union cvmx_smix_cmd cvmx_smix_cmd_t;
/**
 * cvmx_smi#_en
 *
 * SMI_EN = Enable the SMI interface
 *
 */
union cvmx_smix_en {
	uint64_t u64;
	struct cvmx_smix_en_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_1_63                : 63;
	uint64_t en                           : 1;  /**< Interface enable:
                                                         0=SMI interface down (no transactions, no MDC),
                                                         1=SMI interface up */
#else
	uint64_t en                           : 1;
	uint64_t reserved_1_63                : 63;
#endif
	} s;
	struct cvmx_smix_en_s cn30xx;
	struct cvmx_smix_en_s cn31xx;
	struct cvmx_smix_en_s cn38xx;
	struct cvmx_smix_en_s cn38xxp2;
	struct cvmx_smix_en_s cn50xx;
	struct cvmx_smix_en_s cn52xx;
	struct cvmx_smix_en_s cn52xxp1;
	struct cvmx_smix_en_s cn56xx;
	struct cvmx_smix_en_s cn56xxp1;
	struct cvmx_smix_en_s cn58xx;
	struct cvmx_smix_en_s cn58xxp1;
	struct cvmx_smix_en_s cn61xx;
	struct cvmx_smix_en_s cn63xx;
	struct cvmx_smix_en_s cn63xxp1;
	struct cvmx_smix_en_s cn66xx;
	struct cvmx_smix_en_s cn68xx;
	struct cvmx_smix_en_s cn68xxp1;
	struct cvmx_smix_en_s cnf71xx;
};
typedef union cvmx_smix_en cvmx_smix_en_t;
/**
 * cvmx_smi#_rd_dat
 *
 * SMI_RD_DAT = SMI Read Data
 *
 * VAL asserts when the read transaction completes; reading this register
 * clears VAL. PENDING indicates an SMI RD transaction is in flight.
 */
union cvmx_smix_rd_dat {
	uint64_t u64;
	struct cvmx_smix_rd_dat_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_18_63               : 46;
	uint64_t pending                      : 1;  /**< Read transaction pending */
	uint64_t val                          : 1;  /**< Read data valid */
	uint64_t dat                          : 16; /**< Read data */
#else
	uint64_t dat                          : 16;
	uint64_t val                          : 1;
	uint64_t pending                      : 1;
	uint64_t reserved_18_63               : 46;
#endif
	} s;
	struct cvmx_smix_rd_dat_s cn30xx;
	struct cvmx_smix_rd_dat_s cn31xx;
	struct cvmx_smix_rd_dat_s cn38xx;
	struct cvmx_smix_rd_dat_s cn38xxp2;
	struct cvmx_smix_rd_dat_s cn50xx;
	struct cvmx_smix_rd_dat_s cn52xx;
	struct cvmx_smix_rd_dat_s cn52xxp1;
	struct cvmx_smix_rd_dat_s cn56xx;
	struct cvmx_smix_rd_dat_s cn56xxp1;
	struct cvmx_smix_rd_dat_s cn58xx;
	struct cvmx_smix_rd_dat_s cn58xxp1;
	struct cvmx_smix_rd_dat_s cn61xx;
	struct cvmx_smix_rd_dat_s cn63xx;
	struct cvmx_smix_rd_dat_s cn63xxp1;
	struct cvmx_smix_rd_dat_s cn66xx;
	struct cvmx_smix_rd_dat_s cn68xx;
	struct cvmx_smix_rd_dat_s cn68xxp1;
	struct cvmx_smix_rd_dat_s cnf71xx;
};
typedef union cvmx_smix_rd_dat cvmx_smix_rd_dat_t;
/**
 * cvmx_smi#_wr_dat
 *
 * SMI_WR_DAT = SMI Write Data
 *
 * VAL asserts when the write transaction completes; reading this register
 * clears VAL. PENDING indicates an SMI WR transaction is in flight.
 */
union cvmx_smix_wr_dat {
	uint64_t u64;
	struct cvmx_smix_wr_dat_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_18_63               : 46;
	uint64_t pending                      : 1;  /**< Write transaction pending */
	uint64_t val                          : 1;  /**< Write data valid */
	uint64_t dat                          : 16; /**< Write data */
#else
	uint64_t dat                          : 16;
	uint64_t val                          : 1;
	uint64_t pending                      : 1;
	uint64_t reserved_18_63               : 46;
#endif
	} s;
	struct cvmx_smix_wr_dat_s cn30xx;
	struct cvmx_smix_wr_dat_s cn31xx;
	struct cvmx_smix_wr_dat_s cn38xx;
	struct cvmx_smix_wr_dat_s cn38xxp2;
	struct cvmx_smix_wr_dat_s cn50xx;
	struct cvmx_smix_wr_dat_s cn52xx;
	struct cvmx_smix_wr_dat_s cn52xxp1;
	struct cvmx_smix_wr_dat_s cn56xx;
	struct cvmx_smix_wr_dat_s cn56xxp1;
	struct cvmx_smix_wr_dat_s cn58xx;
	struct cvmx_smix_wr_dat_s cn58xxp1;
	struct cvmx_smix_wr_dat_s cn61xx;
	struct cvmx_smix_wr_dat_s cn63xx;
	struct cvmx_smix_wr_dat_s cn63xxp1;
	struct cvmx_smix_wr_dat_s cn66xx;
	struct cvmx_smix_wr_dat_s cn68xx;
	struct cvmx_smix_wr_dat_s cn68xxp1;
	struct cvmx_smix_wr_dat_s cnf71xx;
};
typedef union cvmx_smix_wr_dat cvmx_smix_wr_dat_t;