1 /***********************license start***************
2 * Copyright (c) 2003-2011 Cavium Inc. (support@cavium.com). All rights
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * * Neither the name of Cavium Inc. nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
23 * This Software, including technical data, may be subject to U.S. export control
24 * laws, including the U.S. Export Administration Act and its associated
25 * regulations, and may be subject to export or import regulations in other
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
44 * Interface to the hardware Packet Order / Work unit.
46 * New, starting with SDK 1.7.0, cvmx-pow supports a number of
47 * extended consistency checks. The define
48 * CVMX_ENABLE_POW_CHECKS controls the runtime insertion of POW
49 * internal state checks to find common programming errors. If
50 * CVMX_ENABLE_POW_CHECKS is not defined, checks are by default
51 * enabled. For example, cvmx-pow will check for the following
52 * program errors or POW state inconsistency.
53 * - Requesting a POW operation with an active tag switch in
55 * - Waiting for a tag switch to complete for an excessively
56 * long period. This is normally a sign of an error in locking
58 * - Illegal tag switches from NULL_NULL.
59 * - Illegal tag switches from NULL.
60 * - Illegal deschedule request.
61 * - WQE pointer not matching the one attached to the core by
64 * <hr>$Revision: 70030 $<hr>
67 #ifndef __CVMX_POW_H__
68 #define __CVMX_POW_H__
70 #include "cvmx-scratch.h"
73 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
74 #include <asm/octeon/cvmx-sso-defs.h>
76 #include "cvmx-warn.h"
83 #if defined(__FreeBSD__) && defined(_KERNEL)
85 * For the FreeBSD kernel, have POW consistency checks depend on
86 * the setting of INVARIANTS.
88 #ifndef CVMX_ENABLE_POW_CHECKS
90 #define CVMX_ENABLE_POW_CHECKS 1
92 #define CVMX_ENABLE_POW_CHECKS 0
96 /* Default to having all POW consistency checks turned on */
97 #ifndef CVMX_ENABLE_POW_CHECKS
98 #define CVMX_ENABLE_POW_CHECKS 1
103 * Wait flag values for pow functions.
108 CVMX_POW_NO_WAIT = 0,
112 * POW tag operations. These are used in the data stored to the POW.
116 CVMX_POW_TAG_OP_SWTAG = 0L, /**< switch the tag (only) for this PP
117 - the previous tag should be non-NULL in this case
118 - tag switch response required
119 - fields used: op, type, tag */
120 CVMX_POW_TAG_OP_SWTAG_FULL = 1L, /**< switch the tag for this PP, with full information
121 - this should be used when the previous tag is NULL
122 - tag switch response required
123 - fields used: address, op, grp, type, tag */
124 CVMX_POW_TAG_OP_SWTAG_DESCH = 2L, /**< switch the tag (and/or group) for this PP and de-schedule
125 - OK to keep the tag the same and only change the group
126 - fields used: op, no_sched, grp, type, tag */
127 CVMX_POW_TAG_OP_DESCH = 3L, /**< just de-schedule
128 - fields used: op, no_sched */
129 CVMX_POW_TAG_OP_ADDWQ = 4L, /**< create an entirely new work queue entry
130 - fields used: address, op, qos, grp, type, tag */
131 CVMX_POW_TAG_OP_UPDATE_WQP_GRP = 5L,/**< just update the work queue pointer and grp for this PP
132 - fields used: address, op, grp */
133 CVMX_POW_TAG_OP_SET_NSCHED = 6L, /**< set the no_sched bit on the de-schedule list
134 - does nothing if the selected entry is not on the de-schedule list
135 - does nothing if the stored work queue pointer does not match the address field
136 - fields used: address, index, op
137 Before issuing a *_NSCHED operation, SW must guarantee that all
138 prior deschedules and set/clr NSCHED operations are complete and all
139 prior switches are complete. The hardware provides the opsdone bit
140 and swdone bit for SW polling. After issuing a *_NSCHED operation,
141 SW must guarantee that the set/clr NSCHED is complete before
142 any subsequent operations. */
143 CVMX_POW_TAG_OP_CLR_NSCHED = 7L, /**< clears the no_sched bit on the de-schedule list
144 - does nothing if the selected entry is not on the de-schedule list
145 - does nothing if the stored work queue pointer does not match the address field
146 - fields used: address, index, op
147 Before issuing a *_NSCHED operation, SW must guarantee that all
148 prior deschedules and set/clr NSCHED operations are complete and all
149 prior switches are complete. The hardware provides the opsdone bit
150 and swdone bit for SW polling. After issuing a *_NSCHED operation,
151 SW must guarantee that the set/clr NSCHED is complete before
152 any subsequent operations. */
153 CVMX_POW_TAG_OP_NOP = 15L /**< do nothing */
157 * This structure defines the store data on a store to POW
164 #ifdef __BIG_ENDIAN_BITFIELD
165 uint64_t no_sched : 1; /**< don't reschedule this entry. no_sched is used for CVMX_POW_TAG_OP_SWTAG_DESCH and CVMX_POW_TAG_OP_DESCH */
167 uint64_t index :13; /**< contains index of entry for a CVMX_POW_TAG_OP_*_NSCHED */
168 cvmx_pow_tag_op_t op : 4; /**< the operation to perform */
169 uint64_t unused2 : 2;
170 uint64_t qos : 3; /**< the QOS level for the packet. qos is only used for CVMX_POW_TAG_OP_ADDWQ */
171 uint64_t grp : 4; /**< the group that the work queue entry will be scheduled to grp is used for CVMX_POW_TAG_OP_ADDWQ, CVMX_POW_TAG_OP_SWTAG_FULL, CVMX_POW_TAG_OP_SWTAG_DESCH, and CVMX_POW_TAG_OP_UPDATE_WQP_GRP */
172 cvmx_pow_tag_type_t type : 3; /**< the type of the tag. type is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
173 uint64_t tag :32; /**< the actual tag. tag is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
176 cvmx_pow_tag_type_t type : 3;
179 uint64_t unused2 : 2;
180 cvmx_pow_tag_op_t op : 4;
183 uint64_t no_sched : 1;
187 #ifdef __BIG_ENDIAN_BITFIELD
188 uint64_t no_sched : 1; /**< don't reschedule this entry. no_sched is used for CVMX_POW_TAG_OP_SWTAG_DESCH and CVMX_POW_TAG_OP_DESCH */
189 cvmx_pow_tag_op_t op : 4; /**< the operation to perform */
190 uint64_t unused1 : 4;
191 uint64_t index :11; /**< contains index of entry for a CVMX_POW_TAG_OP_*_NSCHED */
192 uint64_t unused2 : 1;
193 uint64_t grp : 6; /**< the group that the work queue entry will be scheduled to grp is used for CVMX_POW_TAG_OP_ADDWQ, CVMX_POW_TAG_OP_SWTAG_FULL, CVMX_POW_TAG_OP_SWTAG_DESCH, and CVMX_POW_TAG_OP_UPDATE_WQP_GRP */
194 uint64_t unused3 : 3;
195 cvmx_pow_tag_type_t type : 2; /**< the type of the tag. type is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
196 uint64_t tag :32; /**< the actual tag. tag is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
199 cvmx_pow_tag_type_t type : 2;
200 uint64_t unused3 : 3;
202 uint64_t unused2 : 1;
204 uint64_t unused1 : 4;
205 cvmx_pow_tag_op_t op : 4;
206 uint64_t no_sched : 1;
210 #ifdef __BIG_ENDIAN_BITFIELD
211 uint64_t no_sched : 1; /**< don't reschedule this entry. no_sched is used for CVMX_POW_TAG_OP_SWTAG_DESCH and CVMX_POW_TAG_OP_DESCH */
212 cvmx_pow_tag_op_t op : 4; /**< the operation to perform */
213 uint64_t unused1 : 12;
214 uint64_t qos : 3; /**< the QOS level for the packet. qos is only used for CVMX_POW_TAG_OP_ADDWQ */
215 uint64_t unused2 : 1;
216 uint64_t grp : 6; /**< the group that the work queue entry will be scheduled to grp is used for CVMX_POW_TAG_OP_ADDWQ, CVMX_POW_TAG_OP_SWTAG_FULL, CVMX_POW_TAG_OP_SWTAG_DESCH, and CVMX_POW_TAG_OP_UPDATE_WQP_GRP */
217 uint64_t unused3 : 3;
218 cvmx_pow_tag_type_t type : 2; /**< the type of the tag. type is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
219 uint64_t tag :32; /**< the actual tag. tag is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
222 cvmx_pow_tag_type_t type : 2;
223 uint64_t unused3 : 3;
225 uint64_t unused2 : 1;
227 uint64_t unused1 : 12;
228 cvmx_pow_tag_op_t op : 4;
229 uint64_t no_sched : 1;
233 #ifdef __BIG_ENDIAN_BITFIELD
234 uint64_t no_sched : 1; /**< don't reschedule this entry. no_sched is used for CVMX_POW_TAG_OP_SWTAG_DESCH and CVMX_POW_TAG_OP_DESCH */
235 cvmx_pow_tag_op_t op : 4; /**< the operation to perform */
236 uint64_t unused1 : 16;
237 uint64_t grp : 6; /**< the group that the work queue entry will be scheduled to grp is used for CVMX_POW_TAG_OP_ADDWQ, CVMX_POW_TAG_OP_SWTAG_FULL, CVMX_POW_TAG_OP_SWTAG_DESCH, and CVMX_POW_TAG_OP_UPDATE_WQP_GRP */
238 uint64_t unused3 : 3;
239 cvmx_pow_tag_type_t type : 2; /**< the type of the tag. type is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
240 uint64_t tag :32; /**< the actual tag. tag is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
243 cvmx_pow_tag_type_t type : 2;
244 uint64_t unused3 : 3;
246 uint64_t unused1 : 16;
247 cvmx_pow_tag_op_t op : 4;
248 uint64_t no_sched : 1;
252 } cvmx_pow_tag_req_t;
259 }cvmx_pow_tag_info_t;
262 * This structure describes the address to load stuff from POW
269 * Address for new work request loads (did<2:0> == 0)
273 #ifdef __BIG_ENDIAN_BITFIELD
274 uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */
275 uint64_t reserved_49_61 : 13; /**< Must be zero */
276 uint64_t is_io : 1; /**< Must be one */
277 uint64_t did : 8; /**< the ID of POW -- did<2:0> == 0 in this case */
278 uint64_t reserved_4_39 : 36; /**< Must be zero */
279 uint64_t wait : 1; /**< If set, don't return load response until work is available */
280 uint64_t reserved_0_2 : 3; /**< Must be zero */
282 uint64_t reserved_0_2 : 3;
284 uint64_t reserved_4_39 : 36;
287 uint64_t reserved_49_61 : 13;
288 uint64_t mem_region : 2;
293 * Address for loads to get POW internal status
297 #ifdef __BIG_ENDIAN_BITFIELD
298 uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */
299 uint64_t reserved_49_61 : 13; /**< Must be zero */
300 uint64_t is_io : 1; /**< Must be one */
301 uint64_t did : 8; /**< the ID of POW -- did<2:0> == 1 in this case */
302 uint64_t reserved_10_39 : 30; /**< Must be zero */
303 uint64_t coreid : 4; /**< The core id to get status for */
304 uint64_t get_rev : 1; /**< If set and get_cur is set, return reverse tag-list pointer rather than forward tag-list pointer */
305 uint64_t get_cur : 1; /**< If set, return current status rather than pending status */
306 uint64_t get_wqp : 1; /**< If set, get the work-queue pointer rather than tag/type */
307 uint64_t reserved_0_2 : 3; /**< Must be zero */
309 uint64_t reserved_0_2 : 3;
310 uint64_t get_wqp : 1;
311 uint64_t get_cur : 1;
312 uint64_t get_rev : 1;
314 uint64_t reserved_10_39 : 30;
317 uint64_t reserved_49_61 : 13;
318 uint64_t mem_region : 2;
323 * Address for loads to get 68XX SSO internal status
327 #ifdef __BIG_ENDIAN_BITFIELD
328 uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */
329 uint64_t reserved_49_61 : 13; /**< Must be zero */
330 uint64_t is_io : 1; /**< Must be one */
331 uint64_t did : 8; /**< the ID of POW -- did<2:0> == 1 in this case */
332 uint64_t reserved_14_39 : 26; /**< Must be zero */
333 uint64_t coreid : 5; /**< The core id to get status for */
334 uint64_t reserved_6_8 : 3;
335 uint64_t opcode : 3; /**< Status operation */
336 uint64_t reserved_0_2 : 3; /**< Must be zero */
338 uint64_t reserved_0_2 : 3;
340 uint64_t reserved_6_8 : 3;
342 uint64_t reserved_14_39 : 26;
345 uint64_t reserved_49_61 : 13;
346 uint64_t mem_region : 2;
351 * Address for memory loads to get POW internal state
355 #ifdef __BIG_ENDIAN_BITFIELD
356 uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */
357 uint64_t reserved_49_61 : 13; /**< Must be zero */
358 uint64_t is_io : 1; /**< Must be one */
359 uint64_t did : 8; /**< the ID of POW -- did<2:0> == 2 in this case */
360 uint64_t reserved_16_39 : 24; /**< Must be zero */
361 uint64_t index : 11; /**< POW memory index */
362 uint64_t get_des : 1; /**< If set, return deschedule information rather than the standard
363 response for work-queue index (invalid if the work-queue entry is not on the
365 uint64_t get_wqp : 1; /**< If set, get the work-queue pointer rather than tag/type (no effect when get_des set). */
366 uint64_t reserved_0_2 : 3; /**< Must be zero */
368 uint64_t reserved_0_2 : 3;
369 uint64_t get_wqp : 1;
370 uint64_t get_des : 1;
372 uint64_t reserved_16_39 : 24;
375 uint64_t reserved_49_61 : 13;
376 uint64_t mem_region : 2;
381 * Address for memory loads to get SSO internal state
385 #ifdef __BIG_ENDIAN_BITFIELD
386 uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */
387 uint64_t reserved_49_61 : 13; /**< Must be zero */
388 uint64_t is_io : 1; /**< Must be one */
389 uint64_t did : 8; /**< the ID of SSO - did<2:0> == 2 in this case */
390 uint64_t reserved_20_39 : 20; /**< Must be zero */
391 uint64_t index : 11; /**< SSO memory index */
392 uint64_t reserved_6_8 : 3; /**< Must be zero */
393 uint64_t opcode : 3; /**< Read TAG/WQ pointer/pending tag/next pointer */
394 uint64_t reserved_0_2 : 3; /**< Must be zero */
396 uint64_t reserved_0_2 : 3;
398 uint64_t reserved_3_5 : 3;
400 uint64_t reserved_20_39 : 20;
403 uint64_t reserved_49_61 : 13;
404 uint64_t mem_region : 2;
409 * Address for index/pointer loads
413 #ifdef __BIG_ENDIAN_BITFIELD
414 uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */
415 uint64_t reserved_49_61 : 13; /**< Must be zero */
416 uint64_t is_io : 1; /**< Must be one */
417 uint64_t did : 8; /**< the ID of POW -- did<2:0> == 3 in this case */
418 uint64_t reserved_9_39 : 31; /**< Must be zero */
419 uint64_t qosgrp : 4; /**< when {get_rmt ==0 AND get_des_get_tail == 0}, this field selects one of
420 eight POW internal-input queues (0-7), one per QOS level; values 8-15 are
421 illegal in this case;
422 when {get_rmt ==0 AND get_des_get_tail == 1}, this field selects one of
423 16 deschedule lists (per group);
424 when get_rmt ==1, this field selects one of 16 memory-input queue lists.
425 The two memory-input queue lists associated with each QOS level are:
426 - qosgrp = 0, qosgrp = 8: QOS0
427 - qosgrp = 1, qosgrp = 9: QOS1
428 - qosgrp = 2, qosgrp = 10: QOS2
429 - qosgrp = 3, qosgrp = 11: QOS3
430 - qosgrp = 4, qosgrp = 12: QOS4
431 - qosgrp = 5, qosgrp = 13: QOS5
432 - qosgrp = 6, qosgrp = 14: QOS6
433 - qosgrp = 7, qosgrp = 15: QOS7 */
434 uint64_t get_des_get_tail: 1; /**< If set and get_rmt is clear, return deschedule list indexes
435 rather than indexes for the specified qos level; if set and get_rmt is set, return
436 the tail pointer rather than the head pointer for the specified qos level. */
437 uint64_t get_rmt : 1; /**< If set, return remote pointers rather than the local indexes for the specified qos level. */
438 uint64_t reserved_0_2 : 3; /**< Must be zero */
440 uint64_t reserved_0_2 : 3;
441 uint64_t get_rmt : 1;
442 uint64_t get_des_get_tail: 1;
444 uint64_t reserved_9_39 : 31;
447 uint64_t reserved_49_61 : 13;
448 uint64_t mem_region : 2;
453 * Address for Index/Pointer loads to get SSO internal state
457 #ifdef __BIG_ENDIAN_BITFIELD
458 uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */
459 uint64_t reserved_49_61 : 13; /**< Must be zero */
460 uint64_t is_io : 1; /**< Must be one */
461 uint64_t did : 8; /**< the ID of SSO - did<2:0> == 2 in this case */
462 uint64_t reserved_15_39 : 25; /**< Must be zero */
463 uint64_t qos_grp : 6; /**< When opcode = IPL_IQ, this field specifies IQ (or QOS).
464 When opcode = IPL_DESCHED, this field specifies the group.
465 This field is reserved for all other opcodes. */
466 uint64_t reserved_6_8 : 3; /**< Must be zero */
467 uint64_t opcode : 3; /**< Read TAG/WQ pointer/pending tag/next pointer */
468 uint64_t reserved_0_2 : 3; /**< Must be zero */
470 uint64_t reserved_0_2 : 3;
472 uint64_t reserved_3_5 : 3;
473 uint64_t qos_grp : 6;
474 uint64_t reserved_15_39 : 25;
477 uint64_t reserved_49_61 : 13;
478 uint64_t mem_region : 2;
483 * address for NULL_RD request (did<2:0> == 4)
484 * when this is read, HW attempts to change the state to NULL if it is NULL_NULL
485 * (the hardware cannot switch from NULL_NULL to NULL if a POW entry is not available -
486 * software may need to recover by finishing another piece of work before a POW
487 * entry can ever become available.)
491 #ifdef __BIG_ENDIAN_BITFIELD
492 uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */
493 uint64_t reserved_49_61 : 13; /**< Must be zero */
494 uint64_t is_io : 1; /**< Must be one */
495 uint64_t did : 8; /**< the ID of POW -- did<2:0> == 4 in this case */
496 uint64_t reserved_0_39 : 40; /**< Must be zero */
498 uint64_t reserved_0_39 : 40;
501 uint64_t reserved_49_61 : 13;
502 uint64_t mem_region : 2;
505 } cvmx_pow_load_addr_t;
508 * This structure defines the response to a load/SENDSINGLE to POW (except CSR reads)
515 * Response to new work request loads
519 #ifdef __BIG_ENDIAN_BITFIELD
520 uint64_t no_work : 1; /**< Set when no new work queue entry was returned.
521 If there was de-scheduled work, the HW will definitely
522 return it. When this bit is set, it could mean
524 - There was no work, or
525 - There was no work that the HW could find. This
526 case can happen, regardless of the wait bit value
527 in the original request, when there is work
528 in the IQ's that is too deep down the list. */
529 uint64_t reserved_40_62 : 23; /**< Must be zero */
530 uint64_t addr : 40; /**< 36 in O1 -- the work queue pointer */
533 uint64_t reserved_40_62 : 23;
534 uint64_t no_work : 1;
539 * Result for a POW Status Load (when get_cur==0 and get_wqp==0)
543 #ifdef __BIG_ENDIAN_BITFIELD
544 uint64_t reserved_62_63 : 2;
545 uint64_t pend_switch : 1; /**< Set when there is a pending non-NULL SWTAG or
546 SWTAG_FULL, and the POW entry has not left the list for the original tag. */
547 uint64_t pend_switch_full: 1; /**< Set when SWTAG_FULL and pend_switch is set. */
548 uint64_t pend_switch_null: 1; /**< Set when there is a pending NULL SWTAG, or an implicit switch to NULL. */
549 uint64_t pend_desched : 1; /**< Set when there is a pending DESCHED or SWTAG_DESCHED. */
550 uint64_t pend_desched_switch: 1; /**< Set when there is a pending SWTAG_DESCHED and pend_desched is set. */
551 uint64_t pend_nosched : 1; /**< Set when nosched is desired and pend_desched is set. */
552 uint64_t pend_new_work : 1; /**< Set when there is a pending GET_WORK. */
553 uint64_t pend_new_work_wait: 1; /**< When pend_new_work is set, this bit indicates that the wait bit was set. */
554 uint64_t pend_null_rd : 1; /**< Set when there is a pending NULL_RD. */
555 uint64_t pend_nosched_clr: 1; /**< Set when there is a pending CLR_NSCHED. */
556 uint64_t reserved_51 : 1;
557 uint64_t pend_index : 11; /**< This is the index when pend_nosched_clr is set. */
558 uint64_t pend_grp : 4; /**< This is the new_grp when (pend_desched AND pend_desched_switch) is set. */
559 uint64_t reserved_34_35 : 2;
560 uint64_t pend_type : 2; /**< This is the tag type when pend_switch or (pend_desched AND pend_desched_switch) are set. */
561 uint64_t pend_tag : 32; /**< - this is the tag when pend_switch or (pend_desched AND pend_desched_switch) are set. */
563 uint64_t pend_tag : 32;
564 uint64_t pend_type : 2;
565 uint64_t reserved_34_35 : 2;
566 uint64_t pend_grp : 4;
567 uint64_t pend_index : 11;
568 uint64_t reserved_51 : 1;
569 uint64_t pend_nosched_clr: 1;
570 uint64_t pend_null_rd : 1;
571 uint64_t pend_new_work_wait: 1;
572 uint64_t pend_new_work : 1;
573 uint64_t pend_nosched : 1;
574 uint64_t pend_desched_switch: 1;
575 uint64_t pend_desched : 1;
576 uint64_t pend_switch_null: 1;
577 uint64_t pend_switch_full: 1;
578 uint64_t pend_switch : 1;
579 uint64_t reserved_62_63 : 2;
584 * Result for a SSO Status Load (when opcode is SL_PENDTAG)
588 #ifdef __BIG_ENDIAN_BITFIELD
589 uint64_t pend_switch : 1; /**< Set when there is a pending non-UNSCHEDULED SWTAG or
590 SWTAG_FULL, and the SSO entry has not left the list for the original tag. */
591 uint64_t pend_get_work : 1; /**< Set when there is a pending GET_WORK */
592 uint64_t pend_get_work_wait: 1; /**< when pend_get_work is set, this bit indicates that the
594 uint64_t pend_nosched : 1; /**< Set when nosched is desired and pend_desched is set. */
595 uint64_t pend_nosched_clr: 1; /**< Set when there is a pending CLR_NSCHED. */
596 uint64_t pend_desched : 1; /**< Set when there is a pending DESCHED or SWTAG_DESCHED. */
597 uint64_t pend_alloc_we : 1; /**< Set when there is a pending ALLOC_WE. */
598 uint64_t reserved_48_56 : 9;
599 uint64_t pend_index : 11; /**< This is the index when pend_nosched_clr is set. */
600 uint64_t reserved_34_36 : 3;
601 uint64_t pend_type : 2; /**< This is the tag type when pend_switch is set. */
602 uint64_t pend_tag : 32; /**< This is the tag when pend_switch is set. */
604 uint64_t pend_tag : 32;
605 uint64_t pend_type : 2;
606 uint64_t reserved_34_36 : 3;
607 uint64_t pend_index : 11;
608 uint64_t reserved_48_56 : 9;
609 uint64_t pend_alloc_we : 1;
610 uint64_t pend_desched : 1;
611 uint64_t pend_nosched_clr: 1;
612 uint64_t pend_nosched : 1;
613 uint64_t pend_get_work_wait: 1;
614 uint64_t pend_get_work : 1;
615 uint64_t pend_switch : 1;
620 * Result for a POW Status Load (when get_cur==0 and get_wqp==1)
624 #ifdef __BIG_ENDIAN_BITFIELD
625 uint64_t reserved_62_63 : 2;
626 uint64_t pend_switch : 1; /**< Set when there is a pending non-NULL SWTAG or
627 SWTAG_FULL, and the POW entry has not left the list for the original tag. */
628 uint64_t pend_switch_full: 1; /**< Set when SWTAG_FULL and pend_switch is set. */
629 uint64_t pend_switch_null: 1; /**< Set when there is a pending NULL SWTAG, or an implicit switch to NULL. */
630 uint64_t pend_desched : 1; /**< Set when there is a pending DESCHED or SWTAG_DESCHED. */
631 uint64_t pend_desched_switch: 1; /**< Set when there is a pending SWTAG_DESCHED and pend_desched is set. */
632 uint64_t pend_nosched : 1; /**< Set when nosched is desired and pend_desched is set. */
633 uint64_t pend_new_work : 1; /**< Set when there is a pending GET_WORK. */
634 uint64_t pend_new_work_wait: 1; /**< When pend_new_work is set, this bit indicates that the wait bit was set. */
635 uint64_t pend_null_rd : 1; /**< Set when there is a pending NULL_RD. */
636 uint64_t pend_nosched_clr: 1; /**< Set when there is a pending CLR_NSCHED. */
637 uint64_t reserved_51 : 1;
638 uint64_t pend_index : 11; /**< This is the index when pend_nosched_clr is set. */
639 uint64_t pend_grp : 4; /**< This is the new_grp when (pend_desched AND pend_desched_switch) is set. */
640 uint64_t pend_wqp : 36; /**< This is the wqp when pend_nosched_clr is set. */
642 uint64_t pend_wqp : 36;
643 uint64_t pend_grp : 4;
644 uint64_t pend_index : 11;
645 uint64_t reserved_51 : 1;
646 uint64_t pend_nosched_clr: 1;
647 uint64_t pend_null_rd : 1;
648 uint64_t pend_new_work_wait: 1;
649 uint64_t pend_new_work : 1;
650 uint64_t pend_nosched : 1;
651 uint64_t pend_desched_switch: 1;
652 uint64_t pend_desched : 1;
653 uint64_t pend_switch_null: 1;
654 uint64_t pend_switch_full: 1;
655 uint64_t pend_switch : 1;
656 uint64_t reserved_62_63 : 2;
661 * Result for a SSO Status Load (when opcode is SL_PENDWQP)
665 #ifdef __BIG_ENDIAN_BITFIELD
666 uint64_t pend_switch : 1; /**< Set when there is a pending non-UNSCHEDULED SWTAG or
667 SWTAG_FULL, and the SSO entry has not left the list for the original tag. */
668 uint64_t pend_get_work : 1; /**< Set when there is a pending GET_WORK */
669 uint64_t pend_get_work_wait: 1; /**< when pend_get_work is set, this bit indicates that the
671 uint64_t pend_nosched : 1; /**< Set when nosched is desired and pend_desched is set. */
672 uint64_t pend_nosched_clr: 1; /**< Set when there is a pending CLR_NSCHED. */
673 uint64_t pend_desched : 1; /**< Set when there is a pending DESCHED or SWTAG_DESCHED. */
674 uint64_t pend_alloc_we : 1; /**< Set when there is a pending ALLOC_WE. */
675 uint64_t reserved_51_56 : 6;
676 uint64_t pend_index : 11; /**< This is the index when pend_nosched_clr is set. */
677 uint64_t reserved_38_39 : 2;
678 uint64_t pend_wqp : 38; /**< This is the wqp when pend_nosched_clr is set. */
680 uint64_t pend_wqp : 38;
681 uint64_t reserved_38_39 : 2;
682 uint64_t pend_index : 11;
683 uint64_t reserved_51_56 : 6;
684 uint64_t pend_alloc_we : 1;
685 uint64_t pend_desched : 1;
686 uint64_t pend_nosched_clr: 1;
687 uint64_t pend_nosched : 1;
688 uint64_t pend_get_work_wait: 1;
689 uint64_t pend_get_work : 1;
690 uint64_t pend_switch : 1;
695 * Result for a POW Status Load (when get_cur==1, get_wqp==0, and get_rev==0)
699 #ifdef __BIG_ENDIAN_BITFIELD
700 uint64_t reserved_62_63 : 2;
701 uint64_t link_index : 11; /**< Points to the next POW entry in the tag list when tail == 0 (and
702 tag_type is not NULL or NULL_NULL). */
703 uint64_t index : 11; /**< The POW entry attached to the core. */
704 uint64_t grp : 4; /**< The group attached to the core (updated when new tag list entered on SWTAG_FULL). */
705 uint64_t head : 1; /**< Set when this POW entry is at the head of its tag list (also set when in
706 the NULL or NULL_NULL state). */
707 uint64_t tail : 1; /**< Set when this POW entry is at the tail of its tag list (also set when in the
708 NULL or NULL_NULL state). */
709 uint64_t tag_type : 2; /**< The tag type attached to the core (updated when new tag list
710 entered on SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */
711 uint64_t tag : 32; /**< The tag attached to the core (updated when new tag list entered on
712 SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */
715 uint64_t tag_type : 2;
720 uint64_t link_index : 11;
721 uint64_t reserved_62_63 : 2;
726 * Result for a SSO Status Load (when opcode is SL_TAG)
730 #ifdef __BIG_ENDIAN_BITFIELD
731 uint64_t reserved_57_63 : 7;
732 uint64_t index : 11; /**< The SSO entry attached to the core. */
733 uint64_t reserved_45 : 1;
734 uint64_t grp : 6; /**< The group attached to the core (updated when new tag list entered on
736 uint64_t head : 1; /**< Set when this SSO entry is at the head of its tag list (also set when in the
737 UNSCHEDULED or EMPTY state). */
738 uint64_t tail : 1; /**< Set when this SSO entry is at the tail of its tag list (also set when in the
739 UNSCHEDULED or EMPTY state). */
740 uint64_t reserved_34_36 : 3;
741 uint64_t tag_type : 2; /**< The tag type attached to the core (updated when new tag list entered
742 on SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */
743 uint64_t tag : 32; /**< The tag attached to the core (updated when new tag list entered on SWTAG,
744 SWTAG_FULL, or SWTAG_DESCHED). */
747 uint64_t tag_type : 2;
748 uint64_t reserved_34_36 : 3;
752 uint64_t reserved_45 : 1;
754 uint64_t reserved_57_63 : 7;
759 * Result for a POW Status Load (when get_cur==1, get_wqp==0, and get_rev==1)
763 #ifdef __BIG_ENDIAN_BITFIELD
764 uint64_t reserved_62_63 : 2;
765 uint64_t revlink_index : 11; /**< Points to the prior POW entry in the tag list when head == 0
766 (and tag_type is not NULL or NULL_NULL). This field is unpredictable
767 when the core's state is NULL or NULL_NULL. */
768 uint64_t index : 11; /**< The POW entry attached to the core. */
769 uint64_t grp : 4; /**< The group attached to the core (updated when new tag list entered on SWTAG_FULL). */
770 uint64_t head : 1; /**< Set when this POW entry is at the head of its tag list (also set when in
771 the NULL or NULL_NULL state). */
772 uint64_t tail : 1; /**< Set when this POW entry is at the tail of its tag list (also set when in the
773 NULL or NULL_NULL state). */
774 uint64_t tag_type : 2; /**< The tag type attached to the core (updated when new tag list
775 entered on SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */
776 uint64_t tag : 32; /**< The tag attached to the core (updated when new tag list entered on
777 SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */
780 uint64_t tag_type : 2;
785 uint64_t revlink_index : 11;
786 uint64_t reserved_62_63 : 2;
791 * Result for a SSO Status Load (when opcode is SL_WQP)
795 #ifdef __BIG_ENDIAN_BITFIELD
796 uint64_t reserved_58_63 : 6;
797 uint64_t index : 11; /**< The SSO entry attached to the core. */
798 uint64_t reserved_46 : 1;
799 uint64_t grp : 6; /**< The group attached to the core (updated when new tag list entered on
801 uint64_t reserved_38_39 : 2;
802 uint64_t wqp : 38; /**< The wqp attached to the core (updated when new tag list entered on SWTAG_FULL). */
805 uint64_t reserved_38_39 : 2;
807 uint64_t reserved_46 : 1;
809 uint64_t reserved_58_63 : 6;
814 * Result for a POW Status Load (when get_cur==1, get_wqp==1, and get_rev==0)
818 #ifdef __BIG_ENDIAN_BITFIELD
819 uint64_t reserved_62_63 : 2;
820 uint64_t link_index : 11; /**< Points to the next POW entry in the tag list when tail == 0 (and
821 tag_type is not NULL or NULL_NULL). */
822 uint64_t index : 11; /**< The POW entry attached to the core. */
823 uint64_t grp : 4; /**< The group attached to the core (updated when new tag list entered on SWTAG_FULL). */
824 uint64_t wqp : 36; /**< The wqp attached to the core (updated when new tag list entered on SWTAG_FULL). */
829 uint64_t link_index : 11;
830 uint64_t reserved_62_63 : 2;
835 * Result for a SSO Status Load (when opcode is SL_LINKS)
839 #ifdef __BIG_ENDIAN_BITFIELD
840 uint64_t reserved_46_63 : 18;
841 uint64_t index : 11; /**< The SSO entry attached to the core. */
842 uint64_t reserved_34 : 1;
843 uint64_t grp : 6; /**< The group attached to the core (updated when new tag list entered on
845 uint64_t head : 1; /**< Set when this SSO entry is at the head of its tag list (also set when in the
846 UNSCHEDULED or EMPTY state). */
847 uint64_t tail : 1; /**< Set when this SSO entry is at the tail of its tag list (also set when in the
848 UNSCHEDULED or EMPTY state). */
849 uint64_t reserved_24_25 : 2;
850 uint64_t revlink_index : 11; /**< Points to the prior SSO entry in the tag list when head==0 (and tag_type is not UNSCHEDULED or EMPTY). */
851 uint64_t reserved_11_12 : 2;
852 uint64_t link_index : 11; /**< Points to the next SSO entry in the tag list when tail==0 (and tag_type is not UNSCHEDULED or EMPTY). */
854 uint64_t link_index : 11;
855 uint64_t reserved_11_12 : 2;
856 uint64_t revlink_index : 11;
857 uint64_t reserved_24_25 : 2;
861 uint64_t reserved_34 : 1;
863 uint64_t reserved_46_63 : 18;
868 * Result for a POW Status Load (when get_cur==1, get_wqp==1, and get_rev==1)
872 #ifdef __BIG_ENDIAN_BITFIELD
873 uint64_t reserved_62_63 : 2;
874 uint64_t revlink_index : 11; /**< Points to the prior POW entry in the tag list when head == 0
875 (and tag_type is not NULL or NULL_NULL). This field is unpredictable
876 when the core's state is NULL or NULL_NULL. */
877 uint64_t index : 11; /**< The POW entry attached to the core. */
878 uint64_t grp : 4; /**< The group attached to the core (updated when new tag list entered on SWTAG_FULL). */
879 uint64_t wqp : 36; /**< The wqp attached to the core (updated when new tag list entered on SWTAG_FULL). */
/* Little-endian mirror of the fields above (reverse order, same widths). */
884 uint64_t revlink_index : 11;
885 uint64_t reserved_62_63 : 2;
890 * Result For POW Memory Load (get_des == 0 and get_wqp == 0)
894 #ifdef __BIG_ENDIAN_BITFIELD
895 uint64_t reserved_51_63 : 13;
896 uint64_t next_index : 11; /**< The next entry in the input, free, descheduled_head list
897 (unpredictable if entry is the tail of the list). */
898 uint64_t grp : 4; /**< The group of the POW entry. */
899 uint64_t reserved_35 : 1;
900 uint64_t tail : 1; /**< Set when this POW entry is at the tail of its tag list (also set when in the
901 NULL or NULL_NULL state). */
902 uint64_t tag_type : 2; /**< The tag type of the POW entry. */
903 uint64_t tag : 32; /**< The tag of the POW entry. */
/* Little-endian mirror of the fields above. */
906 uint64_t tag_type : 2;
908 uint64_t reserved_35 : 1;
910 uint64_t next_index : 11;
911 uint64_t reserved_51_63 : 13;
916 * Result For SSO Memory Load (opcode is ML_TAG)
920 #ifdef __BIG_ENDIAN_BITFIELD
921 uint64_t reserved_38_63 : 26;
922 uint64_t tail : 1; /**< Set when this SSO entry is at the tail of its tag list (also set when in the
923 NULL or NULL_NULL state). */
924 uint64_t reserved_34_36 : 3;
925 uint64_t tag_type : 2; /**< The tag type of the SSO entry. */
926 uint64_t tag : 32; /**< The tag of the SSO entry. */
/* Little-endian mirror of the fields above. */
929 uint64_t tag_type : 2;
930 uint64_t reserved_34_36 : 3;
932 uint64_t reserved_38_63 : 26;
934 } s_smemload0_cn68xx;
937 * Result For POW Memory Load (get_des == 0 and get_wqp == 1)
941 #ifdef __BIG_ENDIAN_BITFIELD
942 uint64_t reserved_51_63 : 13;
943 uint64_t next_index : 11; /**< The next entry in the input, free, descheduled_head list
944 (unpredictable if entry is the tail of the list). */
945 uint64_t grp : 4; /**< The group of the POW entry. */
946 uint64_t wqp : 36; /**< The WQP held in the POW entry. */
/* Little-endian mirror of the fields above. */
950 uint64_t next_index : 11;
951 uint64_t reserved_51_63 : 13;
956 * Result For SSO Memory Load (opcode is ML_WQPGRP)
960 #ifdef __BIG_ENDIAN_BITFIELD
961 uint64_t reserved_48_63 : 16;
962 uint64_t nosched : 1; /**< The nosched bit for the SSO entry. */
963 uint64_t reserved_46 : 1;
964 uint64_t grp : 6; /**< The group of the SSO entry. */
965 uint64_t reserved_38_39 : 2;
966 uint64_t wqp : 38; /**< The WQP held in the SSO entry. */
/* Little-endian mirror of the fields above. */
969 uint64_t reserved_38_39 : 2;
971 uint64_t reserved_46 : 1;
972 uint64_t nosched : 1;
973 uint64_t reserved_48_63 : 16; /* renamed from reserved_51_63: the field is 16 bits wide and
covers bits 48..63, matching the big-endian branch's name */
975 } s_smemload1_cn68xx;
978 * Result For POW Memory Load (get_des == 1)
982 #ifdef __BIG_ENDIAN_BITFIELD
983 uint64_t reserved_51_63 : 13;
984 uint64_t fwd_index : 11; /**< The next entry in the tag list connected to the descheduled head. */
985 uint64_t grp : 4; /**< The group of the POW entry. */
986 uint64_t nosched : 1; /**< The nosched bit for the POW entry. */
987 uint64_t pend_switch : 1; /**< There is a pending tag switch */
988 uint64_t pend_type : 2; /**< The next tag type for the new tag list when pend_switch is set. */
989 uint64_t pend_tag : 32; /**< The next tag for the new tag list when pend_switch is set. */
/* Little-endian mirror of the fields above. */
991 uint64_t pend_tag : 32;
992 uint64_t pend_type : 2;
993 uint64_t pend_switch : 1;
994 uint64_t nosched : 1;
996 uint64_t fwd_index : 11;
997 uint64_t reserved_51_63 : 13;
1002 * Result For SSO Memory Load (opcode is ML_PENTAG)
1006 #ifdef __BIG_ENDIAN_BITFIELD
1007 uint64_t reserved_38_63 : 26;
1008 uint64_t pend_switch : 1; /**< Set when there is a pending non-UNSCHEDULED SWTAG or
1009 SWTAG_FULL, and the SSO entry has not left the list for the original tag. */
1010 uint64_t reserved_34_36 : 3;
1011 uint64_t pend_type : 2; /**< The next tag type for the new tag list when pend_switch is set. */
1012 uint64_t pend_tag : 32; /**< The next tag for the new tag list when pend_switch is set. */
/* Little-endian mirror of the fields above. */
1014 uint64_t pend_tag : 32;
1015 uint64_t pend_type : 2;
1016 uint64_t reserved_34_36 : 3;
1017 uint64_t pend_switch : 1;
1018 uint64_t reserved_38_63 : 26;
1020 } s_smemload2_cn68xx;
1023 * Result For SSO Memory Load (opcode is ML_LINKS)
1027 #ifdef __BIG_ENDIAN_BITFIELD
1028 uint64_t reserved_24_63 : 40;
1029 uint64_t fwd_index : 11; /**< The next entry in the tag list connected to the descheduled head. */
1030 uint64_t reserved_11_12 : 2;
1031 uint64_t next_index : 11; /**< The next entry in the input, free, descheduled_head list
1032 (unpredictable if entry is the tail of the list). */
/* Little-endian mirror of the fields above. */
1034 uint64_t next_index : 11;
1035 uint64_t reserved_11_12 : 2;
1036 uint64_t fwd_index : 11;
1037 uint64_t reserved_24_63 : 40;
1039 } s_smemload3_cn68xx;
1042 * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 0)
1046 #ifdef __BIG_ENDIAN_BITFIELD
1047 uint64_t reserved_52_63 : 12;
1048 uint64_t free_val : 1; /**< - set when there is one or more POW entries on the free list. */
1049 uint64_t free_one : 1; /**< - set when there is exactly one POW entry on the free list. */
1050 uint64_t reserved_49 : 1;
1051 uint64_t free_head : 11; /**< - when free_val is set, indicates the first entry on the free list. */
1052 uint64_t reserved_37 : 1;
1053 uint64_t free_tail : 11; /**< - when free_val is set, indicates the last entry on the free list. */
1054 uint64_t loc_val : 1; /**< - set when there is one or more POW entries on the input Q list selected by qosgrp. */
1055 uint64_t loc_one : 1; /**< - set when there is exactly one POW entry on the input Q list selected by qosgrp. */
1056 uint64_t reserved_23 : 1;
1057 uint64_t loc_head : 11; /**< - when loc_val is set, indicates the first entry on the input Q list selected by qosgrp. */
1058 uint64_t reserved_11 : 1;
1059 uint64_t loc_tail : 11; /**< - when loc_val is set, indicates the last entry on the input Q list selected by qosgrp. */
/* Little-endian mirror of the fields above. */
1061 uint64_t loc_tail : 11;
1062 uint64_t reserved_11 : 1;
1063 uint64_t loc_head : 11;
1064 uint64_t reserved_23 : 1;
1065 uint64_t loc_one : 1;
1066 uint64_t loc_val : 1;
1067 uint64_t free_tail : 11;
1068 uint64_t reserved_37 : 1;
1069 uint64_t free_head : 11;
1070 uint64_t reserved_49 : 1;
1071 uint64_t free_one : 1;
1072 uint64_t free_val : 1;
1073 uint64_t reserved_52_63 : 12;
1078 * Result for SSO Index/Pointer Load(opcode == IPL_IQ/IPL_DESCHED/IPL_NOSCHED)
1082 #ifdef __BIG_ENDIAN_BITFIELD
1083 uint64_t reserved_28_63 : 36;
1084 uint64_t queue_val : 1; /**< - If set, one or more valid entries are in the queue. */
1085 uint64_t queue_one : 1; /**< - If set, exactly one valid entry is in the queue. */
1086 uint64_t reserved_24_25 : 2;
1087 uint64_t queue_head : 11; /**< - Index of entry at the head of the queue. */
1088 uint64_t reserved_11_12 : 2;
1089 uint64_t queue_tail : 11; /**< - Index of entry at the tail of the queue. */
/* Little-endian mirror of the fields above. */
1091 uint64_t queue_tail : 11;
1092 uint64_t reserved_11_12 : 2;
1093 uint64_t queue_head : 11;
1094 uint64_t reserved_24_25 : 2;
1095 uint64_t queue_one : 1;
1096 uint64_t queue_val : 1;
1097 uint64_t reserved_28_63 : 36;
1099 } sindexload0_cn68xx;
1102 * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 1)
1106 #ifdef __BIG_ENDIAN_BITFIELD
1107 uint64_t reserved_52_63 : 12;
1108 uint64_t nosched_val : 1; /**< - set when there is one or more POW entries on the nosched list. */
1109 uint64_t nosched_one : 1; /**< - set when there is exactly one POW entry on the nosched list. */
1110 uint64_t reserved_49 : 1;
1111 uint64_t nosched_head : 11; /**< - when nosched_val is set, indicates the first entry on the nosched list. */
1112 uint64_t reserved_37 : 1;
1113 uint64_t nosched_tail : 11; /**< - when nosched_val is set, indicates the last entry on the nosched list. */
1114 uint64_t des_val : 1; /**< - set when there is one or more descheduled heads on the descheduled list selected by qosgrp. */
1115 uint64_t des_one : 1; /**< - set when there is exactly one descheduled head on the descheduled list selected by qosgrp. */
1116 uint64_t reserved_23 : 1;
1117 uint64_t des_head : 11; /**< - when des_val is set, indicates the first descheduled head on the descheduled list selected by qosgrp. */
1118 uint64_t reserved_11 : 1;
1119 uint64_t des_tail : 11; /**< - when des_val is set, indicates the last descheduled head on the descheduled list selected by qosgrp. */
/* Little-endian mirror of the fields above. */
1121 uint64_t des_tail : 11;
1122 uint64_t reserved_11 : 1;
1123 uint64_t des_head : 11;
1124 uint64_t reserved_23 : 1;
1125 uint64_t des_one : 1;
1126 uint64_t des_val : 1;
1127 uint64_t nosched_tail : 11;
1128 uint64_t reserved_37 : 1;
1129 uint64_t nosched_head : 11;
1130 uint64_t reserved_49 : 1;
1131 uint64_t nosched_one : 1;
1132 uint64_t nosched_val : 1;
1133 uint64_t reserved_52_63 : 12;
1138 * Result for SSO Index/Pointer Load(opcode == IPL_FREE0/IPL_FREE1/IPL_FREE2)
1142 #ifdef __BIG_ENDIAN_BITFIELD
1143 uint64_t reserved_60_63 : 4;
1144 uint64_t qnum_head : 2; /**< - Subqueue with current head */
1145 uint64_t qnum_tail : 2; /**< - Subqueue with current tail */
1146 uint64_t reserved_28_55 : 28;
1147 uint64_t queue_val : 1; /**< - If set, one or more valid entries are in the queue. */
1148 uint64_t queue_one : 1; /**< - If set, exactly one valid entry is in the queue. */
1149 uint64_t reserved_24_25 : 2;
1150 uint64_t queue_head : 11; /**< - Index of entry at the head of the queue. */
1151 uint64_t reserved_11_12 : 2;
1152 uint64_t queue_tail : 11; /**< - Index of entry at the tail of the queue. */
/* Little-endian mirror of the fields above. */
1154 uint64_t queue_tail : 11;
1155 uint64_t reserved_11_12 : 2;
1156 uint64_t queue_head : 11;
1157 uint64_t reserved_24_25 : 2;
1158 uint64_t queue_one : 1;
1159 uint64_t queue_val : 1;
1160 uint64_t reserved_28_55 : 28;
1161 uint64_t qnum_tail : 2;
1162 uint64_t qnum_head : 2;
1163 uint64_t reserved_60_63 : 4;
1165 } sindexload1_cn68xx;
1168 * Result For POW Index/Pointer Load (get_rmt == 1/get_des_get_tail == 0)
1172 #ifdef __BIG_ENDIAN_BITFIELD
1173 uint64_t reserved_39_63 : 25;
1174 uint64_t rmt_is_head : 1; /**< Set when this DRAM list is the current head (i.e. is the next to
1175 be reloaded when the POW hardware reloads a POW entry from DRAM). The
1176 POW hardware alternates between the two DRAM lists associated with a QOS
1177 level when it reloads work from DRAM into the POW unit. */
1178 uint64_t rmt_val : 1; /**< Set when the DRAM portion of the input Q list selected by qosgrp
1179 contains one or more pieces of work. */
1180 uint64_t rmt_one : 1; /**< Set when the DRAM portion of the input Q list selected by qosgrp
1181 contains exactly one piece of work. */
1182 uint64_t rmt_head : 36; /**< When rmt_val is set, indicates the first piece of work on the
1183 DRAM input Q list selected by qosgrp. */
/* Little-endian mirror of the fields above. */
1185 uint64_t rmt_head : 36;
1186 uint64_t rmt_one : 1;
1187 uint64_t rmt_val : 1;
1188 uint64_t rmt_is_head : 1;
1189 uint64_t reserved_39_63 : 25;
1194 * Result For POW Index/Pointer Load (get_rmt == 1/get_des_get_tail == 1)
1198 #ifdef __BIG_ENDIAN_BITFIELD
1199 uint64_t reserved_39_63 : 25;
1200 uint64_t rmt_is_head : 1; /**< - set when this DRAM list is the current head (i.e. is the next to
1201 be reloaded when the POW hardware reloads a POW entry from DRAM). The
1202 POW hardware alternates between the two DRAM lists associated with a QOS
1203 level when it reloads work from DRAM into the POW unit. */
1204 uint64_t rmt_val : 1; /**< - set when the DRAM portion of the input Q list selected by qosgrp
1205 contains one or more pieces of work. */
1206 uint64_t rmt_one : 1; /**< - set when the DRAM portion of the input Q list selected by qosgrp
1207 contains exactly one piece of work. */
1208 uint64_t rmt_tail : 36; /**< - when rmt_val is set, indicates the last piece of work on the DRAM
1209 input Q list selected by qosgrp. */
/* Little-endian mirror of the fields above. */
1211 uint64_t rmt_tail : 36;
1212 uint64_t rmt_one : 1;
1213 uint64_t rmt_val : 1;
1214 uint64_t rmt_is_head : 1;
1215 uint64_t reserved_39_63 : 25;
1220 * Response to NULL_RD request loads
1224 #ifdef __BIG_ENDIAN_BITFIELD
1225 uint64_t unused : 62;
1226 uint64_t state : 2; /**< of type cvmx_pow_tag_type_t. state is one of the following:
1227 - CVMX_POW_TAG_TYPE_ORDERED
1228 - CVMX_POW_TAG_TYPE_ATOMIC
1229 - CVMX_POW_TAG_TYPE_NULL
1230 - CVMX_POW_TAG_TYPE_NULL_NULL */
/* Little-endian mirror of the fields above. */
1233 uint64_t unused : 62;
1237 } cvmx_pow_tag_load_resp_t;
/* Result of an SSO status load returning the current tag (CN68XX). */
1242 #ifdef __BIG_ENDIAN_BITFIELD
1243 uint64_t reserved_57_63 : 7;
1244 uint64_t index : 11;
1245 uint64_t reserved_45 : 1;
1249 uint64_t reserved_34_36 : 3;
1250 uint64_t tag_type : 2;
/* Little-endian mirror of the fields above. */
1254 uint64_t tag_type : 2;
1255 uint64_t reserved_34_36 : 3;
1259 uint64_t reserved_45 : 1;
1260 uint64_t index : 11;
1261 uint64_t reserved_57_63 : 7;
1264 } cvmx_pow_sl_tag_resp_t;
1267 * This structure describes the address used for stores to the POW.
1268 * The store address is meaningful on stores to the POW. The hardware assumes that an aligned
1269 * 64-bit store was used for all these stores.
1270 * Note the assumption that the work queue entry is aligned on an 8-byte
1271 * boundary (since the low-order 3 address bits must be zero).
1272 * Note that not all fields are used by all operations.
1274 * NOTE: The following is the behavior of the pending switch bit at the PP
1275 * for POW stores (i.e. when did<7:3> == 0xc)
1276 * - did<2:0> == 0 => pending switch bit is set
1277 * - did<2:0> == 1 => no effect on the pending switch bit
1278 * - did<2:0> == 3 => pending switch bit is cleared
1279 * - did<2:0> == 7 => no effect on the pending switch bit
1280 * - did<2:0> == others => must not be used
1281 * - No other loads/stores have an effect on the pending switch bit
1282 * - The switch bus from POW can clear the pending switch bit
1284 * NOTE: did<2:0> == 2 is used by the HW for a special single-cycle ADDWQ command
1285 * that only contains the pointer). SW must never use did<2:0> == 2.
1290 * Unsigned 64 bit integer representation of store address
1296 #ifdef __BIG_ENDIAN_BITFIELD
1297 uint64_t mem_reg : 2; /**< Memory region. Should be CVMX_IO_SEG in most cases */
1298 uint64_t reserved_49_61 : 13; /**< Must be zero */
1299 uint64_t is_io : 1; /**< Must be one */
1300 uint64_t did : 8; /**< Device ID of POW. Note that different sub-dids are used. */
1301 uint64_t reserved_36_39 : 4; /**< Must be zero */
1302 uint64_t addr : 36; /**< Address field. addr<2:0> must be zero */
/* Little-endian mirror of the fields above. */
1305 uint64_t reserved_36_39 : 4;
1308 uint64_t reserved_49_61 : 13;
1309 uint64_t mem_reg : 2;
1312 } cvmx_pow_tag_store_addr_t;
1315 * decode of the store data when an IOBDMA SENDSINGLE is sent to POW
1323 #ifdef __BIG_ENDIAN_BITFIELD
1324 uint64_t scraddr : 8; /**< the (64-bit word) location in scratchpad to write to (if len != 0) */
1325 uint64_t len : 8; /**< the number of words in the response (0 => no response) */
1326 uint64_t did : 8; /**< the ID of the device on the non-coherent bus */
1327 uint64_t unused :36;
1328 uint64_t wait : 1; /**< if set, don't return load response until work is available */
1329 uint64_t unused2 : 3;
/* Little-endian mirror of the fields above. */
1331 uint64_t unused2 : 3;
1333 uint64_t unused :36;
1336 uint64_t scraddr : 8;
1340 } cvmx_pow_iobdma_store_t;
1343 /* CSR typedefs have been moved to cvmx-pow-defs.h */
1346 * Get the POW tag for this core. This returns the current
1347 * tag type, tag, group, and POW entry index associated with
1348 * this core. Index is only valid if the tag type isn't NULL_NULL.
1349 * If a tag switch is pending this routine returns the tag before
1350 * the tag switch, not after.
1352 * @return Current tag
1354 static inline cvmx_pow_tag_info_t cvmx_pow_get_current_tag(void)
1356 cvmx_pow_load_addr_t load_addr;
1357 cvmx_pow_tag_info_t result;
/* CN68XX-family SSO uses a different status-load address format and response layout. */
1359 if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
1360 cvmx_pow_sl_tag_resp_t load_resp;
1362 load_addr.sstatus_cn68xx.mem_region = CVMX_IO_SEG;
1363 load_addr.sstatus_cn68xx.is_io = 1;
1364 load_addr.sstatus_cn68xx.did = CVMX_OCT_DID_TAG_TAG5;
1365 load_addr.sstatus_cn68xx.coreid = cvmx_get_core_num();
/* opcode 3 selects the "current tag" status-load variant — TODO confirm against the SSO HRM opcode table */
1366 load_addr.sstatus_cn68xx.opcode = 3;
1367 load_resp.u64 = cvmx_read_csr(load_addr.u64);
1368 result.grp = load_resp.s.grp;
1369 result.index = load_resp.s.index;
1370 result.tag_type = load_resp.s.tag_type;
1371 result.tag = load_resp.s.tag;
/* Pre-CN68XX POW path: status load with get_cur=1 returns the current tag state. */
1373 cvmx_pow_tag_load_resp_t load_resp;
1375 load_addr.sstatus.mem_region = CVMX_IO_SEG;
1376 load_addr.sstatus.is_io = 1;
1377 load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
1378 load_addr.sstatus.coreid = cvmx_get_core_num();
1379 load_addr.sstatus.get_cur = 1;
1380 load_resp.u64 = cvmx_read_csr(load_addr.u64);
1381 result.grp = load_resp.s_sstatus2.grp;
1382 result.index = load_resp.s_sstatus2.index;
1383 result.tag_type = load_resp.s_sstatus2.tag_type;
1384 result.tag = load_resp.s_sstatus2.tag;
1390 * Get the POW WQE for this core. This returns the work queue
1391 * entry currently associated with this core.
1393 * @return WQE pointer, or NULL if no WQE is attached (CN68XX path)
1395 static inline cvmx_wqe_t *cvmx_pow_get_current_wqp(void)
1397 cvmx_pow_load_addr_t load_addr;
1398 cvmx_pow_tag_load_resp_t load_resp;
/* CN68XX-family SSO path: the response carries the WQP in s_sstatus3_cn68xx. */
1400 if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
1402 load_addr.sstatus_cn68xx.mem_region = CVMX_IO_SEG;
1403 load_addr.sstatus_cn68xx.is_io = 1;
1404 load_addr.sstatus_cn68xx.did = CVMX_OCT_DID_TAG_TAG5;
1405 load_addr.sstatus_cn68xx.coreid = cvmx_get_core_num();
/* opcode 3 selects the "current tag/WQP" status-load variant — TODO confirm against the SSO HRM opcode table */
1406 load_addr.sstatus_cn68xx.opcode = 3;
1407 load_resp.u64 = cvmx_read_csr(load_addr.u64);
/* A zero WQP means no work is attached to this core. */
1408 if (load_resp.s_sstatus3_cn68xx.wqp)
1409 return (cvmx_wqe_t*)cvmx_phys_to_ptr(load_resp.s_sstatus3_cn68xx.wqp);
1411 return (cvmx_wqe_t*)0;
/* Pre-CN68XX POW path: get_cur=1, get_wqp=1 returns the attached WQP. */
1414 load_addr.sstatus.mem_region = CVMX_IO_SEG;
1415 load_addr.sstatus.is_io = 1;
1416 load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
1417 load_addr.sstatus.coreid = cvmx_get_core_num();
1418 load_addr.sstatus.get_cur = 1;
1419 load_addr.sstatus.get_wqp = 1;
1420 load_resp.u64 = cvmx_read_csr(load_addr.u64);
1421 return (cvmx_wqe_t*)cvmx_phys_to_ptr(load_resp.s_sstatus4.wqp);
1428 * Print a warning if a tag switch is pending for this core
1430 * @param function Function name checking for a pending tag switch
1432 static inline void __cvmx_pow_warn_if_pending_switch(const char *function)
1434 uint64_t switch_complete;
/* CHORD reads the tag-switch-complete bit; zero means a switch is still in flight. */
1435 CVMX_MF_CHORD(switch_complete);
1436 cvmx_warn_if(!switch_complete, "%s called with tag switch in progress\n", function);
1441 * Waits for a tag switch to complete by polling the completion bit.
1442 * Note that switches to NULL complete immediately and do not need
1445 static inline void cvmx_pow_tag_sw_wait(void)
1447 const uint64_t MAX_CYCLES = 1ull<<31;
1448 uint64_t switch_complete;
1449 uint64_t start_cycle = cvmx_get_cycle();
/* Poll the CHORD (switch-complete) bit until the hardware reports completion. */
1452 CVMX_MF_CHORD(switch_complete);
1453 if (cvmx_unlikely(switch_complete))
1455 if (cvmx_unlikely(cvmx_get_cycle() > start_cycle + MAX_CYCLES))
1457 cvmx_dprintf("WARNING: Tag switch is taking a long time, possible deadlock\n");
/* start_cycle + MAX_CYCLES now wraps to UINT64_MAX, so the warning fires at most once. */
1458 start_cycle = -MAX_CYCLES-1;
1465 * Synchronous work request. Requests work from the POW.
1466 * This function does NOT wait for previous tag switches to complete,
1467 * so the caller must ensure that there is not a pending tag switch.
1469 * @param wait When set, call stalls until work becomes available, or times out.
1470 * If not set, returns immediately.
1472 * @return Returns the WQE pointer from POW. Returns NULL if no work was available.
1474 static inline cvmx_wqe_t * cvmx_pow_work_request_sync_nocheck(cvmx_pow_wait_t wait)
1476 cvmx_pow_load_addr_t ptr;
1477 cvmx_pow_tag_load_resp_t result;
1479 if (CVMX_ENABLE_POW_CHECKS)
1480 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
/* Build the GET_WORK load address; the load itself performs the work request. */
1483 ptr.swork.mem_region = CVMX_IO_SEG;
1484 ptr.swork.is_io = 1;
1485 ptr.swork.did = CVMX_OCT_DID_TAG_SWTAG;
1486 ptr.swork.wait = wait;
1488 result.u64 = cvmx_read_csr(ptr.u64);
/* no_work set means the response is invalid (no work, or the wait timed out). */
1490 if (result.s_work.no_work)
1493 return (cvmx_wqe_t*)cvmx_phys_to_ptr(result.s_work.addr);
1498 * Synchronous work request. Requests work from the POW.
1499 * This function waits for any previous tag switch to complete before
1500 * requesting the new work.
1502 * @param wait When set, call stalls until work becomes available, or times out.
1503 * If not set, returns immediately.
1505 * @return Returns the WQE pointer from POW. Returns NULL if no work was available.
1507 static inline cvmx_wqe_t * cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
1509 if (CVMX_ENABLE_POW_CHECKS)
1510 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1512 /* Must not have a switch pending when requesting work */
1513 cvmx_pow_tag_sw_wait();
1514 return(cvmx_pow_work_request_sync_nocheck(wait));
1520 * Synchronous null_rd request. Requests a switch out of NULL_NULL POW state.
1521 * This function waits for any previous tag switch to complete before
1522 * requesting the null_rd.
1524 * @return Returns the POW state of type cvmx_pow_tag_type_t.
1526 static inline cvmx_pow_tag_type_t cvmx_pow_work_request_null_rd(void)
1528 cvmx_pow_load_addr_t ptr;
1529 cvmx_pow_tag_load_resp_t result;
1531 if (CVMX_ENABLE_POW_CHECKS)
1532 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1534 /* Must not have a switch pending when requesting work */
1535 cvmx_pow_tag_sw_wait();
/* Build the NULL_RD load address; the load performs the request. */
1538 ptr.snull_rd.mem_region = CVMX_IO_SEG;
1539 ptr.snull_rd.is_io = 1;
1540 ptr.snull_rd.did = CVMX_OCT_DID_TAG_NULL_RD;
1542 result.u64 = cvmx_read_csr(ptr.u64);
1544 return (cvmx_pow_tag_type_t)result.s_null_rd.state;
1549 * Asynchronous work request. Work is requested from the POW unit, and should later
1550 * be checked with function cvmx_pow_work_response_async.
1551 * This function does NOT wait for previous tag switches to complete,
1552 * so the caller must ensure that there is not a pending tag switch.
1554 * @param scr_addr Scratch memory address that response will be returned to,
1555 * which is either a valid WQE, or a response with the invalid bit set.
1556 * Byte address, must be 8 byte aligned.
1557 * @param wait 1 to cause response to wait for work to become available (or timeout)
1558 * 0 to cause response to return immediately
1560 static inline void cvmx_pow_work_request_async_nocheck(int scr_addr, cvmx_pow_wait_t wait)
1562 cvmx_pow_iobdma_store_t data;
1564 if (CVMX_ENABLE_POW_CHECKS)
1565 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1567 /* scr_addr must be 8 byte aligned */
/* scraddr is in 64-bit-word units, hence the >> 3 of the byte address. */
1569 data.s.scraddr = scr_addr >> 3;
1571 data.s.did = CVMX_OCT_DID_TAG_SWTAG;
/* IOBDMA SENDSINGLE issues the work request; the response lands in scratchpad. */
1573 cvmx_send_single(data.u64);
1576 * Asynchronous work request. Work is requested from the POW unit, and should later
1577 * be checked with function cvmx_pow_work_response_async.
1578 * This function waits for any previous tag switch to complete before
1579 * requesting the new work.
1581 * @param scr_addr Scratch memory address that response will be returned to,
1582 * which is either a valid WQE, or a response with the invalid bit set.
1583 * Byte address, must be 8 byte aligned.
1584 * @param wait 1 to cause response to wait for work to become available (or timeout)
1585 * 0 to cause response to return immediately
1587 static inline void cvmx_pow_work_request_async(int scr_addr, cvmx_pow_wait_t wait)
1589 if (CVMX_ENABLE_POW_CHECKS)
1590 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1592 /* Must not have a switch pending when requesting work */
1593 cvmx_pow_tag_sw_wait();
1594 cvmx_pow_work_request_async_nocheck(scr_addr, wait);
1599 * Gets result of asynchronous work request. Performs a IOBDMA sync
1600 * to wait for the response.
1602 * @param scr_addr Scratch memory address to get result from
1603 * Byte address, must be 8 byte aligned.
1604 * @return Returns the WQE from the scratch register, or NULL if no work was available.
1606 static inline cvmx_wqe_t * cvmx_pow_work_response_async(int scr_addr)
1608 cvmx_pow_tag_load_resp_t result;
1611 result.u64 = cvmx_scratch_read64(scr_addr);
/* no_work set means the earlier async request found no work (or timed out). */
1613 if (result.s_work.no_work)
1616 return (cvmx_wqe_t*)cvmx_phys_to_ptr(result.s_work.addr);
1621 * Checks if a work queue entry pointer returned by a work
1622 * request is valid. It may be invalid due to no work
1623 * being available or due to a timeout.
1625 * @param wqe_ptr pointer to a work queue entry returned by the POW
1627 * @return 0 if pointer is valid
1628 * 1 if invalid (no work was returned)
1630 static inline uint64_t cvmx_pow_work_invalid(cvmx_wqe_t *wqe_ptr)
1632 return (wqe_ptr == NULL);
1638 * Starts a tag switch to the provided tag value and tag type. Completion for
1639 * the tag switch must be checked for separately.
1640 * This function does NOT update the
1641 * work queue entry in dram to match tag value and type, so the application must
1642 * keep track of these if they are important to the application.
1643 * This tag switch command must not be used for switches to NULL, as the tag
1644 * switch pending bit will be set by the switch request, but never cleared by the
1647 * NOTE: This should not be used when switching from a NULL tag. Use
1648 * cvmx_pow_tag_sw_full() instead.
1650 * This function does no checks, so the caller must ensure that any previous tag
1651 * switch has completed.
1653 * @param tag new tag value
1654 * @param tag_type new tag type (ordered or atomic)
1656 static inline void cvmx_pow_tag_sw_nocheck(uint32_t tag, cvmx_pow_tag_type_t tag_type)
1659 cvmx_pow_tag_req_t tag_req;
/* Debug-build sanity checks: warn on misuse rather than fail. */
1661 if (CVMX_ENABLE_POW_CHECKS)
1663 cvmx_pow_tag_info_t current_tag;
1664 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1665 current_tag = cvmx_pow_get_current_tag();
1666 cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__);
1667 cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called with NULL tag\n", __FUNCTION__);
1668 cvmx_warn_if((current_tag.tag_type == tag_type) && (current_tag.tag == tag), "%s called to perform a tag switch to the same tag\n", __FUNCTION__);
1669 cvmx_warn_if(tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\n", __FUNCTION__);
1672 /* Note that WQE in DRAM is not updated here, as the POW does not read from DRAM
1673 ** once the WQE is in flight. See hardware manual for complete details.
1674 ** It is the application's responsibility to keep track of the current tag
1675 ** value if that is important.
/* Build the SWTAG command; CN68XX SSO and earlier POW use different request layouts. */
1679 if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
1680 tag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_SWTAG;
1681 tag_req.s_cn68xx_other.tag = tag;
1682 tag_req.s_cn68xx_other.type = tag_type;
1684 tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_SWTAG;
1685 tag_req.s_cn38xx.tag = tag;
1686 tag_req.s_cn38xx.type = tag_type;
1690 ptr.sio.mem_region = CVMX_IO_SEG;
1692 ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG;
1694 /* once this store arrives at POW, it will attempt the switch
1695 software must wait for the switch to complete separately */
1696 cvmx_write_io(ptr.u64, tag_req.u64);
1701 * Starts a tag switch to the provided tag value and tag type. Completion for
1702 * the tag switch must be checked for separately.
1703 * This function does NOT update the
1704 * work queue entry in dram to match tag value and type, so the application must
1705 * keep track of these if they are important to the application.
1706 * This tag switch command must not be used for switches to NULL, as the tag
1707 * switch pending bit will be set by the switch request, but never cleared by the
1710 * NOTE: This should not be used when switching from a NULL tag. Use
1711 * cvmx_pow_tag_sw_full() instead.
1713 * This function waits for any previous tag switch to complete, and also
1714 * displays an error on tag switches to NULL.
1716 * @param tag new tag value
1717 * @param tag_type new tag type (ordered or atomic)
1719 static inline void cvmx_pow_tag_sw(uint32_t tag, cvmx_pow_tag_type_t tag_type)
1721 if (CVMX_ENABLE_POW_CHECKS)
1722 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1724 /* Note that WQE in DRAM is not updated here, as the POW does not read from DRAM
1725 ** once the WQE is in flight. See hardware manual for complete details.
1726 ** It is the application's responsibility to keep track of the current tag
1727 ** value if that is important.
1730 /* Ensure that there is not a pending tag switch, as a tag switch cannot be started
1731 ** if a previous switch is still pending. */
1732 cvmx_pow_tag_sw_wait();
1733 cvmx_pow_tag_sw_nocheck(tag, tag_type);
1738 * Starts a tag switch to the provided tag value and tag type. Completion for
1739 * the tag switch must be checked for separately.
1740 * This function does NOT update the
1741 * work queue entry in dram to match tag value and type, so the application must
1742 * keep track of these if they are important to the application.
1743 * This tag switch command must not be used for switches to NULL, as the tag
1744 * switch pending bit will be set by the switch request, but never cleared by the
1747 * This function must be used for tag switches from NULL.
1749 * This function does no checks, so the caller must ensure that any previous tag
1750 * switch has completed.
1752 * @param wqp pointer to work queue entry to submit. This entry is updated to match the other parameters
1753 * @param tag tag value to be assigned to work queue entry
1754 * @param tag_type type of tag
1755 * @param group group value for the work queue entry.
1757 static inline void cvmx_pow_tag_sw_full_nocheck(cvmx_wqe_t *wqp, uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t group)
1760 cvmx_pow_tag_req_t tag_req;
/* Debug-build sanity checks: warn on misuse rather than fail. */
1762 if (CVMX_ENABLE_POW_CHECKS)
1764 cvmx_pow_tag_info_t current_tag;
1765 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1766 current_tag = cvmx_pow_get_current_tag();
1767 cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__);
1768 cvmx_warn_if((current_tag.tag_type == tag_type) && (current_tag.tag == tag), "%s called to perform a tag switch to the same tag\n", __FUNCTION__);
1769 cvmx_warn_if(tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\n", __FUNCTION__);
/* phys addr 0x80 appears to be a sentinel "don't check the WQP" value — TODO confirm against SDK docs */
1770 if ((wqp != cvmx_phys_to_ptr(0x80)) && cvmx_pow_get_current_wqp())
1771 cvmx_warn_if(wqp != cvmx_pow_get_current_wqp(), "%s passed WQE(%p) doesn't match the address in the POW(%p)\n", __FUNCTION__, wqp, cvmx_pow_get_current_wqp());
1774 /* Note that WQE in DRAM is not updated here, as the POW does not read from DRAM
1775 ** once the WQE is in flight. See hardware manual for complete details.
1776 ** It is the application's responsibility to keep track of the current tag
1777 ** value if that is important.
/* Build the SWTAG_FULL command; CN68XX SSO and earlier POW use different request layouts. */
1781 if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
1782 tag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_SWTAG_FULL;
1783 tag_req.s_cn68xx_other.tag = tag;
1784 tag_req.s_cn68xx_other.type = tag_type;
1785 tag_req.s_cn68xx_other.grp = group;
1787 tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_SWTAG_FULL;
1788 tag_req.s_cn38xx.tag = tag;
1789 tag_req.s_cn38xx.type = tag_type;
1790 tag_req.s_cn38xx.grp = group;
1794 ptr.sio.mem_region = CVMX_IO_SEG;
1796 ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG;
/* The WQE physical address rides in the store address for SWTAG_FULL. */
1797 ptr.sio.offset = CAST64(wqp);
1799 /* once this store arrives at POW, it will attempt the switch
1800 software must wait for the switch to complete separately */
1801 cvmx_write_io(ptr.u64, tag_req.u64);
1806 * Starts a tag switch to the provided tag value and tag type. Completion for
1807 * the tag switch must be checked for separately.
1808 * This function does NOT update the
1809 * work queue entry in dram to match tag value and type, so the application must
1810 * keep track of these if they are important to the application.
1811 * This tag switch command must not be used for switches to NULL, as the tag
1812 * switch pending bit will be set by the switch request, but never cleared by the
1815 * This function must be used for tag switches from NULL.
1817 * This function waits for any pending tag switches to complete
1818 * before requesting the tag switch.
1820 * @param wqp pointer to work queue entry to submit. This entry is updated to match the other parameters
1821 * @param tag tag value to be assigned to work queue entry
1822 * @param tag_type type of tag
1823 * @param group group value for the work queue entry.
1825 static inline void cvmx_pow_tag_sw_full(cvmx_wqe_t *wqp, uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t group)
1827 if (CVMX_ENABLE_POW_CHECKS)
1828 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1830 /* Ensure that there is not a pending tag switch, as a tag switch cannot be started
1831 ** if a previous switch is still pending. */
1832 cvmx_pow_tag_sw_wait();
1833 cvmx_pow_tag_sw_full_nocheck(wqp, tag, tag_type, group);
1838 * Switch to a NULL tag, which ends any ordering or
1839 * synchronization provided by the POW for the current
1840 * work queue entry. This operation completes immediately,
1841 * so completion should not be waited for.
1842 * This function does NOT wait for previous tag switches to complete,
1843 * so the caller must ensure that any previous tag switches have completed.
1845 static inline void cvmx_pow_tag_sw_null_nocheck(void)
1848 cvmx_pow_tag_req_t tag_req;
1850 if (CVMX_ENABLE_POW_CHECKS)
1852 cvmx_pow_tag_info_t current_tag;
1853 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1854 current_tag = cvmx_pow_get_current_tag();
1855 cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__);
1856 cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called when we already have a NULL tag\n", __FUNCTION__);
1860 if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
1861 tag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_SWTAG;
1862 tag_req.s_cn68xx_other.type = CVMX_POW_TAG_TYPE_NULL;
1864 tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_SWTAG;
1865 tag_req.s_cn38xx.type = CVMX_POW_TAG_TYPE_NULL;
1870 ptr.sio.mem_region = CVMX_IO_SEG;
1872 ptr.sio.did = CVMX_OCT_DID_TAG_TAG1;
1875 cvmx_write_io(ptr.u64, tag_req.u64);
1877 /* switch to NULL completes immediately */
1881 * Switch to a NULL tag, which ends any ordering or
1882 * synchronization provided by the POW for the current
1883 * work queue entry. This operation completes immediatly,
1884 * so completion should not be waited for.
1885 * This function waits for any pending tag switches to complete
1886 * before requesting the switch to NULL.
1888 static inline void cvmx_pow_tag_sw_null(void)
1890 if (CVMX_ENABLE_POW_CHECKS)
1891 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1893 /* Ensure that there is not a pending tag switch, as a tag switch cannot be started
1894 ** if a previous switch is still pending. */
1895 cvmx_pow_tag_sw_wait();
1896 cvmx_pow_tag_sw_null_nocheck();
1898 /* switch to NULL completes immediately */
1904 * Submits work to an input queue. This function updates the work queue entry in DRAM to match
1905 * the arguments given.
1906 * Note that the tag provided is for the work queue entry submitted, and is unrelated to the tag that
1907 * the core currently holds.
1909 * @param wqp pointer to work queue entry to submit. This entry is updated to match the other parameters
1910 * @param tag tag value to be assigned to work queue entry
1911 * @param tag_type type of tag
1912 * @param qos Input queue to add to.
1913 * @param grp group value for the work queue entry.
1915 static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t qos, uint64_t grp)
1918 cvmx_pow_tag_req_t tag_req;
1922 wqp->word1.s.tag = tag;
1923 wqp->word1.s.tag_type = tag_type;
1925 if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
1926 /* Reset all reserved bits */
1927 wqp->word1.cn68xx.zero_0 = 0;
1928 wqp->word1.cn68xx.zero_1 = 0;
1929 wqp->word1.cn68xx.zero_2 = 0;
1930 wqp->word1.cn68xx.qos = qos;
1931 wqp->word1.cn68xx.grp = grp;
1933 tag_req.s_cn68xx_add.op = CVMX_POW_TAG_OP_ADDWQ;
1934 tag_req.s_cn68xx_add.type = tag_type;
1935 tag_req.s_cn68xx_add.tag = tag;
1936 tag_req.s_cn68xx_add.qos = qos;
1937 tag_req.s_cn68xx_add.grp = grp;
1939 /* Reset all reserved bits */
1940 wqp->word1.cn38xx.zero_2 = 0;
1941 wqp->word1.cn38xx.qos = qos;
1942 wqp->word1.cn38xx.grp = grp;
1944 tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_ADDWQ;
1945 tag_req.s_cn38xx.type = tag_type;
1946 tag_req.s_cn38xx.tag = tag;
1947 tag_req.s_cn38xx.qos = qos;
1948 tag_req.s_cn38xx.grp = grp;
1952 ptr.sio.mem_region = CVMX_IO_SEG;
1954 ptr.sio.did = CVMX_OCT_DID_TAG_TAG1;
1955 ptr.sio.offset = cvmx_ptr_to_phys(wqp);
1957 /* SYNC write to memory before the work submit. This is necessary
1958 ** as POW may read values from DRAM at this time */
1960 cvmx_write_io(ptr.u64, tag_req.u64);
1966 * This function sets the group mask for a core. The group mask
1967 * indicates which groups each core will accept work from. There are
1970 * @param core_num core to apply mask to
1971 * @param mask Group mask. There are 16 groups, so only bits 0-15 are valid,
1972 * representing groups 0-15.
1973 * Each 1 bit in the mask enables the core to accept work from
1974 * the corresponding group.
1976 static inline void cvmx_pow_set_group_mask(uint64_t core_num, uint64_t mask)
1979 if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
1981 cvmx_sso_ppx_grp_msk_t grp_msk;
1982 grp_msk.s.grp_msk = mask;
1983 cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(core_num), grp_msk.u64);
1987 cvmx_pow_pp_grp_mskx_t grp_msk;
1988 grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num));
1989 grp_msk.s.grp_msk = mask;
1990 cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);
1995 * This function sets POW static priorities for a core. Each input queue has
1996 * an associated priority value.
1998 * @param core_num core to apply priorities to
1999 * @param priority Vector of 8 priorities, one per POW Input Queue (0-7).
2000 * Highest priority is 0 and lowest is 7. A priority value
2001 * of 0xF instructs POW to skip the Input Queue when
2002 * scheduling to this specific core.
2003 * NOTE: priorities should not have gaps in values, meaning
2004 * {0,1,1,1,1,1,1,1} is a valid configuration while
2005 * {0,2,2,2,2,2,2,2} is not.
2007 static inline void cvmx_pow_set_priority(uint64_t core_num, const uint8_t priority[])
2009 if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
2012 /* Detect gaps between priorities and flag error */
2015 uint32_t prio_mask = 0;
2018 if (priority[i] != 0xF)
2019 prio_mask |= 1<<priority[i];
2021 if ( prio_mask ^ ((1<<cvmx_pop(prio_mask)) - 1))
2023 cvmx_dprintf("ERROR: POW static priorities should be contiguous (0x%llx)\n", (unsigned long long)prio_mask);
2028 /* POW priorities are supported on CN5xxx and later */
2029 if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
2031 cvmx_sso_ppx_qos_pri_t qos_pri;
2033 qos_pri.u64 = cvmx_read_csr(CVMX_SSO_PPX_QOS_PRI(core_num));
2034 qos_pri.s.qos0_pri = priority[0];
2035 qos_pri.s.qos1_pri = priority[1];
2036 qos_pri.s.qos2_pri = priority[2];
2037 qos_pri.s.qos3_pri = priority[3];
2038 qos_pri.s.qos4_pri = priority[4];
2039 qos_pri.s.qos5_pri = priority[5];
2040 qos_pri.s.qos6_pri = priority[6];
2041 qos_pri.s.qos7_pri = priority[7];
2042 cvmx_write_csr(CVMX_SSO_PPX_QOS_PRI(core_num), qos_pri.u64);
2046 cvmx_pow_pp_grp_mskx_t grp_msk;
2048 grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num));
2049 grp_msk.s.qos0_pri = priority[0];
2050 grp_msk.s.qos1_pri = priority[1];
2051 grp_msk.s.qos2_pri = priority[2];
2052 grp_msk.s.qos3_pri = priority[3];
2053 grp_msk.s.qos4_pri = priority[4];
2054 grp_msk.s.qos5_pri = priority[5];
2055 grp_msk.s.qos6_pri = priority[6];
2056 grp_msk.s.qos7_pri = priority[7];
2058 cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);
2063 * Performs a tag switch and then an immediate deschedule. This completes
2064 * immediately, so completion must not be waited for. This function does NOT
2065 * update the wqe in DRAM to match arguments.
2067 * This function does NOT wait for any prior tag switches to complete, so the
2068 * calling code must do this.
2070 * Note the following CAVEAT of the Octeon HW behavior when
2071 * re-scheduling DE-SCHEDULEd items whose (next) state is
2073 * - If there are no switches pending at the time that the
2074 * HW executes the de-schedule, the HW will only re-schedule
2075 * the head of the FIFO associated with the given tag. This
2076 * means that in many respects, the HW treats this ORDERED
2077 * tag as an ATOMIC tag. Note that in the SWTAG_DESCH
2078 * case (to an ORDERED tag), the HW will do the switch
2079 * before the deschedule whenever it is possible to do
2080 * the switch immediately, so it may often look like
2082 * - If there is a pending switch to ORDERED at the time
2083 * the HW executes the de-schedule, the HW will perform
2084 * the switch at the time it re-schedules, and will be
2085 * able to reschedule any/all of the entries with the
2087 * Due to this behavior, the RECOMMENDATION to software is
2088 * that they have a (next) state of ATOMIC when they
2089 * DE-SCHEDULE. If an ORDERED tag is what was really desired,
2090 * SW can choose to immediately switch to an ORDERED tag
2091 * after the work (that has an ATOMIC tag) is re-scheduled.
2092 * Note that since there are never any tag switches pending
2093 * when the HW re-schedules, this switch can be IMMEDIATE upon
2094 * the reception of the pointer during the re-schedule.
2096 * @param tag New tag value
2097 * @param tag_type New tag type
2098 * @param group New group value
2099 * @param no_sched Control whether this work queue entry will be rescheduled.
2100 * - 1 : don't schedule this work
2101 * - 0 : allow this work to be scheduled.
2103 static inline void cvmx_pow_tag_sw_desched_nocheck(uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t group, uint64_t no_sched)
2106 cvmx_pow_tag_req_t tag_req;
2108 if (CVMX_ENABLE_POW_CHECKS)
2110 cvmx_pow_tag_info_t current_tag;
2111 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
2112 current_tag = cvmx_pow_get_current_tag();
2113 cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__);
2114 cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called with NULL tag. Deschedule not allowed from NULL state\n", __FUNCTION__);
2115 cvmx_warn_if((current_tag.tag_type != CVMX_POW_TAG_TYPE_ATOMIC) && (tag_type != CVMX_POW_TAG_TYPE_ATOMIC), "%s called where neither the before or after tag is ATOMIC\n", __FUNCTION__);
2119 if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
2120 tag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_SWTAG_DESCH;
2121 tag_req.s_cn68xx_other.tag = tag;
2122 tag_req.s_cn68xx_other.type = tag_type;
2123 tag_req.s_cn68xx_other.grp = group;
2124 tag_req.s_cn68xx_other.no_sched = no_sched;
2126 tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_SWTAG_DESCH;
2127 tag_req.s_cn38xx.tag = tag;
2128 tag_req.s_cn38xx.type = tag_type;
2129 tag_req.s_cn38xx.grp = group;
2130 tag_req.s_cn38xx.no_sched = no_sched;
2134 ptr.sio.mem_region = CVMX_IO_SEG;
2136 ptr.sio.did = CVMX_OCT_DID_TAG_TAG3;
2138 cvmx_write_io(ptr.u64, tag_req.u64); /* since TAG3 is used, this store will clear the local pending switch bit */
2141 * Performs a tag switch and then an immediate deschedule. This completes
2142 * immediately, so completion must not be waited for. This function does NOT
2143 * update the wqe in DRAM to match arguments.
2145 * This function waits for any prior tag switches to complete, so the
2146 * calling code may call this function with a pending tag switch.
2148 * Note the following CAVEAT of the Octeon HW behavior when
2149 * re-scheduling DE-SCHEDULEd items whose (next) state is
2151 * - If there are no switches pending at the time that the
2152 * HW executes the de-schedule, the HW will only re-schedule
2153 * the head of the FIFO associated with the given tag. This
2154 * means that in many respects, the HW treats this ORDERED
2155 * tag as an ATOMIC tag. Note that in the SWTAG_DESCH
2156 * case (to an ORDERED tag), the HW will do the switch
2157 * before the deschedule whenever it is possible to do
2158 * the switch immediately, so it may often look like
2160 * - If there is a pending switch to ORDERED at the time
2161 * the HW executes the de-schedule, the HW will perform
2162 * the switch at the time it re-schedules, and will be
2163 * able to reschedule any/all of the entries with the
2165 * Due to this behavior, the RECOMMENDATION to software is
2166 * that they have a (next) state of ATOMIC when they
2167 * DE-SCHEDULE. If an ORDERED tag is what was really desired,
2168 * SW can choose to immediately switch to an ORDERED tag
2169 * after the work (that has an ATOMIC tag) is re-scheduled.
2170 * Note that since there are never any tag switches pending
2171 * when the HW re-schedules, this switch can be IMMEDIATE upon
2172 * the reception of the pointer during the re-schedule.
2174 * @param tag New tag value
2175 * @param tag_type New tag type
2176 * @param group New group value
2177 * @param no_sched Control whether this work queue entry will be rescheduled.
2178 * - 1 : don't schedule this work
2179 * - 0 : allow this work to be scheduled.
2181 static inline void cvmx_pow_tag_sw_desched(uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t group, uint64_t no_sched)
2183 if (CVMX_ENABLE_POW_CHECKS)
2184 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
2186 /* Need to make sure any writes to the work queue entry are complete */
2188 /* Ensure that there is not a pending tag switch, as a tag switch cannot be started
2189 ** if a previous switch is still pending. */
2190 cvmx_pow_tag_sw_wait();
2191 cvmx_pow_tag_sw_desched_nocheck(tag, tag_type, group, no_sched);
2199 * Descchedules the current work queue entry.
2201 * @param no_sched no schedule flag value to be set on the work queue entry. If this is set
2202 * the entry will not be rescheduled.
2204 static inline void cvmx_pow_desched(uint64_t no_sched)
2207 cvmx_pow_tag_req_t tag_req;
2209 if (CVMX_ENABLE_POW_CHECKS)
2211 cvmx_pow_tag_info_t current_tag;
2212 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
2213 current_tag = cvmx_pow_get_current_tag();
2214 cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__);
2215 cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called with NULL tag. Deschedule not expected from NULL state\n", __FUNCTION__);
2218 /* Need to make sure any writes to the work queue entry are complete */
2222 if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
2223 tag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_DESCH;
2224 tag_req.s_cn68xx_other.no_sched = no_sched;
2226 tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_DESCH;
2227 tag_req.s_cn38xx.no_sched = no_sched;
2231 ptr.sio.mem_region = CVMX_IO_SEG;
2233 ptr.sio.did = CVMX_OCT_DID_TAG_TAG3;
2235 cvmx_write_io(ptr.u64, tag_req.u64); /* since TAG3 is used, this store will clear the local pending switch bit */
2244 /***********************************************************************************************
2245 ** Define usage of bits within the 32 bit tag values.
2246 ***********************************************************************************************/
2249 * Number of bits of the tag used by software. The SW bits
2250 * are always a contiguous block of the high starting at bit 31.
2251 * The hardware bits are always the low bits. By default, the top 8 bits
2252 * of the tag are reserved for software, and the low 24 are set by the IPD unit.
2254 #define CVMX_TAG_SW_BITS (8)
2255 #define CVMX_TAG_SW_SHIFT (32 - CVMX_TAG_SW_BITS)
2257 /* Below is the list of values for the top 8 bits of the tag. */
2258 #define CVMX_TAG_SW_BITS_INTERNAL 0x1 /* Tag values with top byte of this value are reserved for internal executive uses */
2259 /* The executive divides the remaining 24 bits as follows:
2260 ** * the upper 8 bits (bits 23 - 16 of the tag) define a subgroup
2261 ** * the lower 16 bits (bits 15 - 0 of the tag) are the value within the subgroup
2262 ** Note that this section describes the format of tags generated by software - refer to the
2263 ** hardware documentation for a description of the tags values generated by the packet input
2265 ** Subgroups are defined here */
2266 #define CVMX_TAG_SUBGROUP_MASK 0xFFFF /* Mask for the value portion of the tag */
2267 #define CVMX_TAG_SUBGROUP_SHIFT 16
2268 #define CVMX_TAG_SUBGROUP_PKO 0x1
2271 /* End of executive tag subgroup definitions */
2273 /* The remaining values software bit values 0x2 - 0xff are available for application use */
2278 * This function creates a 32 bit tag value from the two values provided.
2280 * @param sw_bits The upper bits (number depends on configuration) are set to this value. The remainder of
2281 * bits are set by the hw_bits parameter.
2282 * @param hw_bits The lower bits (number depends on configuration) are set to this value. The remainder of
2283 * bits are set by the sw_bits parameter.
2285 * @return 32 bit value of the combined hw and sw bits.
2287 static inline uint32_t cvmx_pow_tag_compose(uint64_t sw_bits, uint64_t hw_bits)
2289 return((((sw_bits & cvmx_build_mask(CVMX_TAG_SW_BITS)) << CVMX_TAG_SW_SHIFT) | (hw_bits & cvmx_build_mask(32 - CVMX_TAG_SW_BITS))));
2292 * Extracts the bits allocated for software use from the tag
2294 * @param tag 32 bit tag value
2296 * @return N bit software tag value, where N is configurable with the CVMX_TAG_SW_BITS define
2298 static inline uint32_t cvmx_pow_tag_get_sw_bits(uint64_t tag)
2300 return((tag >> (32 - CVMX_TAG_SW_BITS)) & cvmx_build_mask(CVMX_TAG_SW_BITS));
2304 * Extracts the bits allocated for hardware use from the tag
2306 * @param tag 32 bit tag value
2308 * @return (32 - N) bit software tag value, where N is configurable with the CVMX_TAG_SW_BITS define
2310 static inline uint32_t cvmx_pow_tag_get_hw_bits(uint64_t tag)
2312 return(tag & cvmx_build_mask(32 - CVMX_TAG_SW_BITS));
2316 * Store the current POW internal state into the supplied
2317 * buffer. It is recommended that you pass a buffer of at least
2318 * 128KB. The format of the capture may change based on SDK
2319 * version and Octeon chip.
2321 * @param buffer Buffer to store capture into
2322 * @param buffer_size
2323 * The size of the supplied buffer
2325 * @return Zero on success, negative on failure
2327 extern int cvmx_pow_capture(void *buffer, int buffer_size);
2330 * Dump a POW capture to the console in a human readable format.
2332 * @param buffer POW capture from cvmx_pow_capture()
2333 * @param buffer_size
2334 * Size of the buffer
2336 extern void cvmx_pow_display(void *buffer, int buffer_size);
2339 * Return the number of POW entries supported by this chip
2341 * @return Number of POW entries
2343 extern int cvmx_pow_get_num_entries(void);
2350 #endif /* __CVMX_POW_H__ */