1 /***********************license start***************
2 * Copyright (c) 2003-2011 Cavium Inc. (support@cavium.com). All rights
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * * Neither the name of Cavium Inc. nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
23 * This Software, including technical data, may be subject to U.S. export control
24 * laws, including the U.S. Export Administration Act and its associated
25 * regulations, and may be subject to export or import regulations in other
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
44 * Interface to the hardware Packet Order / Work unit.
46 * New, starting with SDK 1.7.0, cvmx-pow supports a number of
47 * extended consistency checks. The define
48 * CVMX_ENABLE_POW_CHECKS controls the runtime insertion of POW
49 * internal state checks to find common programming errors. If
50 * CVMX_ENABLE_POW_CHECKS is not defined, checks are by default
51 * enabled. For example, cvmx-pow will check for the following
52 * program errors or POW state inconsistency.
53 * - Requesting a POW operation with an active tag switch in
55 * - Waiting for a tag switch to complete for an excessively
56 * long period. This is normally a sign of an error in locking
58 * - Illegal tag switches from NULL_NULL.
59 * - Illegal tag switches from NULL.
60 * - Illegal deschedule request.
61 * - WQE pointer not matching the one attached to the core by
64 * <hr>$Revision: 70030 $<hr>
67 #ifndef __CVMX_POW_H__
68 #define __CVMX_POW_H__
70 #include "cvmx-scratch.h"
73 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
74 #include <asm/octeon/cvmx-sso-defs.h>
76 #include "cvmx-warn.h"
83 /* Default to having all POW consistency checks turned on */
84 #ifndef CVMX_ENABLE_POW_CHECKS
85 #define CVMX_ENABLE_POW_CHECKS 1
89 * Wait flag values for pow functions.
98 * POW tag operations. These are used in the data stored to the POW.
102 CVMX_POW_TAG_OP_SWTAG = 0L, /**< switch the tag (only) for this PP
103 - the previous tag should be non-NULL in this case
104 - tag switch response required
105 - fields used: op, type, tag */
106 CVMX_POW_TAG_OP_SWTAG_FULL = 1L, /**< switch the tag for this PP, with full information
107 - this should be used when the previous tag is NULL
108 - tag switch response required
109 - fields used: address, op, grp, type, tag */
110 CVMX_POW_TAG_OP_SWTAG_DESCH = 2L, /**< switch the tag (and/or group) for this PP and de-schedule
111 - OK to keep the tag the same and only change the group
112 - fields used: op, no_sched, grp, type, tag */
113 CVMX_POW_TAG_OP_DESCH = 3L, /**< just de-schedule
114 - fields used: op, no_sched */
115 CVMX_POW_TAG_OP_ADDWQ = 4L, /**< create an entirely new work queue entry
116 - fields used: address, op, qos, grp, type, tag */
117 CVMX_POW_TAG_OP_UPDATE_WQP_GRP = 5L,/**< just update the work queue pointer and grp for this PP
118 - fields used: address, op, grp */
119 CVMX_POW_TAG_OP_SET_NSCHED = 6L, /**< set the no_sched bit on the de-schedule list
120 - does nothing if the selected entry is not on the de-schedule list
121 - does nothing if the stored work queue pointer does not match the address field
122 - fields used: address, index, op
123 Before issuing a *_NSCHED operation, SW must guarantee that all
124 prior deschedules and set/clr NSCHED operations are complete and all
125 prior switches are complete. The hardware provides the opsdone bit
126 and swdone bit for SW polling. After issuing a *_NSCHED operation,
127 SW must guarantee that the set/clr NSCHED is complete before
128 any subsequent operations. */
129 CVMX_POW_TAG_OP_CLR_NSCHED = 7L, /**< clears the no_sched bit on the de-schedule list
130 - does nothing if the selected entry is not on the de-schedule list
131 - does nothing if the stored work queue pointer does not match the address field
132 - fields used: address, index, op
133 Before issuing a *_NSCHED operation, SW must guarantee that all
134 prior deschedules and set/clr NSCHED operations are complete and all
135 prior switches are complete. The hardware provides the opsdone bit
136 and swdone bit for SW polling. After issuing a *_NSCHED operation,
137 SW must guarantee that the set/clr NSCHED is complete before
138 any subsequent operations. */
139 CVMX_POW_TAG_OP_NOP = 15L /**< do nothing */
143 * This structure defines the store data on a store to POW
150 #ifdef __BIG_ENDIAN_BITFIELD
151 uint64_t no_sched : 1; /**< don't reschedule this entry. no_sched is used for CVMX_POW_TAG_OP_SWTAG_DESCH and CVMX_POW_TAG_OP_DESCH */
153 uint64_t index :13; /**< contains index of entry for a CVMX_POW_TAG_OP_*_NSCHED */
154 cvmx_pow_tag_op_t op : 4; /**< the operation to perform */
155 uint64_t unused2 : 2;
156 uint64_t qos : 3; /**< the QOS level for the packet. qos is only used for CVMX_POW_TAG_OP_ADDWQ */
157 uint64_t grp : 4; /**< the group that the work queue entry will be scheduled to grp is used for CVMX_POW_TAG_OP_ADDWQ, CVMX_POW_TAG_OP_SWTAG_FULL, CVMX_POW_TAG_OP_SWTAG_DESCH, and CVMX_POW_TAG_OP_UPDATE_WQP_GRP */
158 cvmx_pow_tag_type_t type : 3; /**< the type of the tag. type is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
159 uint64_t tag :32; /**< the actual tag. tag is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
162 cvmx_pow_tag_type_t type : 3;
165 uint64_t unused2 : 2;
166 cvmx_pow_tag_op_t op : 4;
169 uint64_t no_sched : 1;
173 #ifdef __BIG_ENDIAN_BITFIELD
174 uint64_t no_sched : 1; /**< don't reschedule this entry. no_sched is used for CVMX_POW_TAG_OP_SWTAG_DESCH and CVMX_POW_TAG_OP_DESCH */
175 cvmx_pow_tag_op_t op : 4; /**< the operation to perform */
176 uint64_t unused1 : 4;
177 uint64_t index :11; /**< contains index of entry for a CVMX_POW_TAG_OP_*_NSCHED */
178 uint64_t unused2 : 1;
179 uint64_t grp : 6; /**< the group that the work queue entry will be scheduled to grp is used for CVMX_POW_TAG_OP_ADDWQ, CVMX_POW_TAG_OP_SWTAG_FULL, CVMX_POW_TAG_OP_SWTAG_DESCH, and CVMX_POW_TAG_OP_UPDATE_WQP_GRP */
180 uint64_t unused3 : 3;
181 cvmx_pow_tag_type_t type : 2; /**< the type of the tag. type is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
182 uint64_t tag :32; /**< the actual tag. tag is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
185 cvmx_pow_tag_type_t type : 2;
186 uint64_t unused3 : 3;
188 uint64_t unused2 : 1;
190 uint64_t unused1 : 4;
191 cvmx_pow_tag_op_t op : 4;
192 uint64_t no_sched : 1;
196 #ifdef __BIG_ENDIAN_BITFIELD
197 uint64_t no_sched : 1; /**< don't reschedule this entry. no_sched is used for CVMX_POW_TAG_OP_SWTAG_DESCH and CVMX_POW_TAG_OP_DESCH */
198 cvmx_pow_tag_op_t op : 4; /**< the operation to perform */
199 uint64_t unused1 : 12;
200 uint64_t qos : 3; /**< contains index of entry for a CVMX_POW_TAG_OP_*_NSCHED */
201 uint64_t unused2 : 1;
202 uint64_t grp : 6; /**< the group that the work queue entry will be scheduled to grp is used for CVMX_POW_TAG_OP_ADDWQ, CVMX_POW_TAG_OP_SWTAG_FULL, CVMX_POW_TAG_OP_SWTAG_DESCH, and CVMX_POW_TAG_OP_UPDATE_WQP_GRP */
203 uint64_t unused3 : 3;
204 cvmx_pow_tag_type_t type : 2; /**< the type of the tag. type is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
205 uint64_t tag :32; /**< the actual tag. tag is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
208 cvmx_pow_tag_type_t type : 2;
209 uint64_t unused3 : 3;
211 uint64_t unused2 : 1;
213 uint64_t unused1 : 12;
214 cvmx_pow_tag_op_t op : 4;
215 uint64_t no_sched : 1;
219 #ifdef __BIG_ENDIAN_BITFIELD
220 uint64_t no_sched : 1; /**< don't reschedule this entry. no_sched is used for CVMX_POW_TAG_OP_SWTAG_DESCH and CVMX_POW_TAG_OP_DESCH */
221 cvmx_pow_tag_op_t op : 4; /**< the operation to perform */
222 uint64_t unused1 : 16;
223 uint64_t grp : 6; /**< the group that the work queue entry will be scheduled to grp is used for CVMX_POW_TAG_OP_ADDWQ, CVMX_POW_TAG_OP_SWTAG_FULL, CVMX_POW_TAG_OP_SWTAG_DESCH, and CVMX_POW_TAG_OP_UPDATE_WQP_GRP */
224 uint64_t unused3 : 3;
225 cvmx_pow_tag_type_t type : 2; /**< the type of the tag. type is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
226 uint64_t tag :32; /**< the actual tag. tag is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
229 cvmx_pow_tag_type_t type : 2;
230 uint64_t unused3 : 3;
232 uint64_t unused1 : 16;
233 cvmx_pow_tag_op_t op : 4;
234 uint64_t no_sched : 1;
238 } cvmx_pow_tag_req_t;
245 }cvmx_pow_tag_info_t;
248 * This structure describes the address to load stuff from POW
255 * Address for new work request loads (did<2:0> == 0)
259 #ifdef __BIG_ENDIAN_BITFIELD
260 uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */
261 uint64_t reserved_49_61 : 13; /**< Must be zero */
262 uint64_t is_io : 1; /**< Must be one */
263 uint64_t did : 8; /**< the ID of POW -- did<2:0> == 0 in this case */
264 uint64_t reserved_4_39 : 36; /**< Must be zero */
265 uint64_t wait : 1; /**< If set, don't return load response until work is available */
266 uint64_t reserved_0_2 : 3; /**< Must be zero */
268 uint64_t reserved_0_2 : 3;
270 uint64_t reserved_4_39 : 36;
273 uint64_t reserved_49_61 : 13;
274 uint64_t mem_region : 2;
279 * Address for loads to get POW internal status
283 #ifdef __BIG_ENDIAN_BITFIELD
284 uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */
285 uint64_t reserved_49_61 : 13; /**< Must be zero */
286 uint64_t is_io : 1; /**< Must be one */
287 uint64_t did : 8; /**< the ID of POW -- did<2:0> == 1 in this case */
288 uint64_t reserved_10_39 : 30; /**< Must be zero */
289 uint64_t coreid : 4; /**< The core id to get status for */
290 uint64_t get_rev : 1; /**< If set and get_cur is set, return reverse tag-list pointer rather than forward tag-list pointer */
291 uint64_t get_cur : 1; /**< If set, return current status rather than pending status */
292 uint64_t get_wqp : 1; /**< If set, get the work-queue pointer rather than tag/type */
293 uint64_t reserved_0_2 : 3; /**< Must be zero */
295 uint64_t reserved_0_2 : 3;
296 uint64_t get_wqp : 1;
297 uint64_t get_cur : 1;
298 uint64_t get_rev : 1;
300 uint64_t reserved_10_39 : 30;
303 uint64_t reserved_49_61 : 13;
304 uint64_t mem_region : 2;
309 * Address for loads to get 68XX SS0 internal status
313 #ifdef __BIG_ENDIAN_BITFIELD
314 uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */
315 uint64_t reserved_49_61 : 13; /**< Must be zero */
316 uint64_t is_io : 1; /**< Must be one */
317 uint64_t did : 8; /**< the ID of POW -- did<2:0> == 1 in this case */
318 uint64_t reserved_14_39 : 26; /**< Must be zero */
319 uint64_t coreid : 5; /**< The core id to get status for */
320 uint64_t reserved_6_8 : 3;
321 uint64_t opcode : 3; /**< Status operation */
322 uint64_t reserved_0_2 : 3; /**< Must be zero */
324 uint64_t reserved_0_2 : 3;
326 uint64_t reserved_6_8 : 3;
328 uint64_t reserved_14_39 : 26;
331 uint64_t reserved_49_61 : 13;
332 uint64_t mem_region : 2;
337 * Address for memory loads to get POW internal state
341 #ifdef __BIG_ENDIAN_BITFIELD
342 uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */
343 uint64_t reserved_49_61 : 13; /**< Must be zero */
344 uint64_t is_io : 1; /**< Must be one */
345 uint64_t did : 8; /**< the ID of POW -- did<2:0> == 2 in this case */
346 uint64_t reserved_16_39 : 24; /**< Must be zero */
347 uint64_t index : 11; /**< POW memory index */
348 uint64_t get_des : 1; /**< If set, return deschedule information rather than the standard
349 response for work-queue index (invalid if the work-queue entry is not on the
351 uint64_t get_wqp : 1; /**< If set, get the work-queue pointer rather than tag/type (no effect when get_des set). */
352 uint64_t reserved_0_2 : 3; /**< Must be zero */
354 uint64_t reserved_0_2 : 3;
355 uint64_t get_wqp : 1;
356 uint64_t get_des : 1;
358 uint64_t reserved_16_39 : 24;
361 uint64_t reserved_49_61 : 13;
362 uint64_t mem_region : 2;
367 * Address for memory loads to get SSO internal state
371 #ifdef __BIG_ENDIAN_BITFIELD
372 uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */
373 uint64_t reserved_49_61 : 13; /**< Must be zero */
374 uint64_t is_io : 1; /**< Must be one */
375 uint64_t did : 8; /**< the ID of SSO - did<2:0> == 2 in this case */
376 uint64_t reserved_20_39 : 20; /**< Must be zero */
377 uint64_t index : 11; /**< SSO memory index */
378 uint64_t reserved_6_8 : 3; /**< Must be zero */
379 uint64_t opcode : 3; /**< Read TAG/WQ pointer/pending tag/next potr */
380 uint64_t reserved_0_2 : 3; /**< Must be zero */
382 uint64_t reserved_0_2 : 3;
384 uint64_t reserved_3_5 : 3;
386 uint64_t reserved_20_39 : 20;
389 uint64_t reserved_49_61 : 13;
390 uint64_t mem_region : 2;
395 * Address for index/pointer loads
399 #ifdef __BIG_ENDIAN_BITFIELD
400 uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */
401 uint64_t reserved_49_61 : 13; /**< Must be zero */
402 uint64_t is_io : 1; /**< Must be one */
403 uint64_t did : 8; /**< the ID of POW -- did<2:0> == 3 in this case */
404 uint64_t reserved_9_39 : 31; /**< Must be zero */
405 uint64_t qosgrp : 4; /**< when {get_rmt ==0 AND get_des_get_tail == 0}, this field selects one of
406 eight POW internal-input queues (0-7), one per QOS level; values 8-15 are
407 illegal in this case;
408 when {get_rmt ==0 AND get_des_get_tail == 1}, this field selects one of
409 16 deschedule lists (per group);
410 when get_rmt ==1, this field selects one of 16 memory-input queue lists.
411 The two memory-input queue lists associated with each QOS level are:
412 - qosgrp = 0, qosgrp = 8: QOS0
413 - qosgrp = 1, qosgrp = 9: QOS1
414 - qosgrp = 2, qosgrp = 10: QOS2
415 - qosgrp = 3, qosgrp = 11: QOS3
416 - qosgrp = 4, qosgrp = 12: QOS4
417 - qosgrp = 5, qosgrp = 13: QOS5
418 - qosgrp = 6, qosgrp = 14: QOS6
419 - qosgrp = 7, qosgrp = 15: QOS7 */
420 uint64_t get_des_get_tail: 1; /**< If set and get_rmt is clear, return deschedule list indexes
421 rather than indexes for the specified qos level; if set and get_rmt is set, return
422 the tail pointer rather than the head pointer for the specified qos level. */
423 uint64_t get_rmt : 1; /**< If set, return remote pointers rather than the local indexes for the specified qos level. */
424 uint64_t reserved_0_2 : 3; /**< Must be zero */
426 uint64_t reserved_0_2 : 3;
427 uint64_t get_rmt : 1;
428 uint64_t get_des_get_tail: 1;
430 uint64_t reserved_9_39 : 31;
433 uint64_t reserved_49_61 : 13;
434 uint64_t mem_region : 2;
439 * Address for Index/Pointer loads to get SSO internal state
443 #ifdef __BIG_ENDIAN_BITFIELD
444 uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */
445 uint64_t reserved_49_61 : 13; /**< Must be zero */
446 uint64_t is_io : 1; /**< Must be one */
447 uint64_t did : 8; /**< the ID of SSO - did<2:0> == 2 in this case */
448 uint64_t reserved_15_39 : 25; /**< Must be zero */
449 uint64_t qos_grp : 6; /**< When opcode = IPL_IQ, this field specifies IQ (or QOS).
450 When opcode = IPL_DESCHED, this field specifies the group.
451 This field is reserved for all other opcodes. */
452 uint64_t reserved_6_8 : 3; /**< Must be zero */
453 uint64_t opcode : 3; /**< Read TAG/WQ pointer/pending tag/next potr */
454 uint64_t reserved_0_2 : 3; /**< Must be zero */
456 uint64_t reserved_0_2 : 3;
458 uint64_t reserved_3_5 : 3;
459 uint64_t qos_grp : 6;
460 uint64_t reserved_15_39 : 25;
463 uint64_t reserved_49_61 : 13;
464 uint64_t mem_region : 2;
469 * Address for NULL_RD request (did<2:0> == 4)
470 * when this is read, HW attempts to change the state to NULL if it is NULL_NULL
471 * (the hardware cannot switch from NULL_NULL to NULL if a POW entry is not available -
472 * software may need to recover by finishing another piece of work before a POW
473 * entry can ever become available.)
477 #ifdef __BIG_ENDIAN_BITFIELD
478 uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */
479 uint64_t reserved_49_61 : 13; /**< Must be zero */
480 uint64_t is_io : 1; /**< Must be one */
481 uint64_t did : 8; /**< the ID of POW -- did<2:0> == 4 in this case */
482 uint64_t reserved_0_39 : 40; /**< Must be zero */
484 uint64_t reserved_0_39 : 40;
487 uint64_t reserved_49_61 : 13;
488 uint64_t mem_region : 2;
491 } cvmx_pow_load_addr_t;
494 * This structure defines the response to a load/SENDSINGLE to POW (except CSR reads)
501 * Response to new work request loads
505 #ifdef __BIG_ENDIAN_BITFIELD
506 uint64_t no_work : 1; /**< Set when no new work queue entry was returned.
507 If there was de-scheduled work, the HW will definitely
508 return it. When this bit is set, it could mean
510 - There was no work, or
511 - There was no work that the HW could find. This
512 case can happen, regardless of the wait bit value
513 in the original request, when there is work
514 in the IQ's that is too deep down the list. */
515 uint64_t reserved_40_62 : 23; /**< Must be zero */
516 uint64_t addr : 40; /**< 36 in O1 -- the work queue pointer */
519 uint64_t reserved_40_62 : 23;
520 uint64_t no_work : 1;
525 * Result for a POW Status Load (when get_cur==0 and get_wqp==0)
529 #ifdef __BIG_ENDIAN_BITFIELD
530 uint64_t reserved_62_63 : 2;
531 uint64_t pend_switch : 1; /**< Set when there is a pending non-NULL SWTAG or
532 SWTAG_FULL, and the POW entry has not left the list for the original tag. */
533 uint64_t pend_switch_full: 1; /**< Set when SWTAG_FULL and pend_switch is set. */
534 uint64_t pend_switch_null: 1; /**< Set when there is a pending NULL SWTAG, or an implicit switch to NULL. */
535 uint64_t pend_desched : 1; /**< Set when there is a pending DESCHED or SWTAG_DESCHED. */
536 uint64_t pend_desched_switch: 1; /**< Set when there is a pending SWTAG_DESCHED and pend_desched is set. */
537 uint64_t pend_nosched : 1; /**< Set when nosched is desired and pend_desched is set. */
538 uint64_t pend_new_work : 1; /**< Set when there is a pending GET_WORK. */
539 uint64_t pend_new_work_wait: 1; /**< When pend_new_work is set, this bit indicates that the wait bit was set. */
540 uint64_t pend_null_rd : 1; /**< Set when there is a pending NULL_RD. */
541 uint64_t pend_nosched_clr: 1; /**< Set when there is a pending CLR_NSCHED. */
542 uint64_t reserved_51 : 1;
543 uint64_t pend_index : 11; /**< This is the index when pend_nosched_clr is set. */
544 uint64_t pend_grp : 4; /**< This is the new_grp when (pend_desched AND pend_desched_switch) is set. */
545 uint64_t reserved_34_35 : 2;
546 uint64_t pend_type : 2; /**< This is the tag type when pend_switch or (pend_desched AND pend_desched_switch) are set. */
547 uint64_t pend_tag : 32; /**< - this is the tag when pend_switch or (pend_desched AND pend_desched_switch) are set. */
549 uint64_t pend_tag : 32;
550 uint64_t pend_type : 2;
551 uint64_t reserved_34_35 : 2;
552 uint64_t pend_grp : 4;
553 uint64_t pend_index : 11;
554 uint64_t reserved_51 : 1;
555 uint64_t pend_nosched_clr: 1;
556 uint64_t pend_null_rd : 1;
557 uint64_t pend_new_work_wait: 1;
558 uint64_t pend_new_work : 1;
559 uint64_t pend_nosched : 1;
560 uint64_t pend_desched_switch: 1;
561 uint64_t pend_desched : 1;
562 uint64_t pend_switch_null: 1;
563 uint64_t pend_switch_full: 1;
564 uint64_t pend_switch : 1;
565 uint64_t reserved_62_63 : 2;
570 * Result for a SSO Status Load (when opcode is SL_PENDTAG)
574 #ifdef __BIG_ENDIAN_BITFIELD
575 uint64_t pend_switch : 1; /**< Set when there is a pending non-UNSCHEDULED SWTAG or
576 SWTAG_FULL, and the SSO entry has not left the list for the original tag. */
577 uint64_t pend_get_work : 1; /**< Set when there is a pending GET_WORK */
578 uint64_t pend_get_work_wait: 1; /**< when pend_get_work is set, this biit indicates that the
580 uint64_t pend_nosched : 1; /**< Set when nosched is desired and pend_desched is set. */
581 uint64_t pend_nosched_clr: 1; /**< Set when there is a pending CLR_NSCHED. */
582 uint64_t pend_desched : 1; /**< Set when there is a pending DESCHED or SWTAG_DESCHED. */
583 uint64_t pend_alloc_we : 1; /**< Set when there is a pending ALLOC_WE. */
584 uint64_t reserved_48_56 : 9;
585 uint64_t pend_index : 11; /**< This is the index when pend_nosched_clr is set. */
586 uint64_t reserved_34_36 : 3;
587 uint64_t pend_type : 2; /**< This is the tag type when pend_switch is set. */
588 uint64_t pend_tag : 32; /**< This is the tag when pend_switch is set. */
590 uint64_t pend_tag : 32;
591 uint64_t pend_type : 2;
592 uint64_t reserved_34_36 : 3;
593 uint64_t pend_index : 11;
594 uint64_t reserved_48_56 : 9;
595 uint64_t pend_alloc_we : 1;
596 uint64_t pend_desched : 1;
597 uint64_t pend_nosched_clr: 1;
598 uint64_t pend_nosched : 1;
599 uint64_t pend_get_work_wait: 1;
600 uint64_t pend_get_work : 1;
601 uint64_t pend_switch : 1;
606 * Result for a POW Status Load (when get_cur==0 and get_wqp==1)
610 #ifdef __BIG_ENDIAN_BITFIELD
611 uint64_t reserved_62_63 : 2;
612 uint64_t pend_switch : 1; /**< Set when there is a pending non-NULL SWTAG or
613 SWTAG_FULL, and the POW entry has not left the list for the original tag. */
614 uint64_t pend_switch_full: 1; /**< Set when SWTAG_FULL and pend_switch is set. */
615 uint64_t pend_switch_null: 1; /**< Set when there is a pending NULL SWTAG, or an implicit switch to NULL. */
616 uint64_t pend_desched : 1; /**< Set when there is a pending DESCHED or SWTAG_DESCHED. */
617 uint64_t pend_desched_switch: 1; /**< Set when there is a pending SWTAG_DESCHED and pend_desched is set. */
618 uint64_t pend_nosched : 1; /**< Set when nosched is desired and pend_desched is set. */
619 uint64_t pend_new_work : 1; /**< Set when there is a pending GET_WORK. */
620 uint64_t pend_new_work_wait: 1; /**< When pend_new_work is set, this bit indicates that the wait bit was set. */
621 uint64_t pend_null_rd : 1; /**< Set when there is a pending NULL_RD. */
622 uint64_t pend_nosched_clr: 1; /**< Set when there is a pending CLR_NSCHED. */
623 uint64_t reserved_51 : 1;
624 uint64_t pend_index : 11; /**< This is the index when pend_nosched_clr is set. */
625 uint64_t pend_grp : 4; /**< This is the new_grp when (pend_desched AND pend_desched_switch) is set. */
626 uint64_t pend_wqp : 36; /**< This is the wqp when pend_nosched_clr is set. */
628 uint64_t pend_wqp : 36;
629 uint64_t pend_grp : 4;
630 uint64_t pend_index : 11;
631 uint64_t reserved_51 : 1;
632 uint64_t pend_nosched_clr: 1;
633 uint64_t pend_null_rd : 1;
634 uint64_t pend_new_work_wait: 1;
635 uint64_t pend_new_work : 1;
636 uint64_t pend_nosched : 1;
637 uint64_t pend_desched_switch: 1;
638 uint64_t pend_desched : 1;
639 uint64_t pend_switch_null: 1;
640 uint64_t pend_switch_full: 1;
641 uint64_t pend_switch : 1;
642 uint64_t reserved_62_63 : 2;
647 * Result for a SSO Status Load (when opcode is SL_PENDWQP)
651 #ifdef __BIG_ENDIAN_BITFIELD
652 uint64_t pend_switch : 1; /**< Set when there is a pending non-UNSCHEDULED SWTAG or
653 SWTAG_FULL, and the SSO entry has not left the list for the original tag. */
654 uint64_t pend_get_work : 1; /**< Set when there is a pending GET_WORK */
655 uint64_t pend_get_work_wait: 1; /**< when pend_get_work is set, this biit indicates that the
657 uint64_t pend_nosched : 1; /**< Set when nosched is desired and pend_desched is set. */
658 uint64_t pend_nosched_clr: 1; /**< Set when there is a pending CLR_NSCHED. */
659 uint64_t pend_desched : 1; /**< Set when there is a pending DESCHED or SWTAG_DESCHED. */
660 uint64_t pend_alloc_we : 1; /**< Set when there is a pending ALLOC_WE. */
661 uint64_t reserved_51_56 : 6;
662 uint64_t pend_index : 11; /**< This is the index when pend_nosched_clr is set. */
663 uint64_t reserved_38_39 : 2;
664 uint64_t pend_wqp : 38; /**< This is the wqp when pend_nosched_clr is set. */
666 uint64_t pend_wqp : 38;
667 uint64_t reserved_38_39 : 2;
668 uint64_t pend_index : 11;
669 uint64_t reserved_51_56 : 6;
670 uint64_t pend_alloc_we : 1;
671 uint64_t pend_desched : 1;
672 uint64_t pend_nosched_clr: 1;
673 uint64_t pend_nosched : 1;
674 uint64_t pend_get_work_wait: 1;
675 uint64_t pend_get_work : 1;
676 uint64_t pend_switch : 1;
681 * Result for a POW Status Load (when get_cur==1, get_wqp==0, and get_rev==0)
685 #ifdef __BIG_ENDIAN_BITFIELD
686 uint64_t reserved_62_63 : 2;
687 uint64_t link_index : 11; /**< Points to the next POW entry in the tag list when tail == 0 (and
688 tag_type is not NULL or NULL_NULL). */
689 uint64_t index : 11; /**< The POW entry attached to the core. */
690 uint64_t grp : 4; /**< The group attached to the core (updated when new tag list entered on SWTAG_FULL). */
691 uint64_t head : 1; /**< Set when this POW entry is at the head of its tag list (also set when in
692 the NULL or NULL_NULL state). */
693 uint64_t tail : 1; /**< Set when this POW entry is at the tail of its tag list (also set when in the
694 NULL or NULL_NULL state). */
695 uint64_t tag_type : 2; /**< The tag type attached to the core (updated when new tag list
696 entered on SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */
697 uint64_t tag : 32; /**< The tag attached to the core (updated when new tag list entered on
698 SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */
701 uint64_t tag_type : 2;
706 uint64_t link_index : 11;
707 uint64_t reserved_62_63 : 2;
712 * Result for a SSO Status Load (when opcode is SL_TAG)
716 #ifdef __BIG_ENDIAN_BITFIELD
717 uint64_t reserved_57_63 : 7;
718 uint64_t index : 11; /**< The SSO entry attached to the core. */
719 uint64_t reserved_45 : 1;
720 uint64_t grp : 6; /**< The group attached to the core (updated when new tag list entered on
722 uint64_t head : 1; /**< Set when this SSO entry is at the head of its tag list (also set when in the
723 UNSCHEDULED or EMPTY state). */
724 uint64_t tail : 1; /**< Set when this SSO entry is at the tail of its tag list (also set when in the
725 UNSCHEDULED or EMPTY state). */
726 uint64_t reserved_34_36 : 3;
727 uint64_t tag_type : 2; /**< The tag type attached to the core (updated when new tag list entered
728 on SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */
729 uint64_t tag : 32; /**< The tag attached to the core (updated when new tag list entered on SWTAG,
730 SWTAG_FULL, or SWTAG_DESCHED). */
733 uint64_t tag_type : 2;
734 uint64_t reserved_34_36 : 3;
738 uint64_t reserved_45 : 1;
740 uint64_t reserved_57_63 : 7;
745 * Result for a POW Status Load (when get_cur==1, get_wqp==0, and get_rev==1)
749 #ifdef __BIG_ENDIAN_BITFIELD
750 uint64_t reserved_62_63 : 2;
751 uint64_t revlink_index : 11; /**< Points to the prior POW entry in the tag list when head == 0
752 (and tag_type is not NULL or NULL_NULL). This field is unpredictable
753 when the core's state is NULL or NULL_NULL. */
754 uint64_t index : 11; /**< The POW entry attached to the core. */
755 uint64_t grp : 4; /**< The group attached to the core (updated when new tag list entered on SWTAG_FULL). */
756 uint64_t head : 1; /**< Set when this POW entry is at the head of its tag list (also set when in
757 the NULL or NULL_NULL state). */
758 uint64_t tail : 1; /**< Set when this POW entry is at the tail of its tag list (also set when in the
759 NULL or NULL_NULL state). */
760 uint64_t tag_type : 2; /**< The tag type attached to the core (updated when new tag list
761 entered on SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */
762 uint64_t tag : 32; /**< The tag attached to the core (updated when new tag list entered on
763 SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */
766 uint64_t tag_type : 2;
771 uint64_t revlink_index : 11;
772 uint64_t reserved_62_63 : 2;
777 * Result for a SSO Status Load (when opcode is SL_WQP)
781 #ifdef __BIG_ENDIAN_BITFIELD
782 uint64_t reserved_58_63 : 6;
783 uint64_t index : 11; /**< The SSO entry attached to the core. */
784 uint64_t reserved_46 : 1;
785 uint64_t grp : 6; /**< The group attached to the core (updated when new tag list entered on
787 uint64_t reserved_38_39 : 2;
788 uint64_t wqp : 38; /**< The wqp attached to the core (updated when new tag list entered on SWTAG_FULL). */
791 uint64_t reserved_38_39 : 2;
793 uint64_t reserved_46 : 1;
795 uint64_t reserved_58_63 : 6;
800 * Result for a POW Status Load (when get_cur==1, get_wqp==1, and get_rev==0)
804 #ifdef __BIG_ENDIAN_BITFIELD
805 uint64_t reserved_62_63 : 2;
806 uint64_t link_index : 11; /**< Points to the next POW entry in the tag list when tail == 0 (and
807 tag_type is not NULL or NULL_NULL). */
808 uint64_t index : 11; /**< The POW entry attached to the core. */
809 uint64_t grp : 4; /**< The group attached to the core (updated when new tag list entered on SWTAG_FULL). */
810 uint64_t wqp : 36; /**< The wqp attached to the core (updated when new tag list entered on SWTAG_FULL). */
815 uint64_t link_index : 11;
816 uint64_t reserved_62_63 : 2;
821 * Result for a SSO Status Load (when opcode is SL_LINKS)
825 #ifdef __BIG_ENDIAN_BITFIELD
826 uint64_t reserved_46_63 : 18;
827 uint64_t index : 11; /**< The SSO entry attached to the core. */
828 uint64_t reserved_34 : 1;
829 uint64_t grp : 6; /**< The group attached to the core (updated when new tag list entered on
831 uint64_t head : 1; /**< Set when this SSO entry is at the head of its tag list (also set when in the
832 UNSCHEDULED or EMPTY state). */
833 uint64_t tail : 1; /**< Set when this SSO entry is at the tail of its tag list (also set when in the
834 UNSCHEDULED or EMPTY state). */
835 uint64_t reserved_24_25 : 2;
836 uint64_t revlink_index : 11; /**< Points to the prior SSO entry in the tag list when head==0 (and tag_type is not UNSCHEDULED or EMPTY). */
837 uint64_t reserved_11_12 : 2;
838 uint64_t link_index : 11; /**< Points to the next SSO entry in the tag list when tail==0 (and tag_type is not UNSCHEDULDED or EMPTY). */
840 uint64_t link_index : 11;
841 uint64_t reserved_11_12 : 2;
842 uint64_t revlink_index : 11;
843 uint64_t reserved_24_25 : 2;
847 uint64_t reserved_34 : 1;
849 uint64_t reserved_46_63 : 18;
854 * Result for a POW Status Load (when get_cur==1, get_wqp==1, and get_rev==1)
858 #ifdef __BIG_ENDIAN_BITFIELD
859 uint64_t reserved_62_63 : 2;
860 uint64_t revlink_index : 11; /**< Points to the prior POW entry in the tag list when head == 0
861 (and tag_type is not NULL or NULL_NULL). This field is unpredictable
862 when the core's state is NULL or NULL_NULL. */
863 uint64_t index : 11; /**< The POW entry attached to the core. */
864 uint64_t grp : 4; /**< The group attached to the core (updated when new tag list entered on SWTAG_FULL). */
865 uint64_t wqp : 36; /**< The wqp attached to the core (updated when new tag list entered on SWTAG_FULL). */
870 uint64_t revlink_index : 11;
871 uint64_t reserved_62_63 : 2;
876 * Result For POW Memory Load (get_des == 0 and get_wqp == 0)
880 #ifdef __BIG_ENDIAN_BITFIELD
881 uint64_t reserved_51_63 : 13;
882 uint64_t next_index : 11; /**< The next entry in the input, free, descheduled_head list
883 (unpredictable if entry is the tail of the list). */
884 uint64_t grp : 4; /**< The group of the POW entry. */
885 uint64_t reserved_35 : 1;
886 uint64_t tail : 1; /**< Set when this POW entry is at the tail of its tag list (also set when in the
887 NULL or NULL_NULL state). */
888 uint64_t tag_type : 2; /**< The tag type of the POW entry. */
889 uint64_t tag : 32; /**< The tag of the POW entry. */
892 uint64_t tag_type : 2;
894 uint64_t reserved_35 : 1;
896 uint64_t next_index : 11;
897 uint64_t reserved_51_63 : 13;
902 * Result For SSO Memory Load (opcode is ML_TAG)
906 #ifdef __BIG_ENDIAN_BITFIELD
907 uint64_t reserved_38_63 : 26;
908 uint64_t tail : 1; /**< Set when this SSO entry is at the tail of its tag list (also set when in the
909 NULL or NULL_NULL state). */
910 uint64_t reserved_34_36 : 3;
911 uint64_t tag_type : 2; /**< The tag type of the SSO entry. */
912 uint64_t tag : 32; /**< The tag of the SSO entry. */
915 uint64_t tag_type : 2;
916 uint64_t reserved_34_36 : 3;
918 uint64_t reserved_38_63 : 26;
920 } s_smemload0_cn68xx;
923 * Result For POW Memory Load (get_des == 0 and get_wqp == 1)
927 #ifdef __BIG_ENDIAN_BITFIELD
928 uint64_t reserved_51_63 : 13;
929 uint64_t next_index : 11; /**< The next entry in the input, free, descheduled_head list
930 (unpredictable if entry is the tail of the list). */
931 uint64_t grp : 4; /**< The group of the POW entry. */
932 uint64_t wqp : 36; /**< The WQP held in the POW entry. */
936 uint64_t next_index : 11;
937 uint64_t reserved_51_63 : 13;
942 * Result For SSO Memory Load (opcode is ML_WQPGRP)
946 #ifdef __BIG_ENDIAN_BITFIELD
947 uint64_t reserved_48_63 : 16;
948 uint64_t nosched : 1; /**< The nosched bit for the SSO entry. */
949 uint64_t reserved_46 : 1;
950 uint64_t grp : 6; /**< The group of the SSO entry. */
951 uint64_t reserved_38_39 : 2;
952 uint64_t wqp : 38; /**< The WQP held in the SSO entry. */
955 uint64_t reserved_38_39 : 2;
957 uint64_t reserved_46 : 1;
958 uint64_t nosched : 1;
959 uint64_t reserved_51_63 : 16;
961 } s_smemload1_cn68xx;
964 * Result For POW Memory Load (get_des == 1)
968 #ifdef __BIG_ENDIAN_BITFIELD
969 uint64_t reserved_51_63 : 13;
970 uint64_t fwd_index : 11; /**< The next entry in the tag list connected to the descheduled head. */
971 uint64_t grp : 4; /**< The group of the POW entry. */
972 uint64_t nosched : 1; /**< The nosched bit for the POW entry. */
973 uint64_t pend_switch : 1; /**< There is a pending tag switch */
974 uint64_t pend_type : 2; /**< The next tag type for the new tag list when pend_switch is set. */
975 uint64_t pend_tag : 32; /**< The next tag for the new tag list when pend_switch is set. */
977 uint64_t pend_tag : 32;
978 uint64_t pend_type : 2;
979 uint64_t pend_switch : 1;
980 uint64_t nosched : 1;
982 uint64_t fwd_index : 11;
983 uint64_t reserved_51_63 : 13;
988 * Result For SSO Memory Load (opcode is ML_PENTAG)
992 #ifdef __BIG_ENDIAN_BITFIELD
993 uint64_t reserved_38_63 : 26;
994 uint64_t pend_switch : 1; /**< Set when there is a pending non-UNSCHEDULED SWTAG or
995 SWTAG_FULL, and the SSO entry has not left the list for the original tag. */
996 uint64_t reserved_34_36 : 3;
997 uint64_t pend_type : 2; /**< The next tag type for the new tag list when pend_switch is set. */
998 uint64_t pend_tag : 32; /**< The next tag for the new tag list when pend_switch is set. */
1000 uint64_t pend_tag : 32;
1001 uint64_t pend_type : 2;
1002 uint64_t reserved_34_36 : 3;
1003 uint64_t pend_switch : 1;
1004 uint64_t reserved_38_63 : 26;
1006 } s_smemload2_cn68xx;
1009 * Result For SSO Memory Load (opcode is ML_LINKS)
1013 #ifdef __BIG_ENDIAN_BITFIELD
1014 uint64_t reserved_24_63 : 40;
1015 uint64_t fwd_index : 11; /**< The next entry in the tag list connected to the descheduled head. */
1016 uint64_t reserved_11_12 : 2;
1017 uint64_t next_index : 11; /**< The next entry in the input, free, descheduled_head list
1018 (unpredictable if entry is the tail of the list). */
1020 uint64_t next_index : 11;
1021 uint64_t reserved_11_12 : 2;
1022 uint64_t fwd_index : 11;
1023 uint64_t reserved_24_63 : 40;
1025 } s_smemload3_cn68xx;
1028 * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 0)
1032 #ifdef __BIG_ENDIAN_BITFIELD
1033 uint64_t reserved_52_63 : 12;
1034 uint64_t free_val : 1; /**< - set when there is one or more POW entries on the free list. */
1035 uint64_t free_one : 1; /**< - set when there is exactly one POW entry on the free list. */
1036 uint64_t reserved_49 : 1;
1037 uint64_t free_head : 11; /**< - when free_val is set, indicates the first entry on the free list. */
1038 uint64_t reserved_37 : 1;
1039 uint64_t free_tail : 11; /**< - when free_val is set, indicates the last entry on the free list. */
1040 uint64_t loc_val : 1; /**< - set when there is one or more POW entries on the input Q list selected by qosgrp. */
1041 uint64_t loc_one : 1; /**< - set when there is exactly one POW entry on the input Q list selected by qosgrp. */
1042 uint64_t reserved_23 : 1;
1043 uint64_t loc_head : 11; /**< - when loc_val is set, indicates the first entry on the input Q list selected by qosgrp. */
1044 uint64_t reserved_11 : 1;
1045 uint64_t loc_tail : 11; /**< - when loc_val is set, indicates the last entry on the input Q list selected by qosgrp. */
1047 uint64_t loc_tail : 11;
1048 uint64_t reserved_11 : 1;
1049 uint64_t loc_head : 11;
1050 uint64_t reserved_23 : 1;
1051 uint64_t loc_one : 1;
1052 uint64_t loc_val : 1;
1053 uint64_t free_tail : 11;
1054 uint64_t reserved_37 : 1;
1055 uint64_t free_head : 11;
1056 uint64_t reserved_49 : 1;
1057 uint64_t free_one : 1;
1058 uint64_t free_val : 1;
1059 uint64_t reserved_52_63 : 12;
1064 * Result for SSO Index/Pointer Load(opcode == IPL_IQ/IPL_DESCHED/IPL_NOSCHED)
1068 #ifdef __BIG_ENDIAN_BITFIELD
1069 uint64_t reserved_28_63 : 36;
1070 uint64_t queue_val : 1; /**< - If set, one or more valid entries are in the queue. */
1071 uint64_t queue_one : 1; /**< - If set, exactly one valid entry is in the queue. */
1072 uint64_t reserved_24_25 : 2;
1073 uint64_t queue_head : 11; /**< - Index of entry at the head of the queue. */
1074 uint64_t reserved_11_12 : 2;
1075 uint64_t queue_tail : 11; /**< - Index of entry at the tail of the queue. */
1077 uint64_t queue_tail : 11;
1078 uint64_t reserved_11_12 : 2;
1079 uint64_t queue_head : 11;
1080 uint64_t reserved_24_25 : 2;
1081 uint64_t queue_one : 1;
1082 uint64_t queue_val : 1;
1083 uint64_t reserved_28_63 : 36;
1085 } sindexload0_cn68xx;
1088 * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 1)
1092 #ifdef __BIG_ENDIAN_BITFIELD
1093 uint64_t reserved_52_63 : 12;
1094 uint64_t nosched_val : 1; /**< - set when there is one or more POW entries on the nosched list. */
1095 uint64_t nosched_one : 1; /**< - set when there is exactly one POW entry on the nosched list. */
1096 uint64_t reserved_49 : 1;
1097 uint64_t nosched_head : 11; /**< - when nosched_val is set, indicates the first entry on the nosched list. */
1098 uint64_t reserved_37 : 1;
1099 uint64_t nosched_tail : 11; /**< - when nosched_val is set, indicates the last entry on the nosched list. */
1100 uint64_t des_val : 1; /**< - set when there is one or more descheduled heads on the descheduled list selected by qosgrp. */
1101 uint64_t des_one : 1; /**< - set when there is exactly one descheduled head on the descheduled list selected by qosgrp. */
1102 uint64_t reserved_23 : 1;
1103 uint64_t des_head : 11; /**< - when des_val is set, indicates the first descheduled head on the descheduled list selected by qosgrp. */
1104 uint64_t reserved_11 : 1;
1105 uint64_t des_tail : 11; /**< - when des_val is set, indicates the last descheduled head on the descheduled list selected by qosgrp. */
1107 uint64_t des_tail : 11;
1108 uint64_t reserved_11 : 1;
1109 uint64_t des_head : 11;
1110 uint64_t reserved_23 : 1;
1111 uint64_t des_one : 1;
1112 uint64_t des_val : 1;
1113 uint64_t nosched_tail : 11;
1114 uint64_t reserved_37 : 1;
1115 uint64_t nosched_head : 11;
1116 uint64_t reserved_49 : 1;
1117 uint64_t nosched_one : 1;
1118 uint64_t nosched_val : 1;
1119 uint64_t reserved_52_63 : 12;
1124 * Result for SSO Index/Pointer Load(opcode == IPL_FREE0/IPL_FREE1/IPL_FREE2)
1128 #ifdef __BIG_ENDIAN_BITFIELD
1129 uint64_t reserved_60_63 : 4;
1130 uint64_t qnum_head : 2; /**< - Subqueue with current head */
1131 uint64_t qnum_tail : 2; /**< - Subqueue with current tail */
1132 uint64_t reserved_28_55 : 28;
1133 uint64_t queue_val : 1; /**< - If set, one or more valid entries are in the queue. */
1134 uint64_t queue_one : 1; /**< - If set, exactly one valid entry is in the queue. */
1135 uint64_t reserved_24_25 : 2;
1136 uint64_t queue_head : 11; /**< - Index of entry at the head of the queue. */
1137 uint64_t reserved_11_12 : 2;
1138 uint64_t queue_tail : 11; /**< - Index of entry at the tail of the queue. */
1140 uint64_t queue_tail : 11;
1141 uint64_t reserved_11_12 : 2;
1142 uint64_t queue_head : 11;
1143 uint64_t reserved_24_25 : 2;
1144 uint64_t queue_one : 1;
1145 uint64_t queue_val : 1;
1146 uint64_t reserved_28_55 : 28;
1147 uint64_t qnum_tail : 2;
1148 uint64_t qnum_head : 2;
1149 uint64_t reserved_60_63 : 4;
1151 } sindexload1_cn68xx;
1154 * Result For POW Index/Pointer Load (get_rmt == 1/get_des_get_tail == 0)
1158 #ifdef __BIG_ENDIAN_BITFIELD
1159 uint64_t reserved_39_63 : 25;
1160 uint64_t rmt_is_head : 1; /**< Set when this DRAM list is the current head (i.e. is the next to
1161 be reloaded when the POW hardware reloads a POW entry from DRAM). The
1162 POW hardware alternates between the two DRAM lists associated with a QOS
1163 level when it reloads work from DRAM into the POW unit. */
1164 uint64_t rmt_val : 1; /**< Set when the DRAM portion of the input Q list selected by qosgrp
1165 contains one or more pieces of work. */
1166 uint64_t rmt_one : 1; /**< Set when the DRAM portion of the input Q list selected by qosgrp
1167 contains exactly one piece of work. */
1168 uint64_t rmt_head : 36; /**< When rmt_val is set, indicates the first piece of work on the
1169 DRAM input Q list selected by qosgrp. */
1171 uint64_t rmt_head : 36;
1172 uint64_t rmt_one : 1;
1173 uint64_t rmt_val : 1;
1174 uint64_t rmt_is_head : 1;
1175 uint64_t reserved_39_63 : 25;
1180 * Result For POW Index/Pointer Load (get_rmt == 1/get_des_get_tail == 1)
1184 #ifdef __BIG_ENDIAN_BITFIELD
1185 uint64_t reserved_39_63 : 25;
1186 uint64_t rmt_is_head : 1; /**< - set when this DRAM list is the current head (i.e. is the next to
1187 be reloaded when the POW hardware reloads a POW entry from DRAM). The
1188 POW hardware alternates between the two DRAM lists associated with a QOS
1189 level when it reloads work from DRAM into the POW unit. */
1190 uint64_t rmt_val : 1; /**< - set when the DRAM portion of the input Q list selected by qosgrp
1191 contains one or more pieces of work. */
1192 uint64_t rmt_one : 1; /**< - set when the DRAM portion of the input Q list selected by qosgrp
1193 contains exactly one piece of work. */
1194 uint64_t rmt_tail : 36; /**< - when rmt_val is set, indicates the last piece of work on the DRAM
1195 input Q list selected by qosgrp. */
1197 uint64_t rmt_tail : 36;
1198 uint64_t rmt_one : 1;
1199 uint64_t rmt_val : 1;
1200 uint64_t rmt_is_head : 1;
1201 uint64_t reserved_39_63 : 25;
1206 * Response to NULL_RD request loads
1210 #ifdef __BIG_ENDIAN_BITFIELD
1211 uint64_t unused : 62;
1212 uint64_t state : 2; /**< of type cvmx_pow_tag_type_t. state is one of the following:
1213 - CVMX_POW_TAG_TYPE_ORDERED
1214 - CVMX_POW_TAG_TYPE_ATOMIC
1215 - CVMX_POW_TAG_TYPE_NULL
1216 - CVMX_POW_TAG_TYPE_NULL_NULL */
1219 uint64_t unused : 62;
1223 } cvmx_pow_tag_load_resp_t;
1228 #ifdef __BIG_ENDIAN_BITFIELD
1229 uint64_t reserved_57_63 : 7;
1230 uint64_t index : 11;
1231 uint64_t reserved_45 : 1;
1235 uint64_t reserved_34_36 : 3;
1236 uint64_t tag_type : 2;
1240 uint64_t tag_type : 2;
1241 uint64_t reserved_34_36 : 3;
1245 uint64_t reserved_45 : 1;
1246 uint64_t index : 11;
1247 uint64_t reserved_57_63 : 7;
1250 } cvmx_pow_sl_tag_resp_t;
1253 * This structure describes the address used for stores to the POW.
1254 * The store address is meaningful on stores to the POW. The hardware assumes that an aligned
1255 * 64-bit store was used for all these stores.
1256 * Note the assumption that the work queue entry is aligned on an 8-byte
1257 * boundary (since the low-order 3 address bits must be zero).
1258 * Note that not all fields are used by all operations.
1260 * NOTE: The following is the behavior of the pending switch bit at the PP
1261 * for POW stores (i.e. when did<7:3> == 0xc)
1262 * - did<2:0> == 0 => pending switch bit is set
1263 * - did<2:0> == 1 => no effect on the pending switch bit
1264 * - did<2:0> == 3 => pending switch bit is cleared
1265 * - did<2:0> == 7 => no effect on the pending switch bit
1266 * - did<2:0> == others => must not be used
1267 * - No other loads/stores have an effect on the pending switch bit
1268 * - The switch bus from POW can clear the pending switch bit
1270 * NOTE: did<2:0> == 2 is used by the HW for a special single-cycle ADDWQ command
1271 * that only contains the pointer). SW must never use did<2:0> == 2.
1276 * Unsigned 64 bit integer representation of store address
1282 #ifdef __BIG_ENDIAN_BITFIELD
1283 uint64_t mem_reg : 2; /**< Memory region. Should be CVMX_IO_SEG in most cases */
1284 uint64_t reserved_49_61 : 13; /**< Must be zero */
1285 uint64_t is_io : 1; /**< Must be one */
1286 uint64_t did : 8; /**< Device ID of POW. Note that different sub-dids are used. */
1287 uint64_t reserved_36_39 : 4; /**< Must be zero */
1288 uint64_t addr : 36; /**< Address field. addr<2:0> must be zero */
1291 uint64_t reserved_36_39 : 4;
1294 uint64_t reserved_49_61 : 13;
1295 uint64_t mem_reg : 2;
1298 } cvmx_pow_tag_store_addr_t;
1301 * decode of the store data when an IOBDMA SENDSINGLE is sent to POW
1309 #ifdef __BIG_ENDIAN_BITFIELD
1310 uint64_t scraddr : 8; /**< the (64-bit word) location in scratchpad to write to (if len != 0) */
1311 uint64_t len : 8; /**< the number of words in the response (0 => no response) */
1312 uint64_t did : 8; /**< the ID of the device on the non-coherent bus */
1313 uint64_t unused :36;
1314 uint64_t wait : 1; /**< if set, don't return load response until work is available */
1315 uint64_t unused2 : 3;
1317 uint64_t unused2 : 3;
1319 uint64_t unused :36;
1322 uint64_t scraddr : 8;
1326 } cvmx_pow_iobdma_store_t;
1329 /* CSR typedefs have been moved to cvmx-pow-defs.h */
1332 * Get the POW tag for this core. This returns the current
1333 * tag type, tag, group, and POW entry index associated with
1334 * this core. Index is only valid if the tag type isn't NULL_NULL.
1335 * If a tag switch is pending this routine returns the tag before
1336 * the tag switch, not after.
1338 * @return Current tag
1340 static inline cvmx_pow_tag_info_t cvmx_pow_get_current_tag(void)
1342 cvmx_pow_load_addr_t load_addr;
1343 cvmx_pow_tag_info_t result;
1345 if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
1346 cvmx_pow_sl_tag_resp_t load_resp;
1348 load_addr.sstatus_cn68xx.mem_region = CVMX_IO_SEG;
1349 load_addr.sstatus_cn68xx.is_io = 1;
1350 load_addr.sstatus_cn68xx.did = CVMX_OCT_DID_TAG_TAG5;
1351 load_addr.sstatus_cn68xx.coreid = cvmx_get_core_num();
1352 load_addr.sstatus_cn68xx.opcode = 3;
1353 load_resp.u64 = cvmx_read_csr(load_addr.u64);
1354 result.grp = load_resp.s.grp;
1355 result.index = load_resp.s.index;
1356 result.tag_type = load_resp.s.tag_type;
1357 result.tag = load_resp.s.tag;
1359 cvmx_pow_tag_load_resp_t load_resp;
1361 load_addr.sstatus.mem_region = CVMX_IO_SEG;
1362 load_addr.sstatus.is_io = 1;
1363 load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
1364 load_addr.sstatus.coreid = cvmx_get_core_num();
1365 load_addr.sstatus.get_cur = 1;
1366 load_resp.u64 = cvmx_read_csr(load_addr.u64);
1367 result.grp = load_resp.s_sstatus2.grp;
1368 result.index = load_resp.s_sstatus2.index;
1369 result.tag_type = load_resp.s_sstatus2.tag_type;
1370 result.tag = load_resp.s_sstatus2.tag;
1376 * Get the POW WQE for this core. This returns the work queue
1377 * entry currently associated with this core.
1379 * @return WQE pointer
1381 static inline cvmx_wqe_t *cvmx_pow_get_current_wqp(void)
1383 cvmx_pow_load_addr_t load_addr;
1384 cvmx_pow_tag_load_resp_t load_resp;
1386 if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
1388 load_addr.sstatus_cn68xx.mem_region = CVMX_IO_SEG;
1389 load_addr.sstatus_cn68xx.is_io = 1;
1390 load_addr.sstatus_cn68xx.did = CVMX_OCT_DID_TAG_TAG5;
1391 load_addr.sstatus_cn68xx.coreid = cvmx_get_core_num();
1392 load_addr.sstatus_cn68xx.opcode = 3;
1393 load_resp.u64 = cvmx_read_csr(load_addr.u64);
1394 if (load_resp.s_sstatus3_cn68xx.wqp)
1395 return (cvmx_wqe_t*)cvmx_phys_to_ptr(load_resp.s_sstatus3_cn68xx.wqp);
1397 return (cvmx_wqe_t*)0;
1400 load_addr.sstatus.mem_region = CVMX_IO_SEG;
1401 load_addr.sstatus.is_io = 1;
1402 load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
1403 load_addr.sstatus.coreid = cvmx_get_core_num();
1404 load_addr.sstatus.get_cur = 1;
1405 load_addr.sstatus.get_wqp = 1;
1406 load_resp.u64 = cvmx_read_csr(load_addr.u64);
1407 return (cvmx_wqe_t*)cvmx_phys_to_ptr(load_resp.s_sstatus4.wqp);
1414 * Print a warning if a tag switch is pending for this core
1416 * @param function Function name checking for a pending tag switch
1418 static inline void __cvmx_pow_warn_if_pending_switch(const char *function)
1420 uint64_t switch_complete;
1421 CVMX_MF_CHORD(switch_complete);
1422 cvmx_warn_if(!switch_complete, "%s called with tag switch in progress\n", function);
1427 * Waits for a tag switch to complete by polling the completion bit.
1428 * Note that switches to NULL complete immediately and do not need
1431 static inline void cvmx_pow_tag_sw_wait(void)
1433 const uint64_t MAX_CYCLES = 1ull<<31;
1434 uint64_t switch_complete;
1435 uint64_t start_cycle = cvmx_get_cycle();
1438 CVMX_MF_CHORD(switch_complete);
1439 if (cvmx_unlikely(switch_complete))
1441 if (cvmx_unlikely(cvmx_get_cycle() > start_cycle + MAX_CYCLES))
1443 cvmx_dprintf("WARNING: Tag switch is taking a long time, possible deadlock\n");
1444 start_cycle = -MAX_CYCLES-1;
1451 * Synchronous work request. Requests work from the POW.
1452 * This function does NOT wait for previous tag switches to complete,
1453 * so the caller must ensure that there is not a pending tag switch.
1455 * @param wait When set, call stalls until work becomes available, or times out.
1456 * If not set, returns immediately.
1458 * @return Returns the WQE pointer from POW. Returns NULL if no work was available.
1460 static inline cvmx_wqe_t * cvmx_pow_work_request_sync_nocheck(cvmx_pow_wait_t wait)
1462 cvmx_pow_load_addr_t ptr;
1463 cvmx_pow_tag_load_resp_t result;
1465 if (CVMX_ENABLE_POW_CHECKS)
1466 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1469 ptr.swork.mem_region = CVMX_IO_SEG;
1470 ptr.swork.is_io = 1;
1471 ptr.swork.did = CVMX_OCT_DID_TAG_SWTAG;
1472 ptr.swork.wait = wait;
1474 result.u64 = cvmx_read_csr(ptr.u64);
1476 if (result.s_work.no_work)
1479 return (cvmx_wqe_t*)cvmx_phys_to_ptr(result.s_work.addr);
1484 * Synchronous work request. Requests work from the POW.
1485 * This function waits for any previous tag switch to complete before
1486 * requesting the new work.
1488 * @param wait When set, call stalls until work becomes available, or times out.
1489 * If not set, returns immediately.
1491 * @return Returns the WQE pointer from POW. Returns NULL if no work was available.
1493 static inline cvmx_wqe_t * cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
1495 if (CVMX_ENABLE_POW_CHECKS)
1496 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1498 /* Must not have a switch pending when requesting work */
1499 cvmx_pow_tag_sw_wait();
1500 return(cvmx_pow_work_request_sync_nocheck(wait));
1506 * Synchronous null_rd request. Requests a switch out of NULL_NULL POW state.
1507 * This function waits for any previous tag switch to complete before
1508 * requesting the null_rd.
1510 * @return Returns the POW state of type cvmx_pow_tag_type_t.
1512 static inline cvmx_pow_tag_type_t cvmx_pow_work_request_null_rd(void)
1514 cvmx_pow_load_addr_t ptr;
1515 cvmx_pow_tag_load_resp_t result;
1517 if (CVMX_ENABLE_POW_CHECKS)
1518 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1520 /* Must not have a switch pending when requesting work */
1521 cvmx_pow_tag_sw_wait();
1524 ptr.snull_rd.mem_region = CVMX_IO_SEG;
1525 ptr.snull_rd.is_io = 1;
1526 ptr.snull_rd.did = CVMX_OCT_DID_TAG_NULL_RD;
1528 result.u64 = cvmx_read_csr(ptr.u64);
1530 return (cvmx_pow_tag_type_t)result.s_null_rd.state;
1535 * Asynchronous work request. Work is requested from the POW unit, and should later
1536 * be checked with function cvmx_pow_work_response_async.
1537 * This function does NOT wait for previous tag switches to complete,
1538 * so the caller must ensure that there is not a pending tag switch.
1540 * @param scr_addr Scratch memory address that response will be returned to,
1541 * which is either a valid WQE, or a response with the invalid bit set.
1542 * Byte address, must be 8 byte aligned.
1543 * @param wait 1 to cause response to wait for work to become available (or timeout)
1544 * 0 to cause response to return immediately
1546 static inline void cvmx_pow_work_request_async_nocheck(int scr_addr, cvmx_pow_wait_t wait)
1548 cvmx_pow_iobdma_store_t data;
1550 if (CVMX_ENABLE_POW_CHECKS)
1551 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1553 /* scr_addr must be 8 byte aligned */
1555 data.s.scraddr = scr_addr >> 3;
1557 data.s.did = CVMX_OCT_DID_TAG_SWTAG;
1559 cvmx_send_single(data.u64);
1562 * Asynchronous work request. Work is requested from the POW unit, and should later
1563 * be checked with function cvmx_pow_work_response_async.
1564 * This function waits for any previous tag switch to complete before
1565 * requesting the new work.
1567 * @param scr_addr Scratch memory address that response will be returned to,
1568 * which is either a valid WQE, or a response with the invalid bit set.
1569 * Byte address, must be 8 byte aligned.
1570 * @param wait 1 to cause response to wait for work to become available (or timeout)
1571 * 0 to cause response to return immediately
1573 static inline void cvmx_pow_work_request_async(int scr_addr, cvmx_pow_wait_t wait)
1575 if (CVMX_ENABLE_POW_CHECKS)
1576 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1578 /* Must not have a switch pending when requesting work */
1579 cvmx_pow_tag_sw_wait();
1580 cvmx_pow_work_request_async_nocheck(scr_addr, wait);
1585 * Gets result of asynchronous work request. Performs a IOBDMA sync
1586 * to wait for the response.
1588 * @param scr_addr Scratch memory address to get result from
1589 * Byte address, must be 8 byte aligned.
1590 * @return Returns the WQE from the scratch register, or NULL if no work was available.
1592 static inline cvmx_wqe_t * cvmx_pow_work_response_async(int scr_addr)
1594 cvmx_pow_tag_load_resp_t result;
1597 result.u64 = cvmx_scratch_read64(scr_addr);
1599 if (result.s_work.no_work)
1602 return (cvmx_wqe_t*)cvmx_phys_to_ptr(result.s_work.addr);
1607 * Checks if a work queue entry pointer returned by a work
1608 * request is valid. It may be invalid due to no work
1609 * being available or due to a timeout.
1611 * @param wqe_ptr pointer to a work queue entry returned by the POW
1613 * @return 0 if pointer is valid
1614 * 1 if invalid (no work was returned)
1616 static inline uint64_t cvmx_pow_work_invalid(cvmx_wqe_t *wqe_ptr)
1618 return (wqe_ptr == NULL);
1624 * Starts a tag switch to the provided tag value and tag type. Completion for
1625 * the tag switch must be checked for separately.
1626 * This function does NOT update the
1627 * work queue entry in dram to match tag value and type, so the application must
1628 * keep track of these if they are important to the application.
1629 * This tag switch command must not be used for switches to NULL, as the tag
1630 * switch pending bit will be set by the switch request, but never cleared by the
1633 * NOTE: This should not be used when switching from a NULL tag. Use
1634 * cvmx_pow_tag_sw_full() instead.
1636 * This function does no checks, so the caller must ensure that any previous tag
1637 * switch has completed.
1639 * @param tag new tag value
1640 * @param tag_type new tag type (ordered or atomic)
1642 static inline void cvmx_pow_tag_sw_nocheck(uint32_t tag, cvmx_pow_tag_type_t tag_type)
1645 cvmx_pow_tag_req_t tag_req;
1647 if (CVMX_ENABLE_POW_CHECKS)
1649 cvmx_pow_tag_info_t current_tag;
1650 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1651 current_tag = cvmx_pow_get_current_tag();
1652 cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__);
1653 cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called with NULL tag\n", __FUNCTION__);
1654 cvmx_warn_if((current_tag.tag_type == tag_type) && (current_tag.tag == tag), "%s called to perform a tag switch to the same tag\n", __FUNCTION__);
1655 cvmx_warn_if(tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\n", __FUNCTION__);
1658 /* Note that WQE in DRAM is not updated here, as the POW does not read from DRAM
1659 ** once the WQE is in flight. See hardware manual for complete details.
1660 ** It is the application's responsibility to keep track of the current tag
1661 ** value if that is important.
1665 if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
1666 tag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_SWTAG;
1667 tag_req.s_cn68xx_other.tag = tag;
1668 tag_req.s_cn68xx_other.type = tag_type;
1670 tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_SWTAG;
1671 tag_req.s_cn38xx.tag = tag;
1672 tag_req.s_cn38xx.type = tag_type;
1676 ptr.sio.mem_region = CVMX_IO_SEG;
1678 ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG;
1680 /* once this store arrives at POW, it will attempt the switch
1681 software must wait for the switch to complete separately */
1682 cvmx_write_io(ptr.u64, tag_req.u64);
1687 * Starts a tag switch to the provided tag value and tag type. Completion for
1688 * the tag switch must be checked for separately.
1689 * This function does NOT update the
1690 * work queue entry in dram to match tag value and type, so the application must
1691 * keep track of these if they are important to the application.
1692 * This tag switch command must not be used for switches to NULL, as the tag
1693 * switch pending bit will be set by the switch request, but never cleared by the
1696 * NOTE: This should not be used when switching from a NULL tag. Use
1697 * cvmx_pow_tag_sw_full() instead.
1699 * This function waits for any previous tag switch to complete, and also
1700 * displays an error on tag switches to NULL.
1702 * @param tag new tag value
1703 * @param tag_type new tag type (ordered or atomic)
1705 static inline void cvmx_pow_tag_sw(uint32_t tag, cvmx_pow_tag_type_t tag_type)
1707 if (CVMX_ENABLE_POW_CHECKS)
1708 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1710 /* Note that WQE in DRAM is not updated here, as the POW does not read from DRAM
1711 ** once the WQE is in flight. See hardware manual for complete details.
1712 ** It is the application's responsibility to keep track of the current tag
1713 ** value if that is important.
1716 /* Ensure that there is not a pending tag switch, as a tag switch cannot be started
1717 ** if a previous switch is still pending. */
1718 cvmx_pow_tag_sw_wait();
1719 cvmx_pow_tag_sw_nocheck(tag, tag_type);
1724 * Starts a tag switch to the provided tag value and tag type. Completion for
1725 * the tag switch must be checked for separately.
1726 * This function does NOT update the
1727 * work queue entry in dram to match tag value and type, so the application must
1728 * keep track of these if they are important to the application.
1729 * This tag switch command must not be used for switches to NULL, as the tag
1730 * switch pending bit will be set by the switch request, but never cleared by the
1733 * This function must be used for tag switches from NULL.
1735 * This function does no checks, so the caller must ensure that any previous tag
1736 * switch has completed.
1738 * @param wqp pointer to work queue entry to submit. This entry is updated to match the other parameters
1739 * @param tag tag value to be assigned to work queue entry
1740 * @param tag_type type of tag
1741 * @param group group value for the work queue entry.
1743 static inline void cvmx_pow_tag_sw_full_nocheck(cvmx_wqe_t *wqp, uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t group)
1746 cvmx_pow_tag_req_t tag_req;
1748 if (CVMX_ENABLE_POW_CHECKS)
1750 cvmx_pow_tag_info_t current_tag;
1751 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1752 current_tag = cvmx_pow_get_current_tag();
1753 cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__);
1754 cvmx_warn_if((current_tag.tag_type == tag_type) && (current_tag.tag == tag), "%s called to perform a tag switch to the same tag\n", __FUNCTION__);
1755 cvmx_warn_if(tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\n", __FUNCTION__);
1756 if ((wqp != cvmx_phys_to_ptr(0x80)) && cvmx_pow_get_current_wqp())
1757 cvmx_warn_if(wqp != cvmx_pow_get_current_wqp(), "%s passed WQE(%p) doesn't match the address in the POW(%p)\n", __FUNCTION__, wqp, cvmx_pow_get_current_wqp());
1760 /* Note that WQE in DRAM is not updated here, as the POW does not read from DRAM
1761 ** once the WQE is in flight. See hardware manual for complete details.
1762 ** It is the application's responsibility to keep track of the current tag
1763 ** value if that is important.
1767 if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
1768 tag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_SWTAG_FULL;
1769 tag_req.s_cn68xx_other.tag = tag;
1770 tag_req.s_cn68xx_other.type = tag_type;
1771 tag_req.s_cn68xx_other.grp = group;
1773 tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_SWTAG_FULL;
1774 tag_req.s_cn38xx.tag = tag;
1775 tag_req.s_cn38xx.type = tag_type;
1776 tag_req.s_cn38xx.grp = group;
1780 ptr.sio.mem_region = CVMX_IO_SEG;
1782 ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG;
1783 ptr.sio.offset = CAST64(wqp);
1785 /* once this store arrives at POW, it will attempt the switch
1786 software must wait for the switch to complete separately */
1787 cvmx_write_io(ptr.u64, tag_req.u64);
1792 * Starts a tag switch to the provided tag value and tag type. Completion for
1793 * the tag switch must be checked for separately.
1794 * This function does NOT update the
1795 * work queue entry in dram to match tag value and type, so the application must
1796 * keep track of these if they are important to the application.
1797 * This tag switch command must not be used for switches to NULL, as the tag
1798 * switch pending bit will be set by the switch request, but never cleared by the
1801 * This function must be used for tag switches from NULL.
1803 * This function waits for any pending tag switches to complete
1804 * before requesting the tag switch.
1806 * @param wqp pointer to work queue entry to submit. This entry is updated to match the other parameters
1807 * @param tag tag value to be assigned to work queue entry
1808 * @param tag_type type of tag
1809 * @param group group value for the work queue entry.
1811 static inline void cvmx_pow_tag_sw_full(cvmx_wqe_t *wqp, uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t group)
1813 if (CVMX_ENABLE_POW_CHECKS)
1814 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1816 /* Ensure that there is not a pending tag switch, as a tag switch cannot be started
1817 ** if a previous switch is still pending. */
1818 cvmx_pow_tag_sw_wait();
1819 cvmx_pow_tag_sw_full_nocheck(wqp, tag, tag_type, group);
/**
 * Switch to a NULL tag, which ends any ordering or
 * synchronization provided by the POW for the current
 * work queue entry. This operation completes immediately,
 * so completion should not be waited for.
 * This function does NOT wait for previous tag switches to complete,
 * so the caller must ensure that any previous tag switches have completed.
 */
static inline void cvmx_pow_tag_sw_null_nocheck(void)
{
    cvmx_addr_t ptr;
    cvmx_pow_tag_req_t tag_req;

    if (CVMX_ENABLE_POW_CHECKS)
    {
        cvmx_pow_tag_info_t current_tag;
        __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
        current_tag = cvmx_pow_get_current_tag();
        cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__);
        cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called when we already have a NULL tag\n", __FUNCTION__);
    }

    /* Clear the whole request word so reserved fields are zero */
    tag_req.u64 = 0;
    if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
        tag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_SWTAG;
        tag_req.s_cn68xx_other.type = CVMX_POW_TAG_TYPE_NULL;
    }
    else {
        tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_SWTAG;
        tag_req.s_cn38xx.type = CVMX_POW_TAG_TYPE_NULL;
    }

    /* Build the I/O address that targets the POW tag-switch operation */
    ptr.u64 = 0;
    ptr.sio.mem_region = CVMX_IO_SEG;
    ptr.sio.is_io = 1;
    ptr.sio.did = CVMX_OCT_DID_TAG_TAG1;

    cvmx_write_io(ptr.u64, tag_req.u64);

    /* switch to NULL completes immediately */
}
/**
 * Switch to a NULL tag, which ends any ordering or
 * synchronization provided by the POW for the current
 * work queue entry. This operation completes immediately,
 * so completion should not be waited for.
 * This function waits for any pending tag switches to complete
 * before requesting the switch to NULL.
 */
static inline void cvmx_pow_tag_sw_null(void)
{
    if (CVMX_ENABLE_POW_CHECKS)
        __cvmx_pow_warn_if_pending_switch(__FUNCTION__);

    /* Ensure that there is not a pending tag switch, as a tag switch cannot be started
    ** if a previous switch is still pending. */
    cvmx_pow_tag_sw_wait();
    cvmx_pow_tag_sw_null_nocheck();

    /* switch to NULL completes immediately */
}
/**
 * Submits work to an input queue. This function updates the work queue entry in DRAM to match
 * the arguments given.
 * Note that the tag provided is for the work queue entry submitted, and is unrelated to the tag that
 * the core currently holds.
 *
 * @param wqp      pointer to work queue entry to submit. This entry is updated to match the other parameters
 * @param tag      tag value to be assigned to work queue entry
 * @param tag_type type of tag
 * @param qos      Input queue to add to.
 * @param grp      group value for the work queue entry.
 */
static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t qos, uint64_t grp)
{
    cvmx_addr_t ptr;
    cvmx_pow_tag_req_t tag_req;

    /* Clear the whole request word so reserved fields are zero */
    tag_req.u64 = 0;

    wqp->word1.s.tag = tag;
    wqp->word1.s.tag_type = tag_type;

    if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
        /* Reset all reserved bits */
        wqp->word1.cn68xx.zero_0 = 0;
        wqp->word1.cn68xx.zero_1 = 0;
        wqp->word1.cn68xx.zero_2 = 0;
        wqp->word1.cn68xx.qos = qos;
        wqp->word1.cn68xx.grp = grp;

        tag_req.s_cn68xx_add.op = CVMX_POW_TAG_OP_ADDWQ;
        tag_req.s_cn68xx_add.type = tag_type;
        tag_req.s_cn68xx_add.tag = tag;
        tag_req.s_cn68xx_add.qos = qos;
        tag_req.s_cn68xx_add.grp = grp;
    }
    else {
        /* Reset all reserved bits */
        wqp->word1.cn38xx.zero_2 = 0;
        wqp->word1.cn38xx.qos = qos;
        wqp->word1.cn38xx.grp = grp;

        tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_ADDWQ;
        tag_req.s_cn38xx.type = tag_type;
        tag_req.s_cn38xx.tag = tag;
        tag_req.s_cn38xx.qos = qos;
        tag_req.s_cn38xx.grp = grp;
    }

    /* The physical address of the WQE rides in the I/O address offset */
    ptr.u64 = 0;
    ptr.sio.mem_region = CVMX_IO_SEG;
    ptr.sio.is_io = 1;
    ptr.sio.did = CVMX_OCT_DID_TAG_TAG1;
    ptr.sio.offset = cvmx_ptr_to_phys(wqp);

    /* SYNC write to memory before the work submit. This is necessary
    ** as POW may read values from DRAM at this time */
    CVMX_SYNCWS;
    cvmx_write_io(ptr.u64, tag_req.u64);
}
/**
 * This function sets the group mask for a core. The group mask
 * indicates which groups each core will accept work from.
 *
 * @param core_num core to apply mask to
 * @param mask     Group mask. There are 16 groups, so only bits 0-15 are valid,
 *                 representing groups 0-15.
 *                 Each 1 bit in the mask enables the core to accept work from
 *                 the corresponding group.
 */
static inline void cvmx_pow_set_group_mask(uint64_t core_num, uint64_t mask)
{
    if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
    {
        /* NOTE(review): on the SSO path the register is written without a
        ** prior read; presumably grp_msk is the only field in this CSR —
        ** confirm against the cvmx_sso_ppx_grp_msk_t layout. */
        cvmx_sso_ppx_grp_msk_t grp_msk;
        grp_msk.s.grp_msk = mask;
        cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(core_num), grp_msk.u64);
    }
    else
    {
        /* Read-modify-write: this CSR also holds the QOS priority fields */
        cvmx_pow_pp_grp_mskx_t grp_msk;
        grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num));
        grp_msk.s.grp_msk = mask;
        cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);
    }
}
/**
 * This function sets POW static priorities for a core. Each input queue has
 * an associated priority value.
 *
 * @param core_num core to apply priorities to
 * @param priority Vector of 8 priorities, one per POW Input Queue (0-7).
 *                 Highest priority is 0 and lowest is 7. A priority value
 *                 of 0xF instructs POW to skip the Input Queue when
 *                 scheduling to this specific core.
 *                 NOTE: priorities should not have gaps in values, meaning
 *                 {0,1,1,1,1,1,1,1} is a valid configuration while
 *                 {0,2,2,2,2,2,2,2} is not.
 */
static inline void cvmx_pow_set_priority(uint64_t core_num, const uint8_t priority[])
{
    if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
        return;

    /* Detect gaps between priorities and flag error */
    {
        int i;
        uint32_t prio_mask = 0;

        for (i = 0; i < 8; i++)
            if (priority[i] != 0xF)
                prio_mask |= 1 << priority[i];

        /* A gap-free priority set 0..k yields a mask of the form
        ** (1 << popcount(mask)) - 1; any other pattern has a hole. */
        if (prio_mask ^ ((1 << cvmx_pop(prio_mask)) - 1))
        {
            cvmx_dprintf("ERROR: POW static priorities should be contiguous (0x%llx)\n", (unsigned long long)prio_mask);
            return;
        }
    }

    /* POW priorities are supported on CN5xxx and later */
    if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
    {
        cvmx_sso_ppx_qos_pri_t qos_pri;

        qos_pri.u64 = cvmx_read_csr(CVMX_SSO_PPX_QOS_PRI(core_num));
        qos_pri.s.qos0_pri = priority[0];
        qos_pri.s.qos1_pri = priority[1];
        qos_pri.s.qos2_pri = priority[2];
        qos_pri.s.qos3_pri = priority[3];
        qos_pri.s.qos4_pri = priority[4];
        qos_pri.s.qos5_pri = priority[5];
        qos_pri.s.qos6_pri = priority[6];
        qos_pri.s.qos7_pri = priority[7];
        cvmx_write_csr(CVMX_SSO_PPX_QOS_PRI(core_num), qos_pri.u64);
    }
    else
    {
        /* Priorities share a CSR with the group mask: read-modify-write */
        cvmx_pow_pp_grp_mskx_t grp_msk;

        grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num));
        grp_msk.s.qos0_pri = priority[0];
        grp_msk.s.qos1_pri = priority[1];
        grp_msk.s.qos2_pri = priority[2];
        grp_msk.s.qos3_pri = priority[3];
        grp_msk.s.qos4_pri = priority[4];
        grp_msk.s.qos5_pri = priority[5];
        grp_msk.s.qos6_pri = priority[6];
        grp_msk.s.qos7_pri = priority[7];

        cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);
    }
}
/**
 * Performs a tag switch and then an immediate deschedule. This completes
 * immediately, so completion must not be waited for. This function does NOT
 * update the wqe in DRAM to match arguments.
 *
 * This function does NOT wait for any prior tag switches to complete, so the
 * calling code must do this.
 *
 * Note the following CAVEAT of the Octeon HW behavior when
 * re-scheduling DE-SCHEDULEd items whose (next) state is ORDERED:
 * - If there are no switches pending at the time that the
 *   HW executes the de-schedule, the HW will only re-schedule
 *   the head of the FIFO associated with the given tag. This
 *   means that in many respects, the HW treats this ORDERED
 *   tag as an ATOMIC tag. Note that in the SWTAG_DESCH
 *   case (to an ORDERED tag), the HW will do the switch
 *   before the deschedule whenever it is possible to do
 *   the switch immediately, so it may often look like
 *   this case.
 * - If there is a pending switch to ORDERED at the time
 *   the HW executes the de-schedule, the HW will perform
 *   the switch at the time it re-schedules, and will be
 *   able to reschedule any/all of the entries with the
 *   same tag.
 * Due to this behavior, the RECOMMENDATION to software is
 * that they have a (next) state of ATOMIC when they
 * DE-SCHEDULE. If an ORDERED tag is what was really desired,
 * SW can choose to immediately switch to an ORDERED tag
 * after the work (that has an ATOMIC tag) is re-scheduled.
 * Note that since there are never any tag switches pending
 * when the HW re-schedules, this switch can be IMMEDIATE upon
 * the reception of the pointer during the re-schedule.
 *
 * @param tag      New tag value
 * @param tag_type New tag type
 * @param group    New group value
 * @param no_sched Control whether this work queue entry will be rescheduled.
 *                 - 1 : don't schedule this work
 *                 - 0 : allow this work to be scheduled.
 */
static inline void cvmx_pow_tag_sw_desched_nocheck(uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t group, uint64_t no_sched)
{
    cvmx_addr_t ptr;
    cvmx_pow_tag_req_t tag_req;

    if (CVMX_ENABLE_POW_CHECKS)
    {
        cvmx_pow_tag_info_t current_tag;
        __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
        current_tag = cvmx_pow_get_current_tag();
        cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__);
        cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called with NULL tag. Deschedule not allowed from NULL state\n", __FUNCTION__);
        cvmx_warn_if((current_tag.tag_type != CVMX_POW_TAG_TYPE_ATOMIC) && (tag_type != CVMX_POW_TAG_TYPE_ATOMIC), "%s called where neither the before or after tag is ATOMIC\n", __FUNCTION__);
    }

    /* Clear the whole request word so reserved fields are zero */
    tag_req.u64 = 0;
    if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
        tag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_SWTAG_DESCH;
        tag_req.s_cn68xx_other.tag = tag;
        tag_req.s_cn68xx_other.type = tag_type;
        tag_req.s_cn68xx_other.grp = group;
        tag_req.s_cn68xx_other.no_sched = no_sched;
    }
    else {
        tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_SWTAG_DESCH;
        tag_req.s_cn38xx.tag = tag;
        tag_req.s_cn38xx.type = tag_type;
        tag_req.s_cn38xx.grp = group;
        tag_req.s_cn38xx.no_sched = no_sched;
    }

    ptr.u64 = 0;
    ptr.sio.mem_region = CVMX_IO_SEG;
    ptr.sio.is_io = 1;
    ptr.sio.did = CVMX_OCT_DID_TAG_TAG3;

    cvmx_write_io(ptr.u64, tag_req.u64); /* since TAG3 is used, this store will clear the local pending switch bit */
}
/**
 * Performs a tag switch and then an immediate deschedule. This completes
 * immediately, so completion must not be waited for. This function does NOT
 * update the wqe in DRAM to match arguments.
 *
 * This function waits for any prior tag switches to complete, so the
 * calling code may call this function with a pending tag switch.
 *
 * Note the following CAVEAT of the Octeon HW behavior when
 * re-scheduling DE-SCHEDULEd items whose (next) state is ORDERED:
 * - If there are no switches pending at the time that the
 *   HW executes the de-schedule, the HW will only re-schedule
 *   the head of the FIFO associated with the given tag. This
 *   means that in many respects, the HW treats this ORDERED
 *   tag as an ATOMIC tag. Note that in the SWTAG_DESCH
 *   case (to an ORDERED tag), the HW will do the switch
 *   before the deschedule whenever it is possible to do
 *   the switch immediately, so it may often look like
 *   this case.
 * - If there is a pending switch to ORDERED at the time
 *   the HW executes the de-schedule, the HW will perform
 *   the switch at the time it re-schedules, and will be
 *   able to reschedule any/all of the entries with the
 *   same tag.
 * Due to this behavior, the RECOMMENDATION to software is
 * that they have a (next) state of ATOMIC when they
 * DE-SCHEDULE. If an ORDERED tag is what was really desired,
 * SW can choose to immediately switch to an ORDERED tag
 * after the work (that has an ATOMIC tag) is re-scheduled.
 * Note that since there are never any tag switches pending
 * when the HW re-schedules, this switch can be IMMEDIATE upon
 * the reception of the pointer during the re-schedule.
 *
 * @param tag      New tag value
 * @param tag_type New tag type
 * @param group    New group value
 * @param no_sched Control whether this work queue entry will be rescheduled.
 *                 - 1 : don't schedule this work
 *                 - 0 : allow this work to be scheduled.
 */
static inline void cvmx_pow_tag_sw_desched(uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t group, uint64_t no_sched)
{
    if (CVMX_ENABLE_POW_CHECKS)
        __cvmx_pow_warn_if_pending_switch(__FUNCTION__);

    /* Need to make sure any writes to the work queue entry are complete */
    CVMX_SYNCWS;

    /* Ensure that there is not a pending tag switch, as a tag switch cannot be started
    ** if a previous switch is still pending. */
    cvmx_pow_tag_sw_wait();
    cvmx_pow_tag_sw_desched_nocheck(tag, tag_type, group, no_sched);
}
/**
 * Deschedules the current work queue entry.
 *
 * @param no_sched no schedule flag value to be set on the work queue entry. If this is set
 *                 the entry will not be rescheduled.
 */
static inline void cvmx_pow_desched(uint64_t no_sched)
{
    cvmx_addr_t ptr;
    cvmx_pow_tag_req_t tag_req;

    if (CVMX_ENABLE_POW_CHECKS)
    {
        cvmx_pow_tag_info_t current_tag;
        __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
        current_tag = cvmx_pow_get_current_tag();
        cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__);
        cvmx_warn_if(current_tag.tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called with NULL tag. Deschedule not expected from NULL state\n", __FUNCTION__);
    }

    /* Need to make sure any writes to the work queue entry are complete */
    CVMX_SYNCWS;

    /* Clear the whole request word so reserved fields are zero */
    tag_req.u64 = 0;
    if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE)) {
        tag_req.s_cn68xx_other.op = CVMX_POW_TAG_OP_DESCH;
        tag_req.s_cn68xx_other.no_sched = no_sched;
    }
    else {
        tag_req.s_cn38xx.op = CVMX_POW_TAG_OP_DESCH;
        tag_req.s_cn38xx.no_sched = no_sched;
    }

    ptr.u64 = 0;
    ptr.sio.mem_region = CVMX_IO_SEG;
    ptr.sio.is_io = 1;
    ptr.sio.did = CVMX_OCT_DID_TAG_TAG3;

    cvmx_write_io(ptr.u64, tag_req.u64); /* since TAG3 is used, this store will clear the local pending switch bit */
}
2230 /***********************************************************************************************
2231 ** Define usage of bits within the 32 bit tag values.
2232 ***********************************************************************************************/
2235 * Number of bits of the tag used by software. The SW bits
* are always a contiguous block of high-order bits, starting at bit 31.
2237 * The hardware bits are always the low bits. By default, the top 8 bits
2238 * of the tag are reserved for software, and the low 24 are set by the IPD unit.
2240 #define CVMX_TAG_SW_BITS (8)
2241 #define CVMX_TAG_SW_SHIFT (32 - CVMX_TAG_SW_BITS)
2243 /* Below is the list of values for the top 8 bits of the tag. */
2244 #define CVMX_TAG_SW_BITS_INTERNAL 0x1 /* Tag values with top byte of this value are reserved for internal executive uses */
2245 /* The executive divides the remaining 24 bits as follows:
2246 ** * the upper 8 bits (bits 23 - 16 of the tag) define a subgroup
** * the lower 16 bits (bits 15 - 0 of the tag) are the value within the subgroup
2248 ** Note that this section describes the format of tags generated by software - refer to the
2249 ** hardware documentation for a description of the tags values generated by the packet input
2251 ** Subgroups are defined here */
2252 #define CVMX_TAG_SUBGROUP_MASK 0xFFFF /* Mask for the value portion of the tag */
2253 #define CVMX_TAG_SUBGROUP_SHIFT 16
2254 #define CVMX_TAG_SUBGROUP_PKO 0x1
2257 /* End of executive tag subgroup definitions */
2259 /* The remaining values software bit values 0x2 - 0xff are available for application use */
2264 * This function creates a 32 bit tag value from the two values provided.
2266 * @param sw_bits The upper bits (number depends on configuration) are set to this value. The remainder of
2267 * bits are set by the hw_bits parameter.
2268 * @param hw_bits The lower bits (number depends on configuration) are set to this value. The remainder of
2269 * bits are set by the sw_bits parameter.
2271 * @return 32 bit value of the combined hw and sw bits.
2273 static inline uint32_t cvmx_pow_tag_compose(uint64_t sw_bits, uint64_t hw_bits)
2275 return((((sw_bits & cvmx_build_mask(CVMX_TAG_SW_BITS)) << CVMX_TAG_SW_SHIFT) | (hw_bits & cvmx_build_mask(32 - CVMX_TAG_SW_BITS))));
2278 * Extracts the bits allocated for software use from the tag
2280 * @param tag 32 bit tag value
2282 * @return N bit software tag value, where N is configurable with the CVMX_TAG_SW_BITS define
2284 static inline uint32_t cvmx_pow_tag_get_sw_bits(uint64_t tag)
2286 return((tag >> (32 - CVMX_TAG_SW_BITS)) & cvmx_build_mask(CVMX_TAG_SW_BITS));
2290 * Extracts the bits allocated for hardware use from the tag
2292 * @param tag 32 bit tag value
2294 * @return (32 - N) bit software tag value, where N is configurable with the CVMX_TAG_SW_BITS define
2296 static inline uint32_t cvmx_pow_tag_get_hw_bits(uint64_t tag)
2298 return(tag & cvmx_build_mask(32 - CVMX_TAG_SW_BITS));
2302 * Store the current POW internal state into the supplied
2303 * buffer. It is recommended that you pass a buffer of at least
2304 * 128KB. The format of the capture may change based on SDK
2305 * version and Octeon chip.
2307 * @param buffer Buffer to store capture into
2308 * @param buffer_size
2309 * The size of the supplied buffer
* @return Zero on success, negative on failure
2313 extern int cvmx_pow_capture(void *buffer, int buffer_size);
2316 * Dump a POW capture to the console in a human readable format.
2318 * @param buffer POW capture from cvmx_pow_capture()
2319 * @param buffer_size
2320 * Size of the buffer
2322 extern void cvmx_pow_display(void *buffer, int buffer_size);
2325 * Return the number of POW entries supported by this chip
2327 * @return Number of POW entries
2329 extern int cvmx_pow_get_num_entries(void);
2336 #endif /* __CVMX_POW_H__ */