1 /***********************license start***************
2 * Copyright (c) 2003-2010 Cavium Networks (support@cavium.com). All rights
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * * Neither the name of Cavium Networks nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
23 * This Software, including technical data, may be subject to U.S. export control
24 * laws, including the U.S. Export Administration Act and its associated
25 * regulations, and may be subject to export or import regulations in other
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
44 * Interface to the hardware Packet Order / Work unit.
46 * New, starting with SDK 1.7.0, cvmx-pow supports a number of
47 * extended consistency checks. The define
48 * CVMX_ENABLE_POW_CHECKS controls the runtime insertion of POW
49 * internal state checks to find common programming errors. If
50 * CVMX_ENABLE_POW_CHECKS is not defined, checks are by default
51 * enabled. For example, cvmx-pow will check for the following
52 * program errors or POW state inconsistency.
53 * - Requesting a POW operation with an active tag switch in
55 * - Waiting for a tag switch to complete for an excessively
56 * long period. This is normally a sign of an error in locking
58 * - Illegal tag switches from NULL_NULL.
59 * - Illegal tag switches from NULL.
60 * - Illegal deschedule request.
61 * - WQE pointer not matching the one attached to the core by
64 * <hr>$Revision: 49448 $<hr>
67 #ifndef __CVMX_POW_H__
68 #define __CVMX_POW_H__
70 #include "cvmx-scratch.h"
73 #ifndef CVMX_BUILD_FOR_LINUX_KERNEL
74 #include "cvmx-warn.h"
81 /* Default to having all POW consistency checks turned on */
82 #ifndef CVMX_ENABLE_POW_CHECKS
83 #define CVMX_ENABLE_POW_CHECKS 1
87 * Wait flag values for pow functions.
96 * POW tag operations. These are used in the data stored to the POW.
100 CVMX_POW_TAG_OP_SWTAG = 0L, /**< switch the tag (only) for this PP
101 - the previous tag should be non-NULL in this case
102 - tag switch response required
103 - fields used: op, type, tag */
104 CVMX_POW_TAG_OP_SWTAG_FULL = 1L, /**< switch the tag for this PP, with full information
105 - this should be used when the previous tag is NULL
106 - tag switch response required
107 - fields used: address, op, grp, type, tag */
108 CVMX_POW_TAG_OP_SWTAG_DESCH = 2L, /**< switch the tag (and/or group) for this PP and de-schedule
109 - OK to keep the tag the same and only change the group
110 - fields used: op, no_sched, grp, type, tag */
111 CVMX_POW_TAG_OP_DESCH = 3L, /**< just de-schedule
112 - fields used: op, no_sched */
113 CVMX_POW_TAG_OP_ADDWQ = 4L, /**< create an entirely new work queue entry
114 - fields used: address, op, qos, grp, type, tag */
115 CVMX_POW_TAG_OP_UPDATE_WQP_GRP = 5L,/**< just update the work queue pointer and grp for this PP
116 - fields used: address, op, grp */
117 CVMX_POW_TAG_OP_SET_NSCHED = 6L, /**< set the no_sched bit on the de-schedule list
118 - does nothing if the selected entry is not on the de-schedule list
119 - does nothing if the stored work queue pointer does not match the address field
120 - fields used: address, index, op
121 Before issuing a *_NSCHED operation, SW must guarantee that all
122 prior deschedules and set/clr NSCHED operations are complete and all
123 prior switches are complete. The hardware provides the opsdone bit
124 and swdone bit for SW polling. After issuing a *_NSCHED operation,
125 SW must guarantee that the set/clr NSCHED is complete before
126 any subsequent operations. */
127 CVMX_POW_TAG_OP_CLR_NSCHED = 7L, /**< clears the no_sched bit on the de-schedule list
128 - does nothing if the selected entry is not on the de-schedule list
129 - does nothing if the stored work queue pointer does not match the address field
130 - fields used: address, index, op
131 Before issuing a *_NSCHED operation, SW must guarantee that all
132 prior deschedules and set/clr NSCHED operations are complete and all
133 prior switches are complete. The hardware provides the opsdone bit
134 and swdone bit for SW polling. After issuing a *_NSCHED operation,
135 SW must guarantee that the set/clr NSCHED is complete before
136 any subsequent operations. */
137 CVMX_POW_TAG_OP_NOP = 15L /**< do nothing */
141 * This structure defines the store data on a store to POW
148 #if __BYTE_ORDER == __BIG_ENDIAN
149 uint64_t no_sched : 1; /**< don't reschedule this entry. no_sched is used for CVMX_POW_TAG_OP_SWTAG_DESCH and CVMX_POW_TAG_OP_DESCH */
151 uint64_t index :13; /**< contains index of entry for a CVMX_POW_TAG_OP_*_NSCHED */
152 cvmx_pow_tag_op_t op : 4; /**< the operation to perform */
153 uint64_t unused2 : 2;
154 uint64_t qos : 3; /**< the QOS level for the packet. qos is only used for CVMX_POW_TAG_OP_ADDWQ */
155 uint64_t grp : 4; /**< the group that the work queue entry will be scheduled to grp is used for CVMX_POW_TAG_OP_ADDWQ, CVMX_POW_TAG_OP_SWTAG_FULL, CVMX_POW_TAG_OP_SWTAG_DESCH, and CVMX_POW_TAG_OP_UPDATE_WQP_GRP */
156 cvmx_pow_tag_type_t type : 3; /**< the type of the tag. type is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
157 uint64_t tag :32; /**< the actual tag. tag is used for everything except CVMX_POW_TAG_OP_DESCH, CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and CVMX_POW_TAG_OP_*_NSCHED */
160 cvmx_pow_tag_type_t type : 3;
163 uint64_t unused2 : 2;
164 cvmx_pow_tag_op_t op : 4;
167 uint64_t no_sched : 1;
170 } cvmx_pow_tag_req_t;
173 * This structure describes the address to load stuff from POW
180 * Address for new work request loads (did<2:0> == 0)
184 #if __BYTE_ORDER == __BIG_ENDIAN
185 uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */
186 uint64_t reserved_49_61 : 13; /**< Must be zero */
187 uint64_t is_io : 1; /**< Must be one */
188 uint64_t did : 8; /**< the ID of POW -- did<2:0> == 0 in this case */
189 uint64_t reserved_4_39 : 36; /**< Must be zero */
190 uint64_t wait : 1; /**< If set, don't return load response until work is available */
191 uint64_t reserved_0_2 : 3; /**< Must be zero */
193 uint64_t reserved_0_2 : 3;
195 uint64_t reserved_4_39 : 36;
198 uint64_t reserved_49_61 : 13;
199 uint64_t mem_region : 2;
204 * Address for loads to get POW internal status
208 #if __BYTE_ORDER == __BIG_ENDIAN
209 uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */
210 uint64_t reserved_49_61 : 13; /**< Must be zero */
211 uint64_t is_io : 1; /**< Must be one */
212 uint64_t did : 8; /**< the ID of POW -- did<2:0> == 1 in this case */
213 uint64_t reserved_10_39 : 30; /**< Must be zero */
214 uint64_t coreid : 4; /**< The core id to get status for */
215 uint64_t get_rev : 1; /**< If set and get_cur is set, return reverse tag-list pointer rather than forward tag-list pointer */
216 uint64_t get_cur : 1; /**< If set, return current status rather than pending status */
217 uint64_t get_wqp : 1; /**< If set, get the work-queue pointer rather than tag/type */
218 uint64_t reserved_0_2 : 3; /**< Must be zero */
220 uint64_t reserved_0_2 : 3;
221 uint64_t get_wqp : 1;
222 uint64_t get_cur : 1;
223 uint64_t get_rev : 1;
225 uint64_t reserved_10_39 : 30;
228 uint64_t reserved_49_61 : 13;
229 uint64_t mem_region : 2;
234 * Address for memory loads to get POW internal state
238 #if __BYTE_ORDER == __BIG_ENDIAN
239 uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */
240 uint64_t reserved_49_61 : 13; /**< Must be zero */
241 uint64_t is_io : 1; /**< Must be one */
242 uint64_t did : 8; /**< the ID of POW -- did<2:0> == 2 in this case */
243 uint64_t reserved_16_39 : 24; /**< Must be zero */
244 uint64_t index : 11; /**< POW memory index */
245 uint64_t get_des : 1; /**< If set, return deschedule information rather than the standard
246 response for work-queue index (invalid if the work-queue entry is not on the
248 uint64_t get_wqp : 1; /**< If set, get the work-queue pointer rather than tag/type (no effect when get_des set). */
249 uint64_t reserved_0_2 : 3; /**< Must be zero */
251 uint64_t reserved_0_2 : 3;
252 uint64_t get_wqp : 1;
253 uint64_t get_des : 1;
255 uint64_t reserved_16_39 : 24;
258 uint64_t reserved_49_61 : 13;
259 uint64_t mem_region : 2;
264 * Address for index/pointer loads
268 #if __BYTE_ORDER == __BIG_ENDIAN
269 uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */
270 uint64_t reserved_49_61 : 13; /**< Must be zero */
271 uint64_t is_io : 1; /**< Must be one */
272 uint64_t did : 8; /**< the ID of POW -- did<2:0> == 3 in this case */
273 uint64_t reserved_9_39 : 31; /**< Must be zero */
274 uint64_t qosgrp : 4; /**< when {get_rmt ==0 AND get_des_get_tail == 0}, this field selects one of
275 eight POW internal-input queues (0-7), one per QOS level; values 8-15 are
276 illegal in this case;
277 when {get_rmt ==0 AND get_des_get_tail == 1}, this field selects one of
278 16 deschedule lists (per group);
279 when get_rmt ==1, this field selects one of 16 memory-input queue lists.
280 The two memory-input queue lists associated with each QOS level are:
281 - qosgrp = 0, qosgrp = 8: QOS0
282 - qosgrp = 1, qosgrp = 9: QOS1
283 - qosgrp = 2, qosgrp = 10: QOS2
284 - qosgrp = 3, qosgrp = 11: QOS3
285 - qosgrp = 4, qosgrp = 12: QOS4
286 - qosgrp = 5, qosgrp = 13: QOS5
287 - qosgrp = 6, qosgrp = 14: QOS6
288 - qosgrp = 7, qosgrp = 15: QOS7 */
289 uint64_t get_des_get_tail: 1; /**< If set and get_rmt is clear, return deschedule list indexes
290 rather than indexes for the specified qos level; if set and get_rmt is set, return
291 the tail pointer rather than the head pointer for the specified qos level. */
292 uint64_t get_rmt : 1; /**< If set, return remote pointers rather than the local indexes for the specified qos level. */
293 uint64_t reserved_0_2 : 3; /**< Must be zero */
295 uint64_t reserved_0_2 : 3;
296 uint64_t get_rmt : 1;
297 uint64_t get_des_get_tail: 1;
299 uint64_t reserved_9_39 : 31;
302 uint64_t reserved_49_61 : 13;
303 uint64_t mem_region : 2;
308 * address for NULL_RD request (did<2:0> == 4)
309 * when this is read, HW attempts to change the state to NULL if it is NULL_NULL
310 * (the hardware cannot switch from NULL_NULL to NULL if a POW entry is not available -
311 * software may need to recover by finishing another piece of work before a POW
312 * entry can ever become available.)
316 #if __BYTE_ORDER == __BIG_ENDIAN
317 uint64_t mem_region : 2; /**< Mips64 address region. Should be CVMX_IO_SEG */
318 uint64_t reserved_49_61 : 13; /**< Must be zero */
319 uint64_t is_io : 1; /**< Must be one */
320 uint64_t did : 8; /**< the ID of POW -- did<2:0> == 4 in this case */
321 uint64_t reserved_0_39 : 40; /**< Must be zero */
323 uint64_t reserved_0_39 : 40;
326 uint64_t reserved_49_61 : 13;
327 uint64_t mem_region : 2;
330 } cvmx_pow_load_addr_t;
333 * This structure defines the response to a load/SENDSINGLE to POW (except CSR reads)
340 * Response to new work request loads
344 #if __BYTE_ORDER == __BIG_ENDIAN
345 uint64_t no_work : 1; /**< Set when no new work queue entry was returned.
346 If there was de-scheduled work, the HW will definitely
347 return it. When this bit is set, it could mean
349 - There was no work, or
350 - There was no work that the HW could find. This
351 case can happen, regardless of the wait bit value
352 in the original request, when there is work
353 in the IQ's that is too deep down the list. */
354 uint64_t reserved_40_62 : 23; /**< Must be zero */
355 uint64_t addr : 40; /**< 36 in O1 -- the work queue pointer */
358 uint64_t reserved_40_62 : 23;
359 uint64_t no_work : 1;
364 * Result for a POW Status Load (when get_cur==0 and get_wqp==0)
368 #if __BYTE_ORDER == __BIG_ENDIAN
369 uint64_t reserved_62_63 : 2;
370 uint64_t pend_switch : 1; /**< Set when there is a pending non-NULL SWTAG or
371 SWTAG_FULL, and the POW entry has not left the list for the original tag. */
372 uint64_t pend_switch_full: 1; /**< Set when SWTAG_FULL and pend_switch is set. */
373 uint64_t pend_switch_null: 1; /**< Set when there is a pending NULL SWTAG, or an implicit switch to NULL. */
374 uint64_t pend_desched : 1; /**< Set when there is a pending DESCHED or SWTAG_DESCHED. */
375 uint64_t pend_desched_switch: 1; /**< Set when there is a pending SWTAG_DESCHED and pend_desched is set. */
376 uint64_t pend_nosched : 1; /**< Set when nosched is desired and pend_desched is set. */
377 uint64_t pend_new_work : 1; /**< Set when there is a pending GET_WORK. */
378 uint64_t pend_new_work_wait: 1; /**< When pend_new_work is set, this bit indicates that the wait bit was set. */
379 uint64_t pend_null_rd : 1; /**< Set when there is a pending NULL_RD. */
380 uint64_t pend_nosched_clr: 1; /**< Set when there is a pending CLR_NSCHED. */
381 uint64_t reserved_51 : 1;
382 uint64_t pend_index : 11; /**< This is the index when pend_nosched_clr is set. */
383 uint64_t pend_grp : 4; /**< This is the new_grp when (pend_desched AND pend_desched_switch) is set. */
384 uint64_t reserved_34_35 : 2;
385 uint64_t pend_type : 2; /**< This is the tag type when pend_switch or (pend_desched AND pend_desched_switch) are set. */
386 uint64_t pend_tag : 32; /**< - this is the tag when pend_switch or (pend_desched AND pend_desched_switch) are set. */
388 uint64_t pend_tag : 32;
389 uint64_t pend_type : 2;
390 uint64_t reserved_34_35 : 2;
391 uint64_t pend_grp : 4;
392 uint64_t pend_index : 11;
393 uint64_t reserved_51 : 1;
394 uint64_t pend_nosched_clr: 1;
395 uint64_t pend_null_rd : 1;
396 uint64_t pend_new_work_wait: 1;
397 uint64_t pend_new_work : 1;
398 uint64_t pend_nosched : 1;
399 uint64_t pend_desched_switch: 1;
400 uint64_t pend_desched : 1;
401 uint64_t pend_switch_null: 1;
402 uint64_t pend_switch_full: 1;
403 uint64_t pend_switch : 1;
404 uint64_t reserved_62_63 : 2;
409 * Result for a POW Status Load (when get_cur==0 and get_wqp==1)
413 #if __BYTE_ORDER == __BIG_ENDIAN
414 uint64_t reserved_62_63 : 2;
415 uint64_t pend_switch : 1; /**< Set when there is a pending non-NULL SWTAG or
416 SWTAG_FULL, and the POW entry has not left the list for the original tag. */
417 uint64_t pend_switch_full: 1; /**< Set when SWTAG_FULL and pend_switch is set. */
418 uint64_t pend_switch_null: 1; /**< Set when there is a pending NULL SWTAG, or an implicit switch to NULL. */
419 uint64_t pend_desched : 1; /**< Set when there is a pending DESCHED or SWTAG_DESCHED. */
420 uint64_t pend_desched_switch: 1; /**< Set when there is a pending SWTAG_DESCHED and pend_desched is set. */
421 uint64_t pend_nosched : 1; /**< Set when nosched is desired and pend_desched is set. */
422 uint64_t pend_new_work : 1; /**< Set when there is a pending GET_WORK. */
423 uint64_t pend_new_work_wait: 1; /**< When pend_new_work is set, this bit indicates that the wait bit was set. */
424 uint64_t pend_null_rd : 1; /**< Set when there is a pending NULL_RD. */
425 uint64_t pend_nosched_clr: 1; /**< Set when there is a pending CLR_NSCHED. */
426 uint64_t reserved_51 : 1;
427 uint64_t pend_index : 11; /**< This is the index when pend_nosched_clr is set. */
428 uint64_t pend_grp : 4; /**< This is the new_grp when (pend_desched AND pend_desched_switch) is set. */
429 uint64_t pend_wqp : 36; /**< This is the wqp when pend_nosched_clr is set. */
431 uint64_t pend_wqp : 36;
432 uint64_t pend_grp : 4;
433 uint64_t pend_index : 11;
434 uint64_t reserved_51 : 1;
435 uint64_t pend_nosched_clr: 1;
436 uint64_t pend_null_rd : 1;
437 uint64_t pend_new_work_wait: 1;
438 uint64_t pend_new_work : 1;
439 uint64_t pend_nosched : 1;
440 uint64_t pend_desched_switch: 1;
441 uint64_t pend_desched : 1;
442 uint64_t pend_switch_null: 1;
443 uint64_t pend_switch_full: 1;
444 uint64_t pend_switch : 1;
445 uint64_t reserved_62_63 : 2;
450 * Result for a POW Status Load (when get_cur==1, get_wqp==0, and get_rev==0)
454 #if __BYTE_ORDER == __BIG_ENDIAN
455 uint64_t reserved_62_63 : 2;
456 uint64_t link_index : 11; /**< Points to the next POW entry in the tag list when tail == 0 (and
457 tag_type is not NULL or NULL_NULL). */
458 uint64_t index : 11; /**< The POW entry attached to the core. */
459 uint64_t grp : 4; /**< The group attached to the core (updated when new tag list entered on SWTAG_FULL). */
460 uint64_t head : 1; /**< Set when this POW entry is at the head of its tag list (also set when in
461 the NULL or NULL_NULL state). */
462 uint64_t tail : 1; /**< Set when this POW entry is at the tail of its tag list (also set when in the
463 NULL or NULL_NULL state). */
464 uint64_t tag_type : 2; /**< The tag type attached to the core (updated when new tag list
465 entered on SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */
466 uint64_t tag : 32; /**< The tag attached to the core (updated when new tag list entered on
467 SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */
470 uint64_t tag_type : 2;
475 uint64_t link_index : 11;
476 uint64_t reserved_62_63 : 2;
481 * Result for a POW Status Load (when get_cur==1, get_wqp==0, and get_rev==1)
485 #if __BYTE_ORDER == __BIG_ENDIAN
486 uint64_t reserved_62_63 : 2;
487 uint64_t revlink_index : 11; /**< Points to the prior POW entry in the tag list when head == 0
488 (and tag_type is not NULL or NULL_NULL). This field is unpredictable
489 when the core's state is NULL or NULL_NULL. */
490 uint64_t index : 11; /**< The POW entry attached to the core. */
491 uint64_t grp : 4; /**< The group attached to the core (updated when new tag list entered on SWTAG_FULL). */
492 uint64_t head : 1; /**< Set when this POW entry is at the head of its tag list (also set when in
493 the NULL or NULL_NULL state). */
494 uint64_t tail : 1; /**< Set when this POW entry is at the tail of its tag list (also set when in the
495 NULL or NULL_NULL state). */
496 uint64_t tag_type : 2; /**< The tag type attached to the core (updated when new tag list
497 entered on SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */
498 uint64_t tag : 32; /**< The tag attached to the core (updated when new tag list entered on
499 SWTAG, SWTAG_FULL, or SWTAG_DESCHED). */
502 uint64_t tag_type : 2;
507 uint64_t revlink_index : 11;
508 uint64_t reserved_62_63 : 2;
513 * Result for a POW Status Load (when get_cur==1, get_wqp==1, and get_rev==0)
517 #if __BYTE_ORDER == __BIG_ENDIAN
518 uint64_t reserved_62_63 : 2;
519 uint64_t link_index : 11; /**< Points to the next POW entry in the tag list when tail == 0 (and
520 tag_type is not NULL or NULL_NULL). */
521 uint64_t index : 11; /**< The POW entry attached to the core. */
522 uint64_t grp : 4; /**< The group attached to the core (updated when new tag list entered on SWTAG_FULL). */
523 uint64_t wqp : 36; /**< The wqp attached to the core (updated when new tag list entered on SWTAG_FULL). */
528 uint64_t link_index : 11;
529 uint64_t reserved_62_63 : 2;
534 * Result for a POW Status Load (when get_cur==1, get_wqp==1, and get_rev==1)
538 #if __BYTE_ORDER == __BIG_ENDIAN
539 uint64_t reserved_62_63 : 2;
540 uint64_t revlink_index : 11; /**< Points to the prior POW entry in the tag list when head == 0
541 (and tag_type is not NULL or NULL_NULL). This field is unpredictable
542 when the core's state is NULL or NULL_NULL. */
543 uint64_t index : 11; /**< The POW entry attached to the core. */
544 uint64_t grp : 4; /**< The group attached to the core (updated when new tag list entered on SWTAG_FULL). */
545 uint64_t wqp : 36; /**< The wqp attached to the core (updated when new tag list entered on SWTAG_FULL). */
550 uint64_t revlink_index : 11;
551 uint64_t reserved_62_63 : 2;
556 * Result For POW Memory Load (get_des == 0 and get_wqp == 0)
560 #if __BYTE_ORDER == __BIG_ENDIAN
561 uint64_t reserved_51_63 : 13;
562 uint64_t next_index : 11; /**< The next entry in the input, free, descheduled_head list
563 (unpredictable if entry is the tail of the list). */
564 uint64_t grp : 4; /**< The group of the POW entry. */
565 uint64_t reserved_35 : 1;
566 uint64_t tail : 1; /**< Set when this POW entry is at the tail of its tag list (also set when in the
567 NULL or NULL_NULL state). */
568 uint64_t tag_type : 2; /**< The tag type of the POW entry. */
569 uint64_t tag : 32; /**< The tag of the POW entry. */
572 uint64_t tag_type : 2;
574 uint64_t reserved_35 : 1;
576 uint64_t next_index : 11;
577 uint64_t reserved_51_63 : 13;
582 * Result For POW Memory Load (get_des == 0 and get_wqp == 1)
586 #if __BYTE_ORDER == __BIG_ENDIAN
587 uint64_t reserved_51_63 : 13;
588 uint64_t next_index : 11; /**< The next entry in the input, free, descheduled_head list
589 (unpredictable if entry is the tail of the list). */
590 uint64_t grp : 4; /**< The group of the POW entry. */
591 uint64_t wqp : 36; /**< The WQP held in the POW entry. */
595 uint64_t next_index : 11;
596 uint64_t reserved_51_63 : 13;
601 * Result For POW Memory Load (get_des == 1)
605 #if __BYTE_ORDER == __BIG_ENDIAN
606 uint64_t reserved_51_63 : 13;
607 uint64_t fwd_index : 11; /**< The next entry in the tag list connected to the descheduled head. */
608 uint64_t grp : 4; /**< The group of the POW entry. */
609 uint64_t nosched : 1; /**< The nosched bit for the POW entry. */
610 uint64_t pend_switch : 1; /**< There is a pending tag switch */
611 uint64_t pend_type : 2; /**< The next tag type for the new tag list when pend_switch is set. */
612 uint64_t pend_tag : 32; /**< The next tag for the new tag list when pend_switch is set. */
614 uint64_t pend_tag : 32;
615 uint64_t pend_type : 2;
616 uint64_t pend_switch : 1;
617 uint64_t nosched : 1;
619 uint64_t fwd_index : 11;
620 uint64_t reserved_51_63 : 13;
625 * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 0)
629 #if __BYTE_ORDER == __BIG_ENDIAN
630 uint64_t reserved_52_63 : 12;
631 uint64_t free_val : 1; /**< - set when there is one or more POW entries on the free list. */
632 uint64_t free_one : 1; /**< - set when there is exactly one POW entry on the free list. */
633 uint64_t reserved_49 : 1;
634 uint64_t free_head : 11; /**< - when free_val is set, indicates the first entry on the free list. */
635 uint64_t reserved_37 : 1;
636 uint64_t free_tail : 11; /**< - when free_val is set, indicates the last entry on the free list. */
637 uint64_t loc_val : 1; /**< - set when there is one or more POW entries on the input Q list selected by qosgrp. */
638 uint64_t loc_one : 1; /**< - set when there is exactly one POW entry on the input Q list selected by qosgrp. */
639 uint64_t reserved_23 : 1;
640 uint64_t loc_head : 11; /**< - when loc_val is set, indicates the first entry on the input Q list selected by qosgrp. */
641 uint64_t reserved_11 : 1;
642 uint64_t loc_tail : 11; /**< - when loc_val is set, indicates the last entry on the input Q list selected by qosgrp. */
644 uint64_t loc_tail : 11;
645 uint64_t reserved_11 : 1;
646 uint64_t loc_head : 11;
647 uint64_t reserved_23 : 1;
648 uint64_t loc_one : 1;
649 uint64_t loc_val : 1;
650 uint64_t free_tail : 11;
651 uint64_t reserved_37 : 1;
652 uint64_t free_head : 11;
653 uint64_t reserved_49 : 1;
654 uint64_t free_one : 1;
655 uint64_t free_val : 1;
656 uint64_t reserved_52_63 : 12;
661 * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 1)
665 #if __BYTE_ORDER == __BIG_ENDIAN
666 uint64_t reserved_52_63 : 12;
667 uint64_t nosched_val : 1; /**< - set when there is one or more POW entries on the nosched list. */
668 uint64_t nosched_one : 1; /**< - set when there is exactly one POW entry on the nosched list. */
669 uint64_t reserved_49 : 1;
670 uint64_t nosched_head : 11; /**< - when nosched_val is set, indicates the first entry on the nosched list. */
671 uint64_t reserved_37 : 1;
672 uint64_t nosched_tail : 11; /**< - when nosched_val is set, indicates the last entry on the nosched list. */
673 uint64_t des_val : 1; /**< - set when there is one or more descheduled heads on the descheduled list selected by qosgrp. */
674 uint64_t des_one : 1; /**< - set when there is exactly one descheduled head on the descheduled list selected by qosgrp. */
675 uint64_t reserved_23 : 1;
676 uint64_t des_head : 11; /**< - when des_val is set, indicates the first descheduled head on the descheduled list selected by qosgrp. */
677 uint64_t reserved_11 : 1;
678 uint64_t des_tail : 11; /**< - when des_val is set, indicates the last descheduled head on the descheduled list selected by qosgrp. */
680 uint64_t des_tail : 11;
681 uint64_t reserved_11 : 1;
682 uint64_t des_head : 11;
683 uint64_t reserved_23 : 1;
684 uint64_t des_one : 1;
685 uint64_t des_val : 1;
686 uint64_t nosched_tail : 11;
687 uint64_t reserved_37 : 1;
688 uint64_t nosched_head : 11;
689 uint64_t reserved_49 : 1;
690 uint64_t nosched_one : 1;
691 uint64_t nosched_val : 1;
692 uint64_t reserved_52_63 : 12;
697 * Result For POW Index/Pointer Load (get_rmt == 1/get_des_get_tail == 0)
701 #if __BYTE_ORDER == __BIG_ENDIAN
702 uint64_t reserved_39_63 : 25;
703 uint64_t rmt_is_head : 1; /**< Set when this DRAM list is the current head (i.e. is the next to
704 be reloaded when the POW hardware reloads a POW entry from DRAM). The
705 POW hardware alternates between the two DRAM lists associated with a QOS
706 level when it reloads work from DRAM into the POW unit. */
707 uint64_t rmt_val : 1; /**< Set when the DRAM portion of the input Q list selected by qosgrp
708 contains one or more pieces of work. */
709 uint64_t rmt_one : 1; /**< Set when the DRAM portion of the input Q list selected by qosgrp
710 contains exactly one piece of work. */
711 uint64_t rmt_head : 36; /**< When rmt_val is set, indicates the first piece of work on the
712 DRAM input Q list selected by qosgrp. */
714 uint64_t rmt_head : 36;
715 uint64_t rmt_one : 1;
716 uint64_t rmt_val : 1;
717 uint64_t rmt_is_head : 1;
718 uint64_t reserved_39_63 : 25;
723 * Result For POW Index/Pointer Load (get_rmt == 1/get_des_get_tail == 1)
727 #if __BYTE_ORDER == __BIG_ENDIAN
728 uint64_t reserved_39_63 : 25;
729 uint64_t rmt_is_head : 1; /**< - set when this DRAM list is the current head (i.e. is the next to
730 be reloaded when the POW hardware reloads a POW entry from DRAM). The
731 POW hardware alternates between the two DRAM lists associated with a QOS
732 level when it reloads work from DRAM into the POW unit. */
733 uint64_t rmt_val : 1; /**< - set when the DRAM portion of the input Q list selected by qosgrp
734 contains one or more pieces of work. */
735 uint64_t rmt_one : 1; /**< - set when the DRAM portion of the input Q list selected by qosgrp
736 contains exactly one piece of work. */
737 uint64_t rmt_tail : 36; /**< - when rmt_val is set, indicates the last piece of work on the DRAM
738 input Q list selected by qosgrp. */
740 uint64_t rmt_tail : 36;
741 uint64_t rmt_one : 1;
742 uint64_t rmt_val : 1;
743 uint64_t rmt_is_head : 1;
744 uint64_t reserved_39_63 : 25;
749 * Response to NULL_RD request loads
753 #if __BYTE_ORDER == __BIG_ENDIAN
754 uint64_t unused : 62;
755 uint64_t state : 2; /**< of type cvmx_pow_tag_type_t. state is one of the following:
756 - CVMX_POW_TAG_TYPE_ORDERED
757 - CVMX_POW_TAG_TYPE_ATOMIC
758 - CVMX_POW_TAG_TYPE_NULL
759 - CVMX_POW_TAG_TYPE_NULL_NULL */
762 uint64_t unused : 62;
766 } cvmx_pow_tag_load_resp_t;
769 * This structure describes the address used for stores to the POW.
770 * The store address is meaningful on stores to the POW. The hardware assumes that an aligned
771 * 64-bit store was used for all these stores.
772 * Note the assumption that the work queue entry is aligned on an 8-byte
773 * boundary (since the low-order 3 address bits must be zero).
774 * Note that not all fields are used by all operations.
776 * NOTE: The following is the behavior of the pending switch bit at the PP
777 * for POW stores (i.e. when did<7:3> == 0xc)
778 * - did<2:0> == 0 => pending switch bit is set
779 * - did<2:0> == 1 => no effect on the pending switch bit
780 * - did<2:0> == 3 => pending switch bit is cleared
781 * - did<2:0> == 7 => no effect on the pending switch bit
782 * - did<2:0> == others => must not be used
783 * - No other loads/stores have an effect on the pending switch bit
784 * - The switch bus from POW can clear the pending switch bit
786 * NOTE: did<2:0> == 2 is used by the HW for a special single-cycle ADDWQ command
787 * that only contains the pointer). SW must never use did<2:0> == 2.
792 * Unsigned 64 bit integer representation of store address
798 #if __BYTE_ORDER == __BIG_ENDIAN
799 uint64_t mem_reg : 2; /**< Memory region. Should be CVMX_IO_SEG in most cases */
800 uint64_t reserved_49_61 : 13; /**< Must be zero */
801 uint64_t is_io : 1; /**< Must be one */
802 uint64_t did : 8; /**< Device ID of POW. Note that different sub-dids are used. */
803 uint64_t reserved_36_39 : 4; /**< Must be zero */
804 uint64_t addr : 36; /**< Address field. addr<2:0> must be zero */
807 uint64_t reserved_36_39 : 4;
810 uint64_t reserved_49_61 : 13;
811 uint64_t mem_reg : 2;
814 } cvmx_pow_tag_store_addr_t;
817 * decode of the store data when an IOBDMA SENDSINGLE is sent to POW
825 #if __BYTE_ORDER == __BIG_ENDIAN
826 uint64_t scraddr : 8; /**< the (64-bit word) location in scratchpad to write to (if len != 0) */
827 uint64_t len : 8; /**< the number of words in the response (0 => no response) */
828 uint64_t did : 8; /**< the ID of the device on the non-coherent bus */
830 uint64_t wait : 1; /**< if set, don't return load response until work is available */
831 uint64_t unused2 : 3;
833 uint64_t unused2 : 3;
838 uint64_t scraddr : 8;
842 } cvmx_pow_iobdma_store_t;
845 /* CSR typedefs have been moved to cvmx-pow-defs.h */
848 * Get the POW tag for this core. This returns the current
849 * tag type, tag, group, and POW entry index associated with
850 * this core. Index is only valid if the tag type isn't NULL_NULL.
851 * If a tag switch is pending this routine returns the tag before
852 * the tag switch, not after.
854 * @return Current tag
856 static inline cvmx_pow_tag_req_t cvmx_pow_get_current_tag(void)
/* NOTE(review): this listing elides some original lines (see the gaps in
   the embedded numbering), including the function's opening brace and its
   trailing return statement. Code below is kept byte-identical. */
858 cvmx_pow_load_addr_t load_addr;
859 cvmx_pow_tag_load_resp_t load_resp;
860 cvmx_pow_tag_req_t result;
/* Build a POW status-load address targeting this core: IO segment,
   TAG1 device ID, and get_cur=1 to request current (not pending) state. */
863 load_addr.sstatus.mem_region = CVMX_IO_SEG;
864 load_addr.sstatus.is_io = 1;
865 load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
866 load_addr.sstatus.coreid = cvmx_get_core_num();
867 load_addr.sstatus.get_cur = 1;
/* Issue the load; decode the response through the s_sstatus2 view,
   which is the (get_cur==1, get_wqp==0, get_rev==0) result layout. */
868 load_resp.u64 = cvmx_read_csr(load_addr.u64);
/* Unpack the group, POW entry index, tag type, and tag attached to this
   core. Per the function's contract, index is only valid when the tag
   type is not NULL_NULL. */
870 result.s.grp = load_resp.s_sstatus2.grp;
871 result.s.index = load_resp.s_sstatus2.index;
872 result.s.type = (cvmx_pow_tag_type_t)load_resp.s_sstatus2.tag_type;
873 result.s.tag = load_resp.s_sstatus2.tag;
879 * Get the POW WQE for this core. This returns the work queue
880 * entry currently associated with this core.
882 * @return WQE pointer
884 static inline cvmx_wqe_t *cvmx_pow_get_current_wqp(void)
/* NOTE(review): the listing elides the opening brace (numbering gap
   between 884 and 886). Code below is kept byte-identical. */
886 cvmx_pow_load_addr_t load_addr;
887 cvmx_pow_tag_load_resp_t load_resp;
/* POW status load for this core with both get_cur and get_wqp set:
   return the attached work-queue pointer rather than tag/type. */
890 load_addr.sstatus.mem_region = CVMX_IO_SEG;
891 load_addr.sstatus.is_io = 1;
892 load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
893 load_addr.sstatus.coreid = cvmx_get_core_num();
894 load_addr.sstatus.get_cur = 1;
895 load_addr.sstatus.get_wqp = 1;
896 load_resp.u64 = cvmx_read_csr(load_addr.u64);
/* s_sstatus4 is the (get_cur==1, get_wqp==1, get_rev==0) response view;
   its 36-bit wqp field is a physical address, so translate it to a
   usable pointer before returning. */
897 return (cvmx_wqe_t*)cvmx_phys_to_ptr(load_resp.s_sstatus4.wqp);
/**
 * Print a warning if a tag switch is pending for this core
 *
 * @param function Function name checking for a pending tag switch
 */
static inline void __cvmx_pow_warn_if_pending_switch(const char *function)
{
    uint64_t switch_complete;
    /* CHORD reads the core-local "tag switch complete" bit; 0 => pending */
    CVMX_MF_CHORD(switch_complete);
    cvmx_warn_if(!switch_complete, "%s called with tag switch in progress\n", function);
}
/**
 * Waits for a tag switch to complete by polling the completion bit.
 * Note that switches to NULL complete immediately and do not need
 * to be waited for.
 */
static inline void cvmx_pow_tag_sw_wait(void)
{
    const uint64_t MAX_CYCLES = 1ull<<31;
    uint64_t switch_complete;
    uint64_t start_cycle = cvmx_get_cycle();
    while (1)
    {
        CVMX_MF_CHORD(switch_complete);
        if (cvmx_unlikely(switch_complete))
            break;
        if (cvmx_unlikely(cvmx_get_cycle() > start_cycle + MAX_CYCLES))
        {
            cvmx_dprintf("WARNING: Tag switch is taking a long time, possible deadlock\n");
            /* Re-arm the timeout so the warning repeats instead of spamming
            ** every iteration; keep spinning since we can't safely return */
            start_cycle = -MAX_CYCLES-1;
        }
    }
}
940 * Synchronous work request. Requests work from the POW.
941 * This function does NOT wait for previous tag switches to complete,
942 * so the caller must ensure that there is not a pending tag switch.
944 * @param wait When set, call stalls until work becomes avaiable, or times out.
945 * If not set, returns immediately.
947 * @return Returns the WQE pointer from POW. Returns NULL if no work was available.
949 static inline cvmx_wqe_t * cvmx_pow_work_request_sync_nocheck(cvmx_pow_wait_t wait)
951 cvmx_pow_load_addr_t ptr;
952 cvmx_pow_tag_load_resp_t result;
954 if (CVMX_ENABLE_POW_CHECKS)
955 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
958 ptr.swork.mem_region = CVMX_IO_SEG;
960 ptr.swork.did = CVMX_OCT_DID_TAG_SWTAG;
961 ptr.swork.wait = wait;
963 result.u64 = cvmx_read_csr(ptr.u64);
965 if (result.s_work.no_work)
968 return (cvmx_wqe_t*)cvmx_phys_to_ptr(result.s_work.addr);
973 * Synchronous work request. Requests work from the POW.
974 * This function waits for any previous tag switch to complete before
975 * requesting the new work.
977 * @param wait When set, call stalls until work becomes avaiable, or times out.
978 * If not set, returns immediately.
980 * @return Returns the WQE pointer from POW. Returns NULL if no work was available.
982 static inline cvmx_wqe_t * cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
984 if (CVMX_ENABLE_POW_CHECKS)
985 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
987 /* Must not have a switch pending when requesting work */
988 cvmx_pow_tag_sw_wait();
989 return(cvmx_pow_work_request_sync_nocheck(wait));
995 * Synchronous null_rd request. Requests a switch out of NULL_NULL POW state.
996 * This function waits for any previous tag switch to complete before
997 * requesting the null_rd.
999 * @return Returns the POW state of type cvmx_pow_tag_type_t.
1001 static inline cvmx_pow_tag_type_t cvmx_pow_work_request_null_rd(void)
1003 cvmx_pow_load_addr_t ptr;
1004 cvmx_pow_tag_load_resp_t result;
1006 if (CVMX_ENABLE_POW_CHECKS)
1007 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1009 /* Must not have a switch pending when requesting work */
1010 cvmx_pow_tag_sw_wait();
1013 ptr.snull_rd.mem_region = CVMX_IO_SEG;
1014 ptr.snull_rd.is_io = 1;
1015 ptr.snull_rd.did = CVMX_OCT_DID_TAG_NULL_RD;
1017 result.u64 = cvmx_read_csr(ptr.u64);
1019 return (cvmx_pow_tag_type_t)result.s_null_rd.state;
1024 * Asynchronous work request. Work is requested from the POW unit, and should later
1025 * be checked with function cvmx_pow_work_response_async.
1026 * This function does NOT wait for previous tag switches to complete,
1027 * so the caller must ensure that there is not a pending tag switch.
1029 * @param scr_addr Scratch memory address that response will be returned to,
1030 * which is either a valid WQE, or a response with the invalid bit set.
1031 * Byte address, must be 8 byte aligned.
1032 * @param wait 1 to cause response to wait for work to become available (or timeout)
1033 * 0 to cause response to return immediately
1035 static inline void cvmx_pow_work_request_async_nocheck(int scr_addr, cvmx_pow_wait_t wait)
1037 cvmx_pow_iobdma_store_t data;
1039 if (CVMX_ENABLE_POW_CHECKS)
1040 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1042 /* scr_addr must be 8 byte aligned */
1043 data.s.scraddr = scr_addr >> 3;
1045 data.s.did = CVMX_OCT_DID_TAG_SWTAG;
1047 cvmx_send_single(data.u64);
1050 * Asynchronous work request. Work is requested from the POW unit, and should later
1051 * be checked with function cvmx_pow_work_response_async.
1052 * This function waits for any previous tag switch to complete before
1053 * requesting the new work.
1055 * @param scr_addr Scratch memory address that response will be returned to,
1056 * which is either a valid WQE, or a response with the invalid bit set.
1057 * Byte address, must be 8 byte aligned.
1058 * @param wait 1 to cause response to wait for work to become available (or timeout)
1059 * 0 to cause response to return immediately
1061 static inline void cvmx_pow_work_request_async(int scr_addr, cvmx_pow_wait_t wait)
1063 if (CVMX_ENABLE_POW_CHECKS)
1064 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1066 /* Must not have a switch pending when requesting work */
1067 cvmx_pow_tag_sw_wait();
1068 cvmx_pow_work_request_async_nocheck(scr_addr, wait);
1073 * Gets result of asynchronous work request. Performs a IOBDMA sync
1074 * to wait for the response.
1076 * @param scr_addr Scratch memory address to get result from
1077 * Byte address, must be 8 byte aligned.
1078 * @return Returns the WQE from the scratch register, or NULL if no work was available.
1080 static inline cvmx_wqe_t * cvmx_pow_work_response_async(int scr_addr)
1082 cvmx_pow_tag_load_resp_t result;
1085 result.u64 = cvmx_scratch_read64(scr_addr);
1087 if (result.s_work.no_work)
1090 return (cvmx_wqe_t*)cvmx_phys_to_ptr(result.s_work.addr);
1095 * Checks if a work queue entry pointer returned by a work
1096 * request is valid. It may be invalid due to no work
1097 * being available or due to a timeout.
1099 * @param wqe_ptr pointer to a work queue entry returned by the POW
1101 * @return 0 if pointer is valid
1102 * 1 if invalid (no work was returned)
1104 static inline uint64_t cvmx_pow_work_invalid(cvmx_wqe_t *wqe_ptr)
1106 return (wqe_ptr == NULL);
1112 * Starts a tag switch to the provided tag value and tag type. Completion for
1113 * the tag switch must be checked for separately.
1114 * This function does NOT update the
1115 * work queue entry in dram to match tag value and type, so the application must
1116 * keep track of these if they are important to the application.
1117 * This tag switch command must not be used for switches to NULL, as the tag
1118 * switch pending bit will be set by the switch request, but never cleared by the
1121 * NOTE: This should not be used when switching from a NULL tag. Use
1122 * cvmx_pow_tag_sw_full() instead.
1124 * This function does no checks, so the caller must ensure that any previous tag
1125 * switch has completed.
1127 * @param tag new tag value
1128 * @param tag_type new tag type (ordered or atomic)
1130 static inline void cvmx_pow_tag_sw_nocheck(uint32_t tag, cvmx_pow_tag_type_t tag_type)
1133 cvmx_pow_tag_req_t tag_req;
1135 if (CVMX_ENABLE_POW_CHECKS)
1137 cvmx_pow_tag_req_t current_tag;
1138 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1139 current_tag = cvmx_pow_get_current_tag();
1140 cvmx_warn_if(current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__);
1141 cvmx_warn_if(current_tag.s.type == CVMX_POW_TAG_TYPE_NULL, "%s called with NULL tag\n", __FUNCTION__);
1142 cvmx_warn_if((current_tag.s.type == tag_type) && (current_tag.s.tag == tag), "%s called to perform a tag switch to the same tag\n", __FUNCTION__);
1143 cvmx_warn_if(tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\n", __FUNCTION__);
1146 /* Note that WQE in DRAM is not updated here, as the POW does not read from DRAM
1147 ** once the WQE is in flight. See hardware manual for complete details.
1148 ** It is the application's responsibility to keep track of the current tag
1149 ** value if that is important.
1154 tag_req.s.op = CVMX_POW_TAG_OP_SWTAG;
1155 tag_req.s.tag = tag;
1156 tag_req.s.type = tag_type;
1159 ptr.sio.mem_region = CVMX_IO_SEG;
1161 ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG;
1163 /* once this store arrives at POW, it will attempt the switch
1164 software must wait for the switch to complete separately */
1165 cvmx_write_io(ptr.u64, tag_req.u64);
1170 * Starts a tag switch to the provided tag value and tag type. Completion for
1171 * the tag switch must be checked for separately.
1172 * This function does NOT update the
1173 * work queue entry in dram to match tag value and type, so the application must
1174 * keep track of these if they are important to the application.
1175 * This tag switch command must not be used for switches to NULL, as the tag
1176 * switch pending bit will be set by the switch request, but never cleared by the
1179 * NOTE: This should not be used when switching from a NULL tag. Use
1180 * cvmx_pow_tag_sw_full() instead.
1182 * This function waits for any previous tag switch to complete, and also
1183 * displays an error on tag switches to NULL.
1185 * @param tag new tag value
1186 * @param tag_type new tag type (ordered or atomic)
1188 static inline void cvmx_pow_tag_sw(uint32_t tag, cvmx_pow_tag_type_t tag_type)
1190 if (CVMX_ENABLE_POW_CHECKS)
1191 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1193 /* Note that WQE in DRAM is not updated here, as the POW does not read from DRAM
1194 ** once the WQE is in flight. See hardware manual for complete details.
1195 ** It is the application's responsibility to keep track of the current tag
1196 ** value if that is important.
1199 /* Ensure that there is not a pending tag switch, as a tag switch cannot be started
1200 ** if a previous switch is still pending. */
1201 cvmx_pow_tag_sw_wait();
1202 cvmx_pow_tag_sw_nocheck(tag, tag_type);
1207 * Starts a tag switch to the provided tag value and tag type. Completion for
1208 * the tag switch must be checked for separately.
1209 * This function does NOT update the
1210 * work queue entry in dram to match tag value and type, so the application must
1211 * keep track of these if they are important to the application.
1212 * This tag switch command must not be used for switches to NULL, as the tag
1213 * switch pending bit will be set by the switch request, but never cleared by the
1216 * This function must be used for tag switches from NULL.
1218 * This function does no checks, so the caller must ensure that any previous tag
1219 * switch has completed.
1221 * @param wqp pointer to work queue entry to submit. This entry is updated to match the other parameters
1222 * @param tag tag value to be assigned to work queue entry
1223 * @param tag_type type of tag
1224 * @param group group value for the work queue entry.
1226 static inline void cvmx_pow_tag_sw_full_nocheck(cvmx_wqe_t *wqp, uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t group)
1229 cvmx_pow_tag_req_t tag_req;
1231 if (CVMX_ENABLE_POW_CHECKS)
1233 cvmx_pow_tag_req_t current_tag;
1234 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1235 current_tag = cvmx_pow_get_current_tag();
1236 cvmx_warn_if(current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__);
1237 cvmx_warn_if((current_tag.s.type == tag_type) && (current_tag.s.tag == tag), "%s called to perform a tag switch to the same tag\n", __FUNCTION__);
1238 cvmx_warn_if(tag_type == CVMX_POW_TAG_TYPE_NULL, "%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\n", __FUNCTION__);
1239 if (wqp != cvmx_phys_to_ptr(0x80))
1240 cvmx_warn_if(wqp != cvmx_pow_get_current_wqp(), "%s passed WQE(%p) doesn't match the address in the POW(%p)\n", __FUNCTION__, wqp, cvmx_pow_get_current_wqp());
1243 /* Note that WQE in DRAM is not updated here, as the POW does not read from DRAM
1244 ** once the WQE is in flight. See hardware manual for complete details.
1245 ** It is the application's responsibility to keep track of the current tag
1246 ** value if that is important.
1250 tag_req.s.op = CVMX_POW_TAG_OP_SWTAG_FULL;
1251 tag_req.s.tag = tag;
1252 tag_req.s.type = tag_type;
1253 tag_req.s.grp = group;
1256 ptr.sio.mem_region = CVMX_IO_SEG;
1258 ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG;
1259 ptr.sio.offset = CAST64(wqp);
1261 /* once this store arrives at POW, it will attempt the switch
1262 software must wait for the switch to complete separately */
1263 cvmx_write_io(ptr.u64, tag_req.u64);
1268 * Starts a tag switch to the provided tag value and tag type. Completion for
1269 * the tag switch must be checked for separately.
1270 * This function does NOT update the
1271 * work queue entry in dram to match tag value and type, so the application must
1272 * keep track of these if they are important to the application.
1273 * This tag switch command must not be used for switches to NULL, as the tag
1274 * switch pending bit will be set by the switch request, but never cleared by the
1277 * This function must be used for tag switches from NULL.
1279 * This function waits for any pending tag switches to complete
1280 * before requesting the tag switch.
1282 * @param wqp pointer to work queue entry to submit. This entry is updated to match the other parameters
1283 * @param tag tag value to be assigned to work queue entry
1284 * @param tag_type type of tag
1285 * @param group group value for the work queue entry.
1287 static inline void cvmx_pow_tag_sw_full(cvmx_wqe_t *wqp, uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t group)
1289 if (CVMX_ENABLE_POW_CHECKS)
1290 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1292 /* Ensure that there is not a pending tag switch, as a tag switch cannot be started
1293 ** if a previous switch is still pending. */
1294 cvmx_pow_tag_sw_wait();
1295 cvmx_pow_tag_sw_full_nocheck(wqp, tag, tag_type, group);
1300 * Switch to a NULL tag, which ends any ordering or
1301 * synchronization provided by the POW for the current
1302 * work queue entry. This operation completes immediatly,
1303 * so completetion should not be waited for.
1304 * This function does NOT wait for previous tag switches to complete,
1305 * so the caller must ensure that any previous tag switches have completed.
1307 static inline void cvmx_pow_tag_sw_null_nocheck(void)
1310 cvmx_pow_tag_req_t tag_req;
1312 if (CVMX_ENABLE_POW_CHECKS)
1314 cvmx_pow_tag_req_t current_tag;
1315 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1316 current_tag = cvmx_pow_get_current_tag();
1317 cvmx_warn_if(current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__);
1318 cvmx_warn_if(current_tag.s.type == CVMX_POW_TAG_TYPE_NULL, "%s called when we already have a NULL tag\n", __FUNCTION__);
1322 tag_req.s.op = CVMX_POW_TAG_OP_SWTAG;
1323 tag_req.s.type = CVMX_POW_TAG_TYPE_NULL;
1327 ptr.sio.mem_region = CVMX_IO_SEG;
1329 ptr.sio.did = CVMX_OCT_DID_TAG_TAG1;
1332 cvmx_write_io(ptr.u64, tag_req.u64);
1334 /* switch to NULL completes immediately */
1338 * Switch to a NULL tag, which ends any ordering or
1339 * synchronization provided by the POW for the current
1340 * work queue entry. This operation completes immediatly,
1341 * so completetion should not be waited for.
1342 * This function waits for any pending tag switches to complete
1343 * before requesting the switch to NULL.
1345 static inline void cvmx_pow_tag_sw_null(void)
1347 if (CVMX_ENABLE_POW_CHECKS)
1348 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1350 /* Ensure that there is not a pending tag switch, as a tag switch cannot be started
1351 ** if a previous switch is still pending. */
1352 cvmx_pow_tag_sw_wait();
1353 cvmx_pow_tag_sw_null_nocheck();
1355 /* switch to NULL completes immediately */
1361 * Submits work to an input queue. This function updates the work queue entry in DRAM to match
1362 * the arguments given.
1363 * Note that the tag provided is for the work queue entry submitted, and is unrelated to the tag that
1364 * the core currently holds.
1366 * @param wqp pointer to work queue entry to submit. This entry is updated to match the other parameters
1367 * @param tag tag value to be assigned to work queue entry
1368 * @param tag_type type of tag
1369 * @param qos Input queue to add to.
1370 * @param grp group value for the work queue entry.
1372 static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t qos, uint64_t grp)
1375 cvmx_pow_tag_req_t tag_req;
1379 wqp->tag_type = tag_type;
1383 tag_req.s.op = CVMX_POW_TAG_OP_ADDWQ;
1384 tag_req.s.type = tag_type;
1385 tag_req.s.tag = tag;
1386 tag_req.s.qos = qos;
1387 tag_req.s.grp = grp;
1391 ptr.sio.mem_region = CVMX_IO_SEG;
1393 ptr.sio.did = CVMX_OCT_DID_TAG_TAG1;
1394 ptr.sio.offset = cvmx_ptr_to_phys(wqp);
1396 /* SYNC write to memory before the work submit. This is necessary
1397 ** as POW may read values from DRAM at this time */
1399 cvmx_write_io(ptr.u64, tag_req.u64);
1405 * This function sets the group mask for a core. The group mask
1406 * indicates which groups each core will accept work from. There are
1409 * @param core_num core to apply mask to
1410 * @param mask Group mask. There are 16 groups, so only bits 0-15 are valid,
1411 * representing groups 0-15.
1412 * Each 1 bit in the mask enables the core to accept work from
1413 * the corresponding group.
1415 static inline void cvmx_pow_set_group_mask(uint64_t core_num, uint64_t mask)
1417 cvmx_pow_pp_grp_mskx_t grp_msk;
1419 grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num));
1420 grp_msk.s.grp_msk = mask;
1421 cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);
1425 * This function sets POW static priorities for a core. Each input queue has
1426 * an associated priority value.
1428 * @param core_num core to apply priorities to
1429 * @param priority Vector of 8 priorities, one per POW Input Queue (0-7).
1430 * Highest priority is 0 and lowest is 7. A priority value
1431 * of 0xF instructs POW to skip the Input Queue when
1432 * scheduling to this specific core.
1433 * NOTE: priorities should not have gaps in values, meaning
1434 * {0,1,1,1,1,1,1,1} is a valid configuration while
1435 * {0,2,2,2,2,2,2,2} is not.
1437 static inline void cvmx_pow_set_priority(uint64_t core_num, const uint8_t priority[])
1439 /* POW priorities are supported on CN5xxx and later */
1440 if (!OCTEON_IS_MODEL(OCTEON_CN3XXX))
1442 cvmx_pow_pp_grp_mskx_t grp_msk;
1444 grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num));
1445 grp_msk.s.qos0_pri = priority[0];
1446 grp_msk.s.qos1_pri = priority[1];
1447 grp_msk.s.qos2_pri = priority[2];
1448 grp_msk.s.qos3_pri = priority[3];
1449 grp_msk.s.qos4_pri = priority[4];
1450 grp_msk.s.qos5_pri = priority[5];
1451 grp_msk.s.qos6_pri = priority[6];
1452 grp_msk.s.qos7_pri = priority[7];
1454 /* Detect gaps between priorities and flag error */
1457 uint32_t prio_mask = 0;
1460 if (priority[i] != 0xF)
1461 prio_mask |= 1<<priority[i];
1463 if ( prio_mask ^ ((1<<cvmx_pop(prio_mask)) - 1))
1465 cvmx_dprintf("ERROR: POW static priorities should be contiguous (0x%llx)\n", (unsigned long long)prio_mask);
1470 cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);
1475 * Performs a tag switch and then an immediate deschedule. This completes
1476 * immediatly, so completion must not be waited for. This function does NOT
1477 * update the wqe in DRAM to match arguments.
1479 * This function does NOT wait for any prior tag switches to complete, so the
1480 * calling code must do this.
1482 * Note the following CAVEAT of the Octeon HW behavior when
1483 * re-scheduling DE-SCHEDULEd items whose (next) state is
1485 * - If there are no switches pending at the time that the
1486 * HW executes the de-schedule, the HW will only re-schedule
1487 * the head of the FIFO associated with the given tag. This
1488 * means that in many respects, the HW treats this ORDERED
1489 * tag as an ATOMIC tag. Note that in the SWTAG_DESCH
1490 * case (to an ORDERED tag), the HW will do the switch
1491 * before the deschedule whenever it is possible to do
1492 * the switch immediately, so it may often look like
1494 * - If there is a pending switch to ORDERED at the time
1495 * the HW executes the de-schedule, the HW will perform
1496 * the switch at the time it re-schedules, and will be
1497 * able to reschedule any/all of the entries with the
1499 * Due to this behavior, the RECOMMENDATION to software is
1500 * that they have a (next) state of ATOMIC when they
1501 * DE-SCHEDULE. If an ORDERED tag is what was really desired,
1502 * SW can choose to immediately switch to an ORDERED tag
1503 * after the work (that has an ATOMIC tag) is re-scheduled.
1504 * Note that since there are never any tag switches pending
1505 * when the HW re-schedules, this switch can be IMMEDIATE upon
1506 * the reception of the pointer during the re-schedule.
1508 * @param tag New tag value
1509 * @param tag_type New tag type
1510 * @param group New group value
1511 * @param no_sched Control whether this work queue entry will be rescheduled.
1512 * - 1 : don't schedule this work
1513 * - 0 : allow this work to be scheduled.
1515 static inline void cvmx_pow_tag_sw_desched_nocheck(uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t group, uint64_t no_sched)
1518 cvmx_pow_tag_req_t tag_req;
1520 if (CVMX_ENABLE_POW_CHECKS)
1522 cvmx_pow_tag_req_t current_tag;
1523 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1524 current_tag = cvmx_pow_get_current_tag();
1525 cvmx_warn_if(current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__);
1526 cvmx_warn_if(current_tag.s.type == CVMX_POW_TAG_TYPE_NULL, "%s called with NULL tag. Deschedule not allowed from NULL state\n", __FUNCTION__);
1527 cvmx_warn_if((current_tag.s.type != CVMX_POW_TAG_TYPE_ATOMIC) && (tag_type != CVMX_POW_TAG_TYPE_ATOMIC), "%s called where neither the before or after tag is ATOMIC\n", __FUNCTION__);
1531 tag_req.s.op = CVMX_POW_TAG_OP_SWTAG_DESCH;
1532 tag_req.s.tag = tag;
1533 tag_req.s.type = tag_type;
1534 tag_req.s.grp = group;
1535 tag_req.s.no_sched = no_sched;
1538 ptr.sio.mem_region = CVMX_IO_SEG;
1540 ptr.sio.did = CVMX_OCT_DID_TAG_TAG3;
1542 cvmx_write_io(ptr.u64, tag_req.u64); /* since TAG3 is used, this store will clear the local pending switch bit */
1545 * Performs a tag switch and then an immediate deschedule. This completes
1546 * immediatly, so completion must not be waited for. This function does NOT
1547 * update the wqe in DRAM to match arguments.
1549 * This function waits for any prior tag switches to complete, so the
1550 * calling code may call this function with a pending tag switch.
1552 * Note the following CAVEAT of the Octeon HW behavior when
1553 * re-scheduling DE-SCHEDULEd items whose (next) state is
1555 * - If there are no switches pending at the time that the
1556 * HW executes the de-schedule, the HW will only re-schedule
1557 * the head of the FIFO associated with the given tag. This
1558 * means that in many respects, the HW treats this ORDERED
1559 * tag as an ATOMIC tag. Note that in the SWTAG_DESCH
1560 * case (to an ORDERED tag), the HW will do the switch
1561 * before the deschedule whenever it is possible to do
1562 * the switch immediately, so it may often look like
1564 * - If there is a pending switch to ORDERED at the time
1565 * the HW executes the de-schedule, the HW will perform
1566 * the switch at the time it re-schedules, and will be
1567 * able to reschedule any/all of the entries with the
1569 * Due to this behavior, the RECOMMENDATION to software is
1570 * that they have a (next) state of ATOMIC when they
1571 * DE-SCHEDULE. If an ORDERED tag is what was really desired,
1572 * SW can choose to immediately switch to an ORDERED tag
1573 * after the work (that has an ATOMIC tag) is re-scheduled.
1574 * Note that since there are never any tag switches pending
1575 * when the HW re-schedules, this switch can be IMMEDIATE upon
1576 * the reception of the pointer during the re-schedule.
1578 * @param tag New tag value
1579 * @param tag_type New tag type
1580 * @param group New group value
1581 * @param no_sched Control whether this work queue entry will be rescheduled.
1582 * - 1 : don't schedule this work
1583 * - 0 : allow this work to be scheduled.
1585 static inline void cvmx_pow_tag_sw_desched(uint32_t tag, cvmx_pow_tag_type_t tag_type, uint64_t group, uint64_t no_sched)
1587 if (CVMX_ENABLE_POW_CHECKS)
1588 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1590 /* Need to make sure any writes to the work queue entry are complete */
1592 /* Ensure that there is not a pending tag switch, as a tag switch cannot be started
1593 ** if a previous switch is still pending. */
1594 cvmx_pow_tag_sw_wait();
1595 cvmx_pow_tag_sw_desched_nocheck(tag, tag_type, group, no_sched);
1603 * Descchedules the current work queue entry.
1605 * @param no_sched no schedule flag value to be set on the work queue entry. If this is set
1606 * the entry will not be rescheduled.
1608 static inline void cvmx_pow_desched(uint64_t no_sched)
1611 cvmx_pow_tag_req_t tag_req;
1613 if (CVMX_ENABLE_POW_CHECKS)
1615 cvmx_pow_tag_req_t current_tag;
1616 __cvmx_pow_warn_if_pending_switch(__FUNCTION__);
1617 current_tag = cvmx_pow_get_current_tag();
1618 cvmx_warn_if(current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL, "%s called with NULL_NULL tag\n", __FUNCTION__);
1619 cvmx_warn_if(current_tag.s.type == CVMX_POW_TAG_TYPE_NULL, "%s called with NULL tag. Deschedule not expected from NULL state\n", __FUNCTION__);
1622 /* Need to make sure any writes to the work queue entry are complete */
1626 tag_req.s.op = CVMX_POW_TAG_OP_DESCH;
1627 tag_req.s.no_sched = no_sched;
1630 ptr.sio.mem_region = CVMX_IO_SEG;
1632 ptr.sio.did = CVMX_OCT_DID_TAG_TAG3;
1634 cvmx_write_io(ptr.u64, tag_req.u64); /* since TAG3 is used, this store will clear the local pending switch bit */
/***********************************************************************************************
** Define usage of bits within the 32 bit tag values.
***********************************************************************************************/

/*
 * Number of bits of the tag used by software. The SW bits
 * are always a contiguous block of the high bits, starting at bit 31.
 * The hardware bits are always the low bits. By default, the top 8 bits
 * of the tag are reserved for software, and the low 24 are set by the IPD unit.
 */
#define CVMX_TAG_SW_BITS    (8)
#define CVMX_TAG_SW_SHIFT   (32 - CVMX_TAG_SW_BITS)

/* Below is the list of values for the top 8 bits of the tag. */
#define CVMX_TAG_SW_BITS_INTERNAL  0x1  /* Tag values with top byte of this value are reserved for internal executive uses */
/* The executive divides the remaining 24 bits as follows:
** * the upper 8 bits (bits 23 - 16 of the tag) define a subgroup
** * the lower 16 bits (bits 15 - 0 of the tag) define the value within the subgroup
** Note that this section describes the format of tags generated by software - refer to the
** hardware documentation for a description of the tag values generated by the packet input
** hardware.
** Subgroups are defined here */
#define CVMX_TAG_SUBGROUP_MASK  0xFFFF  /* Mask for the value portion of the tag */
#define CVMX_TAG_SUBGROUP_SHIFT 16
#define CVMX_TAG_SUBGROUP_PKO   0x1
/* End of executive tag subgroup definitions */

/* The remaining software bit values 0x2 - 0xff are available for application use */
1677 * This function creates a 32 bit tag value from the two values provided.
1679 * @param sw_bits The upper bits (number depends on configuration) are set to this value. The remainder of
1680 * bits are set by the hw_bits parameter.
1681 * @param hw_bits The lower bits (number depends on configuration) are set to this value. The remainder of
1682 * bits are set by the sw_bits parameter.
1684 * @return 32 bit value of the combined hw and sw bits.
1686 static inline uint32_t cvmx_pow_tag_compose(uint64_t sw_bits, uint64_t hw_bits)
1688 return((((sw_bits & cvmx_build_mask(CVMX_TAG_SW_BITS)) << CVMX_TAG_SW_SHIFT) | (hw_bits & cvmx_build_mask(32 - CVMX_TAG_SW_BITS))));
1691 * Extracts the bits allocated for software use from the tag
1693 * @param tag 32 bit tag value
1695 * @return N bit software tag value, where N is configurable with the CVMX_TAG_SW_BITS define
1697 static inline uint32_t cvmx_pow_tag_get_sw_bits(uint64_t tag)
1699 return((tag >> (32 - CVMX_TAG_SW_BITS)) & cvmx_build_mask(CVMX_TAG_SW_BITS));
1703 * Extracts the bits allocated for hardware use from the tag
1705 * @param tag 32 bit tag value
1707 * @return (32 - N) bit software tag value, where N is configurable with the CVMX_TAG_SW_BITS define
1709 static inline uint32_t cvmx_pow_tag_get_hw_bits(uint64_t tag)
1711 return(tag & cvmx_build_mask(32 - CVMX_TAG_SW_BITS));
/**
 * Store the current POW internal state into the supplied
 * buffer. It is recommended that you pass a buffer of at least
 * 128KB. The format of the capture may change based on SDK
 * version and Octeon chip.
 *
 * @param buffer      Buffer to store capture into
 * @param buffer_size The size of the supplied buffer
 *
 * @return Zero on success, negative on failure
 */
extern int cvmx_pow_capture(void *buffer, int buffer_size);

/**
 * Dump a POW capture to the console in a human readable format.
 *
 * @param buffer      POW capture from cvmx_pow_capture()
 * @param buffer_size Size of the buffer
 */
extern void cvmx_pow_display(void *buffer, int buffer_size);

/**
 * Return the number of POW entries supported by this chip
 *
 * @return Number of POW entries
 */
extern int cvmx_pow_get_num_entries(void);
1749 #endif /* __CVMX_POW_H__ */