1 /******************************************************************************
3 © 1995-2003, 2004, 2005-2011 Freescale Semiconductor, Inc.
6 This is proprietary source code of Freescale Semiconductor Inc.,
7 and its use is subject to the NetComm Device Drivers EULA.
8 The copyright notice above does not evidence any actual or intended
9 publication of such source code.
11 ALTERNATIVELY, redistribution and use in source and binary forms, with
12 or without modification, are permitted provided that the following
14 * Redistributions of source code must retain the above copyright
15 notice, this list of conditions and the following disclaimer.
16 * Redistributions in binary form must reproduce the above copyright
17 notice, this list of conditions and the following disclaimer in the
18 documentation and/or other materials provided with the distribution.
19 * Neither the name of Freescale Semiconductor nor the
20 names of its contributors may be used to endorse or promote products
21 derived from this software without specific prior written permission.
23 THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 **************************************************************************/
36 /******************************************************************************
39 @Description QM Low-level implementation
40 *//***************************************************************************/
44 #include "error_ext.h"
46 #include "qman_private.h"
49 /***************************/
50 /* Portal register assists */
51 /***************************/
53 /* Cache-inhibited register offsets */
54 #define REG_EQCR_PI_CINH 0x0000
55 #define REG_EQCR_CI_CINH 0x0004
56 #define REG_EQCR_ITR 0x0008
57 #define REG_DQRR_PI_CINH 0x0040
58 #define REG_DQRR_CI_CINH 0x0044
59 #define REG_DQRR_ITR 0x0048
60 #define REG_DQRR_DCAP 0x0050
61 #define REG_DQRR_SDQCR 0x0054
62 #define REG_DQRR_VDQCR 0x0058
63 #define REG_DQRR_PDQCR 0x005c
64 #define REG_MR_PI_CINH 0x0080
65 #define REG_MR_CI_CINH 0x0084
66 #define REG_MR_ITR 0x0088
67 #define REG_CFG 0x0100
68 #define REG_ISR 0x0e00
69 #define REG_IER 0x0e04
70 #define REG_ISDR 0x0e08
71 #define REG_IIR 0x0e0c
72 #define REG_ITPR 0x0e14
74 /* Cache-enabled register offsets */
75 #define CL_EQCR 0x0000
76 #define CL_DQRR 0x1000
78 #define CL_EQCR_PI_CENA 0x3000
79 #define CL_EQCR_CI_CENA 0x3100
80 #define CL_DQRR_PI_CENA 0x3200
81 #define CL_DQRR_CI_CENA 0x3300
82 #define CL_MR_PI_CENA 0x3400
83 #define CL_MR_CI_CENA 0x3500
84 #define CL_RORI_CENA 0x3600
/* Advance pointer 'a' by 'b' bytes. (Truncation had dropped the braces.) */
static __inline__ void *ptr_ADD(void *a, uintptr_t b)
{
    return (void *)((uintptr_t)a + b);
}
94 /* The h/w design requires mappings to be size-aligned so that "add"s can be
95 * reduced to "or"s. The primitives below do the same for s/w. */
96 /* Bitwise-OR two pointers */
/* Bitwise-OR 'b' into pointer 'a'; valid because portal mappings are
 * size-aligned, so OR is equivalent to ADD for in-range offsets. */
static __inline__ void *ptr_OR(void *a, uintptr_t b)
{
    return (void *)((uintptr_t)a | b);
}
102 /* Cache-inhibited register access */
103 static __inline__ uint32_t __qm_in(struct qm_addr *qm, uintptr_t offset)
105 uint32_t *tmp = (uint32_t *)ptr_ADD(qm->addr_ci, offset);
106 return GET_UINT32(*tmp);
108 static __inline__ void __qm_out(struct qm_addr *qm, uintptr_t offset, uint32_t val)
110 uint32_t *tmp = (uint32_t *)ptr_ADD(qm->addr_ci, offset);
111 WRITE_UINT32(*tmp, val);
113 #define qm_in(reg) __qm_in(&portal->addr, REG_##reg)
114 #define qm_out(reg, val) __qm_out(&portal->addr, REG_##reg, (uint32_t)val)
116 /* Convert 'n' cachelines to a pointer value for bitwise OR */
117 #define qm_cl(n) ((n) << 6)
119 /* Cache-enabled (index) register access */
120 static __inline__ void __qm_cl_touch_ro(struct qm_addr *qm, uintptr_t offset)
122 dcbt_ro(ptr_ADD(qm->addr_ce, offset));
124 static __inline__ void __qm_cl_touch_rw(struct qm_addr *qm, uintptr_t offset)
126 dcbt_rw(ptr_ADD(qm->addr_ce, offset));
128 static __inline__ uint32_t __qm_cl_in(struct qm_addr *qm, uintptr_t offset)
130 uint32_t *tmp = (uint32_t *)ptr_ADD(qm->addr_ce, offset);
131 return GET_UINT32(*tmp);
133 static __inline__ void __qm_cl_out(struct qm_addr *qm, uintptr_t offset, uint32_t val)
135 uint32_t *tmp = (uint32_t *)ptr_ADD(qm->addr_ce, offset);
136 WRITE_UINT32(*tmp, val);
139 static __inline__ void __qm_cl_invalidate(struct qm_addr *qm, uintptr_t offset)
141 dcbi(ptr_ADD(qm->addr_ce, offset));
143 #define qm_cl_touch_ro(reg) __qm_cl_touch_ro(&portal->addr, CL_##reg##_CENA)
144 #define qm_cl_touch_rw(reg) __qm_cl_touch_rw(&portal->addr, CL_##reg##_CENA)
145 #define qm_cl_in(reg) __qm_cl_in(&portal->addr, CL_##reg##_CENA)
146 #define qm_cl_out(reg, val) __qm_cl_out(&portal->addr, CL_##reg##_CENA, val)
147 #define qm_cl_invalidate(reg) __qm_cl_invalidate(&portal->addr, CL_##reg##_CENA)
149 /* Cyclic helper for rings. TODO: once we are able to do fine-grain perf
150 * analysis, look at using the "extra" bit in the ring index registers to avoid
/* Number of entries from 'first' (included) to 'last' (excluded) in a
 * cyclic ring of 'ringsize' entries; handles wrap-around.
 * (Truncation had dropped the braces and the branch condition.) */
static __inline__ uint8_t cyc_diff(uint8_t ringsize, uint8_t first, uint8_t last)
{
    /* 'first' is included, 'last' is excluded */
    if (first <= last)
        return (uint8_t)(last - first);
    return (uint8_t)(ringsize + last - first);
}
160 static __inline__ t_Error __qm_portal_bind(struct qm_portal *portal, uint8_t iface)
162 t_Error ret = E_BUSY;
163 if (!(portal->config.bound & iface)) {
164 portal->config.bound |= iface;
170 static __inline__ void __qm_portal_unbind(struct qm_portal *portal, uint8_t iface)
173 ASSERT_COND(portal->config.bound & iface);
174 #endif /* QM_CHECKING */
175 portal->config.bound &= ~iface;
178 /* ---------------- */
179 /* --- EQCR API --- */
181 /* It's safer to code in terms of the 'eqcr' object than the 'portal' object,
182 * because the latter runs the risk of copy-n-paste errors from other code where
183 * we could manipulate some other structure within 'portal'. */
184 /* #define EQCR_API_START() register struct qm_eqcr *eqcr = &portal->eqcr */
186 /* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
187 #define EQCR_CARRYCLEAR(p) \
188 (void *)((uintptr_t)(p) & (~(uintptr_t)(QM_EQCR_SIZE << 6)))
190 /* Bit-wise logic to convert a ring pointer to a ring index */
191 static __inline__ uint8_t EQCR_PTR2IDX(struct qm_eqcr_entry *e)
193 return (uint8_t)(((uintptr_t)e >> 6) & (QM_EQCR_SIZE - 1));
196 /* Increment the 'cursor' ring pointer, taking 'vbit' into account */
197 static __inline__ void EQCR_INC(struct qm_eqcr *eqcr)
199 /* NB: this is odd-looking, but experiments show that it generates fast
200 * code with essentially no branching overheads. We increment to the
201 * next EQCR pointer and handle overflow and 'vbit'. */
202 struct qm_eqcr_entry *partial = eqcr->cursor + 1;
203 eqcr->cursor = EQCR_CARRYCLEAR(partial);
204 if (partial != eqcr->cursor)
205 eqcr->vbit ^= QM_EQCR_VERB_VBIT;
/* Bind and initialize the EQCR sub-interface: snapshot the h/w producer and
 * consumer indices, derive cursor/vbit/available, and program QCSP_CFG::EPM.
 * NOTE(review): this copy is truncated — the local declarations (pi, cfg),
 * the QM_CHECKING block, the final CFG write and return are missing;
 * restore from the reference source before compiling. */
208 static __inline__ t_Error qm_eqcr_init(struct qm_portal *portal, e_QmPortalProduceMode pmode,
209 e_QmPortalEqcrConsumeMode cmode)
211 register struct qm_eqcr *eqcr = &portal->eqcr;
215 if (__qm_portal_bind(portal, QM_BIND_EQCR))
216 return ERROR_CODE(E_BUSY);
217 eqcr->ring = ptr_ADD(portal->addr.addr_ce, CL_EQCR);
/* indices are masked to ring range; bit QM_EQCR_SIZE of PI carries the vbit */
218 eqcr->ci = (uint8_t)(qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1));
219 qm_cl_invalidate(EQCR_CI);
220 pi = (uint8_t)(qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1));
221 eqcr->cursor = eqcr->ring + pi;
222 eqcr->vbit = (uint8_t)((qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ?
223 QM_EQCR_VERB_VBIT : 0);
224 eqcr->available = (uint8_t)(QM_EQCR_SIZE - 1 -
225 cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi));
226 eqcr->ithresh = (uint8_t)qm_in(EQCR_ITR);
234 #endif /* QM_CHECKING */
235 cfg = (qm_in(CFG) & 0x00ffffff) |
236 ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
/* Unbind the EQCR, warning about uncommitted entries or missed completions.
 * NOTE(review): truncated — braces, the #ifdef QM_CHECKING opener and the
 * condition guarding the "missing existing EQCR completions" warning are
 * missing from this copy. */
241 static __inline__ void qm_eqcr_finish(struct qm_portal *portal)
243 register struct qm_eqcr *eqcr = &portal->eqcr;
244 uint8_t pi = (uint8_t)(qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1));
245 uint8_t ci = (uint8_t)(qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1));
248 ASSERT_COND(!eqcr->busy);
249 #endif /* QM_CHECKING */
250 if (pi != EQCR_PTR2IDX(eqcr->cursor))
251 REPORT_ERROR(WARNING, E_INVALID_STATE, ("losing uncommitted EQCR entries"));
/* NOTE(review): missing `if (...)` line here — presumably compares ci. */
253 REPORT_ERROR(WARNING, E_INVALID_STATE, ("missing existing EQCR completions"));
254 if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor))
255 REPORT_ERROR(WARNING, E_INVALID_STATE, ("EQCR destroyed unquiesced"));
256 __qm_portal_unbind(portal, QM_BIND_EQCR);
/* Begin building an enqueue: zeroes (dcbz) and returns the cursor entry.
 * NOTE(review): truncated — the full-ring NULL return, busy bookkeeping and
 * the final return of the cursor are missing from this copy. */
259 static __inline__ struct qm_eqcr_entry *qm_eqcr_start(struct qm_portal *portal)
261 register struct qm_eqcr *eqcr = &portal->eqcr;
263 ASSERT_COND(!eqcr->busy);
264 #endif /* QM_CHECKING */
265 if (!eqcr->available)
269 #endif /* QM_CHECKING */
270 dcbz_64(eqcr->cursor);
/* Abandon an in-progress enqueue (debug-build state bookkeeping only).
 * NOTE(review): truncated — braces, #ifdef opener and the busy reset are
 * missing from this copy. */
274 static __inline__ void qm_eqcr_abort(struct qm_portal *portal)
277 register struct qm_eqcr *eqcr = &portal->eqcr;
278 ASSERT_COND(eqcr->busy);
282 #endif /* QM_CHECKING */
/* Commit the current entry with 'myverb' and immediately start the next one;
 * not usable in PVB produce mode.
 * NOTE(review): truncated — the ring-full early return, EQCR_INC advance and
 * final return are missing from this copy. */
285 static __inline__ struct qm_eqcr_entry *qm_eqcr_pend_and_next(struct qm_portal *portal, uint8_t myverb)
287 register struct qm_eqcr *eqcr = &portal->eqcr;
289 ASSERT_COND(eqcr->busy);
290 ASSERT_COND(eqcr->pmode != e_QmPortalPVB);
291 #endif /* QM_CHECKING */
292 if (eqcr->available == 1)
294 eqcr->cursor->__dont_write_directly__verb = (uint8_t)(myverb | eqcr->vbit);
295 dcbf_64(eqcr->cursor);
298 dcbz_64(eqcr->cursor);
303 #define EQCR_COMMIT_CHECKS(eqcr) \
305 ASSERT_COND(eqcr->busy); \
306 ASSERT_COND(eqcr->cursor->orp == (eqcr->cursor->orp & 0x00ffffff)); \
307 ASSERT_COND(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0x00ffffff)); \
311 #define EQCR_COMMIT_CHECKS(eqcr)
312 #endif /* QM_CHECKING */
/* PCI produce mode: write the verb (with vbit), flush the entry, then bump
 * the cache-inhibited producer index.
 * NOTE(review): truncated — braces, #ifdef openers, memory-barrier lines and
 * the busy reset appear to be missing from this copy. */
315 static __inline__ void qmPortalEqcrPciCommit(struct qm_portal *portal, uint8_t myverb)
317 register struct qm_eqcr *eqcr = &portal->eqcr;
319 EQCR_COMMIT_CHECKS(eqcr);
320 ASSERT_COND(eqcr->pmode == e_QmPortalPCI);
321 #endif /* QM_CHECKING */
322 eqcr->cursor->__dont_write_directly__verb = (uint8_t)(myverb | eqcr->vbit);
325 dcbf_64(eqcr->cursor);
327 qm_out(EQCR_PI_CINH, EQCR_PTR2IDX(eqcr->cursor));
330 #endif /* QM_CHECKING */
333 static __inline__ void qmPortalEqcrPcePrefetch(struct qm_portal *portal)
336 register struct qm_eqcr *eqcr = &portal->eqcr;
337 ASSERT_COND(eqcr->pmode == e_QmPortalPCE);
338 #endif /* QM_CHECKING */
339 qm_cl_invalidate(EQCR_PI);
340 qm_cl_touch_rw(EQCR_PI);
/* PCE produce mode: write verb+vbit, flush the entry, then bump the
 * cache-enabled producer index.
 * NOTE(review): truncated — braces, barrier lines and busy reset missing. */
343 static __inline__ void qmPortalEqcrPceCommit(struct qm_portal *portal, uint8_t myverb)
345 register struct qm_eqcr *eqcr = &portal->eqcr;
347 EQCR_COMMIT_CHECKS(eqcr);
348 ASSERT_COND(eqcr->pmode == e_QmPortalPCE);
349 #endif /* QM_CHECKING */
350 eqcr->cursor->__dont_write_directly__verb = (uint8_t)(myverb | eqcr->vbit);
353 dcbf_64(eqcr->cursor);
355 qm_cl_out(EQCR_PI, EQCR_PTR2IDX(eqcr->cursor));
358 #endif /* QM_CHECKING */
/* PVB produce mode: hardware polls the verb byte itself, so committing is
 * just writing verb|vbit into the entry (after a barrier).
 * NOTE(review): truncated — braces, barrier/flush lines and busy reset are
 * missing from this copy. */
361 static __inline__ void qmPortalEqcrPvbCommit(struct qm_portal *portal, uint8_t myverb)
363 register struct qm_eqcr *eqcr = &portal->eqcr;
364 struct qm_eqcr_entry *eqcursor;
366 EQCR_COMMIT_CHECKS(eqcr);
367 ASSERT_COND(eqcr->pmode == e_QmPortalPVB);
368 #endif /* QM_CHECKING */
370 eqcursor = eqcr->cursor;
371 eqcursor->__dont_write_directly__verb = (uint8_t)(myverb | eqcr->vbit);
377 #endif /* QM_CHECKING */
380 static __inline__ uint8_t qmPortalEqcrCciUpdate(struct qm_portal *portal)
382 register struct qm_eqcr *eqcr = &portal->eqcr;
383 uint8_t diff, old_ci = eqcr->ci;
385 ASSERT_COND(eqcr->cmode == e_QmPortalEqcrCCI);
386 #endif /* QM_CHECKING */
387 eqcr->ci = (uint8_t)(qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1));
388 diff = cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
389 eqcr->available += diff;
393 static __inline__ void qmPortalEqcrCcePrefetch(struct qm_portal *portal)
396 register struct qm_eqcr *eqcr = &portal->eqcr;
397 ASSERT_COND(eqcr->cmode == e_QmPortalEqcrCCE);
398 #endif /* QM_CHECKING */
399 qm_cl_touch_ro(EQCR_CI);
402 static __inline__ uint8_t qmPortalEqcrCceUpdate(struct qm_portal *portal)
404 register struct qm_eqcr *eqcr = &portal->eqcr;
405 uint8_t diff, old_ci = eqcr->ci;
407 ASSERT_COND(eqcr->cmode == e_QmPortalEqcrCCE);
408 #endif /* QM_CHECKING */
409 eqcr->ci = (uint8_t)(qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1));
410 qm_cl_invalidate(EQCR_CI);
411 diff = cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
412 eqcr->available += diff;
416 static __inline__ uint8_t qm_eqcr_get_ithresh(struct qm_portal *portal)
418 register struct qm_eqcr *eqcr = &portal->eqcr;
419 return eqcr->ithresh;
422 static __inline__ void qm_eqcr_set_ithresh(struct qm_portal *portal, uint8_t ithresh)
424 register struct qm_eqcr *eqcr = &portal->eqcr;
425 eqcr->ithresh = ithresh;
426 qm_out(EQCR_ITR, ithresh);
429 static __inline__ uint8_t qm_eqcr_get_avail(struct qm_portal *portal)
431 register struct qm_eqcr *eqcr = &portal->eqcr;
432 return eqcr->available;
435 static __inline__ uint8_t qm_eqcr_get_fill(struct qm_portal *portal)
437 register struct qm_eqcr *eqcr = &portal->eqcr;
438 return (uint8_t)(QM_EQCR_SIZE - 1 - eqcr->available);
443 /* ---------------- */
444 /* --- DQRR API --- */
446 /* TODO: many possible improvements;
447 * - look at changing the API to use pointer rather than index parameters now
448 * that 'cursor' is a pointer,
449 * - consider moving other parameters to pointer if it could help (ci)
452 /* It's safer to code in terms of the 'dqrr' object than the 'portal' object,
453 * because the latter runs the risk of copy-n-paste errors from other code where
454 * we could manipulate some other structure within 'portal'. */
455 /* #define DQRR_API_START() register struct qm_dqrr *dqrr = &portal->dqrr */
457 #define DQRR_CARRYCLEAR(p) \
458 (void *)((uintptr_t)(p) & (~(uintptr_t)(QM_DQRR_SIZE << 6)))
460 static __inline__ uint8_t DQRR_PTR2IDX(struct qm_dqrr_entry *e)
462 return (uint8_t)(((uintptr_t)e >> 6) & (QM_DQRR_SIZE - 1));
465 static __inline__ struct qm_dqrr_entry *DQRR_INC(struct qm_dqrr_entry *e)
467 return DQRR_CARRYCLEAR(e + 1);
470 static __inline__ void qm_dqrr_set_maxfill(struct qm_portal *portal, uint8_t mf)
472 qm_out(CFG, (qm_in(CFG) & 0xff0fffff) |
473 ((mf & (QM_DQRR_SIZE - 1)) << 20));
/* Bind and initialize the DQRR sub-interface: quiesce dequeue commands,
 * snapshot indices, set stashing flags, and program QCSP_CFG fields
 * (DQRR_MF/DP/DCM/RE/SE).
 * NOTE(review): truncated — local declarations (cfg), the QM_CHECKING
 * block, the stashing-flag conditionals, the final CFG write and return are
 * missing from this copy; restore from the reference source. */
476 static __inline__ t_Error qm_dqrr_init(struct qm_portal *portal, e_QmPortalDequeueMode dmode,
477 e_QmPortalProduceMode pmode, e_QmPortalDqrrConsumeMode cmode,
478 uint8_t max_fill, int stash_ring, int stash_data)
480 register struct qm_dqrr *dqrr = &portal->dqrr;
481 const struct qm_portal_config *config = &portal->config;
484 if (__qm_portal_bind(portal, QM_BIND_DQRR))
485 return ERROR_CODE(E_BUSY);
/* stashing requires the portal to be affine to a cpu */
486 if ((stash_ring || stash_data) && (config->cpu == -1))
487 return ERROR_CODE(E_INVALID_STATE);
488 /* Make sure the DQRR will be idle when we enable */
489 qm_out(DQRR_SDQCR, 0);
490 qm_out(DQRR_VDQCR, 0);
491 qm_out(DQRR_PDQCR, 0);
492 dqrr->ring = ptr_ADD(portal->addr.addr_ce, CL_DQRR);
493 dqrr->pi = (uint8_t)(qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1));
494 dqrr->ci = (uint8_t)(qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1));
495 dqrr->cursor = dqrr->ring + dqrr->ci;
496 dqrr->fill = cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
497 dqrr->vbit = (uint8_t)((qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ?
498 QM_DQRR_VERB_VBIT : 0);
499 dqrr->ithresh = (uint8_t)qm_in(DQRR_ITR);
507 dqrr->flags |= QM_DQRR_FLAG_RE;
509 dqrr->flags |= QM_DQRR_FLAG_SE;
512 #endif /* QM_CHECKING */
514 cfg = (qm_in(CFG) & 0xff000f00) |
515 ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
516 ((dmode & 1) << 18) | /* DP */
517 ((cmode & 3) << 16) | /* DCM */
518 (stash_ring ? 0x80 : 0) | /* RE */
519 (0 ? 0x40 : 0) | /* Ignore RP */
520 (stash_data ? 0x20 : 0) | /* SE */
521 (0 ? 0x10 : 0); /* Ignore SP */
527 static __inline__ void qm_dqrr_finish(struct qm_portal *portal)
529 register struct qm_dqrr *dqrr = &portal->dqrr;
530 if (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor))
531 REPORT_ERROR(WARNING, E_INVALID_STATE, ("Ignoring completed DQRR entries"));
532 __qm_portal_unbind(portal, QM_BIND_DQRR);
/* Return the current DQRR entry, or NULL when the ring is empty.
 * NOTE(review): truncated — the fill check and return statement are missing
 * from this copy. */
535 static __inline__ struct qm_dqrr_entry *qm_dqrr_current(struct qm_portal *portal)
537 register struct qm_dqrr *dqrr = &portal->dqrr;
543 static __inline__ uint8_t qm_dqrr_cursor(struct qm_portal *portal)
545 register struct qm_dqrr *dqrr = &portal->dqrr;
546 return DQRR_PTR2IDX(dqrr->cursor);
/* Advance the cursor past the current entry.
 * NOTE(review): truncated — the fill decrement/return is missing from this
 * copy (the function returns uint8_t). */
549 static __inline__ uint8_t qm_dqrr_next(struct qm_portal *portal)
551 register struct qm_dqrr *dqrr = &portal->dqrr;
553 ASSERT_COND(dqrr->fill);
555 dqrr->cursor = DQRR_INC(dqrr->cursor);
/* PCI produce mode: re-read the h/w producer index (cache-inhibited) and
 * account for newly produced entries.
 * NOTE(review): truncated — the fill update and return of 'diff' are
 * missing from this copy. */
559 static __inline__ uint8_t qmPortalDqrrPciUpdate(struct qm_portal *portal)
561 register struct qm_dqrr *dqrr = &portal->dqrr;
562 uint8_t diff, old_pi = dqrr->pi;
564 ASSERT_COND(dqrr->pmode == e_QmPortalPCI);
565 #endif /* QM_CHECKING */
566 dqrr->pi = (uint8_t)(qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1));
567 diff = cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
572 static __inline__ void qmPortalDqrrPcePrefetch(struct qm_portal *portal)
575 register struct qm_dqrr *dqrr = &portal->dqrr;
576 ASSERT_COND(dqrr->pmode == e_QmPortalPCE);
577 #endif /* QM_CHECKING */
578 qm_cl_invalidate(DQRR_PI);
579 qm_cl_touch_ro(DQRR_PI);
/* PCE produce mode: read the cache-enabled producer index and account for
 * newly produced entries.
 * NOTE(review): truncated — the fill update and return of 'diff' are
 * missing from this copy. */
582 static __inline__ uint8_t qmPortalDqrrPceUpdate(struct qm_portal *portal)
584 register struct qm_dqrr *dqrr = &portal->dqrr;
585 uint8_t diff, old_pi = dqrr->pi;
587 ASSERT_COND(dqrr->pmode == e_QmPortalPCE);
588 #endif /* QM_CHECKING */
589 dqrr->pi = (uint8_t)(qm_cl_in(DQRR_PI) & (QM_DQRR_SIZE - 1));
590 diff = cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
/* PVB produce mode: prefetch-with-invalidate the next entry h/w will write,
 * unless ring stashing (QM_DQRR_FLAG_RE) makes that counter-productive.
 * NOTE(review): truncated — braces/#ifdef placement is ambiguous in this
 * copy (the #endif at 602 appears misplaced relative to the 'if'). */
595 static __inline__ void qmPortalDqrrPvbPrefetch(struct qm_portal *portal)
597 register struct qm_dqrr *dqrr = &portal->dqrr;
599 ASSERT_COND(dqrr->pmode == e_QmPortalPVB);
600 /* If ring entries get stashed, don't invalidate/prefetch */
601 if (!(dqrr->flags & QM_DQRR_FLAG_RE))
602 #endif /*QM_CHECKING */
603 dcbit_ro(ptr_ADD(dqrr->ring, qm_cl(dqrr->pi)));
/* PVB produce mode: poll the next entry's verb valid-bit; if it matches the
 * expected vbit a new entry has arrived — advance pi and toggle vbit on wrap.
 * NOTE(review): truncated — fill accounting and the return value are
 * missing from this copy. */
606 static __inline__ uint8_t qmPortalDqrrPvbUpdate(struct qm_portal *portal)
608 register struct qm_dqrr *dqrr = &portal->dqrr;
609 struct qm_dqrr_entry *res = ptr_ADD(dqrr->ring, qm_cl(dqrr->pi));
611 ASSERT_COND(dqrr->pmode == e_QmPortalPVB);
612 #endif /* QM_CHECKING */
613 if ((res->verb & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
614 dqrr->pi = (uint8_t)((dqrr->pi + 1) & (QM_DQRR_SIZE - 1));
616 dqrr->vbit ^= QM_DQRR_VERB_VBIT;
623 static __inline__ void qmPortalDqrrCciConsume(struct qm_portal *portal, uint8_t num)
625 register struct qm_dqrr *dqrr = &portal->dqrr;
627 ASSERT_COND(dqrr->cmode == e_QmPortalDqrrCCI);
628 #endif /* QM_CHECKING */
629 dqrr->ci = (uint8_t)((dqrr->ci + num) & (QM_DQRR_SIZE - 1));
630 qm_out(DQRR_CI_CINH, dqrr->ci);
633 static __inline__ void qmPortalDqrrCciConsumeToCurrent(struct qm_portal *portal)
635 register struct qm_dqrr *dqrr = &portal->dqrr;
637 ASSERT_COND(dqrr->cmode == e_QmPortalDqrrCCI);
638 #endif /* QM_CHECKING */
639 dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
640 qm_out(DQRR_CI_CINH, dqrr->ci);
643 static __inline__ void qmPortalDqrrCcePrefetch(struct qm_portal *portal)
646 register struct qm_dqrr *dqrr = &portal->dqrr;
647 ASSERT_COND(dqrr->cmode == e_QmPortalDqrrCCE);
648 #endif /* QM_CHECKING */
649 qm_cl_invalidate(DQRR_CI);
650 qm_cl_touch_rw(DQRR_CI);
653 static __inline__ void qmPortalDqrrCceConsume(struct qm_portal *portal, uint8_t num)
655 register struct qm_dqrr *dqrr = &portal->dqrr;
657 ASSERT_COND(dqrr->cmode == e_QmPortalDqrrCCE);
658 #endif /* QM_CHECKING */
659 dqrr->ci = (uint8_t)((dqrr->ci + num) & (QM_DQRR_SIZE - 1));
660 qm_cl_out(DQRR_CI, dqrr->ci);
663 static __inline__ void qmPortalDqrrCceConsume_to_current(struct qm_portal *portal)
665 register struct qm_dqrr *dqrr = &portal->dqrr;
667 ASSERT_COND(dqrr->cmode == e_QmPortalDqrrCCE);
668 #endif /* QM_CHECKING */
669 dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
670 qm_cl_out(DQRR_CI, dqrr->ci);
/* DCA consume mode: acknowledge a single entry by index via DQRR_DCAP, with
 * optional park (PK bit).
 * NOTE(review): truncated — the final DCAP_CI operand line of the qm_out
 * statement is missing from this copy. */
673 static __inline__ void qmPortalDqrrDcaConsume1(struct qm_portal *portal, uint8_t idx, bool park)
676 register struct qm_dqrr *dqrr = &portal->dqrr;
677 ASSERT_COND(dqrr->cmode == e_QmPortalDqrrDCA);
678 #endif /* QM_CHECKING */
679 ASSERT_COND(idx < QM_DQRR_SIZE);
680 qm_out(DQRR_DCAP, (0 << 8) | /* S */
681 ((uint32_t)(park ? 1 : 0) << 6) | /* PK */
/* DCA consume mode: acknowledge a single entry given its pointer, with
 * optional park.
 * NOTE(review): truncated — a parameter line (presumably `bool park`) and
 * braces are missing from this copy. */
685 static __inline__ void qmPortalDqrrDcaConsume1ptr(struct qm_portal *portal,
686 struct qm_dqrr_entry *dq,
689 uint8_t idx = DQRR_PTR2IDX(dq);
691 register struct qm_dqrr *dqrr = &portal->dqrr;
693 ASSERT_COND(dqrr->cmode == e_QmPortalDqrrDCA);
694 ASSERT_COND((dqrr->ring + idx) == dq);
695 ASSERT_COND(idx < QM_DQRR_SIZE);
696 #endif /* QM_CHECKING */
697 qm_out(DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
698 ((uint32_t)(park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */
699 idx); /* DQRR_DCAP::DCAP_CI */
702 static __inline__ void qmPortalDqrrDcaConsumeN(struct qm_portal *portal, uint16_t bitmask)
705 register struct qm_dqrr *dqrr = &portal->dqrr;
706 ASSERT_COND(dqrr->cmode == e_QmPortalDqrrDCA);
707 #endif /* QM_CHECKING */
708 qm_out(DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
709 ((uint32_t)bitmask << 16)); /* DQRR_DCAP::DCAP_CI */
712 static __inline__ uint8_t qmPortalDqrrDcaCci(struct qm_portal *portal)
715 register struct qm_dqrr *dqrr = &portal->dqrr;
716 ASSERT_COND(dqrr->cmode == e_QmPortalDqrrDCA);
717 #endif /* QM_CHECKING */
718 return (uint8_t)(qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1));
721 static __inline__ void qmPortalDqrrDcaCcePrefetch(struct qm_portal *portal)
724 register struct qm_dqrr *dqrr = &portal->dqrr;
725 ASSERT_COND(dqrr->cmode == e_QmPortalDqrrDCA);
726 #endif /* QM_CHECKING */
727 qm_cl_invalidate(DQRR_CI);
728 qm_cl_touch_ro(DQRR_CI);
731 static __inline__ uint8_t qmPortalDqrrDcaCce(struct qm_portal *portal)
734 register struct qm_dqrr *dqrr = &portal->dqrr;
735 ASSERT_COND(dqrr->cmode == e_QmPortalDqrrDCA);
736 #endif /* QM_CHECKING */
737 return (uint8_t)(qm_cl_in(DQRR_CI) & (QM_DQRR_SIZE - 1));
/* Return the software-tracked consumer index (not valid in DCA mode).
 * NOTE(review): truncated — the return statement is missing from this copy
 * (presumably `return dqrr->ci;`). */
740 static __inline__ uint8_t qm_dqrr_get_ci(struct qm_portal *portal)
742 register struct qm_dqrr *dqrr = &portal->dqrr;
744 ASSERT_COND(dqrr->cmode != e_QmPortalDqrrDCA);
745 #endif /* QM_CHECKING */
750 static __inline__ void qm_dqrr_park(struct qm_portal *portal, uint8_t idx)
753 register struct qm_dqrr *dqrr = &portal->dqrr;
754 ASSERT_COND(dqrr->cmode != e_QmPortalDqrrDCA);
755 #endif /* QM_CHECKING */
757 qm_out(DQRR_DCAP, (0 << 8) | /* S */
758 (uint32_t)(1 << 6) | /* PK */
759 (idx & (QM_DQRR_SIZE - 1))); /* DCAP_CI */
762 static __inline__ void qm_dqrr_park_ci(struct qm_portal *portal)
764 register struct qm_dqrr *dqrr = &portal->dqrr;
766 ASSERT_COND(dqrr->cmode != e_QmPortalDqrrDCA);
767 #endif /* QM_CHECKING */
768 qm_out(DQRR_DCAP, (0 << 8) | /* S */
769 (uint32_t)(1 << 6) | /* PK */
770 (dqrr->ci & (QM_DQRR_SIZE - 1)));/* DCAP_CI */
773 static __inline__ void qm_dqrr_sdqcr_set(struct qm_portal *portal, uint32_t sdqcr)
775 qm_out(DQRR_SDQCR, sdqcr);
778 static __inline__ uint32_t qm_dqrr_sdqcr_get(struct qm_portal *portal)
780 return qm_in(DQRR_SDQCR);
783 static __inline__ void qm_dqrr_vdqcr_set(struct qm_portal *portal, uint32_t vdqcr)
785 qm_out(DQRR_VDQCR, vdqcr);
788 static __inline__ uint32_t qm_dqrr_vdqcr_get(struct qm_portal *portal)
790 return qm_in(DQRR_VDQCR);
793 static __inline__ void qm_dqrr_pdqcr_set(struct qm_portal *portal, uint32_t pdqcr)
795 qm_out(DQRR_PDQCR, pdqcr);
798 static __inline__ uint32_t qm_dqrr_pdqcr_get(struct qm_portal *portal)
800 return qm_in(DQRR_PDQCR);
803 static __inline__ uint8_t qm_dqrr_get_ithresh(struct qm_portal *portal)
805 register struct qm_dqrr *dqrr = &portal->dqrr;
806 return dqrr->ithresh;
809 static __inline__ void qm_dqrr_set_ithresh(struct qm_portal *portal, uint8_t ithresh)
811 qm_out(DQRR_ITR, ithresh);
814 static __inline__ uint8_t qm_dqrr_get_maxfill(struct qm_portal *portal)
816 return (uint8_t)((qm_in(CFG) & 0x00f00000) >> 20);
822 /* It's safer to code in terms of the 'mr' object than the 'portal' object,
823 * because the latter runs the risk of copy-n-paste errors from other code where
824 * we could manipulate some other structure within 'portal'. */
825 /* #define MR_API_START() register struct qm_mr *mr = &portal->mr */
827 #define MR_CARRYCLEAR(p) \
828 (void *)((uintptr_t)(p) & (~(uintptr_t)(QM_MR_SIZE << 6)))
830 static __inline__ uint8_t MR_PTR2IDX(struct qm_mr_entry *e)
832 return (uint8_t)(((uintptr_t)e >> 6) & (QM_MR_SIZE - 1));
835 static __inline__ struct qm_mr_entry *MR_INC(struct qm_mr_entry *e)
837 return MR_CARRYCLEAR(e + 1);
/* Bind and initialize the MR (message ring) sub-interface: snapshot indices,
 * derive cursor/fill/vbit, and program QCSP_CFG::MM.
 * NOTE(review): truncated — local declarations (cfg), the QM_CHECKING block,
 * the final CFG write and return are missing from this copy. */
840 static __inline__ t_Error qm_mr_init(struct qm_portal *portal, e_QmPortalProduceMode pmode,
841 e_QmPortalMrConsumeMode cmode)
843 register struct qm_mr *mr = &portal->mr;
846 if (__qm_portal_bind(portal, QM_BIND_MR))
847 return ERROR_CODE(E_BUSY);
848 mr->ring = ptr_ADD(portal->addr.addr_ce, CL_MR);
849 mr->pi = (uint8_t)(qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1));
850 mr->ci = (uint8_t)(qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1));
851 mr->cursor = mr->ring + mr->ci;
852 mr->fill = cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
/* bit QM_MR_SIZE of PI carries the valid-bit */
853 mr->vbit = (uint8_t)((qm_in(MR_PI_CINH) & QM_MR_SIZE) ?QM_MR_VERB_VBIT : 0);
854 mr->ithresh = (uint8_t)qm_in(MR_ITR);
861 #endif /* QM_CHECKING */
862 cfg = (qm_in(CFG) & 0xfffff0ff) |
863 ((cmode & 1) << 8); /* QCSP_CFG:MM */
869 static __inline__ void qm_mr_finish(struct qm_portal *portal)
871 register struct qm_mr *mr = &portal->mr;
872 if (mr->ci != MR_PTR2IDX(mr->cursor))
873 REPORT_ERROR(WARNING, E_INVALID_STATE, ("Ignoring completed MR entries"));
874 __qm_portal_unbind(portal, QM_BIND_MR);
/* Prefetch the current MR entry's cacheline.
 * NOTE(review): truncated — the prefetch statement itself is missing from
 * this copy. */
877 static __inline__ void qm_mr_current_prefetch(struct qm_portal *portal)
879 register struct qm_mr *mr = &portal->mr;
/* Return the current MR entry, or NULL when the ring is empty.
 * NOTE(review): truncated — the fill check and return statement are missing
 * from this copy. */
883 static __inline__ struct qm_mr_entry *qm_mr_current(struct qm_portal *portal)
885 register struct qm_mr *mr = &portal->mr;
891 static __inline__ uint8_t qm_mr_cursor(struct qm_portal *portal)
893 register struct qm_mr *mr = &portal->mr;
894 return MR_PTR2IDX(mr->cursor);
/* Advance the cursor past the current MR entry.
 * NOTE(review): truncated — the fill decrement/return is missing from this
 * copy (the function returns uint8_t). */
897 static __inline__ uint8_t qm_mr_next(struct qm_portal *portal)
899 register struct qm_mr *mr = &portal->mr;
901 ASSERT_COND(mr->fill);
902 #endif /* QM_CHECKING */
903 mr->cursor = MR_INC(mr->cursor);
/* PCI produce mode: re-read the MR producer index and account for new
 * entries. NOTE(review): truncated — the fill update and return of 'diff'
 * are missing; also note the PI read here is NOT masked with
 * (QM_MR_SIZE - 1), unlike the PCE variant — confirm against reference. */
907 static __inline__ uint8_t qmPortalMrPciUpdate(struct qm_portal *portal)
909 register struct qm_mr *mr = &portal->mr;
910 uint8_t diff, old_pi = mr->pi;
912 ASSERT_COND(mr->pmode == e_QmPortalPCI);
913 #endif /* QM_CHECKING */
914 mr->pi = (uint8_t)qm_in(MR_PI_CINH);
915 diff = cyc_diff(QM_MR_SIZE, old_pi, mr->pi);
920 static __inline__ void qmPortalMrPcePrefetch(struct qm_portal *portal)
923 register struct qm_mr *mr = &portal->mr;
924 ASSERT_COND(mr->pmode == e_QmPortalPCE);
925 #endif /* QM_CHECKING */
926 qm_cl_invalidate(MR_PI);
927 qm_cl_touch_ro(MR_PI);
/* PCE produce mode: read the cache-enabled MR producer index and account for
 * new entries. NOTE(review): truncated — the fill update and return of
 * 'diff' are missing from this copy. */
930 static __inline__ uint8_t qmPortalMrPceUpdate(struct qm_portal *portal)
932 register struct qm_mr *mr = &portal->mr;
933 uint8_t diff, old_pi = mr->pi;
935 ASSERT_COND(mr->pmode == e_QmPortalPCE);
936 #endif /* QM_CHECKING */
937 mr->pi = (uint8_t)(qm_cl_in(MR_PI) & (QM_MR_SIZE - 1));
938 diff = cyc_diff(QM_MR_SIZE, old_pi, mr->pi);
/* PVB produce mode: poll the next entry's verb valid-bit; if it matches the
 * expected vbit a new message has arrived — advance pi, toggling vbit on
 * ring wrap. NOTE(review): truncated — fill accounting and the closing of
 * the if-block are missing from this copy. */
943 static __inline__ void qmPortalMrPvbUpdate(struct qm_portal *portal)
945 register struct qm_mr *mr = &portal->mr;
946 struct qm_mr_entry *res = ptr_ADD(mr->ring, qm_cl(mr->pi));
948 ASSERT_COND(mr->pmode == e_QmPortalPVB);
949 #endif /* QM_CHECKING */
950 dcbit_ro(ptr_ADD(mr->ring, qm_cl(mr->pi)));
951 if ((res->verb & QM_MR_VERB_VBIT) == mr->vbit) {
952 mr->pi = (uint8_t)((mr->pi + 1) & (QM_MR_SIZE - 1));
954 mr->vbit ^= QM_MR_VERB_VBIT;
959 static __inline__ void qmPortalMrCciConsume(struct qm_portal *portal, uint8_t num)
961 register struct qm_mr *mr = &portal->mr;
963 ASSERT_COND(mr->cmode == e_QmPortalMrCCI);
964 #endif /* QM_CHECKING */
965 mr->ci = (uint8_t)((mr->ci + num) & (QM_MR_SIZE - 1));
966 qm_out(MR_CI_CINH, mr->ci);
969 static __inline__ void qmPortalMrCciConsumeToCurrent(struct qm_portal *portal)
971 register struct qm_mr *mr = &portal->mr;
973 ASSERT_COND(mr->cmode == e_QmPortalMrCCI);
974 #endif /* QM_CHECKING */
975 mr->ci = MR_PTR2IDX(mr->cursor);
976 qm_out(MR_CI_CINH, mr->ci);
979 static __inline__ void qmPortalMrCcePrefetch(struct qm_portal *portal)
982 register struct qm_mr *mr = &portal->mr;
983 ASSERT_COND(mr->cmode == e_QmPortalMrCCE);
984 #endif /* QM_CHECKING */
985 qm_cl_invalidate(MR_CI);
986 qm_cl_touch_rw(MR_CI);
989 static __inline__ void qmPortalMrCceConsume(struct qm_portal *portal, uint8_t num)
991 register struct qm_mr *mr = &portal->mr;
993 ASSERT_COND(mr->cmode == e_QmPortalMrCCE);
994 #endif /* QM_CHECKING */
995 mr->ci = (uint8_t)((mr->ci + num) & (QM_MR_SIZE - 1));
996 qm_cl_out(MR_CI, mr->ci);
999 static __inline__ void qmPortalMrCceConsumeToCurrent(struct qm_portal *portal)
1001 register struct qm_mr *mr = &portal->mr;
1003 ASSERT_COND(mr->cmode == e_QmPortalMrCCE);
1004 #endif /* QM_CHECKING */
1005 mr->ci = MR_PTR2IDX(mr->cursor);
1006 qm_cl_out(MR_CI, mr->ci);
/* Return the software-tracked MR consumer index.
 * NOTE(review): truncated — the return statement is missing from this copy
 * (presumably `return mr->ci;`). */
1009 static __inline__ uint8_t qm_mr_get_ci(struct qm_portal *portal)
1011 register struct qm_mr *mr = &portal->mr;
/* Return the software-cached MR interrupt threshold.
 * NOTE(review): truncated — the return statement is missing from this copy
 * (presumably `return mr->ithresh;`). */
1015 static __inline__ uint8_t qm_mr_get_ithresh(struct qm_portal *portal)
1017 register struct qm_mr *mr = &portal->mr;
1021 static __inline__ void qm_mr_set_ithresh(struct qm_portal *portal, uint8_t ithresh)
1023 qm_out(MR_ITR, ithresh);
1026 /* ------------------------------ */
1027 /* --- Management command API --- */
1029 /* It's safer to code in terms of the 'mc' object than the 'portal' object,
1030 * because the latter runs the risk of copy-n-paste errors from other code where
1031 * we could manipulate some other structure within 'portal'. */
1032 /* #define MC_API_START() register struct qm_mc *mc = &portal->mc */
/* Bind and initialize the management-command interface: map the CR and RR
 * cachelines and recover rridx/vbit from the CR verb's valid-bit.
 * NOTE(review): truncated — the result-value lines of the rridx ternary and
 * the final return are missing from this copy. */
1034 static __inline__ t_Error qm_mc_init(struct qm_portal *portal)
1036 register struct qm_mc *mc = &portal->mc;
1037 if (__qm_portal_bind(portal, QM_BIND_MC))
1038 return ERROR_CODE(E_BUSY);
1039 mc->cr = ptr_ADD(portal->addr.addr_ce, CL_CR);
1040 mc->rr = ptr_ADD(portal->addr.addr_ce, CL_RR0);
1041 mc->rridx = (uint8_t)((mc->cr->__dont_write_directly__verb & QM_MCC_VERB_VBIT) ?
1043 mc->vbit = (uint8_t)(mc->rridx ? QM_MCC_VERB_VBIT : 0);
1045 mc->state = mc_idle;
1046 #endif /* QM_CHECKING */
1050 static __inline__ void qm_mc_finish(struct qm_portal *portal)
1053 register struct qm_mc *mc = &portal->mc;
1054 ASSERT_COND(mc->state == mc_idle);
1055 if (mc->state != mc_idle)
1056 REPORT_ERROR(WARNING, E_INVALID_STATE, ("Losing incomplete MC command"));
1057 #endif /* QM_CHECKING */
1058 __qm_portal_unbind(portal, QM_BIND_MC);
/* Begin composing a management command; returns the CR buffer to fill in.
 * NOTE(review): truncated — the return statement (and any CR preparation)
 * is missing from this copy. */
1061 static __inline__ struct qm_mc_command *qm_mc_start(struct qm_portal *portal)
1063 register struct qm_mc *mc = &portal->mc;
1065 ASSERT_COND(mc->state == mc_idle);
1066 mc->state = mc_user;
1067 #endif /* QM_CHECKING */
/* Abandon a command being composed (debug-build state bookkeeping only).
 * NOTE(review): truncated — braces and intervening lines (orig. 1078-1079)
 * are missing from this copy. */
1072 static __inline__ void qm_mc_abort(struct qm_portal *portal)
1075 register struct qm_mc *mc = &portal->mc;
1076 ASSERT_COND(mc->state == mc_user);
1077 mc->state = mc_idle;
1080 #endif /* QM_CHECKING */
/* Submit the composed command: write verb|vbit into CR, then prefetch the
 * response register that h/w will fill.
 * NOTE(review): truncated — barrier/flush lines and the state transition to
 * mc_hw are missing from this copy. */
1083 static __inline__ void qm_mc_commit(struct qm_portal *portal, uint8_t myverb)
1085 register struct qm_mc *mc = &portal->mc;
1087 ASSERT_COND(mc->state == mc_user);
1088 #endif /* QM_CHECKING */
1090 mc->cr->__dont_write_directly__verb = (uint8_t)(myverb | mc->vbit);
1092 dcbit_ro(mc->rr + mc->rridx);
1095 #endif /* QM_CHECKING */
/* Poll for the response to the last committed command; the inactive RR's
 * verb reads zero until h/w completes, so a zero verb means "not yet".
 * NOTE(review): truncated — the verb poll/early-return, rridx toggle and
 * the final return of 'rr' are missing from this copy. */
1098 static __inline__ struct qm_mc_result *qm_mc_result(struct qm_portal *portal)
1100 register struct qm_mc *mc = &portal->mc;
1101 struct qm_mc_result *rr = mc->rr + mc->rridx;
1103 ASSERT_COND(mc->state == mc_hw);
1104 #endif /* QM_CHECKING */
1105 /* The inactive response register's verb byte always returns zero until
1106 * its command is submitted and completed. This includes the valid-bit,
1107 * in case you were wondering... */
1113 mc->vbit ^= QM_MCC_VERB_VBIT;
1115 mc->state = mc_idle;
1116 #endif /* QM_CHECKING */
1120 /* ------------------------------------- */
1121 /* --- Portal interrupt register API --- */
1123 static __inline__ t_Error qm_isr_init(struct qm_portal *portal)
1125 if (__qm_portal_bind(portal, QM_BIND_ISR))
1126 return ERROR_CODE(E_BUSY);
1130 static __inline__ void qm_isr_finish(struct qm_portal *portal)
1132 __qm_portal_unbind(portal, QM_BIND_ISR);
1135 static __inline__ void qm_isr_set_iperiod(struct qm_portal *portal, uint16_t iperiod)
1137 qm_out(ITPR, iperiod);
1140 static __inline__ uint32_t __qm_isr_read(struct qm_portal *portal, enum qm_isr_reg n)
1142 return __qm_in(&portal->addr, REG_ISR + (n << 2));
1145 static __inline__ void __qm_isr_write(struct qm_portal *portal, enum qm_isr_reg n, uint32_t val)
1147 __qm_out(&portal->addr, REG_ISR + (n << 2), val);