2 * Copyright (c) 2006 Sam Leffler, Errno Consulting
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13 * redistribution must be conditioned upon including a substantially
14 * similar Disclaimer requirement for further binary redistribution.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGES.
31 * Copyright (c) 2001-2005, Intel Corporation.
32 * All rights reserved.
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
42 * 3. Neither the name of the Intel Corporation nor the names of its contributors
43 * may be used to endorse or promote products derived from this software
44 * without specific prior written permission.
47 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
48 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 #include <sys/cdefs.h>
60 __FBSDID("$FreeBSD$");
63 * Intel XScale Queue Manager support.
65 * Each IXP4XXX device has a hardware block that implements a priority
66 * queue manager that is shared between the XScale cpu and the backend
67 * devices (such as the NPE). Queues are accessed by reading/writing
68 * special memory locations. The queue contents are mapped into a shared
69 * SRAM region with entries managed in a circular buffer. The XScale
70 * processor can receive interrupts based on queue contents (a condition
71 * code determines when interrupts should be delivered).
73 * The code here basically replaces the qmgr class in the Intel Access
76 #include <sys/param.h>
77 #include <sys/systm.h>
78 #include <sys/kernel.h>
79 #include <sys/module.h>
82 #include <sys/resource.h>
84 #include <sys/sysctl.h>
86 #include <machine/bus.h>
87 #include <machine/cpu.h>
88 #include <machine/cpufunc.h>
89 #include <machine/resource.h>
90 #include <machine/intr.h>
91 #include <arm/xscale/ixp425/ixp425reg.h>
92 #include <arm/xscale/ixp425/ixp425var.h>
94 #include <arm/xscale/ixp425/ixp425_qmgr.h>
/*
 * State per AQM hw queue.
 * This structure holds q configuration and dispatch state.
 */
	int qSizeInWords;		/* queue size in words */

	uint32_t qOflowStatBitMask;	/* overflow status mask */
	int qWriteCount;		/* queue write count */

	bus_size_t qAccRegAddr;		/* access register */
	bus_size_t qUOStatRegAddr;	/* status register */
	bus_size_t qConfigRegAddr;	/* config register */
	int qSizeInEntries;		/* queue size in entries */

	uint32_t qUflowStatBitMask;	/* underflow status mask */
	int qReadCount;			/* queue read count */

	/* Lower-group (0-31) queues: status reg address and bit layout */
	uint32_t qStatRegAddr;		/* status register address */
	uint32_t qStatBitsOffset;	/* bit offset of this q's status nibble */
	/* Upper-group (32-63) queues: single-bit status masks */
	uint32_t qStat0BitMask;		/* nearly-empty status bit */
	uint32_t qStat1BitMask;		/* full status bit */

	uint32_t intRegCheckMask;	/* interrupt reg check mask */
	void (*cb)(int, void *);	/* callback function */
	void *cbarg;			/* callback argument */
	int priority;			/* dispatch priority */
	/* NB: needed only for A0 parts */
	u_int statusWordOffset;		/* status word offset */
	uint32_t statusMask;		/* status mask */
	uint32_t statusCheckValue;	/* status check value */
/* Per-device (single instance) queue manager state. */
struct ixpqmgr_softc {
	bus_space_tag_t sc_iot;		/* register space tag */
	bus_space_handle_t sc_ioh;	/* register space handle */
	struct resource *sc_irq;	/* IRQ resource */
	void *sc_ih;			/* interrupt handler */
	int sc_rid;			/* resource id for irq */
	struct qmgrInfo qinfo[IX_QMGR_MAX_NUM_QUEUES];
	/*
	 * This array contains a list of queue identifiers ordered by
	 * priority. The table is split logically between queue
	 * identifiers 0-31 and 32-63. To optimize lookups bit masks
	 * are kept for the first-32 and last-32 q's. When the
	 * table needs to be rebuilt mark rebuildTable and it'll
	 * happen after the next interrupt.
	 */
	int priorityTable[IX_QMGR_MAX_NUM_QUEUES];
	uint32_t lowPriorityTableFirstHalfMask;
	uint32_t uppPriorityTableFirstHalfMask;
	int rebuildTable;		/* rebuild priorityTable */
	uint32_t aqmFreeSramAddress;	/* SRAM free space */
/* Debug knob: settable via the debug.qmgr sysctl or loader tunable. */
static int qmgr_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, qmgr, CTLFLAG_RW, &qmgr_debug,
	   0, "IXP425 Q-Manager debug msgs");
TUNABLE_INT("debug.qmgr", &qmgr_debug);
#define	DPRINTF(dev, fmt, ...) do {					\
	if (qmgr_debug) printf(fmt, __VA_ARGS__);			\
#define	DPRINTFn(n, dev, fmt, ...) do {					\
	if (qmgr_debug >= n) printf(fmt, __VA_ARGS__);			\
static struct ixpqmgr_softc *ixpqmgr_sc = NULL;

static void ixpqmgr_rebuild(struct ixpqmgr_softc *);
static void ixpqmgr_intr(void *);

/* forward decls for AQM register helpers used before their definitions */
static void aqm_int_enable(struct ixpqmgr_softc *sc, int qId);
static void aqm_int_disable(struct ixpqmgr_softc *sc, int qId);
static void aqm_qcfg(struct ixpqmgr_softc *sc, int qId, u_int ne, u_int nf);
static void aqm_srcsel_write(struct ixpqmgr_softc *sc, int qId, int sourceId);
static void aqm_reset(struct ixpqmgr_softc *sc);
180 dummyCallback(int qId, void *arg)
/* Read a 32-bit AQM register at byte offset 'off'. */
aqm_reg_read(struct ixpqmgr_softc *sc, bus_size_t off)
	DPRINTFn(9, sc->sc_dev, "%s(0x%x)\n", __func__, (int)off);
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
/* Write the 32-bit AQM register at byte offset 'off'. */
aqm_reg_write(struct ixpqmgr_softc *sc, bus_size_t off, uint32_t val)
	DPRINTFn(9, sc->sc_dev, "%s(0x%x, 0x%x)\n", __func__, (int)off, val);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
/* Device probe: just set the description (device is fixed on this SoC). */
ixpqmgr_probe(device_t dev)
	device_set_desc(dev, "IXP425 Q-Manager");
207 ixpqmgr_attach(device_t dev)
209 struct ixpqmgr_softc *sc = device_get_softc(dev);
210 struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
216 sc->sc_iot = sa->sc_iot;
217 if (bus_space_map(sc->sc_iot, IXP425_QMGR_HWBASE, IXP425_QMGR_SIZE,
219 panic("%s: Cannot map registers", device_get_name(dev));
221 /* NB: we only use the lower 32 q's */
222 sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->sc_rid,
223 IXP425_INT_QUE1_32, IXP425_INT_QUE33_64, 2, RF_ACTIVE);
225 panic("Unable to allocate the qmgr irqs.\n");
226 /* XXX could be a source of entropy */
227 bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
228 NULL, ixpqmgr_intr, NULL, &sc->sc_ih);
230 /* NB: softc is pre-zero'd */
231 for (i = 0; i < IX_QMGR_MAX_NUM_QUEUES; i++) {
232 struct qmgrInfo *qi = &sc->qinfo[i];
234 qi->cb = dummyCallback;
235 qi->priority = IX_QMGR_Q_PRIORITY_0; /* default priority */
237 * There are two interrupt registers, 32 bits each. One
238 * for the lower queues(0-31) and one for the upper
239 * queues(32-63). Therefore need to mod by 32 i.e the
240 * min upper queue identifier.
242 qi->intRegCheckMask = (1<<(i%(IX_QMGR_MIN_QUEUPP_QID)));
245 * Register addresses and bit masks are calculated and
246 * stored here to optimize QRead, QWrite and QStatusGet
250 /* AQM Queue access reg addresses, per queue */
251 qi->qAccRegAddr = IX_QMGR_Q_ACCESS_ADDR_GET(i);
252 qi->qAccRegAddr = IX_QMGR_Q_ACCESS_ADDR_GET(i);
253 qi->qConfigRegAddr = IX_QMGR_Q_CONFIG_ADDR_GET(i);
255 /* AQM Queue lower-group (0-31), only */
256 if (i < IX_QMGR_MIN_QUEUPP_QID) {
257 /* AQM Q underflow/overflow status reg address, per queue */
258 qi->qUOStatRegAddr = IX_QMGR_QUEUOSTAT0_OFFSET +
259 ((i / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD) *
262 /* AQM Q underflow status bit masks for status reg per queue */
263 qi->qUflowStatBitMask =
264 (IX_QMGR_UNDERFLOW_BIT_OFFSET + 1) <<
265 ((i & (IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD - 1)) *
266 (32 / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD));
268 /* AQM Q overflow status bit masks for status reg, per queue */
269 qi->qOflowStatBitMask =
270 (IX_QMGR_OVERFLOW_BIT_OFFSET + 1) <<
271 ((i & (IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD - 1)) *
272 (32 / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD));
274 /* AQM Q lower-group (0-31) status reg addresses, per queue */
275 qi->qStatRegAddr = IX_QMGR_QUELOWSTAT0_OFFSET +
276 ((i / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) *
279 /* AQM Q lower-group (0-31) status register bit offset */
280 qi->qStatBitsOffset =
281 (i & (IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD - 1)) *
282 (32 / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD);
283 } else { /* AQM Q upper-group (32-63), only */
284 qi->qUOStatRegAddr = 0; /* XXX */
286 /* AQM Q upper-group (32-63) Nearly Empty status reg bitmasks */
287 qi->qStat0BitMask = (1 << (i - IX_QMGR_MIN_QUEUPP_QID));
289 /* AQM Q upper-group (32-63) Full status register bitmasks */
290 qi->qStat1BitMask = (1 << (i - IX_QMGR_MIN_QUEUPP_QID));
294 sc->aqmFreeSramAddress = 0x100; /* Q buffer space starts at 0x2100 */
296 ixpqmgr_rebuild(sc); /* build inital priority table */
297 aqm_reset(sc); /* reset h/w */
/*
 * Detach: quiesce the hardware (aqm_reset disables interrupts), then
 * release the interrupt and the register mapping acquired in attach.
 */
ixpqmgr_detach(device_t dev)
	struct ixpqmgr_softc *sc = device_get_softc(dev);
	aqm_reset(sc);		/* disable interrupts */
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, sc->sc_rid, sc->sc_irq);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, IXP425_QMGR_SIZE);
/*
 * Configure hardware queue qId: record size and callback state, write
 * the AQM config register, select the interrupt source (lower-group
 * queues only) and enable the interrupt when a callback is supplied.
 * Marks the priority table for rebuild at the next interrupt.
 * NOTE(review): qEntries is presumably a power of two (qwrite masks
 * with qSize - 1) -- confirm with callers.
 */
ixpqmgr_qconfig(int qId, int qEntries, int ne, int nf, int srcSel,
    void (*cb)(int, void *), void *cbarg)
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];
	DPRINTF(sc->sc_dev, "%s(%u, %u, %u, %u, %u, %p, %p)\n",
	    __func__, qId, qEntries, ne, nf, srcSel, cb, cbarg);
	/* NB: entry size is always 1 */
	qi->qSizeInWords = qEntries;
	qi->qSizeInEntries = qEntries;	/* XXX kept for code clarity */
	/* Reset to dummy callback */
	qi->cb = dummyCallback;
	/* Write the config register; NB must be AFTER qinfo setup */
	aqm_qcfg(sc, qId, ne, nf);
	/*
	 * Account for space just allocated to queue.
	 */
	sc->aqmFreeSramAddress += (qi->qSizeInWords * sizeof(uint32_t));
	/* Set the interrupt source if this queue is in the range 0-31 */
	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		aqm_srcsel_write(sc, qId, srcSel);
	if (cb != NULL)		/* Enable the interrupt */
		aqm_int_enable(sc, qId);
	sc->rebuildTable = TRUE;
/*
 * Write one entry to queue qId.  For lower-group queues (0-31) a
 * software write count is maintained; when it reaches the queue size
 * the h/w underflow/overflow status register is consulted (twice, see
 * below) to distinguish a real overflow from the consumer draining
 * the queue concurrently.
 */
ixpqmgr_qwrite(int qId, uint32_t entry)
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];
	DPRINTFn(3, sc->sc_dev, "%s(%u, 0x%x) writeCount %u size %u\n",
	    __func__, qId, entry, qi->qWriteCount, qi->qSizeInEntries);
	/* write the entry */
	aqm_reg_write(sc, qi->qAccRegAddr, entry);
	/* NB: overflow is available for lower queues only */
	if (qId < IX_QMGR_MIN_QUEUPP_QID) {
		int qSize = qi->qSizeInEntries;
		/*
		 * Increment the current number of entries in the queue
		 * and check for overflow .
		 */
		if (qi->qWriteCount++ == qSize) {	/* check for overflow */
			uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);
			/*
			 * Read the status twice because the status may
			 * not be immediately ready after the write operation
			 */
			if ((status & qi->qOflowStatBitMask) ||
			    ((status = aqm_reg_read(sc, qi->qUOStatRegAddr)) & qi->qOflowStatBitMask)) {
				/*
				 * The queue is full, clear the overflow status bit if set.
				 */
				aqm_reg_write(sc, qi->qUOStatRegAddr,
				    status & ~qi->qOflowStatBitMask);
				qi->qWriteCount = qSize;
				DPRINTFn(5, sc->sc_dev,
				    "%s(%u, 0x%x) Q full, overflow status cleared\n",
				    __func__, qId, entry);
			/*
			 * No overflow occured : someone is draining the queue
			 * and the current counter needs to be
			 * updated from the current number of entries in the queue
			 */
				/* calculate number of words in q */
				qPtrs = aqm_reg_read(sc, qi->qConfigRegAddr);
				DPRINTFn(2, sc->sc_dev,
				    "%s(%u, 0x%x) Q full, no overflow status, qConfig 0x%x\n",
				    __func__, qId, entry, qPtrs);
				/* NB: head/tail pointers live in bits 0-13 of QUECONFIG */
				qPtrs = (qPtrs - (qPtrs >> 7)) & 0x7f;
				/*
				 * The queue may be full at the time of the
				 * snapshot. Next access will check
				 * the overflow status again.
				 */
					qi->qWriteCount = qSize;
				/* convert the number of words to a number of entries */
					qi->qWriteCount = qPtrs & (qSize - 1);
/*
 * Read one entry from queue qId into *entry.  A zero entry read from
 * a lower-group queue may indicate underflow; the h/w status register
 * is checked and any set underflow bit is cleared.
 */
ixpqmgr_qread(int qId, uint32_t *entry)
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];
	bus_size_t off = qi->qAccRegAddr;
	*entry = aqm_reg_read(sc, off);
	/*
	 * Reset the current read count : next access to the read function
	 * will force a underflow status check.
	 */
	/* Check if underflow occurred on the read */
	if (*entry == 0 && qId < IX_QMGR_MIN_QUEUPP_QID) {
		/* get the queue status */
		uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);
		if (status & qi->qUflowStatBitMask) {	/* clear underflow status */
			aqm_reg_write(sc, qi->qUOStatRegAddr,
			    status &~ qi->qUflowStatBitMask);
/*
 * Read up to n entries from queue qId into p, stopping early on a
 * zero entry (underflow).  As with ixpqmgr_qread, an underflow on a
 * lower-group queue is detected via the status register and cleared.
 */
ixpqmgr_qreadm(int qId, uint32_t n, uint32_t *p)
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];
	bus_size_t off = qi->qAccRegAddr;
	entry = aqm_reg_read(sc, off);
	/* if we read a NULL entry, stop. We have underflowed */
		*p++ = entry;	/* store */
		entry = aqm_reg_read(sc, off);
	/*
	 * Reset the current read count : next access to the read function
	 * will force a underflow status check.
	 */
	/* Check if underflow occurred on the read */
	if (entry == 0 && qId < IX_QMGR_MIN_QUEUPP_QID) {
		/* get the queue status */
		uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);
		if (status & qi->qUflowStatBitMask) {	/* clear underflow status */
			aqm_reg_write(sc, qi->qUOStatRegAddr,
			    status &~ qi->qUflowStatBitMask);
/*
 * Return the h/w status bits for queue qId.  Lower-group queues have
 * a multi-bit status nibble in the QUELOWSTAT registers; upper-group
 * queues expose only single nearly-empty/full bits in QUEUPPSTAT0/1.
 */
ixpqmgr_getqstatus(int qId)
#define	QLOWSTATMASK \
	((1 << (32 / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD)) - 1)
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	const struct qmgrInfo *qi = &sc->qinfo[qId];
	if (qId < IX_QMGR_MIN_QUEUPP_QID) {
		/* read the status of a queue in the range 0-31 */
		status = aqm_reg_read(sc, qi->qStatRegAddr);
		/* mask out the status bits relevant only to this queue */
		status = (status >> qi->qStatBitsOffset) & QLOWSTATMASK;
	} else { /* read status of a queue in the range 32-63 */
		if (aqm_reg_read(sc, IX_QMGR_QUEUPPSTAT0_OFFSET)&qi->qStat0BitMask)
			status |= IX_QMGR_Q_STATUS_NE_BIT_MASK;	/* nearly empty */
		if (aqm_reg_read(sc, IX_QMGR_QUEUPPSTAT1_OFFSET)&qi->qStat1BitMask)
			status |= IX_QMGR_Q_STATUS_F_BIT_MASK;	/* full */
/* Return the raw QUECONFIG register contents for queue qId. */
ixpqmgr_getqconfig(int qId)
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	return aqm_reg_read(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId));
	/*
	 * Debug dump: status registers (0x400-0x43c), per-queue config
	 * registers (0x2000+), the allocated SRAM buffer area, and a
	 * config/status summary for the first 32 queues.
	 */
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	/* status registers */
	printf("0x%04x: %08x %08x %08x %08x\n"
	    , aqm_reg_read(sc, 0x400)
	    , aqm_reg_read(sc, 0x400+4)
	    , aqm_reg_read(sc, 0x400+8)
	    , aqm_reg_read(sc, 0x400+12)
	printf("0x%04x: %08x %08x %08x %08x\n"
	    , aqm_reg_read(sc, 0x410)
	    , aqm_reg_read(sc, 0x410+4)
	    , aqm_reg_read(sc, 0x410+8)
	    , aqm_reg_read(sc, 0x410+12)
	printf("0x%04x: %08x %08x %08x %08x\n"
	    , aqm_reg_read(sc, 0x420)
	    , aqm_reg_read(sc, 0x420+4)
	    , aqm_reg_read(sc, 0x420+8)
	    , aqm_reg_read(sc, 0x420+12)
	printf("0x%04x: %08x %08x %08x %08x\n"
	    , aqm_reg_read(sc, 0x430)
	    , aqm_reg_read(sc, 0x430+4)
	    , aqm_reg_read(sc, 0x430+8)
	    , aqm_reg_read(sc, 0x430+12)
	/* q configuration registers */
	for (a = 0x2000; a < 0x20ff; a += 32)
		printf("0x%04x: %08x %08x %08x %08x %08x %08x %08x %08x\n"
		    , aqm_reg_read(sc, a)
		    , aqm_reg_read(sc, a+4)
		    , aqm_reg_read(sc, a+8)
		    , aqm_reg_read(sc, a+12)
		    , aqm_reg_read(sc, a+16)
		    , aqm_reg_read(sc, a+20)
		    , aqm_reg_read(sc, a+24)
		    , aqm_reg_read(sc, a+28)
	/* NOTE(review): 'a' is presumably derived from 'i' here (SRAM
	 * offset mapped into register space) -- confirm against full file. */
	for (i = 0x100; i < sc->aqmFreeSramAddress; i += 32) {
		printf("0x%04x: %08x %08x %08x %08x %08x %08x %08x %08x\n"
		    , aqm_reg_read(sc, a)
		    , aqm_reg_read(sc, a+4)
		    , aqm_reg_read(sc, a+8)
		    , aqm_reg_read(sc, a+12)
		    , aqm_reg_read(sc, a+16)
		    , aqm_reg_read(sc, a+20)
		    , aqm_reg_read(sc, a+24)
		    , aqm_reg_read(sc, a+28)
	for (i = 0; i < 16; i++) {
		printf("Q[%2d] config 0x%08x status 0x%02x "
		    "Q[%2d] config 0x%08x status 0x%02x\n"
		    , i, ixpqmgr_getqconfig(i), ixpqmgr_getqstatus(i)
		    , i+16, ixpqmgr_getqconfig(i+16), ixpqmgr_getqstatus(i+16)
/*
 * Enable notifications for queue qId: compute the status-check
 * parameters, select the interrupt source (lower-group queues only),
 * and enable the queue's interrupt.
 */
ixpqmgr_notify_enable(int qId, int srcSel)
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	/* Calculate the checkMask and checkValue for this q */
	aqm_calc_statuscheck(sc, qId, srcSel);
	/* Set the interrupt source if this queue is in the range 0-31 */
	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		aqm_srcsel_write(sc, qId, srcSel);
	/* Enable the interrupt */
	aqm_int_enable(sc, qId);
/* Disable interrupt notifications for queue qId. */
ixpqmgr_notify_disable(int qId)
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	aqm_int_disable(sc, qId);
/*
 * Rebuild the priority table used by the dispatcher.
 * Queues are listed in ascending priority order; slots 0-31 hold the
 * lower-group queues, slots 32-63 the upper group.  Bit masks of the
 * queues landing in each table half are kept to speed dispatch.
 */
ixpqmgr_rebuild(struct ixpqmgr_softc *sc)
	int lowQuePriorityTableIndex, uppQuePriorityTableIndex;

	sc->lowPriorityTableFirstHalfMask = 0;
	sc->uppPriorityTableFirstHalfMask = 0;
	lowQuePriorityTableIndex = 0;
	uppQuePriorityTableIndex = 32;
	for (pri = 0; pri < IX_QMGR_NUM_PRIORITY_LEVELS; pri++) {
		/* low priority q's */
		for (q = 0; q < IX_QMGR_MIN_QUEUPP_QID; q++) {
			if (qi->priority == pri) {
				/*
				 * Build the priority table bitmask which match the
				 * queues of the first half of the priority table.
				 */
				if (lowQuePriorityTableIndex < 16) {
					sc->lowPriorityTableFirstHalfMask |=
				sc->priorityTable[lowQuePriorityTableIndex++] = q;
		/* high priority q's */
		for (; q < IX_QMGR_MAX_NUM_QUEUES; q++) {
			if (qi->priority == pri) {
				/*
				 * Build the priority table bitmask which match the
				 * queues of the first half of the priority table .
				 */
				if (uppQuePriorityTableIndex < 48) {
					sc->uppPriorityTableFirstHalfMask |=
				sc->priorityTable[uppQuePriorityTableIndex++] = q;
	sc->rebuildTable = FALSE;
/*
 * Count the number of leading zero bits in a word,
 * and return the same value than the CLZ instruction.
 * Note this is similar to the standard ffs function but
 * it counts zero's from the MSB instead of the LSB.
 *
 * word (in) return value (out)
 *
 * The C version of this function is used as a replacement
 * for system not providing the equivalent of the CLZ
 * assembly language instruction.
 *
 * Note that this version is big-endian
 */
_lzcount(uint32_t word)
	unsigned int lzcount = 0;

	/* NOTE(review): loops forever if word == 0; the only visible
	 * caller (ixpqmgr_intr) guards with intRegVal != 0. */
	while ((word & 0x80000000) == 0) {
/*
 * Interrupt handler: read and ack QINTREG0, then dispatch the
 * callback of every lower-group queue whose interrupt bit is set.
 * The single-queue case is dispatched directly; otherwise queues are
 * walked in priority-table order until all bits are consumed.
 */
ixpqmgr_intr(void *arg)
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	uint32_t intRegVal;		/* Interrupt reg val */
	int priorityTableIndex;		/* Priority table index */
	int qIndex;			/* Current queue being processed */

	/* Read the interrupt register */
	intRegVal = aqm_reg_read(sc, IX_QMGR_QINTREG0_OFFSET);
	/* Write back to clear interrupt */
	aqm_reg_write(sc, IX_QMGR_QINTREG0_OFFSET, intRegVal);

	DPRINTFn(5, sc->sc_dev, "%s: ISR0 0x%x ISR1 0x%x\n",
	    __func__, intRegVal, aqm_reg_read(sc, IX_QMGR_QINTREG1_OFFSET));

	/* Nothing to do unless some queue has its interrupt bit set */
	if (intRegVal != 0) {
		/* get the first queue Id from the interrupt register value */
		qIndex = (32 - 1) - _lzcount(intRegVal);

		DPRINTFn(2, sc->sc_dev, "%s: ISR0 0x%x qIndex %u\n",
		    __func__, intRegVal, qIndex);
		/*
		 * Optimize for single callback case.
		 */
		qi = &sc->qinfo[qIndex];
		if (intRegVal == qi->intRegCheckMask) {
			/*
			 * Only 1 queue event triggered a notification.
			 * Call the callback function for this queue
			 */
			qi->cb(qIndex, qi->cbarg);
			/*
			 * The event is triggered by more than 1 queue,
			 * the queue search will start from the beginning
			 * or the middle of the priority table.
			 *
			 * The search will end when all the bits of the interrupt
			 * register are cleared. There is no need to maintain
			 * a seperate value and test it at each iteration.
			 */
			if (intRegVal & sc->lowPriorityTableFirstHalfMask) {
				priorityTableIndex = 0;
				priorityTableIndex = 16;
			/*
			 * Iterate over the priority table until all the bits
			 * of the interrupt register are cleared.
			 */
				qIndex = sc->priorityTable[priorityTableIndex++];
				qi = &sc->qinfo[qIndex];

				/* If this queue caused this interrupt to be raised */
				if (intRegVal & qi->intRegCheckMask) {
					/* Call the callback function for this queue */
					qi->cb(qIndex, qi->cbarg);
					/* Clear the interrupt register bit */
					intRegVal &= ~qi->intRegCheckMask;
	/* Rebuild the priority table if needed */
	if (sc->rebuildTable)
783 * Generate the parameters used to check if a Q's status matches
784 * the specified source select. We calculate which status word
785 * to check (statusWordOffset), the value to check the status
786 * against (statusCheckValue) and the mask (statusMask) to mask
787 * out all but the bits to check in the status word.
790 aqm_calc_statuscheck(int qId, IxQMgrSourceId srcSel)
792 struct qmgrInfo *qi = &qinfo[qId];
795 if (qId < IX_QMGR_MIN_QUEUPP_QID) {
797 case IX_QMGR_Q_SOURCE_ID_E:
798 qi->statusCheckValue = IX_QMGR_Q_STATUS_E_BIT_MASK;
799 qi->statusMask = IX_QMGR_Q_STATUS_E_BIT_MASK;
801 case IX_QMGR_Q_SOURCE_ID_NE:
802 qi->statusCheckValue = IX_QMGR_Q_STATUS_NE_BIT_MASK;
803 qi->statusMask = IX_QMGR_Q_STATUS_NE_BIT_MASK;
805 case IX_QMGR_Q_SOURCE_ID_NF:
806 qi->statusCheckValue = IX_QMGR_Q_STATUS_NF_BIT_MASK;
807 qi->statusMask = IX_QMGR_Q_STATUS_NF_BIT_MASK;
809 case IX_QMGR_Q_SOURCE_ID_F:
810 qi->statusCheckValue = IX_QMGR_Q_STATUS_F_BIT_MASK;
811 qi->statusMask = IX_QMGR_Q_STATUS_F_BIT_MASK;
813 case IX_QMGR_Q_SOURCE_ID_NOT_E:
814 qi->statusCheckValue = 0;
815 qi->statusMask = IX_QMGR_Q_STATUS_E_BIT_MASK;
817 case IX_QMGR_Q_SOURCE_ID_NOT_NE:
818 qi->statusCheckValue = 0;
819 qi->statusMask = IX_QMGR_Q_STATUS_NE_BIT_MASK;
821 case IX_QMGR_Q_SOURCE_ID_NOT_NF:
822 qi->statusCheckValue = 0;
823 qi->statusMask = IX_QMGR_Q_STATUS_NF_BIT_MASK;
825 case IX_QMGR_Q_SOURCE_ID_NOT_F:
826 qi->statusCheckValue = 0;
827 qi->statusMask = IX_QMGR_Q_STATUS_F_BIT_MASK;
830 /* Should never hit */
835 /* One nibble of status per queue so need to shift the
836 * check value and mask out to the correct position.
838 shiftVal = (qId % IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) *
839 IX_QMGR_QUELOWSTAT_BITS_PER_Q;
841 /* Calculate the which status word to check from the qId,
842 * 8 Qs status per word
844 qi->statusWordOffset = qId / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD;
846 qi->statusCheckValue <<= shiftVal;
847 qi->statusMask <<= shiftVal;
849 /* One status word */
850 qi->statusWordOffset = 0;
851 /* Single bits per queue and int source bit hardwired NE,
854 qi->statusMask = 1 << (qId - IX_QMGR_MIN_QUEUPP_QID);
855 qi->statusCheckValue = qi->statusMask;
/*
 * Set qId's bit in the appropriate interrupt-enable register
 * (QUEIEREG0 for queues 0-31, QUEIEREG1 for 32-63).
 */
aqm_int_enable(struct ixpqmgr_softc *sc, int qId)
	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		reg = IX_QMGR_QUEIEREG0_OFFSET;
		reg = IX_QMGR_QUEIEREG1_OFFSET;
	v = aqm_reg_read(sc, reg);
	aqm_reg_write(sc, reg, v | (1 << (qId % IX_QMGR_MIN_QUEUPP_QID)));

	DPRINTF(sc->sc_dev, "%s(%u) 0x%lx: 0x%x => 0x%x\n",
	    __func__, qId, reg, v, aqm_reg_read(sc, reg));
/*
 * Clear qId's bit in the appropriate interrupt-enable register
 * (QUEIEREG0 for queues 0-31, QUEIEREG1 for 32-63).
 */
aqm_int_disable(struct ixpqmgr_softc *sc, int qId)
	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		reg = IX_QMGR_QUEIEREG0_OFFSET;
		reg = IX_QMGR_QUEIEREG1_OFFSET;
	v = aqm_reg_read(sc, reg);
	aqm_reg_write(sc, reg, v &~ (1 << (qId % IX_QMGR_MIN_QUEUPP_QID)));

	DPRINTF(sc->sc_dev, "%s(%u) 0x%lx: 0x%x => 0x%x\n",
	    __func__, qId, reg, v, aqm_reg_read(sc, reg));
/*
 * Integer floor(log2(n)) by repeated halving.
 * N.B. this function will return 0 if supplied 0.
 */
	for (count = 0; n/2; count++)
/* Encode an entry size for the QUECONFIG ESIZE field. */
static __inline unsigned
toAqmEntrySize(int entrySize)
	/* entrySize 1("00"),2("01"),4("10") */
	return log2(entrySize);
/* Encode a buffer size (in words) for the QUECONFIG BSIZE field. */
static __inline unsigned
toAqmBufferSize(unsigned bufferSizeInWords)
	/* bufferSize 16("00"),32("01),64("10"),128("11") */
	return log2(bufferSizeInWords / IX_QMGR_MIN_BUFFER_SIZE);
/* Encode a nearly-empty/nearly-full watermark for QUECONFIG. */
static __inline unsigned
toAqmWatermark(int watermark)
	/*
	 * Watermarks 0("000"),1("001"),2("010"),4("011"),
	 * 8("100"),16("101"),32("110"),64("111")
	 */
	return log2(2 * watermark);
/*
 * Build and write the QUECONFIG register for qId: entry size, buffer
 * size, SRAM base address (stored as a 16-word pointer) and the
 * nearly-empty/nearly-full watermarks.
 */
aqm_qcfg(struct ixpqmgr_softc *sc, int qId, u_int ne, u_int nf)
	const struct qmgrInfo *qi = &sc->qinfo[qId];
	uint32_t baseAddress;

	/* Build config register */
	qCfg = ((toAqmEntrySize(1) & IX_QMGR_ENTRY_SIZE_MASK) <<
	    IX_QMGR_Q_CONFIG_ESIZE_OFFSET)
	    | ((toAqmBufferSize(qi->qSizeInWords) & IX_QMGR_SIZE_MASK) <<
	    IX_QMGR_Q_CONFIG_BSIZE_OFFSET);

	/* baseAddress, calculated relative to start address */
	baseAddress = sc->aqmFreeSramAddress;

	/* base address must be word-aligned */
	KASSERT((baseAddress % IX_QMGR_BASE_ADDR_16_WORD_ALIGN) == 0,
	    ("address not word-aligned"));

	/* Now convert to a 16 word pointer as required by QUECONFIG register */
	baseAddress >>= IX_QMGR_BASE_ADDR_16_WORD_SHIFT;
	qCfg |= baseAddress << IX_QMGR_Q_CONFIG_BADDR_OFFSET;

	qCfg |= (toAqmWatermark(ne) << IX_QMGR_Q_CONFIG_NE_OFFSET)
	    | (toAqmWatermark(nf) << IX_QMGR_Q_CONFIG_NF_OFFSET);

	DPRINTF(sc->sc_dev, "%s(%u, %u, %u) 0x%x => 0x%x @ 0x%x\n",
	    __func__, qId, ne, nf,
	    aqm_reg_read(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId)),
	    qCfg, IX_QMGR_Q_CONFIG_ADDR_GET(qId));

	aqm_reg_write(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId), qCfg);
/*
 * Program qId's interrupt source select field in the INT0SRCSELREG
 * bank (several queues share each 32-bit register).
 */
aqm_srcsel_write(struct ixpqmgr_softc *sc, int qId, int sourceId)
	/*
	 * Calculate the register offset; multiple queues split across registers
	 */
	off = IX_QMGR_INT0SRCSELREG0_OFFSET +
	    ((qId / IX_QMGR_INTSRC_NUM_QUE_PER_WORD) * sizeof(uint32_t));

	v = aqm_reg_read(sc, off);
	if (off == IX_QMGR_INT0SRCSELREG0_OFFSET && qId == 0) {
		/* Queue 0 at INT0SRCSELREG should not corrupt the value bit-3 */
		const uint32_t bpq = 32 / IX_QMGR_INTSRC_NUM_QUE_PER_WORD;

		/* compute this queue's field position and read-modify-write it */
		qshift = (qId & (IX_QMGR_INTSRC_NUM_QUE_PER_WORD-1)) * bpq;
		mask = ((1 << bpq) - 1) << qshift;	/* q's status mask */
		v = (v &~ mask) | ((sourceId << qshift) & mask);

	DPRINTF(sc->sc_dev, "%s(%u, %u) 0x%x => 0x%x @ 0x%lx\n",
	    __func__, qId, sourceId, aqm_reg_read(sc, off), v, off);
	aqm_reg_write(sc, off, v);
/*
 * Reset AQM registers to default values.
 * This disables all queue interrupts and clears all status/config
 * state; used both at attach (initial state) and detach (quiesce).
 */
aqm_reset(struct ixpqmgr_softc *sc)
	/* Reset queues 0..31 status registers 0..3 */
	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT0_OFFSET,
	    IX_QMGR_QUELOWSTAT_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT1_OFFSET,
	    IX_QMGR_QUELOWSTAT_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT2_OFFSET,
	    IX_QMGR_QUELOWSTAT_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUELOWSTAT3_OFFSET,
	    IX_QMGR_QUELOWSTAT_RESET_VALUE);

	/* Reset underflow/overflow status registers 0..1 */
	aqm_reg_write(sc, IX_QMGR_QUEUOSTAT0_OFFSET,
	    IX_QMGR_QUEUOSTAT_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUEUOSTAT1_OFFSET,
	    IX_QMGR_QUEUOSTAT_RESET_VALUE);

	/* Reset queues 32..63 nearly empty status registers */
	aqm_reg_write(sc, IX_QMGR_QUEUPPSTAT0_OFFSET,
	    IX_QMGR_QUEUPPSTAT0_RESET_VALUE);

	/* Reset queues 32..63 full status registers */
	aqm_reg_write(sc, IX_QMGR_QUEUPPSTAT1_OFFSET,
	    IX_QMGR_QUEUPPSTAT1_RESET_VALUE);

	/* Reset int0 status flag source select registers 0..3 */
	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG0_OFFSET,
	    IX_QMGR_INT0SRCSELREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG1_OFFSET,
	    IX_QMGR_INT0SRCSELREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG2_OFFSET,
	    IX_QMGR_INT0SRCSELREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG3_OFFSET,
	    IX_QMGR_INT0SRCSELREG_RESET_VALUE);

	/* Reset queue interrupt enable register 0..1 */
	aqm_reg_write(sc, IX_QMGR_QUEIEREG0_OFFSET,
	    IX_QMGR_QUEIEREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QUEIEREG1_OFFSET,
	    IX_QMGR_QUEIEREG_RESET_VALUE);

	/* Reset queue interrupt register 0..1 */
	aqm_reg_write(sc, IX_QMGR_QINTREG0_OFFSET, IX_QMGR_QINTREG_RESET_VALUE);
	aqm_reg_write(sc, IX_QMGR_QINTREG1_OFFSET, IX_QMGR_QINTREG_RESET_VALUE);

	/* Reset queue configuration words 0..63 */
	for (i = 0; i < IX_QMGR_MAX_NUM_QUEUES; i++)
		aqm_reg_write(sc, sc->qinfo[i].qConfigRegAddr,
		    IX_QMGR_QUECONFIG_RESET_VALUE);

	/* XXX zero SRAM to simplify debugging */
	for (i = IX_QMGR_QUEBUFFER_SPACE_OFFSET;
	     i < IX_QMGR_AQM_SRAM_SIZE_IN_BYTES; i += sizeof(uint32_t))
		aqm_reg_write(sc, i, 0);
/* newbus glue: method table, driver descriptor, and module hookup */
static device_method_t ixpqmgr_methods[] = {
	DEVMETHOD(device_probe,		ixpqmgr_probe),
	DEVMETHOD(device_attach,	ixpqmgr_attach),
	DEVMETHOD(device_detach,	ixpqmgr_detach),
static driver_t ixpqmgr_driver = {
	sizeof(struct ixpqmgr_softc),
static devclass_t ixpqmgr_devclass;

DRIVER_MODULE(ixpqmgr, ixp, ixpqmgr_driver, ixpqmgr_devclass, 0, 0);