2 * Copyright (c) 2006-2008 Sam Leffler, Errno Consulting
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13 * redistribution must be conditioned upon including a substantially
14 * similar Disclaimer requirement for further binary redistribution.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGES.
31 * Copyright (c) 2001-2005, Intel Corporation.
32 * All rights reserved.
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
42 * 3. Neither the name of the Intel Corporation nor the names of its contributors
43 * may be used to endorse or promote products derived from this software
44 * without specific prior written permission.
47 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
48 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 #include <sys/cdefs.h>
60 __FBSDID("$FreeBSD$");
63 * Intel XScale Network Processing Engine (NPE) support.
65 * Each NPE has an ixpnpeX device associated with it that is
66 * attached at boot. Depending on the microcode loaded into
67 * an NPE there may be an Ethernet interface (npeX) or some
68 * other network interface (e.g. for ATM). This file has support
69 * for loading microcode images and the associated NPE CPU
70 * manipulations (start, stop, reset).
72 * The code here basically replaces the npeDl and npeMh classes
73 * in the Intel Access Library (IAL).
75 * NB: Microcode images are loaded with firmware(9). To
76 * include microcode in a static kernel include the
77 * ixpnpe_fw device. Otherwise the firmware will be
78 * automatically loaded from the filesystem.
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/kernel.h>
83 #include <sys/malloc.h>
84 #include <sys/module.h>
87 #include <sys/resource.h>
89 #include <sys/sysctl.h>
91 #include <sys/linker.h>
92 #include <sys/firmware.h>
94 #include <machine/bus.h>
95 #include <machine/cpu.h>
96 #include <machine/cpufunc.h>
97 #include <machine/resource.h>
98 #include <machine/intr.h>
99 #include <arm/xscale/ixp425/ixp425reg.h>
100 #include <arm/xscale/ixp425/ixp425var.h>
102 #include <arm/xscale/ixp425/ixp425_npereg.h>
103 #include <arm/xscale/ixp425/ixp425_npevar.h>
/*
 * Per-NPE driver state, one instance per ixpnpeX device.
 * NOTE(review): fields referenced elsewhere in this file (sc_dev,
 * sc_npeid) are declared in lines not visible in this view.
 */
105 struct ixpnpe_softc {
107 bus_space_tag_t sc_iot;
108 bus_space_handle_t sc_ioh;
109 bus_size_t sc_size; /* size of mapped register window */
110 struct resource *sc_irq; /* IRQ resource */
111 void *sc_ih; /* interrupt handler */
112 struct mtx sc_mtx; /* mailbox lock */
113 uint32_t sc_msg[2]; /* reply msg collected in ixpnpe_intr */
114 int sc_msgwaiting; /* sc_msg holds valid data */
116 int sc_nrefs; /* # of references */
118 int validImage; /* valid ucode image loaded */
119 int started; /* NPE is started */
120 uint8_t functionalityId;/* ucode functionality ID */
121 int insMemSize; /* size of instruction memory */
122 int dataMemSize; /* size of data memory */
123 uint32_t savedExecCount; /* EXCT reg saved by npe_cpu_step_save */
124 uint32_t savedEcsDbgCtxtReg2; /* ECS DBG ctxt reg 2 saved by npe_cpu_step_save */
126 static struct ixpnpe_softc *npes[NPE_MAX];
128 #define IX_NPEDL_NPEIMAGE_FIELD_MASK 0xff
130 /* used to read download map from version in microcode image */
131 #define IX_NPEDL_BLOCK_TYPE_INSTRUCTION 0x00000000
132 #define IX_NPEDL_BLOCK_TYPE_DATA 0x00000001
133 #define IX_NPEDL_BLOCK_TYPE_STATE 0x00000002
134 #define IX_NPEDL_END_OF_DOWNLOAD_MAP 0x0000000F
137 * masks used to extract address info from State information context
138 * register addresses as read from microcode image
140 #define IX_NPEDL_MASK_STATE_ADDR_CTXT_REG 0x0000000F
141 #define IX_NPEDL_MASK_STATE_ADDR_CTXT_NUM 0x000000F0
143 /* LSB offset of Context Number field in State-Info Context Address */
144 #define IX_NPEDL_OFFSET_STATE_ADDR_CTXT_NUM 4
146 /* size (in words) of single State Information entry (ctxt reg address|data) */
147 #define IX_NPEDL_STATE_INFO_ENTRY_SIZE 2
152 } IxNpeDlNpeMgrDownloadMapBlockEntry;
155 IxNpeDlNpeMgrDownloadMapBlockEntry block;
157 } IxNpeDlNpeMgrDownloadMapEntry;
160 /* 1st entry in the download map (there may be more than one) */
161 IxNpeDlNpeMgrDownloadMapEntry entry[1];
162 } IxNpeDlNpeMgrDownloadMap;
164 /* used to access an instruction or data block in a microcode image */
166 uint32_t npeMemAddress;
169 } IxNpeDlNpeMgrCodeBlock;
171 /* used to access each Context Reg entry state-information block */
173 uint32_t addressInfo;
175 } IxNpeDlNpeMgrStateInfoCtxtRegEntry;
177 /* used to access a state-information block in a microcode image */
180 IxNpeDlNpeMgrStateInfoCtxtRegEntry ctxtRegEntry[1];
181 } IxNpeDlNpeMgrStateInfoBlock;
183 static int npe_debug = 0;
184 SYSCTL_INT(_debug, OID_AUTO, ixp425npe, CTLFLAG_RW, &npe_debug,
185 0, "IXP4XX NPE debug msgs");
186 TUNABLE_INT("debug.ixp425npe", &npe_debug);
187 #define DPRINTF(dev, fmt, ...) do { \
188 if (npe_debug) device_printf(dev, fmt, __VA_ARGS__); \
190 #define DPRINTFn(n, dev, fmt, ...) do { \
191 if (npe_debug >= n) printf(fmt, __VA_ARGS__); \
194 static int npe_checkbits(struct ixpnpe_softc *, uint32_t reg, uint32_t);
195 static int npe_isstopped(struct ixpnpe_softc *);
196 static int npe_load_ins(struct ixpnpe_softc *,
197 const IxNpeDlNpeMgrCodeBlock *bp, int verify);
198 static int npe_load_data(struct ixpnpe_softc *,
199 const IxNpeDlNpeMgrCodeBlock *bp, int verify);
200 static int npe_load_stateinfo(struct ixpnpe_softc *,
201 const IxNpeDlNpeMgrStateInfoBlock *bp, int verify);
202 static int npe_load_image(struct ixpnpe_softc *,
203 const uint32_t *imageCodePtr, int verify);
204 static int npe_cpu_reset(struct ixpnpe_softc *);
205 static int npe_cpu_start(struct ixpnpe_softc *);
206 static int npe_cpu_stop(struct ixpnpe_softc *);
207 static void npe_cmd_issue_write(struct ixpnpe_softc *,
208 uint32_t cmd, uint32_t addr, uint32_t data);
209 static uint32_t npe_cmd_issue_read(struct ixpnpe_softc *,
210 uint32_t cmd, uint32_t addr);
211 static int npe_ins_write(struct ixpnpe_softc *,
212 uint32_t addr, uint32_t data, int verify);
213 static int npe_data_write(struct ixpnpe_softc *,
214 uint32_t addr, uint32_t data, int verify);
215 static void npe_ecs_reg_write(struct ixpnpe_softc *,
216 uint32_t reg, uint32_t data);
217 static uint32_t npe_ecs_reg_read(struct ixpnpe_softc *, uint32_t reg);
218 static void npe_issue_cmd(struct ixpnpe_softc *, uint32_t command);
219 static void npe_cpu_step_save(struct ixpnpe_softc *);
220 static int npe_cpu_step(struct ixpnpe_softc *, uint32_t npeInstruction,
221 uint32_t ctxtNum, uint32_t ldur);
222 static void npe_cpu_step_restore(struct ixpnpe_softc *);
223 static int npe_logical_reg_read(struct ixpnpe_softc *,
224 uint32_t regAddr, uint32_t regSize,
225 uint32_t ctxtNum, uint32_t *regVal);
226 static int npe_logical_reg_write(struct ixpnpe_softc *,
227 uint32_t regAddr, uint32_t regVal,
228 uint32_t regSize, uint32_t ctxtNum, int verify);
229 static int npe_physical_reg_write(struct ixpnpe_softc *,
230 uint32_t regAddr, uint32_t regValue, int verify);
231 static int npe_ctx_reg_write(struct ixpnpe_softc *, uint32_t ctxtNum,
232 uint32_t ctxtReg, uint32_t ctxtRegVal, int verify);
234 static void ixpnpe_intr(void *arg);
/*
 * Read a 32-bit NPE register at byte offset 'off' through the
 * bus_space handle mapped at attach time.
 */
237 npe_reg_read(struct ixpnpe_softc *sc, bus_size_t off)
239 uint32_t v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
240 DPRINTFn(9, sc->sc_dev, "%s(0x%lx) => 0x%x\n", __func__, off, v);
/*
 * Write a 32-bit NPE register at byte offset 'off'.
 */
245 npe_reg_write(struct ixpnpe_softc *sc, bus_size_t off, uint32_t val)
247 DPRINTFn(9, sc->sc_dev, "%s(0x%lx, 0x%x)\n", __func__, off, val);
248 bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
/*
 * Attach support for NPE 'npeid' (A/B/C): allocate and initialize the
 * softc, map the NPE register window, record the per-NPE instruction
 * and data memory sizes (NPE-dependent on IXP42x, fixed on IXP46x),
 * hook the mailbox interrupt, and enable output-FIFO interrupts.
 * Panics if the register window cannot be mapped or the IRQ cannot
 * be allocated.
 */
251 struct ixpnpe_softc *
252 ixpnpe_attach(device_t dev, int npeid)
258 uint32_t ins_memsize;
259 uint32_t data_memsize;
/* Static per-NPE config: register base/size, IRQ, memory sizes. */
261 static const struct npeconfig npeconfigs[NPE_MAX] = {
263 .base = IXP425_NPE_A_HWBASE,
264 .size = IXP425_NPE_A_SIZE,
265 .irq = IXP425_INT_NPE_A,
266 .ins_memsize = IX_NPEDL_INS_MEMSIZE_WORDS_NPEA,
267 .data_memsize = IX_NPEDL_DATA_MEMSIZE_WORDS_NPEA
270 .base = IXP425_NPE_B_HWBASE,
271 .size = IXP425_NPE_B_SIZE,
272 .irq = IXP425_INT_NPE_B,
273 .ins_memsize = IX_NPEDL_INS_MEMSIZE_WORDS_NPEB,
274 .data_memsize = IX_NPEDL_DATA_MEMSIZE_WORDS_NPEB
277 .base = IXP425_NPE_C_HWBASE,
278 .size = IXP425_NPE_C_SIZE,
279 .irq = IXP425_INT_NPE_C,
280 .ins_memsize = IX_NPEDL_INS_MEMSIZE_WORDS_NPEC,
281 .data_memsize = IX_NPEDL_DATA_MEMSIZE_WORDS_NPEC
284 struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
285 struct ixpnpe_softc *sc;
286 const struct npeconfig *config;
289 if (npeid >= NPE_MAX) {
290 device_printf(dev, "%s: bad npeid %d\n", __func__, npeid);
298 config = &npeconfigs[npeid];
301 sc = malloc(sizeof(struct ixpnpe_softc), M_TEMP, M_WAITOK | M_ZERO);
303 sc->sc_iot = sa->sc_iot;
304 mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "npe driver", MTX_DEF);
305 sc->sc_npeid = npeid;
308 sc->sc_size = config->size;
309 if (cpu_is_ixp42x()) {
310 /* NB: instruction/data memory sizes are NPE-dependent */
311 sc->insMemSize = config->ins_memsize;
312 sc->dataMemSize = config->data_memsize;
314 sc->insMemSize = IXP46X_NPEDL_INS_MEMSIZE_WORDS;
315 sc->dataMemSize = IXP46X_NPEDL_DATA_MEMSIZE_WORDS;
318 if (bus_space_map(sc->sc_iot, config->base, sc->sc_size, 0, &sc->sc_ioh))
319 panic("%s: Cannot map registers", device_get_name(dev));
322 * Setup IRQ and handler for NPE message support.
325 sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
326 config->irq, config->irq, 1, RF_ACTIVE);
327 if (sc->sc_irq == NULL)
328 panic("%s: Unable to allocate irq %u", device_get_name(dev),
330 /* XXX could be a source of entropy */
331 bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
332 NULL, ixpnpe_intr, sc, &sc->sc_ih);
334 * Enable output fifo interrupts (NB: must also set OFIFO Write Enable)
336 npe_reg_write(sc, IX_NPECTL,
337 npe_reg_read(sc, IX_NPECTL) | (IX_NPECTL_OFE | IX_NPECTL_OFWE));
/*
 * Drop a reference on the NPE; on the last reference, disable
 * output-FIFO interrupts, tear down the interrupt handler, unmap
 * the register window and destroy the mailbox lock.
 */
345 ixpnpe_detach(struct ixpnpe_softc *sc)
347 if (--sc->sc_nrefs == 0) {
348 npes[sc->sc_npeid] = NULL;
350 /* disable output fifo interrupts */
351 npe_reg_write(sc, IX_NPECTL,
352 npe_reg_read(sc, IX_NPECTL) &~ (IX_NPECTL_OFE | IX_NPECTL_OFWE));
354 bus_teardown_intr(sc->sc_dev, sc->sc_irq, sc->sc_ih);
355 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_size);
356 mtx_destroy(&sc->sc_mtx);
/*
 * Stop the NPE and then reset it, marking it not-started.
 * Serialized by the softc mutex.
 */
362 ixpnpe_stopandreset(struct ixpnpe_softc *sc)
366 mtx_lock(&sc->sc_mtx);
367 error = npe_cpu_stop(sc); /* stop NPE */
369 error = npe_cpu_reset(sc); /* reset it */
371 sc->started = 0; /* mark stopped */
372 mtx_unlock(&sc->sc_mtx);
374 DPRINTF(sc->sc_dev, "%s: error %d\n", __func__, error);
/*
 * Start the NPE; caller must hold sc_mtx.
 */
379 ixpnpe_start_locked(struct ixpnpe_softc *sc)
384 error = npe_cpu_start(sc);
390 DPRINTF(sc->sc_dev, "%s: error %d\n", __func__, error);
/*
 * Locked wrapper around ixpnpe_start_locked.
 */
395 ixpnpe_start(struct ixpnpe_softc *sc)
399 mtx_lock(&sc->sc_mtx);
400 ret = ixpnpe_start_locked(sc);
401 mtx_unlock(&sc->sc_mtx);
/*
 * Stop the NPE (no reset), under the softc mutex.
 */
406 ixpnpe_stop(struct ixpnpe_softc *sc)
410 mtx_lock(&sc->sc_mtx);
411 error = npe_cpu_stop(sc);
414 mtx_unlock(&sc->sc_mtx);
416 DPRINTF(sc->sc_dev, "%s: error %d\n", __func__, error);
421 * Indicates the start of an NPE Image, in new NPE Image Library format.
422 * 2 consecutive occurrences indicates the end of the NPE Image Library
424 #define NPE_IMAGE_MARKER 0xfeedf00d
427 * NPE Image Header definition, used in new NPE Image Library format
433 } IxNpeDlImageMgrImageHeader;
/*
 * Locate the image with id 'imageId' in a combined image library.
 * Walks image headers (each introduced by NPE_IMAGE_MARKER); on a
 * match, returns the image payload pointer and its size (in words)
 * via imagePtr/imageSize.  Two consecutive markers terminate the
 * library.
 */
436 npe_findimage(struct ixpnpe_softc *sc,
437 const uint32_t *imageLibrary, uint32_t imageId,
438 const uint32_t **imagePtr, uint32_t *imageSize)
440 const IxNpeDlImageMgrImageHeader *image;
443 while (imageLibrary[offset] == NPE_IMAGE_MARKER) {
444 image = (const IxNpeDlImageMgrImageHeader *)
445 &imageLibrary[offset];
/* skip past the header to the start of the payload */
446 offset += sizeof(IxNpeDlImageMgrImageHeader)/sizeof(uint32_t);
448 DPRINTF(sc->sc_dev, "%s: off %u mark 0x%x id 0x%x size %u\n",
449 __func__, offset, image->marker, image->id, image->size);
450 if (image->id == imageId) {
451 *imagePtr = imageLibrary + offset;
452 *imageSize = image->size;
455 /* 2 consecutive NPE_IMAGE_MARKER's indicates end of library */
456 if (image->id == NPE_IMAGE_MARKER) {
457 DPRINTF(sc->sc_dev, "imageId 0x%08x not found in "
458 "image library header\n", imageId);
459 /* reached end of library, image not found */
/* advance to the next image header */
462 offset += image->size;
/*
 * Fetch the firmware file 'imageName' with firmware(9), locate the
 * image 'imageId' inside it, stop+reset the NPE, download the image
 * (with verification) and restart the NPE.  On success the image's
 * functionality id is recorded in the softc.
 */
468 ixpnpe_load_firmware(struct ixpnpe_softc *sc, const char *imageName,
471 static const char *devname[4] =
472 { "IXP425", "IXP435/IXP465", "DeviceID#2", "DeviceID#3" };
474 const uint32_t *imageCodePtr;
475 const struct firmware *fw;
478 DPRINTF(sc->sc_dev, "load %s, imageId 0x%08x\n", imageName, imageId);
481 IxFeatureCtrlDeviceId devid = IX_NPEDL_DEVICEID_FROM_IMAGEID_GET(imageId);
483 * Checking if image being loaded is meant for device that is running.
484 * Image is forward compatible. i.e Image built for IXP42X should run
485 * on IXP46X but not vice versa.
487 if (devid > (ixFeatureCtrlDeviceRead() & IX_FEATURE_CTRL_DEVICE_TYPE_MASK))
490 error = ixpnpe_stopandreset(sc); /* stop and reset the NPE */
494 fw = firmware_get(imageName);
498 /* Locate desired image in files w/ combined images */
499 error = npe_findimage(sc, fw->data, imageId, &imageCodePtr, &imageSize);
503 device_printf(sc->sc_dev,
504 "load fw image %s.NPE-%c Func 0x%x Rev %u.%u\n",
505 devname[NPEIMAGE_DEVID(imageId)], 'A' + NPEIMAGE_NPEID(imageId),
506 NPEIMAGE_FUNCID(imageId), NPEIMAGE_MAJOR(imageId),
507 NPEIMAGE_MINOR(imageId));
510 * If download was successful, store image Id in list of
511 * currently loaded images. If a critical error occurred
512 * during download, record that the NPE has an invalid image
514 mtx_lock(&sc->sc_mtx);
515 error = npe_load_image(sc, imageCodePtr, 1 /*VERIFY*/);
518 error = ixpnpe_start_locked(sc);
522 sc->functionalityId = IX_NPEDL_FUNCTIONID_FROM_IMAGEID_GET(imageId);
523 mtx_unlock(&sc->sc_mtx);
/* release the firmware(9) reference; image has been copied to the NPE */
525 firmware_put(fw, FIRMWARE_UNLOAD);
526 DPRINTF(sc->sc_dev, "%s: error %d\n", __func__, error);
/*
 * Check for a hints/kenv override "npe.<unit>.<resname>"; if present,
 * store it in *val and announce the override.
 */
531 override_imageid(device_t dev, const char *resname, uint32_t *val)
533 int unit = device_get_unit(dev);
536 if (resource_int_value("npe", unit, resname, &resval) != 0)
540 device_printf(dev, "using npe.%d.%s=0x%x override\n",
541 unit, resname, resval);
/*
 * Load the default (or hint-overridden) firmware image for this NPE
 * and start it.  On ESRCH (image not present in the library) the
 * minor version is bumped and the load retried, up to minor 0xff.
 * After start, the firmware is expected to send a status message.
 */
547 ixpnpe_init(struct ixpnpe_softc *sc)
549 static const uint32_t npeconfig[NPE_MAX] = {
550 [NPE_A] = IXP425_NPE_A_IMAGEID,
551 [NPE_B] = IXP425_NPE_B_IMAGEID,
552 [NPE_C] = IXP425_NPE_C_IMAGEID,
554 uint32_t imageid, msg[2];
560 * Load NPE firmware and start it running. We assume
561 * that minor version bumps remain compatible so probe
562 * the firmware image starting with the expected version
563 * and then bump the minor version up to the max.
565 if (!override_imageid(sc->sc_dev, "imageid", &imageid))
566 imageid = npeconfig[sc->sc_npeid];
568 error = ixpnpe_load_firmware(sc, "npe_fw", imageid);
572 * ESRCH is returned when the requested image
575 if (error != ESRCH) {
576 device_printf(sc->sc_dev,
577 "cannot init NPE (error %d)\n", error);
580 /* bump the minor version up to the max possible */
581 if (NPEIMAGE_MINOR(imageid) == 0xff) {
582 device_printf(sc->sc_dev, "cannot locate firmware "
583 "(imageid 0x%08x)\n", imageid);
588 /* NB: firmware should respond with a status msg */
589 if (ixpnpe_recvmsg_sync(sc, msg) != 0) {
590 device_printf(sc->sc_dev,
591 "firmware did not respond as expected\n");
/*
 * Return the functionality id of the loaded microcode,
 * or 0 if no valid image is loaded.
 */
598 ixpnpe_getfunctionality(struct ixpnpe_softc *sc)
600 return (sc->validImage ? sc->functionalityId : 0);
/*
 * Return true iff all bits in 'expectedBitsSet' are set in
 * register 'reg'.
 */
604 npe_checkbits(struct ixpnpe_softc *sc, uint32_t reg, uint32_t expectedBitsSet)
608 val = npe_reg_read(sc, reg);
609 DPRINTFn(5, sc->sc_dev, "%s(0x%x, 0x%x) => 0x%x (%u)\n",
610 __func__, reg, expectedBitsSet, val,
611 (val & expectedBitsSet) == expectedBitsSet);
612 return ((val & expectedBitsSet) == expectedBitsSet);
/*
 * Return true iff the NPE execution-control status says "stopped".
 */
616 npe_isstopped(struct ixpnpe_softc *sc)
618 return npe_checkbits(sc,
619 IX_NPEDL_REG_OFFSET_EXCTL, IX_NPEDL_EXCTL_STATUS_STOP);
/*
 * Download one instruction-memory block from a microcode image.
 * Bounds-checks the target range against the NPE's instruction
 * memory size, then writes (and optionally verifies) each word.
 */
623 npe_load_ins(struct ixpnpe_softc *sc,
624 const IxNpeDlNpeMgrCodeBlock *bp, int verify)
626 uint32_t npeMemAddress;
629 npeMemAddress = bp->npeMemAddress;
630 blockSize = bp->size; /* NB: instruction/data count */
631 if (npeMemAddress + blockSize > sc->insMemSize) {
632 device_printf(sc->sc_dev,
633 "Block size %u too big for NPE memory\n", blockSize);
634 return EINVAL; /* XXX */
636 for (i = 0; i < blockSize; i++, npeMemAddress++) {
637 if (npe_ins_write(sc, npeMemAddress, bp->data[i], verify) != 0) {
638 device_printf(sc->sc_dev,
639 "NPE instruction write failed");
/*
 * Download one data-memory block from a microcode image; same
 * structure as npe_load_ins but targets NPE data memory.
 */
647 npe_load_data(struct ixpnpe_softc *sc,
648 const IxNpeDlNpeMgrCodeBlock *bp, int verify)
650 uint32_t npeMemAddress;
653 npeMemAddress = bp->npeMemAddress;
654 blockSize = bp->size; /* NB: instruction/data count */
655 if (npeMemAddress + blockSize > sc->dataMemSize) {
656 device_printf(sc->sc_dev,
657 "Block size %u too big for NPE memory\n", blockSize);
660 for (i = 0; i < blockSize; i++, npeMemAddress++) {
661 if (npe_data_write(sc, npeMemAddress, bp->data[i], verify) != 0) {
662 device_printf(sc->sc_dev, "NPE data write failed\n");
/*
 * Download a state-information block: a list of (context-register
 * address, value) pairs written to the NPE context store via
 * single-stepped instructions.  Validates the register number and
 * context number decoded from each addressInfo word.
 */
670 npe_load_stateinfo(struct ixpnpe_softc *sc,
671 const IxNpeDlNpeMgrStateInfoBlock *bp, int verify)
673 int i, nentries, error;
/* single-step machinery is needed for context-store writes */
675 npe_cpu_step_save(sc);
677 /* for each state-info context register entry in block */
678 nentries = bp->size / IX_NPEDL_STATE_INFO_ENTRY_SIZE;
680 for (i = 0; i < nentries; i++) {
681 /* each state-info entry is 2 words (address, value) */
682 uint32_t regVal = bp->ctxtRegEntry[i].value;
683 uint32_t addrInfo = bp->ctxtRegEntry[i].addressInfo;
685 uint32_t reg = (addrInfo & IX_NPEDL_MASK_STATE_ADDR_CTXT_REG);
686 uint32_t cNum = (addrInfo & IX_NPEDL_MASK_STATE_ADDR_CTXT_NUM) >>
687 IX_NPEDL_OFFSET_STATE_ADDR_CTXT_NUM;
689 /* error-check Context Register No. and Context Number values */
690 if (!(0 <= reg && reg < IX_NPEDL_CTXT_REG_MAX)) {
691 device_printf(sc->sc_dev,
692 "invalid Context Register %u\n", reg);
696 if (!(0 <= cNum && cNum < IX_NPEDL_CTXT_NUM_MAX)) {
697 device_printf(sc->sc_dev,
698 "invalid Context Number %u\n", cNum);
702 /* NOTE that there is no STEVT register for Context 0 */
703 if (cNum == 0 && reg == IX_NPEDL_CTXT_REG_STEVT) {
704 device_printf(sc->sc_dev,
705 "no STEVT for Context 0\n");
710 if (npe_ctx_reg_write(sc, cNum, reg, regVal, verify) != 0) {
711 device_printf(sc->sc_dev,
712 "write of state-info to NPE failed\n");
718 npe_cpu_step_restore(sc);
/*
 * Download a complete microcode image to a stopped NPE.  The image
 * begins with a download map whose entries describe instruction,
 * data and state-info blocks (by offset and type); each entry is
 * dispatched to the matching npe_load_* routine until the
 * end-of-map marker is reached.
 */
723 npe_load_image(struct ixpnpe_softc *sc,
724 const uint32_t *imageCodePtr, int verify)
726 #define EOM(marker) ((marker) == IX_NPEDL_END_OF_DOWNLOAD_MAP)
727 const IxNpeDlNpeMgrDownloadMap *downloadMap;
730 if (!npe_isstopped(sc)) { /* verify NPE is stopped */
731 device_printf(sc->sc_dev,
732 "cannot load image, NPE not stopped\n");
737 * Read Download Map, checking each block type and calling
738 * appropriate function to perform download
741 downloadMap = (const IxNpeDlNpeMgrDownloadMap *) imageCodePtr;
742 for (i = 0; !EOM(downloadMap->entry[i].eodmMarker); i++) {
743 /* calculate pointer to block to be downloaded */
744 const uint32_t *bp = imageCodePtr +
745 downloadMap->entry[i].block.offset;
746 switch (downloadMap->entry[i].block.type) {
747 case IX_NPEDL_BLOCK_TYPE_INSTRUCTION:
748 error = npe_load_ins(sc,
749 (const IxNpeDlNpeMgrCodeBlock *) bp, verify);
750 DPRINTF(sc->sc_dev, "%s: inst, error %d\n",
753 case IX_NPEDL_BLOCK_TYPE_DATA:
754 error = npe_load_data(sc,
755 (const IxNpeDlNpeMgrCodeBlock *) bp, verify);
756 DPRINTF(sc->sc_dev, "%s: data, error %d\n",
759 case IX_NPEDL_BLOCK_TYPE_STATE:
760 error = npe_load_stateinfo(sc,
761 (const IxNpeDlNpeMgrStateInfoBlock *) bp, verify);
762 DPRINTF(sc->sc_dev, "%s: state, error %d\n",
766 device_printf(sc->sc_dev,
767 "unknown block type 0x%x in download map\n",
768 downloadMap->entry[i].block.type);
769 error = EIO; /* XXX */
779 /* contains Reset values for Execution Context Stack (ECS) registers */
780 static const struct {
782 uint32_t regResetVal;
783 } ixNpeDlEcsRegResetValues[] = {
784 { IX_NPEDL_ECS_BG_CTXT_REG_0, IX_NPEDL_ECS_BG_CTXT_REG_0_RESET },
785 { IX_NPEDL_ECS_BG_CTXT_REG_1, IX_NPEDL_ECS_BG_CTXT_REG_1_RESET },
786 { IX_NPEDL_ECS_BG_CTXT_REG_2, IX_NPEDL_ECS_BG_CTXT_REG_2_RESET },
787 { IX_NPEDL_ECS_PRI_1_CTXT_REG_0, IX_NPEDL_ECS_PRI_1_CTXT_REG_0_RESET },
788 { IX_NPEDL_ECS_PRI_1_CTXT_REG_1, IX_NPEDL_ECS_PRI_1_CTXT_REG_1_RESET },
789 { IX_NPEDL_ECS_PRI_1_CTXT_REG_2, IX_NPEDL_ECS_PRI_1_CTXT_REG_2_RESET },
790 { IX_NPEDL_ECS_PRI_2_CTXT_REG_0, IX_NPEDL_ECS_PRI_2_CTXT_REG_0_RESET },
791 { IX_NPEDL_ECS_PRI_2_CTXT_REG_1, IX_NPEDL_ECS_PRI_2_CTXT_REG_1_RESET },
792 { IX_NPEDL_ECS_PRI_2_CTXT_REG_2, IX_NPEDL_ECS_PRI_2_CTXT_REG_2_RESET },
793 { IX_NPEDL_ECS_DBG_CTXT_REG_0, IX_NPEDL_ECS_DBG_CTXT_REG_0_RESET },
794 { IX_NPEDL_ECS_DBG_CTXT_REG_1, IX_NPEDL_ECS_DBG_CTXT_REG_1_RESET },
795 { IX_NPEDL_ECS_DBG_CTXT_REG_2, IX_NPEDL_ECS_DBG_CTXT_REG_2_RESET },
796 { IX_NPEDL_ECS_INSTRUCT_REG, IX_NPEDL_ECS_INSTRUCT_REG_RESET }
799 /* contains Reset values for Context Store Registers */
800 static const uint32_t ixNpeDlCtxtRegResetValues[] = {
801 IX_NPEDL_CTXT_REG_RESET_STEVT,
802 IX_NPEDL_CTXT_REG_RESET_STARTPC,
803 IX_NPEDL_CTXT_REG_RESET_REGMAP,
804 IX_NPEDL_CTXT_REG_RESET_CINDEX,
807 #define IX_NPEDL_PARITY_BIT_MASK 0x3F00FFFF
808 #define IX_NPEDL_CONFIG_CTRL_REG_MASK 0x3F3FFFFF
812 * Reset the NPE and its coprocessor using the
813 * fuse bits in the feature control register.
/* NB: npeid selects which EXP_FCTRL_NPEx fuse bit is toggled */
818 uint32_t mask = EXP_FCTRL_NPEA << npeid;
821 v = ixp4xx_read_feature_bits();
/* fuse (reset) ... */
822 ixp4xx_write_feature_bits(v &~ mask);
823 /* un-fuse and un-reset the NPE & coprocessor */
824 ixp4xx_write_feature_bits(v | mask);
/*
 * Software reset of the NPE core: drain the watch-point/out/in
 * FIFOs, reset the mailbox, zero the physical register file, reset
 * all context-store and ECS registers, clear counters, toggle the
 * feature-control fuse bits, then stop the NPE and restore the
 * configuration control register.
 */
829 npe_cpu_reset(struct ixpnpe_softc *sc)
831 #define N(a) (sizeof(a) / sizeof(a[0]))
832 uint32_t ctxtReg; /* identifies Context Store reg (0-3) */
835 uint32_t ixNpeConfigCtrlRegVal;
838 /* pre-store the NPE Config Control Register Value */
839 ixNpeConfigCtrlRegVal = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_CTL);
840 ixNpeConfigCtrlRegVal |= 0x3F000000;
842 /* disable the parity interrupt */
843 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_CTL,
844 (ixNpeConfigCtrlRegVal & IX_NPEDL_PARITY_BIT_MASK));
845 DPRINTFn(2, sc->sc_dev, "%s: dis parity int, CTL => 0x%x\n",
846 __func__, ixNpeConfigCtrlRegVal & IX_NPEDL_PARITY_BIT_MASK);
848 npe_cpu_step_save(sc);
853 while (npe_checkbits(sc,
854 IX_NPEDL_REG_OFFSET_WFIFO, IX_NPEDL_MASK_WFIFO_VALID)) {
855 /* read from the Watch-point FIFO until empty */
856 (void) npe_reg_read(sc, IX_NPEDL_REG_OFFSET_WFIFO);
859 while (npe_checkbits(sc,
860 IX_NPEDL_REG_OFFSET_STAT, IX_NPEDL_MASK_STAT_OFNE)) {
861 /* read from the outFIFO until empty */
862 (void) npe_reg_read(sc, IX_NPEDL_REG_OFFSET_FIFO);
865 while (npe_checkbits(sc,
866 IX_NPEDL_REG_OFFSET_STAT, IX_NPEDL_MASK_STAT_IFNE)) {
868 * Step execution of the NPE instruction to read inFIFO using
869 * the Debug Executing Context stack.
871 error = npe_cpu_step(sc, IX_NPEDL_INSTR_RD_FIFO, 0, 0);
873 DPRINTF(sc->sc_dev, "%s: cannot step (1), error %u\n",
875 npe_cpu_step_restore(sc);
881 * Reset the mailbox reg
883 /* ...from XScale side */
884 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_MBST, IX_NPEDL_REG_RESET_MBST);
885 /* ...from NPE side */
886 error = npe_cpu_step(sc, IX_NPEDL_INSTR_RESET_MBOX, 0, 0);
888 DPRINTF(sc->sc_dev, "%s: cannot step (2), error %u\n",
890 npe_cpu_step_restore(sc);
895 * Reset the physical registers in the NPE register file:
896 * Note: no need to save/restore REGMAP for Context 0 here
897 * since all Context Store regs are reset in subsequent code.
900 regAddr < IX_NPEDL_TOTAL_NUM_PHYS_REG && error == 0;
902 /* for each physical register in the NPE reg file, write 0 : */
903 error = npe_physical_reg_write(sc, regAddr, 0, TRUE);
905 DPRINTF(sc->sc_dev, "%s: cannot write phy reg,"
906 "error %u\n", __func__, error);
907 npe_cpu_step_restore(sc);
908 return error; /* abort reset */
913 * Reset the context store:
915 for (i = IX_NPEDL_CTXT_NUM_MIN; i <= IX_NPEDL_CTXT_NUM_MAX; i++) {
916 /* set each context's Context Store registers to reset values */
917 for (ctxtReg = 0; ctxtReg < IX_NPEDL_CTXT_REG_MAX; ctxtReg++) {
918 /* NOTE that there is no STEVT register for Context 0 */
919 if (i == 0 && ctxtReg == IX_NPEDL_CTXT_REG_STEVT)
921 regVal = ixNpeDlCtxtRegResetValues[ctxtReg];
922 error = npe_ctx_reg_write(sc, i, ctxtReg,
925 DPRINTF(sc->sc_dev, "%s: cannot write ctx reg,"
926 "error %u\n", __func__, error);
927 npe_cpu_step_restore(sc);
928 return error; /* abort reset */
933 npe_cpu_step_restore(sc);
935 /* write Reset values to Execution Context Stack registers */
936 for (i = 0; i < N(ixNpeDlEcsRegResetValues); i++)
937 npe_ecs_reg_write(sc,
938 ixNpeDlEcsRegResetValues[i].regAddr,
939 ixNpeDlEcsRegResetValues[i].regResetVal);
941 /* clear the profile counter */
942 npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_CLR_PROFILE_CNT);
944 /* clear registers EXCT, AP0, AP1, AP2 and AP3 */
945 for (regAddr = IX_NPEDL_REG_OFFSET_EXCT;
946 regAddr <= IX_NPEDL_REG_OFFSET_AP3;
947 regAddr += sizeof(uint32_t))
948 npe_reg_write(sc, regAddr, 0);
950 /* Reset the Watch-count register */
951 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_WC, 0);
954 * WR IXA00055043 - Remove IMEM Parity Introduced by NPE Reset Operation
955 * XXX Removed because it breaks IXP435 operation; e.g. on Gateworks
956 * XXX 2358 boards resetting NPE-A after NPE-C is running causes both
957 * XXX npe's to stop working
959 npe_reset(sc->sc_npeid);
962 * Call NpeMgr function to stop the NPE again after the Feature Control
963 * has unfused and Un-Reset the NPE and its associated Coprocessors.
965 error = npe_cpu_stop(sc);
967 /* restore NPE configuration bus Control Register - Parity Settings */
968 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_CTL,
969 (ixNpeConfigCtrlRegVal & IX_NPEDL_CONFIG_CTRL_REG_MASK));
970 DPRINTFn(2, sc->sc_dev, "%s: restore CTL => 0x%x\n",
971 __func__, npe_reg_read(sc, IX_NPEDL_REG_OFFSET_CTL));
/*
 * Start NPE execution: deactivate the priority-1, priority-2 and
 * debug ECS levels so only the background context runs, clear the
 * pipeline, issue the START command, and confirm the RUN status bit.
 * Returns 0 on success, EIO if the NPE did not enter RUN state.
 */
978 npe_cpu_start(struct ixpnpe_softc *sc)
983 * Ensure only Background Context Stack Level is Active by turning off
984 * the Active bit in each of the other Executing Context Stack levels.
986 ecsRegVal = npe_ecs_reg_read(sc, IX_NPEDL_ECS_PRI_1_CTXT_REG_0);
987 ecsRegVal &= ~IX_NPEDL_MASK_ECS_REG_0_ACTIVE;
988 npe_ecs_reg_write(sc, IX_NPEDL_ECS_PRI_1_CTXT_REG_0, ecsRegVal);
990 ecsRegVal = npe_ecs_reg_read(sc, IX_NPEDL_ECS_PRI_2_CTXT_REG_0);
991 ecsRegVal &= ~IX_NPEDL_MASK_ECS_REG_0_ACTIVE;
992 npe_ecs_reg_write(sc, IX_NPEDL_ECS_PRI_2_CTXT_REG_0, ecsRegVal);
994 ecsRegVal = npe_ecs_reg_read(sc, IX_NPEDL_ECS_DBG_CTXT_REG_0);
995 ecsRegVal &= ~IX_NPEDL_MASK_ECS_REG_0_ACTIVE;
996 npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_0, ecsRegVal);
998 /* clear the pipeline */
999 npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
1001 /* start NPE execution by issuing cmd through EXCTL register on NPE */
1002 npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_START);
1005 * Check execution status of NPE to verify operation was successful.
1007 return npe_checkbits(sc,
1008 IX_NPEDL_REG_OFFSET_EXCTL, IX_NPEDL_EXCTL_STATUS_RUN) ? 0 : EIO;
/*
 * Stop NPE execution; returns 0 on success, EIO if the STOP status
 * bit did not assert.
 */
1012 npe_cpu_stop(struct ixpnpe_softc *sc)
1014 /* stop NPE execution by issuing cmd through EXCTL register on NPE */
1015 npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_STOP);
1017 /* verify that NPE Stop was successful */
1018 return npe_checkbits(sc,
1019 IX_NPEDL_REG_OFFSET_EXCTL, IX_NPEDL_EXCTL_STATUS_STOP) ? 0 : EIO;
1022 #define IX_NPEDL_REG_SIZE_BYTE 8
1023 #define IX_NPEDL_REG_SIZE_SHORT 16
1024 #define IX_NPEDL_REG_SIZE_WORD 32
1027 * Introduce extra read cycles after issuing read command to NPE
1028 * so that we read the register after the NPE has updated it
1029 * This is to overcome race condition between XScale and NPE
1031 #define IX_NPEDL_DELAY_READ_CYCLES 2
1033 * To mask top three MSBs of 32bit word to download into NPE IMEM
1035 #define IX_NPEDL_MASK_UNUSED_IMEM_BITS 0x1FFFFFFF;
/*
 * Issue a write command to the NPE: load EXDATA/EXAD, then the
 * command into EXCTL.
 */
1038 npe_cmd_issue_write(struct ixpnpe_softc *sc,
1039 uint32_t cmd, uint32_t addr, uint32_t data)
1041 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXDATA, data);
1042 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXAD, addr);
1043 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCTL, cmd);
/*
 * Issue a read command and fetch the result from EXDATA.  The
 * extra read cycles work around the XScale/NPE race (see
 * IX_NPEDL_DELAY_READ_CYCLES above); only the last value is kept.
 */
1047 npe_cmd_issue_read(struct ixpnpe_softc *sc, uint32_t cmd, uint32_t addr)
1052 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXAD, addr);
1053 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCTL, cmd);
1054 for (i = 0; i <= IX_NPEDL_DELAY_READ_CYCLES; i++)
1055 data = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_EXDATA);
/*
 * Write one word of NPE instruction memory, optionally reading it
 * back to verify.  The top 3 bits are masked on readback since they
 * are unused in IMEM (WR:IXA00053900).
 */
1060 npe_ins_write(struct ixpnpe_softc *sc, uint32_t addr, uint32_t data, int verify)
1062 DPRINTFn(4, sc->sc_dev, "%s(0x%x, 0x%x)\n", __func__, addr, data);
1063 npe_cmd_issue_write(sc, IX_NPEDL_EXCTL_CMD_WR_INS_MEM, addr, data);
1068 * Write invalid data to this reg, so we can see if we're
1069 * reading the EXDATA register too early.
1071 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXDATA, ~data);
1074 * Disabled since top 3 MSB are not used for Azusa
1075 * hardware Refer WR:IXA00053900
1077 data &= IX_NPEDL_MASK_UNUSED_IMEM_BITS;
1079 rdata = npe_cmd_issue_read(sc, IX_NPEDL_EXCTL_CMD_RD_INS_MEM,
1081 rdata &= IX_NPEDL_MASK_UNUSED_IMEM_BITS;
/*
 * Write one word of NPE data memory, optionally reading it back
 * to verify.
 */
1090 npe_data_write(struct ixpnpe_softc *sc, uint32_t addr, uint32_t data, int verify)
1092 DPRINTFn(4, sc->sc_dev, "%s(0x%x, 0x%x)\n", __func__, addr, data);
1093 npe_cmd_issue_write(sc, IX_NPEDL_EXCTL_CMD_WR_DATA_MEM, addr, data);
1096 * Write invalid data to this reg, so we can see if we're
1097 * reading the EXDATA register too early.
1099 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXDATA, ~data);
1100 if (data != npe_cmd_issue_read(sc, IX_NPEDL_EXCTL_CMD_RD_DATA_MEM, addr))
/*
 * Write an Execution Context Stack register.
 */
1107 npe_ecs_reg_write(struct ixpnpe_softc *sc, uint32_t reg, uint32_t data)
1109 npe_cmd_issue_write(sc, IX_NPEDL_EXCTL_CMD_WR_ECS_REG, reg, data);
/*
 * Read an Execution Context Stack register.
 */
1113 npe_ecs_reg_read(struct ixpnpe_softc *sc, uint32_t reg)
1115 return npe_cmd_issue_read(sc, IX_NPEDL_EXCTL_CMD_RD_ECS_REG, reg);
/*
 * Issue a bare command through the EXCTL register.
 */
1119 npe_issue_cmd(struct ixpnpe_softc *sc, uint32_t command)
1121 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCTL, command);
/*
 * Prepare for single-stepping: save and clear the Execution Count
 * register and save DBG ECS reg 2, temporarily setting IF/IE so a
 * stepped instruction cannot loop forever.  Paired with
 * npe_cpu_step_restore.
 */
1125 npe_cpu_step_save(struct ixpnpe_softc *sc)
1127 /* turn off the halt bit by clearing Execution Count register. */
1128 /* save reg contents 1st and restore later */
1129 sc->savedExecCount = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_EXCT);
1130 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCT, 0);
1132 /* ensure that IF and IE are on (temporarily), so that we don't end up
1133 * stepping forever */
1134 sc->savedEcsDbgCtxtReg2 = npe_ecs_reg_read(sc,
1135 IX_NPEDL_ECS_DBG_CTXT_REG_2);
1137 npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_2,
1138 (sc->savedEcsDbgCtxtReg2 | IX_NPEDL_MASK_ECS_DBG_REG_2_IF |
1139 IX_NPEDL_MASK_ECS_DBG_REG_2_IE));
/*
 * Single-step one NPE instruction in the given context using the Debug
 * Executing Context Stack (ECS): program the debug-level ECS registers,
 * clear the pipeline, load the instruction, issue a Step One command, and
 * then spin until the Watch Count register advances (indicating the
 * instruction completed).  Returns 0 on completion, EIO if the Watch Count
 * never changes within the retry budget.
 *
 * `ldur` selects the instruction's LDUR (load-use delay) field in debug ECS
 * register 0; `ctxtNum` selects both the executing context and the context
 * store accessed.
 */
1143 npe_cpu_step(struct ixpnpe_softc *sc, uint32_t npeInstruction,
1144 uint32_t ctxtNum, uint32_t ldur)
1146 #define IX_NPE_DL_MAX_NUM_OF_RETRIES 1000000
1147 uint32_t ecsDbgRegVal;
1148 uint32_t oldWatchcount, newWatchcount;
1151 /* set the Active bit, and the LDUR, in the debug level */
1152 ecsDbgRegVal = IX_NPEDL_MASK_ECS_REG_0_ACTIVE |
1153 (ldur << IX_NPEDL_OFFSET_ECS_REG_0_LDUR);
1155 npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_0, ecsDbgRegVal);
1158 * Set CCTXT at ECS DEBUG L3 to specify in which context to execute the
1159 * instruction, and set SELCTXT at ECS DEBUG Level to specify which
1160 * context store to access.
1161 * Debug ECS Level Reg 1 has form 0x000n000n, where n = context number
1163 ecsDbgRegVal = (ctxtNum << IX_NPEDL_OFFSET_ECS_REG_1_CCTXT) |
1164 (ctxtNum << IX_NPEDL_OFFSET_ECS_REG_1_SELCTXT);
1166 npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_1, ecsDbgRegVal);
1168 /* clear the pipeline */
1169 npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
1171 /* load NPE instruction into the instruction register */
1172 npe_ecs_reg_write(sc, IX_NPEDL_ECS_INSTRUCT_REG, npeInstruction);
1174 /* need this value later to wait for completion of NPE execution step */
1175 oldWatchcount = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_WC);
1177 /* issue a Step One command via the Execution Control register */
1178 npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_STEP);
1181 * Force the XScale to wait until the NPE has finished execution step
1182 * NOTE that this delay will be very small, just long enough to allow a
1183 * single NPE instruction to complete execution; if instruction
1184 * execution is not completed before timeout retries, exit the while
1187 newWatchcount = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_WC);
1188 for (tries = 0; tries < IX_NPE_DL_MAX_NUM_OF_RETRIES &&
1189 newWatchcount == oldWatchcount; tries++) {
1190 /* Watch Count register incr's when NPE completes an inst */
1191 newWatchcount = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_WC);
     /* completed within budget -> 0, otherwise timed out -> EIO */
1193 return (tries < IX_NPE_DL_MAX_NUM_OF_RETRIES) ? 0 : EIO;
1194 #undef IX_NPE_DL_MAX_NUM_OF_RETRIES
/*
 * Undo npe_cpu_step_save(): deactivate the debug ECS level, clear the
 * pipeline, and restore the Execution Count register and the IF/IE bits in
 * debug ECS register 2 to the values saved before stepping.
 */
1198 npe_cpu_step_restore(struct ixpnpe_softc *sc)
1200 /* clear active bit in debug level */
1201 npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_0, 0);
1203 /* clear the pipeline */
1204 npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
1206 /* restore Execution Count register contents. */
1207 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCT, sc->savedExecCount);
1209 /* restore IF and IE bits to original values */
1210 npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_2, sc->savedEcsDbgCtxtReg2);
/*
 * Read a logical NPE register of the given size (byte/short/word) in the
 * given context by single-stepping a synthesized "movX dN, dN" read
 * instruction and capturing the value from EXDATA.  The raw EXDATA word is
 * left-aligned, so it is shifted right by (word size - regSize) and masked
 * before being stored in *regVal.  Returns 0 on success or the error from
 * npe_cpu_step().  NOTE(review): the per-size `mask` assignments and the
 * default switch case are on elided lines — confirm in full source.
 */
1214 npe_logical_reg_read(struct ixpnpe_softc *sc,
1215 uint32_t regAddr, uint32_t regSize,
1216 uint32_t ctxtNum, uint32_t *regVal)
1218 uint32_t npeInstruction, mask;
1222 case IX_NPEDL_REG_SIZE_BYTE:
1223 npeInstruction = IX_NPEDL_INSTR_RD_REG_BYTE;
1226 case IX_NPEDL_REG_SIZE_SHORT:
1227 npeInstruction = IX_NPEDL_INSTR_RD_REG_SHORT;
1230 case IX_NPEDL_REG_SIZE_WORD:
1231 npeInstruction = IX_NPEDL_INSTR_RD_REG_WORD;
1238 /* make regAddr be the SRC and DEST operands (e.g. movX d0, d0) */
1239 npeInstruction |= (regAddr << IX_NPEDL_OFFSET_INSTR_SRC) |
1240 (regAddr << IX_NPEDL_OFFSET_INSTR_DEST);
1242 /* step execution of NPE inst using Debug Executing Context stack */
1243 error = npe_cpu_step(sc, npeInstruction, ctxtNum,
1244 IX_NPEDL_RD_INSTR_LDUR);
1246 DPRINTF(sc->sc_dev, "%s(0x%x, %u, %u), cannot step, error %d\n",
1247 __func__, regAddr, regSize, ctxtNum, error);
1250 /* read value of register from Execution Data register */
1251 *regVal = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_EXDATA);
1253 /* align value from left to right */
1254 *regVal = (*regVal >> (IX_NPEDL_REG_SIZE_WORD - regSize)) & mask;
/*
 * Write a logical NPE register of the given size in the given context.
 * A word-sized write is decomposed into two short writes (NPE register
 * addressing is big-endian/left-to-right: |d0|d1|d2|d3|).  Byte and short
 * writes synthesize an immediate-form write instruction — destination
 * register in the DEST field, low 5 bits of the value in the SRC field,
 * remaining 11 bits in the coprocessor field — and single-step it via the
 * Debug ECS.  When `verify` is set, the register is read back and compared.
 * Returns 0 on success, EIO on a verify mismatch, or the error from
 * npe_cpu_step().
 */
1260 npe_logical_reg_write(struct ixpnpe_softc *sc, uint32_t regAddr, uint32_t regVal,
1261 uint32_t regSize, uint32_t ctxtNum, int verify)
1265 DPRINTFn(4, sc->sc_dev, "%s(0x%x, 0x%x, %u, %u)\n",
1266 __func__, regAddr, regVal, regSize, ctxtNum);
1267 if (regSize == IX_NPEDL_REG_SIZE_WORD) {
1269 * NPE register addressing is left-to-right: e.g. |d0|d1|d2|d3|
1270 * Write upper half-word (short) to |d0|d1|
1272 error = npe_logical_reg_write(sc, regAddr,
1273 regVal >> IX_NPEDL_REG_SIZE_SHORT,
1274 IX_NPEDL_REG_SIZE_SHORT, ctxtNum, verify);
1278 /* Write lower half-word (short) to |d2|d3| */
1279 error = npe_logical_reg_write(sc,
1280 regAddr + sizeof(uint16_t),
1282 IX_NPEDL_REG_SIZE_SHORT, ctxtNum, verify);
1284 uint32_t npeInstruction;
1287 case IX_NPEDL_REG_SIZE_BYTE:
1288 npeInstruction = IX_NPEDL_INSTR_WR_REG_BYTE;
1291 case IX_NPEDL_REG_SIZE_SHORT:
1292 npeInstruction = IX_NPEDL_INSTR_WR_REG_SHORT;
1298 /* fill dest operand field of inst with dest reg addr */
1299 npeInstruction |= (regAddr << IX_NPEDL_OFFSET_INSTR_DEST);
1301 /* fill src operand field of inst with least-sig 5 bits of val*/
1303 ((regVal & IX_NPEDL_MASK_IMMED_INSTR_SRC_DATA) <<
1304 IX_NPEDL_OFFSET_INSTR_SRC);
1306 /* fill coprocessor field of inst with most-sig 11 bits of val*/
1308 ((regVal & IX_NPEDL_MASK_IMMED_INSTR_COPROC_DATA) <<
1309 IX_NPEDL_DISPLACE_IMMED_INSTR_COPROC_DATA);
1311 /* step execution of NPE instruction using Debug ECS */
1312 error = npe_cpu_step(sc, npeInstruction,
1313 ctxtNum, IX_NPEDL_WR_INSTR_LDUR);
1316 DPRINTF(sc->sc_dev, "%s(0x%x, 0x%x, %u, %u), error %u "
1317 "writing reg\n", __func__, regAddr, regVal, regSize,
     /* optional read-back verification; mismatch is reported as EIO */
1324 error = npe_logical_reg_read(sc, regAddr, regSize, ctxtNum,
1326 if (error == 0 && regVal != retRegVal)
1327 error = EIO; /* XXX ambiguous */
1333 * There are 32 physical registers used in an NPE. These are
1334 * treated as 16 pairs of 32-bit registers. To write one of the pair,
1335 * write the pair number (0-15) to the REGMAP for Context 0. Then write
1336 * the value to register 0 or 4 in the regfile, depending on which
1337 * register of the pair is to be written.
/*
 * Write one of the NPE's 32 physical registers: first program the context-0
 * REGMAP with the pair number (regAddr >> 1), then write the value to
 * logical register 0 or 4 depending on which member of the pair is
 * addressed.  Returns the error from the underlying logical-register
 * writes.  NOTE(review): the early-return-on-error between the two writes
 * is on elided lines — confirm in full source.
 */
1340 npe_physical_reg_write(struct ixpnpe_softc *sc,
1341 uint32_t regAddr, uint32_t regValue, int verify)
1346 * Set REGMAP for context 0 to (regAddr >> 1) to choose which pair
1347 * (0-15) of physical registers to write .
1349 error = npe_logical_reg_write(sc, IX_NPEDL_CTXT_REG_ADDR_REGMAP,
1350 (regAddr >> IX_NPEDL_OFFSET_PHYS_REG_ADDR_REGMAP),
1351 IX_NPEDL_REG_SIZE_SHORT, 0, verify);
1353 /* regAddr = 0 or 4 */
1354 regAddr = (regAddr & IX_NPEDL_MASK_PHYS_REG_ADDR_LOGICAL_ADDR) *
1356 error = npe_logical_reg_write(sc, regAddr, regValue,
1357 IX_NPEDL_REG_SIZE_WORD, 0, verify);
/*
 * Write a per-context register (STEVT, STARTPC, REGMAP, or CINDEX) for the
 * given context.  Context 0 is special-cased for STARTPC: it has no STARTPC
 * register, so the value is instead folded into the NEXTPC field of the
 * Background ECS register 0, which determines where the NPE begins
 * executing.  All other cases are dispatched through a lookup table mapping
 * the context register to its logical address and access size.
 */
1363 npe_ctx_reg_write(struct ixpnpe_softc *sc, uint32_t ctxtNum,
1364 uint32_t ctxtReg, uint32_t ctxtRegVal, int verify)
1366 DPRINTFn(4, sc->sc_dev, "%s(%u, %u, %u)\n",
1367 __func__, ctxtNum, ctxtReg, ctxtRegVal);
1369 * Context 0 has no STARTPC. Instead, this value is used to set
1370 * NextPC for Background ECS, to set where NPE starts executing code
1372 if (ctxtNum == 0 && ctxtReg == IX_NPEDL_CTXT_REG_STARTPC) {
1373 /* read BG_CTXT_REG_0, update NEXTPC bits, & write back to reg*/
1374 uint32_t v = npe_ecs_reg_read(sc, IX_NPEDL_ECS_BG_CTXT_REG_0);
1375 v &= ~IX_NPEDL_MASK_ECS_REG_0_NEXTPC;
1376 v |= (ctxtRegVal << IX_NPEDL_OFFSET_ECS_REG_0_NEXTPC) &
1377 IX_NPEDL_MASK_ECS_REG_0_NEXTPC;
1379 npe_ecs_reg_write(sc, IX_NPEDL_ECS_BG_CTXT_REG_0, v);
     /* table of logical address + access size for each context register */
1382 static const struct {
1383 uint32_t regAddress;
1385 } regAccInfo[IX_NPEDL_CTXT_REG_MAX] = {
1386 { IX_NPEDL_CTXT_REG_ADDR_STEVT,
1387 IX_NPEDL_REG_SIZE_BYTE },
1388 { IX_NPEDL_CTXT_REG_ADDR_STARTPC,
1389 IX_NPEDL_REG_SIZE_SHORT },
1390 { IX_NPEDL_CTXT_REG_ADDR_REGMAP,
1391 IX_NPEDL_REG_SIZE_SHORT },
1392 { IX_NPEDL_CTXT_REG_ADDR_CINDEX,
1393 IX_NPEDL_REG_SIZE_BYTE }
1395 return npe_logical_reg_write(sc, regAccInfo[ctxtReg].regAddress,
1396 ctxtRegVal, regAccInfo[ctxtReg].regSize, ctxtNum, verify);
1401 * NPE Mailbox support.
     /* spin-loop budget for the mailbox FIFO status polls below */
1403 #define IX_NPEMH_MAXTRIES 100000
/*
 * Spin until the NPE output FIFO is non-empty (OFNE set in the status
 * register) or IX_NPEMH_MAXTRIES polls elapse.  Logs the last status on
 * timeout.  NOTE(review): the success/failure return values are on elided
 * lines; callers treat nonzero as "data available".
 */
1406 ofifo_wait(struct ixpnpe_softc *sc)
1410 for (i = 0; i < IX_NPEMH_MAXTRIES; i++) {
1411 if (npe_reg_read(sc, IX_NPESTAT) & IX_NPESTAT_OFNE)
1415 device_printf(sc->sc_dev, "%s: timeout, last status 0x%x\n",
1416 __func__, npe_reg_read(sc, IX_NPESTAT));
/*
 * Read a two-word message from the NPE output FIFO, waiting for each word
 * to become available.  Caller must hold sc_mtx.  NOTE(review): the error
 * returns after a failed ofifo_wait() and the final success return are on
 * elided lines — confirm in full source.
 */
1421 getmsg(struct ixpnpe_softc *sc, uint32_t msg[2])
1423 mtx_assert(&sc->sc_mtx, MA_OWNED);
1425 if (!ofifo_wait(sc))
1427 msg[0] = npe_reg_read(sc, IX_NPEFIFO);
1428 DPRINTF(sc->sc_dev, "%s: msg0 0x%x\n", __func__, msg[0]);
1429 if (!ofifo_wait(sc))
1431 msg[1] = npe_reg_read(sc, IX_NPEFIFO);
1432 DPRINTF(sc->sc_dev, "%s: msg1 0x%x\n", __func__, msg[1]);
/*
 * NPE interrupt handler.  An output-FIFO interrupt (OFINT) means the NPE
 * has posted a message; draining it into sc_msg silences the interrupt and
 * sets sc_msgwaiting for later consumption by recvmsg_locked().  A spurious
 * interrupt (OFINT clear) is logged and ignored.
 */
1437 ixpnpe_intr(void *arg)
1439 struct ixpnpe_softc *sc = arg;
1442 mtx_lock(&sc->sc_mtx);
1443 status = npe_reg_read(sc, IX_NPESTAT);
1444 DPRINTF(sc->sc_dev, "%s: status 0x%x\n", __func__, status);
1445 if ((status & IX_NPESTAT_OFINT) == 0) {
1446 /* NB: should not happen */
1447 device_printf(sc->sc_dev, "%s: status 0x%x\n",
1449 /* XXX must silence interrupt? */
1450 mtx_unlock(&sc->sc_mtx);
1454 * A message is waiting in the output FIFO, copy it so
1455 * the interrupt will be silenced.
1457 if (getmsg(sc, sc->sc_msg) == 0)
1458 sc->sc_msgwaiting = 1;
1459 mtx_unlock(&sc->sc_mtx);
/*
 * Spin until the NPE input FIFO is not full (IFNF set in the status
 * register) or IX_NPEMH_MAXTRIES polls elapse.  Logs the last status on
 * timeout.  NOTE(review): return values are on elided lines; callers treat
 * nonzero as "room available".
 */
1463 ififo_wait(struct ixpnpe_softc *sc)
1467 for (i = 0; i < IX_NPEMH_MAXTRIES; i++) {
1468 if (npe_reg_read(sc, IX_NPESTAT) & IX_NPESTAT_IFNF)
1472 device_printf(sc->sc_dev, "%s: timeout, last status 0x%x\n",
1473 __func__, npe_reg_read(sc, IX_NPESTAT));
/*
 * Write a two-word message to the NPE input FIFO, waiting for space before
 * each word.  Caller must hold sc_mtx.  NOTE(review): the error returns
 * after a failed ififo_wait() and the final success return are on elided
 * lines — confirm in full source.
 */
1478 putmsg(struct ixpnpe_softc *sc, const uint32_t msg[2])
1480 mtx_assert(&sc->sc_mtx, MA_OWNED);
1482 DPRINTF(sc->sc_dev, "%s: msg 0x%x:0x%x\n", __func__, msg[0], msg[1]);
1483 if (!ififo_wait(sc))
1485 npe_reg_write(sc, IX_NPEFIFO, msg[0]);
1486 if (!ififo_wait(sc))
1488 npe_reg_write(sc, IX_NPEFIFO, msg[1]);
1494 * Send a msg to the NPE and wait for a reply. We spin as
1495 * we may be called early with interrupts not properly set up.
/*
 * Synchronous request/reply: send a two-word message and poll for the
 * two-word reply, all under sc_mtx.  Returns 0 on success or the first
 * error from putmsg()/getmsg().  NOTE(review): getmsg() is presumably
 * skipped when putmsg() fails — the conditional is on an elided line.
 */
1498 ixpnpe_sendandrecvmsg_sync(struct ixpnpe_softc *sc,
1499 const uint32_t send[2], uint32_t recv[2])
1503 mtx_lock(&sc->sc_mtx);
1504 error = putmsg(sc, send);
1506 error = getmsg(sc, recv);
1507 mtx_unlock(&sc->sc_mtx);
1513 * Send a msg to the NPE w/o waiting for a reply.
/*
 * Fire-and-forget send: queue a two-word message to the NPE under sc_mtx
 * without waiting for a reply.  Returns the putmsg() result.
 */
1516 ixpnpe_sendmsg_async(struct ixpnpe_softc *sc, const uint32_t msg[2])
1520 mtx_lock(&sc->sc_mtx);
1521 error = putmsg(sc, msg);
1522 mtx_unlock(&sc->sc_mtx);
/*
 * Hand back a message previously captured by the interrupt handler, if
 * any, consuming it (sc_msgwaiting cleared).  Caller must hold sc_mtx.
 * NOTE(review): the returns — presumably 0 when a message was delivered
 * and EAGAIN otherwise, per callers at ixpnpe_recvmsg_* — are on elided
 * lines.
 */
1528 recvmsg_locked(struct ixpnpe_softc *sc, uint32_t msg[2])
1530 mtx_assert(&sc->sc_mtx, MA_OWNED);
1532 DPRINTF(sc->sc_dev, "%s: msgwaiting %d\n", __func__, sc->sc_msgwaiting);
1533 if (sc->sc_msgwaiting) {
1534 msg[0] = sc->sc_msg[0];
1535 msg[1] = sc->sc_msg[1];
1536 sc->sc_msgwaiting = 0;
1543 * Receive any msg previously received from the NPE. If nothing
1544 * is available we return EAGAIN and the caller is required to
1545 * do a synchronous receive or try again later.
/*
 * Non-blocking receive: return a message already captured by the interrupt
 * handler, or EAGAIN if none is pending (caller may then poll with
 * ixpnpe_recvmsg_sync() or retry later).
 */
1548 ixpnpe_recvmsg_async(struct ixpnpe_softc *sc, uint32_t msg[2])
1552 mtx_lock(&sc->sc_mtx);
1553 error = recvmsg_locked(sc, msg);
1554 mtx_unlock(&sc->sc_mtx);
1560 * Receive a msg from the NPE. If one was received asynchronously
1561 * then it's returned; otherwise we poll synchronously.
/*
 * Blocking receive: return a message captured asynchronously if one is
 * pending; otherwise fall back to polling the output FIFO directly via
 * getmsg().  NOTE(review): the final return is past the visible lines.
 */
1564 ixpnpe_recvmsg_sync(struct ixpnpe_softc *sc, uint32_t msg[2])
1568 mtx_lock(&sc->sc_mtx);
1569 error = recvmsg_locked(sc, msg);
1570 if (error == EAGAIN)
1571 error = getmsg(sc, msg);
1572 mtx_unlock(&sc->sc_mtx);