2 * Copyright (c) 2006-2008 Sam Leffler, Errno Consulting
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13 * redistribution must be conditioned upon including a substantially
14 * similar Disclaimer requirement for further binary redistribution.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGES.
31 * Copyright (c) 2001-2005, Intel Corporation.
32 * All rights reserved.
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
42 * 3. Neither the name of the Intel Corporation nor the names of its contributors
43 * may be used to endorse or promote products derived from this software
44 * without specific prior written permission.
47 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
48 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 #include <sys/cdefs.h>
60 __FBSDID("$FreeBSD$");
63 * Intel XScale Network Processing Engine (NPE) support.
65 * Each NPE has an ixpnpeX device associated with it that is
66 * attached at boot. Depending on the microcode loaded into
67 * an NPE there may be an Ethernet interface (npeX) or some
68 * other network interface (e.g. for ATM). This file has support
69 * for loading microcode images and the associated NPE CPU
70 * manipulations (start, stop, reset).
72 * The code here basically replaces the npeDl and npeMh classes
73 * in the Intel Access Library (IAL).
75 * NB: Microcode images are loaded with firmware(9). To
76 * include microcode in a static kernel include the
77 * ixpnpe_fw device. Otherwise the firmware will be
78 * automatically loaded from the filesystem.
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/kernel.h>
83 #include <sys/malloc.h>
84 #include <sys/module.h>
87 #include <sys/resource.h>
89 #include <sys/sysctl.h>
91 #include <sys/linker.h>
92 #include <sys/firmware.h>
94 #include <machine/bus.h>
95 #include <machine/cpu.h>
96 #include <machine/cpufunc.h>
97 #include <machine/resource.h>
98 #include <machine/intr.h>
99 #include <arm/xscale/ixp425/ixp425reg.h>
100 #include <arm/xscale/ixp425/ixp425var.h>
102 #include <arm/xscale/ixp425/ixp425_npereg.h>
103 #include <arm/xscale/ixp425/ixp425_npevar.h>
105 struct ixpnpe_softc {
107 bus_space_tag_t sc_iot;
108 bus_space_handle_t sc_ioh;
109 bus_size_t sc_size; /* size of mapped register window */
110 struct resource *sc_irq; /* IRQ resource */
111 void *sc_ih; /* interrupt handler */
112 struct mtx sc_mtx; /* mailbox lock */
113 uint32_t sc_msg[2]; /* reply msg collected in ixpnpe_intr */
114 int sc_msgwaiting; /* sc_msg holds valid data */
116 int sc_nrefs; /* # of references */
118 int validImage; /* valid ucode image loaded */
119 int started; /* NPE is started */
120 uint8_t functionalityId;/* ucode functionality ID */
121 int insMemSize; /* size of instruction memory */
122 int dataMemSize; /* size of data memory */
123 uint32_t savedExecCount;
124 uint32_t savedEcsDbgCtxtReg2;
126 static struct ixpnpe_softc *npes[NPE_MAX];
128 #define IX_NPEDL_NPEIMAGE_FIELD_MASK 0xff
130 /* used to read download map from version in microcode image */
131 #define IX_NPEDL_BLOCK_TYPE_INSTRUCTION 0x00000000
132 #define IX_NPEDL_BLOCK_TYPE_DATA 0x00000001
133 #define IX_NPEDL_BLOCK_TYPE_STATE 0x00000002
134 #define IX_NPEDL_END_OF_DOWNLOAD_MAP 0x0000000F
137 * masks used to extract address info from State information context
138 * register addresses as read from microcode image
140 #define IX_NPEDL_MASK_STATE_ADDR_CTXT_REG 0x0000000F
141 #define IX_NPEDL_MASK_STATE_ADDR_CTXT_NUM 0x000000F0
143 /* LSB offset of Context Number field in State-Info Context Address */
144 #define IX_NPEDL_OFFSET_STATE_ADDR_CTXT_NUM 4
146 /* size (in words) of single State Information entry (ctxt reg address|data) */
147 #define IX_NPEDL_STATE_INFO_ENTRY_SIZE 2
152 } IxNpeDlNpeMgrDownloadMapBlockEntry;
155 IxNpeDlNpeMgrDownloadMapBlockEntry block;
157 } IxNpeDlNpeMgrDownloadMapEntry;
160 /* 1st entry in the download map (there may be more than one) */
161 IxNpeDlNpeMgrDownloadMapEntry entry[1];
162 } IxNpeDlNpeMgrDownloadMap;
164 /* used to access an instruction or data block in a microcode image */
166 uint32_t npeMemAddress;
169 } IxNpeDlNpeMgrCodeBlock;
171 /* used to access each Context Reg entry state-information block */
173 uint32_t addressInfo;
175 } IxNpeDlNpeMgrStateInfoCtxtRegEntry;
177 /* used to access a state-information block in a microcode image */
180 IxNpeDlNpeMgrStateInfoCtxtRegEntry ctxtRegEntry[1];
181 } IxNpeDlNpeMgrStateInfoBlock;
183 static int npe_debug = 0;
184 SYSCTL_INT(_debug, OID_AUTO, ixp425npe, CTLFLAG_RWTUN, &npe_debug,
185 0, "IXP4XX NPE debug msgs");
186 #define DPRINTF(dev, fmt, ...) do { \
187 if (npe_debug) device_printf(dev, fmt, __VA_ARGS__); \
189 #define DPRINTFn(n, dev, fmt, ...) do { \
190 if (npe_debug >= n) printf(fmt, __VA_ARGS__); \
193 static int npe_checkbits(struct ixpnpe_softc *, uint32_t reg, uint32_t);
194 static int npe_isstopped(struct ixpnpe_softc *);
195 static int npe_load_ins(struct ixpnpe_softc *,
196 const IxNpeDlNpeMgrCodeBlock *bp, int verify);
197 static int npe_load_data(struct ixpnpe_softc *,
198 const IxNpeDlNpeMgrCodeBlock *bp, int verify);
199 static int npe_load_stateinfo(struct ixpnpe_softc *,
200 const IxNpeDlNpeMgrStateInfoBlock *bp, int verify);
201 static int npe_load_image(struct ixpnpe_softc *,
202 const uint32_t *imageCodePtr, int verify);
203 static int npe_cpu_reset(struct ixpnpe_softc *);
204 static int npe_cpu_start(struct ixpnpe_softc *);
205 static int npe_cpu_stop(struct ixpnpe_softc *);
206 static void npe_cmd_issue_write(struct ixpnpe_softc *,
207 uint32_t cmd, uint32_t addr, uint32_t data);
208 static uint32_t npe_cmd_issue_read(struct ixpnpe_softc *,
209 uint32_t cmd, uint32_t addr);
210 static int npe_ins_write(struct ixpnpe_softc *,
211 uint32_t addr, uint32_t data, int verify);
212 static int npe_data_write(struct ixpnpe_softc *,
213 uint32_t addr, uint32_t data, int verify);
214 static void npe_ecs_reg_write(struct ixpnpe_softc *,
215 uint32_t reg, uint32_t data);
216 static uint32_t npe_ecs_reg_read(struct ixpnpe_softc *, uint32_t reg);
217 static void npe_issue_cmd(struct ixpnpe_softc *, uint32_t command);
218 static void npe_cpu_step_save(struct ixpnpe_softc *);
219 static int npe_cpu_step(struct ixpnpe_softc *, uint32_t npeInstruction,
220 uint32_t ctxtNum, uint32_t ldur);
221 static void npe_cpu_step_restore(struct ixpnpe_softc *);
222 static int npe_logical_reg_read(struct ixpnpe_softc *,
223 uint32_t regAddr, uint32_t regSize,
224 uint32_t ctxtNum, uint32_t *regVal);
225 static int npe_logical_reg_write(struct ixpnpe_softc *,
226 uint32_t regAddr, uint32_t regVal,
227 uint32_t regSize, uint32_t ctxtNum, int verify);
228 static int npe_physical_reg_write(struct ixpnpe_softc *,
229 uint32_t regAddr, uint32_t regValue, int verify);
230 static int npe_ctx_reg_write(struct ixpnpe_softc *, uint32_t ctxtNum,
231 uint32_t ctxtReg, uint32_t ctxtRegVal, int verify);
233 static void ixpnpe_intr(void *arg);
236 npe_reg_read(struct ixpnpe_softc *sc, bus_size_t off)
238 uint32_t v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
239 DPRINTFn(9, sc->sc_dev, "%s(0x%lx) => 0x%x\n", __func__, off, v);
244 npe_reg_write(struct ixpnpe_softc *sc, bus_size_t off, uint32_t val)
246 DPRINTFn(9, sc->sc_dev, "%s(0x%lx, 0x%x)\n", __func__, off, val);
247 bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
/*
 * ixpnpe_attach: per-NPE attach.  Maps the NPE's register window,
 * records instruction/data memory sizes (per-NPE table on IXP42x,
 * fixed IXP46X_* constants otherwise), hooks the mailbox IRQ with
 * ixpnpe_intr, and enables output-FIFO interrupts (OFE|OFWE).
 *
 * NOTE(review): this extract has elided lines (the embedded original
 * line numbers skip, e.g. the sc_dev assignment, rid declaration and
 * npes[] caching are not visible); code left byte-identical.
 */
250 struct ixpnpe_softc *
251 ixpnpe_attach(device_t dev, int npeid)
257     uint32_t ins_memsize;
258     uint32_t data_memsize;
/* Per-NPE static configuration: register window, IRQ, memory sizes. */
260     static const struct npeconfig npeconfigs[NPE_MAX] = {
262         .base = IXP425_NPE_A_HWBASE,
263         .size = IXP425_NPE_A_SIZE,
264         .irq = IXP425_INT_NPE_A,
265         .ins_memsize = IX_NPEDL_INS_MEMSIZE_WORDS_NPEA,
266         .data_memsize = IX_NPEDL_DATA_MEMSIZE_WORDS_NPEA
269         .base = IXP425_NPE_B_HWBASE,
270         .size = IXP425_NPE_B_SIZE,
271         .irq = IXP425_INT_NPE_B,
272         .ins_memsize = IX_NPEDL_INS_MEMSIZE_WORDS_NPEB,
273         .data_memsize = IX_NPEDL_DATA_MEMSIZE_WORDS_NPEB
276         .base = IXP425_NPE_C_HWBASE,
277         .size = IXP425_NPE_C_SIZE,
278         .irq = IXP425_INT_NPE_C,
279         .ins_memsize = IX_NPEDL_INS_MEMSIZE_WORDS_NPEC,
280         .data_memsize = IX_NPEDL_DATA_MEMSIZE_WORDS_NPEC
283     struct ixp425_softc *sa = device_get_softc(device_get_parent(dev));
284     struct ixpnpe_softc *sc;
285     const struct npeconfig *config;
/* Validate caller-supplied NPE id before indexing npeconfigs[]. */
288     if (npeid >= NPE_MAX) {
289         device_printf(dev, "%s: bad npeid %d\n", __func__, npeid);
297     config = &npeconfigs[npeid];
300     sc = malloc(sizeof(struct ixpnpe_softc), M_TEMP, M_WAITOK | M_ZERO);
302     sc->sc_iot = sa->sc_iot;
303     mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "npe driver", MTX_DEF);
304     sc->sc_npeid = npeid;
307     sc->sc_size = config->size;
308     if (cpu_is_ixp42x()) {
309         /* NB: instruction/data memory sizes are NPE-dependent */
310         sc->insMemSize = config->ins_memsize;
311         sc->dataMemSize = config->data_memsize;
313         sc->insMemSize = IXP46X_NPEDL_INS_MEMSIZE_WORDS;
314         sc->dataMemSize = IXP46X_NPEDL_DATA_MEMSIZE_WORDS;
317     if (bus_space_map(sc->sc_iot, config->base, sc->sc_size, 0, &sc->sc_ioh))
318         panic("%s: Cannot map registers", device_get_name(dev));
321      * Setup IRQ and handler for NPE message support.
324     sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
325         config->irq, config->irq, 1, RF_ACTIVE);
326     if (sc->sc_irq == NULL)
327         panic("%s: Unable to allocate irq %u", device_get_name(dev),
329     /* XXX could be a source of entropy */
330     bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
331         NULL, ixpnpe_intr, sc, &sc->sc_ih);
333      * Enable output fifo interrupts (NB: must also set OFIFO Write Enable)
335     npe_reg_write(sc, IX_NPECTL,
336         npe_reg_read(sc, IX_NPECTL) | (IX_NPECTL_OFE | IX_NPECTL_OFWE));
344 ixpnpe_detach(struct ixpnpe_softc *sc)
346 if (--sc->sc_nrefs == 0) {
347 npes[sc->sc_npeid] = NULL;
349 /* disable output fifo interrupts */
350 npe_reg_write(sc, IX_NPECTL,
351 npe_reg_read(sc, IX_NPECTL) &~ (IX_NPECTL_OFE | IX_NPECTL_OFWE));
353 bus_teardown_intr(sc->sc_dev, sc->sc_irq, sc->sc_ih);
354 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_size);
355 mtx_destroy(&sc->sc_mtx);
361 ixpnpe_stopandreset(struct ixpnpe_softc *sc)
365 mtx_lock(&sc->sc_mtx);
366 error = npe_cpu_stop(sc); /* stop NPE */
368 error = npe_cpu_reset(sc); /* reset it */
370 sc->started = 0; /* mark stopped */
371 mtx_unlock(&sc->sc_mtx);
373 DPRINTF(sc->sc_dev, "%s: error %d\n", __func__, error);
378 ixpnpe_start_locked(struct ixpnpe_softc *sc)
383 error = npe_cpu_start(sc);
389 DPRINTF(sc->sc_dev, "%s: error %d\n", __func__, error);
394 ixpnpe_start(struct ixpnpe_softc *sc)
398 mtx_lock(&sc->sc_mtx);
399 ret = ixpnpe_start_locked(sc);
400 mtx_unlock(&sc->sc_mtx);
405 ixpnpe_stop(struct ixpnpe_softc *sc)
409 mtx_lock(&sc->sc_mtx);
410 error = npe_cpu_stop(sc);
413 mtx_unlock(&sc->sc_mtx);
415 DPRINTF(sc->sc_dev, "%s: error %d\n", __func__, error);
420 * Indicates the start of an NPE Image, in new NPE Image Library format.
421 * 2 consecutive occurrences indicates the end of the NPE Image Library
423 #define NPE_IMAGE_MARKER 0xfeedf00d
426 * NPE Image Header definition, used in new NPE Image Library format
432 } IxNpeDlImageMgrImageHeader;
435 npe_findimage(struct ixpnpe_softc *sc,
436 const uint32_t *imageLibrary, uint32_t imageId,
437 const uint32_t **imagePtr, uint32_t *imageSize)
439 const IxNpeDlImageMgrImageHeader *image;
442 while (imageLibrary[offset] == NPE_IMAGE_MARKER) {
443 image = (const IxNpeDlImageMgrImageHeader *)
444 &imageLibrary[offset];
445 offset += sizeof(IxNpeDlImageMgrImageHeader)/sizeof(uint32_t);
447 DPRINTF(sc->sc_dev, "%s: off %u mark 0x%x id 0x%x size %u\n",
448 __func__, offset, image->marker, image->id, image->size);
449 if (image->id == imageId) {
450 *imagePtr = imageLibrary + offset;
451 *imageSize = image->size;
454 /* 2 consecutive NPE_IMAGE_MARKER's indicates end of library */
455 if (image->id == NPE_IMAGE_MARKER) {
456 DPRINTF(sc->sc_dev, "imageId 0x%08x not found in "
457 "image library header\n", imageId);
458 /* reached end of library, image not found */
461 offset += image->size;
/*
 * ixpnpe_load_firmware: fetch the named firmware(9) image, locate the
 * requested imageId inside it, stop+reset the NPE, download the image
 * (with verification) and restart the NPE, recording the image's
 * functionality id on success.
 *
 * NOTE(review): this extract has elided lines (declarations of
 * error/imageSize, the fw == NULL and post-findimage error paths are
 * not visible); code left byte-identical.
 */
467 ixpnpe_load_firmware(struct ixpnpe_softc *sc, const char *imageName,
470     static const char *devname[4] =
471         { "IXP425", "IXP435/IXP465", "DeviceID#2", "DeviceID#3" };
473     const uint32_t *imageCodePtr;
474     const struct firmware *fw;
477     DPRINTF(sc->sc_dev, "load %s, imageId 0x%08x\n", imageName, imageId);
480     IxFeatureCtrlDeviceId devid = IX_NPEDL_DEVICEID_FROM_IMAGEID_GET(imageId);
482      * Checking if image being loaded is meant for device that is running.
483      * Image is forward compatible. i.e Image built for IXP42X should run
484      * on IXP46X but not vice versa.
486     if (devid > (ixFeatureCtrlDeviceRead() & IX_FEATURE_CTRL_DEVICE_TYPE_MASK))
489     error = ixpnpe_stopandreset(sc);        /* stop and reset the NPE */
493     fw = firmware_get(imageName);
497     /* Locate desired image in files w/ combined images */
498     error = npe_findimage(sc, fw->data, imageId, &imageCodePtr, &imageSize);
502     device_printf(sc->sc_dev,
503         "load fw image %s.NPE-%c Func 0x%x Rev %u.%u\n",
504         devname[NPEIMAGE_DEVID(imageId)], 'A' + NPEIMAGE_NPEID(imageId),
505         NPEIMAGE_FUNCID(imageId), NPEIMAGE_MAJOR(imageId),
506         NPEIMAGE_MINOR(imageId));
509      * If download was successful, store image Id in list of
510      * currently loaded images. If a critical error occurred
511      * during download, record that the NPE has an invalid image
513     mtx_lock(&sc->sc_mtx);
514     error = npe_load_image(sc, imageCodePtr, 1 /*VERIFY*/);
517     error = ixpnpe_start_locked(sc);
521     sc->functionalityId = IX_NPEDL_FUNCTIONID_FROM_IMAGEID_GET(imageId);
522     mtx_unlock(&sc->sc_mtx);
/* Drop the firmware(9) reference regardless of outcome. */
524     firmware_put(fw, FIRMWARE_UNLOAD);
525     DPRINTF(sc->sc_dev, "%s: error %d\n", __func__, error);
530 override_imageid(device_t dev, const char *resname, uint32_t *val)
532 int unit = device_get_unit(dev);
535 if (resource_int_value("npe", unit, resname, &resval) != 0)
539 device_printf(dev, "using npe.%d.%s=0x%x override\n",
540 unit, resname, resval);
/*
 * ixpnpe_init: load this NPE's firmware (default image id per NPE,
 * overridable via hints) and verify the firmware responds with a
 * status message.  Probes successive minor versions of the image
 * when the requested one is absent (ESRCH).
 *
 * NOTE(review): extract has elided lines (the retry loop structure,
 * error declaration and returns are not visible); code left
 * byte-identical.
 */
546 ixpnpe_init(struct ixpnpe_softc *sc)
548     static const uint32_t npeconfig[NPE_MAX] = {
549         [NPE_A] = IXP425_NPE_A_IMAGEID,
550         [NPE_B] = IXP425_NPE_B_IMAGEID,
551         [NPE_C] = IXP425_NPE_C_IMAGEID,
553     uint32_t imageid, msg[2];
559      * Load NPE firmware and start it running. We assume
560      * that minor version bumps remain compatible so probe
561      * the firmware image starting with the expected version
562      * and then bump the minor version up to the max.
564     if (!override_imageid(sc->sc_dev, "imageid", &imageid))
565         imageid = npeconfig[sc->sc_npeid];
567     error = ixpnpe_load_firmware(sc, "npe_fw", imageid);
571      * ESRCH is returned when the requested image
574     if (error != ESRCH) {
575         device_printf(sc->sc_dev,
576             "cannot init NPE (error %d)\n", error);
579     /* bump the minor version up to the max possible */
580     if (NPEIMAGE_MINOR(imageid) == 0xff) {
581         device_printf(sc->sc_dev, "cannot locate firmware "
582             "(imageid 0x%08x)\n", imageid);
587     /* NB: firmware should respond with a status msg */
588     if (ixpnpe_recvmsg_sync(sc, msg) != 0) {
589         device_printf(sc->sc_dev,
590             "firmware did not respond as expected\n");
597 ixpnpe_getfunctionality(struct ixpnpe_softc *sc)
599 return (sc->validImage ? sc->functionalityId : 0);
603 npe_checkbits(struct ixpnpe_softc *sc, uint32_t reg, uint32_t expectedBitsSet)
607 val = npe_reg_read(sc, reg);
608 DPRINTFn(5, sc->sc_dev, "%s(0x%x, 0x%x) => 0x%x (%u)\n",
609 __func__, reg, expectedBitsSet, val,
610 (val & expectedBitsSet) == expectedBitsSet);
611 return ((val & expectedBitsSet) == expectedBitsSet);
615 npe_isstopped(struct ixpnpe_softc *sc)
617 return npe_checkbits(sc,
618 IX_NPEDL_REG_OFFSET_EXCTL, IX_NPEDL_EXCTL_STATUS_STOP);
622 npe_load_ins(struct ixpnpe_softc *sc,
623 const IxNpeDlNpeMgrCodeBlock *bp, int verify)
625 uint32_t npeMemAddress;
628 npeMemAddress = bp->npeMemAddress;
629 blockSize = bp->size; /* NB: instruction/data count */
630 if (npeMemAddress + blockSize > sc->insMemSize) {
631 device_printf(sc->sc_dev,
632 "Block size %u too big for NPE memory\n", blockSize);
633 return EINVAL; /* XXX */
635 for (i = 0; i < blockSize; i++, npeMemAddress++) {
636 if (npe_ins_write(sc, npeMemAddress, bp->data[i], verify) != 0) {
637 device_printf(sc->sc_dev,
638 "NPE instruction write failed");
646 npe_load_data(struct ixpnpe_softc *sc,
647 const IxNpeDlNpeMgrCodeBlock *bp, int verify)
649 uint32_t npeMemAddress;
652 npeMemAddress = bp->npeMemAddress;
653 blockSize = bp->size; /* NB: instruction/data count */
654 if (npeMemAddress + blockSize > sc->dataMemSize) {
655 device_printf(sc->sc_dev,
656 "Block size %u too big for NPE memory\n", blockSize);
659 for (i = 0; i < blockSize; i++, npeMemAddress++) {
660 if (npe_data_write(sc, npeMemAddress, bp->data[i], verify) != 0) {
661 device_printf(sc->sc_dev, "NPE data write failed\n");
669 npe_load_stateinfo(struct ixpnpe_softc *sc,
670 const IxNpeDlNpeMgrStateInfoBlock *bp, int verify)
672 int i, nentries, error;
674 npe_cpu_step_save(sc);
676 /* for each state-info context register entry in block */
677 nentries = bp->size / IX_NPEDL_STATE_INFO_ENTRY_SIZE;
679 for (i = 0; i < nentries; i++) {
680 /* each state-info entry is 2 words (address, value) */
681 uint32_t regVal = bp->ctxtRegEntry[i].value;
682 uint32_t addrInfo = bp->ctxtRegEntry[i].addressInfo;
684 uint32_t reg = (addrInfo & IX_NPEDL_MASK_STATE_ADDR_CTXT_REG);
685 uint32_t cNum = (addrInfo & IX_NPEDL_MASK_STATE_ADDR_CTXT_NUM) >>
686 IX_NPEDL_OFFSET_STATE_ADDR_CTXT_NUM;
688 /* error-check Context Register No. and Context Number values */
689 if (!(0 <= reg && reg < IX_NPEDL_CTXT_REG_MAX)) {
690 device_printf(sc->sc_dev,
691 "invalid Context Register %u\n", reg);
695 if (!(0 <= cNum && cNum < IX_NPEDL_CTXT_NUM_MAX)) {
696 device_printf(sc->sc_dev,
697 "invalid Context Number %u\n", cNum);
701 /* NOTE that there is no STEVT register for Context 0 */
702 if (cNum == 0 && reg == IX_NPEDL_CTXT_REG_STEVT) {
703 device_printf(sc->sc_dev,
704 "no STEVT for Context 0\n");
709 if (npe_ctx_reg_write(sc, cNum, reg, regVal, verify) != 0) {
710 device_printf(sc->sc_dev,
711 "write of state-info to NPE failed\n");
717 npe_cpu_step_restore(sc);
722 npe_load_image(struct ixpnpe_softc *sc,
723 const uint32_t *imageCodePtr, int verify)
725 #define EOM(marker) ((marker) == IX_NPEDL_END_OF_DOWNLOAD_MAP)
726 const IxNpeDlNpeMgrDownloadMap *downloadMap;
729 if (!npe_isstopped(sc)) { /* verify NPE is stopped */
730 device_printf(sc->sc_dev,
731 "cannot load image, NPE not stopped\n");
736 * Read Download Map, checking each block type and calling
737 * appropriate function to perform download
740 downloadMap = (const IxNpeDlNpeMgrDownloadMap *) imageCodePtr;
741 for (i = 0; !EOM(downloadMap->entry[i].eodmMarker); i++) {
742 /* calculate pointer to block to be downloaded */
743 const uint32_t *bp = imageCodePtr +
744 downloadMap->entry[i].block.offset;
745 switch (downloadMap->entry[i].block.type) {
746 case IX_NPEDL_BLOCK_TYPE_INSTRUCTION:
747 error = npe_load_ins(sc,
748 (const IxNpeDlNpeMgrCodeBlock *) bp, verify);
749 DPRINTF(sc->sc_dev, "%s: inst, error %d\n",
752 case IX_NPEDL_BLOCK_TYPE_DATA:
753 error = npe_load_data(sc,
754 (const IxNpeDlNpeMgrCodeBlock *) bp, verify);
755 DPRINTF(sc->sc_dev, "%s: data, error %d\n",
758 case IX_NPEDL_BLOCK_TYPE_STATE:
759 error = npe_load_stateinfo(sc,
760 (const IxNpeDlNpeMgrStateInfoBlock *) bp, verify);
761 DPRINTF(sc->sc_dev, "%s: state, error %d\n",
765 device_printf(sc->sc_dev,
766 "unknown block type 0x%x in download map\n",
767 downloadMap->entry[i].block.type);
768 error = EIO; /* XXX */
778 /* contains Reset values for Context Store Registers */
779 static const struct {
781 uint32_t regResetVal;
782 } ixNpeDlEcsRegResetValues[] = {
783 { IX_NPEDL_ECS_BG_CTXT_REG_0, IX_NPEDL_ECS_BG_CTXT_REG_0_RESET },
784 { IX_NPEDL_ECS_BG_CTXT_REG_1, IX_NPEDL_ECS_BG_CTXT_REG_1_RESET },
785 { IX_NPEDL_ECS_BG_CTXT_REG_2, IX_NPEDL_ECS_BG_CTXT_REG_2_RESET },
786 { IX_NPEDL_ECS_PRI_1_CTXT_REG_0, IX_NPEDL_ECS_PRI_1_CTXT_REG_0_RESET },
787 { IX_NPEDL_ECS_PRI_1_CTXT_REG_1, IX_NPEDL_ECS_PRI_1_CTXT_REG_1_RESET },
788 { IX_NPEDL_ECS_PRI_1_CTXT_REG_2, IX_NPEDL_ECS_PRI_1_CTXT_REG_2_RESET },
789 { IX_NPEDL_ECS_PRI_2_CTXT_REG_0, IX_NPEDL_ECS_PRI_2_CTXT_REG_0_RESET },
790 { IX_NPEDL_ECS_PRI_2_CTXT_REG_1, IX_NPEDL_ECS_PRI_2_CTXT_REG_1_RESET },
791 { IX_NPEDL_ECS_PRI_2_CTXT_REG_2, IX_NPEDL_ECS_PRI_2_CTXT_REG_2_RESET },
792 { IX_NPEDL_ECS_DBG_CTXT_REG_0, IX_NPEDL_ECS_DBG_CTXT_REG_0_RESET },
793 { IX_NPEDL_ECS_DBG_CTXT_REG_1, IX_NPEDL_ECS_DBG_CTXT_REG_1_RESET },
794 { IX_NPEDL_ECS_DBG_CTXT_REG_2, IX_NPEDL_ECS_DBG_CTXT_REG_2_RESET },
795 { IX_NPEDL_ECS_INSTRUCT_REG, IX_NPEDL_ECS_INSTRUCT_REG_RESET }
798 /* contains Reset values for Context Store Registers */
799 static const uint32_t ixNpeDlCtxtRegResetValues[] = {
800 IX_NPEDL_CTXT_REG_RESET_STEVT,
801 IX_NPEDL_CTXT_REG_RESET_STARTPC,
802 IX_NPEDL_CTXT_REG_RESET_REGMAP,
803 IX_NPEDL_CTXT_REG_RESET_CINDEX,
806 #define IX_NPEDL_PARITY_BIT_MASK 0x3F00FFFF
807 #define IX_NPEDL_CONFIG_CTRL_REG_MASK 0x3F3FFFFF
811 * Reset the NPE and its coprocessor using the
812 * fuse bits in the feature control register.
817 uint32_t mask = EXP_FCTRL_NPEA << npeid;
820 v = ixp4xx_read_feature_bits();
821 ixp4xx_write_feature_bits(v &~ mask);
822 /* un-fuse and un-reset the NPE & coprocessor */
823 ixp4xx_write_feature_bits(v | mask);
/*
 * npe_cpu_reset: software reset of the NPE core.  Disables the parity
 * interrupt, drains the watch-point/out/in FIFOs (the inFIFO by
 * single-stepping an RD_FIFO instruction), resets the mailbox from
 * both sides, zeroes all physical registers and per-context store
 * registers, restores the Execution Context Stack registers to their
 * reset values, clears the profile counter and EXCT..AP3/WC regs,
 * performs the feature-control fuse reset, stops the NPE again and
 * restores the parity settings in the Control register.
 *
 * NOTE(review): this extract has elided lines (declarations of
 * regAddr/regVal/i/error, loop headers and if-statements are partly
 * missing); code left byte-identical.
 */
828 npe_cpu_reset(struct ixpnpe_softc *sc)
830 #define N(a)    (sizeof(a) / sizeof(a[0]))
831     uint32_t ctxtReg; /* identifies Context Store reg (0-3) */
834     uint32_t ixNpeConfigCtrlRegVal;
837     /* pre-store the NPE Config Control Register Value */
838     ixNpeConfigCtrlRegVal = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_CTL);
839     ixNpeConfigCtrlRegVal |= 0x3F000000;
841     /* disable the parity interrupt */
842     npe_reg_write(sc, IX_NPEDL_REG_OFFSET_CTL,
843         (ixNpeConfigCtrlRegVal & IX_NPEDL_PARITY_BIT_MASK));
844     DPRINTFn(2, sc->sc_dev, "%s: dis parity int, CTL => 0x%x\n",
845         __func__, ixNpeConfigCtrlRegVal & IX_NPEDL_PARITY_BIT_MASK);
847     npe_cpu_step_save(sc);
852     while (npe_checkbits(sc,
853           IX_NPEDL_REG_OFFSET_WFIFO, IX_NPEDL_MASK_WFIFO_VALID)) {
854         /* read from the Watch-point FIFO until empty */
855         (void) npe_reg_read(sc, IX_NPEDL_REG_OFFSET_WFIFO);
858     while (npe_checkbits(sc,
859           IX_NPEDL_REG_OFFSET_STAT, IX_NPEDL_MASK_STAT_OFNE)) {
860         /* read from the outFIFO until empty */
861         (void) npe_reg_read(sc, IX_NPEDL_REG_OFFSET_FIFO);
864     while (npe_checkbits(sc,
865           IX_NPEDL_REG_OFFSET_STAT, IX_NPEDL_MASK_STAT_IFNE)) {
867          * Step execution of the NPE instruction to read inFIFO using
868          * the Debug Executing Context stack.
870         error = npe_cpu_step(sc, IX_NPEDL_INSTR_RD_FIFO, 0, 0);
872             DPRINTF(sc->sc_dev, "%s: cannot step (1), error %u\n",
874             npe_cpu_step_restore(sc);
880      * Reset the mailbox reg
882     /* ...from XScale side */
883     npe_reg_write(sc, IX_NPEDL_REG_OFFSET_MBST, IX_NPEDL_REG_RESET_MBST);
884     /* ...from NPE side */
885     error = npe_cpu_step(sc, IX_NPEDL_INSTR_RESET_MBOX, 0, 0);
887         DPRINTF(sc->sc_dev, "%s: cannot step (2), error %u\n",
889         npe_cpu_step_restore(sc);
894      * Reset the physical registers in the NPE register file:
895      * Note: no need to save/restore REGMAP for Context 0 here
896      * since all Context Store regs are reset in subsequent code.
899          regAddr < IX_NPEDL_TOTAL_NUM_PHYS_REG && error == 0;
901         /* for each physical register in the NPE reg file, write 0 : */
902         error = npe_physical_reg_write(sc, regAddr, 0, TRUE);
904             DPRINTF(sc->sc_dev, "%s: cannot write phy reg,"
905                 "error %u\n", __func__, error);
906             npe_cpu_step_restore(sc);
907             return error;   /* abort reset */
912      * Reset the context store:
914     for (i = IX_NPEDL_CTXT_NUM_MIN; i <= IX_NPEDL_CTXT_NUM_MAX; i++) {
915         /* set each context's Context Store registers to reset values */
916         for (ctxtReg = 0; ctxtReg < IX_NPEDL_CTXT_REG_MAX; ctxtReg++) {
917             /* NOTE that there is no STEVT register for Context 0 */
918             if (i == 0 && ctxtReg == IX_NPEDL_CTXT_REG_STEVT)
920             regVal = ixNpeDlCtxtRegResetValues[ctxtReg];
921             error = npe_ctx_reg_write(sc, i, ctxtReg,
924                 DPRINTF(sc->sc_dev, "%s: cannot write ctx reg,"
925                     "error %u\n", __func__, error);
926                 npe_cpu_step_restore(sc);
927                 return error;   /* abort reset */
932     npe_cpu_step_restore(sc);
934     /* write Reset values to Execution Context Stack registers */
935     for (i = 0; i < N(ixNpeDlEcsRegResetValues); i++)
936         npe_ecs_reg_write(sc,
937             ixNpeDlEcsRegResetValues[i].regAddr,
938             ixNpeDlEcsRegResetValues[i].regResetVal);
940     /* clear the profile counter */
941     npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_CLR_PROFILE_CNT);
943     /* clear registers EXCT, AP0, AP1, AP2 and AP3 */
944     for (regAddr = IX_NPEDL_REG_OFFSET_EXCT;
945          regAddr <= IX_NPEDL_REG_OFFSET_AP3;
946          regAddr += sizeof(uint32_t))
947         npe_reg_write(sc, regAddr, 0);
949     /* Reset the Watch-count register */
950     npe_reg_write(sc, IX_NPEDL_REG_OFFSET_WC, 0);
953      * WR IXA00055043 - Remove IMEM Parity Introduced by NPE Reset Operation
954      * XXX Removed because it breaks IXP435 operation; e.g. on Gateworks
955      * XXX 2358 boards reseting NPE-A after NPE-C is running causes both
956      * XXX npe's to stop working
958     npe_reset(sc->sc_npeid);
961      * Call NpeMgr function to stop the NPE again after the Feature Control
962      * has unfused and Un-Reset the NPE and its associated Coprocessors.
964     error = npe_cpu_stop(sc);
966     /* restore NPE configuration bus Control Register - Parity Settings */
967     npe_reg_write(sc, IX_NPEDL_REG_OFFSET_CTL,
968         (ixNpeConfigCtrlRegVal & IX_NPEDL_CONFIG_CTRL_REG_MASK));
969     DPRINTFn(2, sc->sc_dev, "%s: restore CTL => 0x%x\n",
970         __func__, npe_reg_read(sc, IX_NPEDL_REG_OFFSET_CTL));
977 npe_cpu_start(struct ixpnpe_softc *sc)
982 * Ensure only Background Context Stack Level is Active by turning off
983 * the Active bit in each of the other Executing Context Stack levels.
985 ecsRegVal = npe_ecs_reg_read(sc, IX_NPEDL_ECS_PRI_1_CTXT_REG_0);
986 ecsRegVal &= ~IX_NPEDL_MASK_ECS_REG_0_ACTIVE;
987 npe_ecs_reg_write(sc, IX_NPEDL_ECS_PRI_1_CTXT_REG_0, ecsRegVal);
989 ecsRegVal = npe_ecs_reg_read(sc, IX_NPEDL_ECS_PRI_2_CTXT_REG_0);
990 ecsRegVal &= ~IX_NPEDL_MASK_ECS_REG_0_ACTIVE;
991 npe_ecs_reg_write(sc, IX_NPEDL_ECS_PRI_2_CTXT_REG_0, ecsRegVal);
993 ecsRegVal = npe_ecs_reg_read(sc, IX_NPEDL_ECS_DBG_CTXT_REG_0);
994 ecsRegVal &= ~IX_NPEDL_MASK_ECS_REG_0_ACTIVE;
995 npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_0, ecsRegVal);
997 /* clear the pipeline */
998 npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
1000 /* start NPE execution by issuing cmd through EXCTL register on NPE */
1001 npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_START);
1004 * Check execution status of NPE to verify operation was successful.
1006 return npe_checkbits(sc,
1007 IX_NPEDL_REG_OFFSET_EXCTL, IX_NPEDL_EXCTL_STATUS_RUN) ? 0 : EIO;
1011 npe_cpu_stop(struct ixpnpe_softc *sc)
1013 /* stop NPE execution by issuing cmd through EXCTL register on NPE */
1014 npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_STOP);
1016 /* verify that NPE Stop was successful */
1017 return npe_checkbits(sc,
1018 IX_NPEDL_REG_OFFSET_EXCTL, IX_NPEDL_EXCTL_STATUS_STOP) ? 0 : EIO;
1021 #define IX_NPEDL_REG_SIZE_BYTE 8
1022 #define IX_NPEDL_REG_SIZE_SHORT 16
1023 #define IX_NPEDL_REG_SIZE_WORD 32
1026 * Introduce extra read cycles after issuing read command to NPE
1027 * so that we read the register after the NPE has updated it
1028 * This is to overcome race condition between XScale and NPE
1030 #define IX_NPEDL_DELAY_READ_CYCLES 2
1032 * To mask top three MSBs of 32bit word to download into NPE IMEM
1034 #define IX_NPEDL_MASK_UNUSED_IMEM_BITS 0x1FFFFFFF;
1037 npe_cmd_issue_write(struct ixpnpe_softc *sc,
1038 uint32_t cmd, uint32_t addr, uint32_t data)
1040 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXDATA, data);
1041 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXAD, addr);
1042 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCTL, cmd);
1046 npe_cmd_issue_read(struct ixpnpe_softc *sc, uint32_t cmd, uint32_t addr)
1051 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXAD, addr);
1052 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCTL, cmd);
1053 for (i = 0; i <= IX_NPEDL_DELAY_READ_CYCLES; i++)
1054 data = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_EXDATA);
1059 npe_ins_write(struct ixpnpe_softc *sc, uint32_t addr, uint32_t data, int verify)
1061 DPRINTFn(4, sc->sc_dev, "%s(0x%x, 0x%x)\n", __func__, addr, data);
1062 npe_cmd_issue_write(sc, IX_NPEDL_EXCTL_CMD_WR_INS_MEM, addr, data);
1067 * Write invalid data to this reg, so we can see if we're
1068 * reading the EXDATA register too early.
1070 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXDATA, ~data);
1073 * Disabled since top 3 MSB are not used for Azusa
1074 * hardware Refer WR:IXA00053900
1076 data &= IX_NPEDL_MASK_UNUSED_IMEM_BITS;
1078 rdata = npe_cmd_issue_read(sc, IX_NPEDL_EXCTL_CMD_RD_INS_MEM,
1080 rdata &= IX_NPEDL_MASK_UNUSED_IMEM_BITS;
1089 npe_data_write(struct ixpnpe_softc *sc, uint32_t addr, uint32_t data, int verify)
1091 DPRINTFn(4, sc->sc_dev, "%s(0x%x, 0x%x)\n", __func__, addr, data);
1092 npe_cmd_issue_write(sc, IX_NPEDL_EXCTL_CMD_WR_DATA_MEM, addr, data);
1095 * Write invalid data to this reg, so we can see if we're
1096 * reading the EXDATA register too early.
1098 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXDATA, ~data);
1099 if (data != npe_cmd_issue_read(sc, IX_NPEDL_EXCTL_CMD_RD_DATA_MEM, addr))
1106 npe_ecs_reg_write(struct ixpnpe_softc *sc, uint32_t reg, uint32_t data)
1108 npe_cmd_issue_write(sc, IX_NPEDL_EXCTL_CMD_WR_ECS_REG, reg, data);
1112 npe_ecs_reg_read(struct ixpnpe_softc *sc, uint32_t reg)
1114 return npe_cmd_issue_read(sc, IX_NPEDL_EXCTL_CMD_RD_ECS_REG, reg);
1118 npe_issue_cmd(struct ixpnpe_softc *sc, uint32_t command)
1120 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCTL, command);
/*
 * Prepare the NPE for single-stepping: save then zero the Execution
 * Count register (which clears the halt bit), and save ECS debug
 * context register 2 before temporarily forcing the IF and IE bits on
 * so a step cannot loop forever.  Paired with npe_cpu_step_restore(),
 * which restores both saved values.
 */
1124 npe_cpu_step_save(struct ixpnpe_softc *sc)
1126 /* turn off the halt bit by clearing Execution Count register. */
1127 /* save reg contents 1st and restore later */
1128 sc->savedExecCount = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_EXCT);
1129 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCT, 0);
1131 /* ensure that IF and IE are on (temporarily), so that we don't end up
1132 * stepping forever */
1133 sc->savedEcsDbgCtxtReg2 = npe_ecs_reg_read(sc,
1134 IX_NPEDL_ECS_DBG_CTXT_REG_2);
1136 npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_2,
1137 (sc->savedEcsDbgCtxtReg2 | IX_NPEDL_MASK_ECS_DBG_REG_2_IF |
1138 IX_NPEDL_MASK_ECS_DBG_REG_2_IE));
/*
 * Execute a single NPE instruction under the Debug Execution Context
 * Stack: program the debug-level ECS registers (Active + LDUR, then
 * CCTXT/SELCTXT for `ctxtNum'), clear the pipeline, load the
 * instruction, issue a Step One command, and busy-wait for the Watch
 * Count register to advance, which indicates the instruction finished.
 * Returns 0 on success or EIO if it did not complete within
 * IX_NPE_DL_MAX_NUM_OF_RETRIES polls.
 */
1142 npe_cpu_step(struct ixpnpe_softc *sc, uint32_t npeInstruction,
1143 uint32_t ctxtNum, uint32_t ldur)
1145 #define IX_NPE_DL_MAX_NUM_OF_RETRIES 1000000
1146 uint32_t ecsDbgRegVal;
1147 uint32_t oldWatchcount, newWatchcount;
1150 /* set the Active bit, and the LDUR, in the debug level */
1151 ecsDbgRegVal = IX_NPEDL_MASK_ECS_REG_0_ACTIVE |
1152 (ldur << IX_NPEDL_OFFSET_ECS_REG_0_LDUR);
1154 npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_0, ecsDbgRegVal);
1157 * Set CCTXT at ECS DEBUG L3 to specify in which context to execute the
1158 * instruction, and set SELCTXT at ECS DEBUG Level to specify which
1159 * context store to access.
1160 * Debug ECS Level Reg 1 has form 0x000n000n, where n = context number
1162 ecsDbgRegVal = (ctxtNum << IX_NPEDL_OFFSET_ECS_REG_1_CCTXT) |
1163 (ctxtNum << IX_NPEDL_OFFSET_ECS_REG_1_SELCTXT);
1165 npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_1, ecsDbgRegVal);
1167 /* clear the pipeline */
1168 npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
1170 /* load NPE instruction into the instruction register */
1171 npe_ecs_reg_write(sc, IX_NPEDL_ECS_INSTRUCT_REG, npeInstruction);
1173 /* need this value later to wait for completion of NPE execution step */
1174 oldWatchcount = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_WC);
1176 /* issue a Step One command via the Execution Control register */
1177 npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_STEP);
1180 * Force the XScale to wait until the NPE has finished execution step
1181 * NOTE that this delay will be very small, just long enough to allow a
1182 * single NPE instruction to complete execution; if instruction
1183 * execution is not completed before timeout retries, exit the while
/* Poll until the Watch Count changes or we exhaust the retry budget. */
1186 newWatchcount = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_WC);
1187 for (tries = 0; tries < IX_NPE_DL_MAX_NUM_OF_RETRIES &&
1188 newWatchcount == oldWatchcount; tries++) {
1189 /* Watch Count register incr's when NPE completes an inst */
1190 newWatchcount = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_WC);
1192 return (tries < IX_NPE_DL_MAX_NUM_OF_RETRIES) ? 0 : EIO;
1193 #undef IX_NPE_DL_MAX_NUM_OF_RETRIES
/*
 * Undo npe_cpu_step_save(): deactivate the debug ECS level, clear the
 * pipeline, and restore the Execution Count and ECS debug context
 * register 2 values saved before stepping began.
 */
1197 npe_cpu_step_restore(struct ixpnpe_softc *sc)
1199 /* clear active bit in debug level */
1200 npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_0, 0);
1202 /* clear the pipeline */
1203 npe_issue_cmd(sc, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
1205 /* restore Execution Count register contents. */
1206 npe_reg_write(sc, IX_NPEDL_REG_OFFSET_EXCT, sc->savedExecCount);
1208 /* restore IF and IE bits to original values */
1209 npe_ecs_reg_write(sc, IX_NPEDL_ECS_DBG_CTXT_REG_2, sc->savedEcsDbgCtxtReg2);
/*
 * Read a logical NPE register of `regSize' (byte/short/word) at
 * `regAddr' in context `ctxtNum' into *regVal.  This works by
 * single-stepping a "move register to itself" instruction on the NPE
 * (via the debug ECS) and harvesting the value from EXDATA.
 * NOTE(review): the switch cases presumably also set `mask' for each
 * size; those assignments and the default case are elided here.
 */
1213 npe_logical_reg_read(struct ixpnpe_softc *sc,
1214 uint32_t regAddr, uint32_t regSize,
1215 uint32_t ctxtNum, uint32_t *regVal)
1217 uint32_t npeInstruction, mask;
/* Pick the NPE read opcode matching the requested operand size. */
1221 case IX_NPEDL_REG_SIZE_BYTE:
1222 npeInstruction = IX_NPEDL_INSTR_RD_REG_BYTE;
1225 case IX_NPEDL_REG_SIZE_SHORT:
1226 npeInstruction = IX_NPEDL_INSTR_RD_REG_SHORT;
1229 case IX_NPEDL_REG_SIZE_WORD:
1230 npeInstruction = IX_NPEDL_INSTR_RD_REG_WORD;
1237 /* make regAddr be the SRC and DEST operands (e.g. movX d0, d0) */
1238 npeInstruction |= (regAddr << IX_NPEDL_OFFSET_INSTR_SRC) |
1239 (regAddr << IX_NPEDL_OFFSET_INSTR_DEST);
1241 /* step execution of NPE inst using Debug Executing Context stack */
1242 error = npe_cpu_step(sc, npeInstruction, ctxtNum,
1243 IX_NPEDL_RD_INSTR_LDUR);
1245 DPRINTF(sc->sc_dev, "%s(0x%x, %u, %u), cannot step, error %d\n",
1246 __func__, regAddr, regSize, ctxtNum, error);
1249 /* read value of register from Execution Data register */
1250 *regVal = npe_reg_read(sc, IX_NPEDL_REG_OFFSET_EXDATA);
1252 /* align value from left to right */
1253 *regVal = (*regVal >> (IX_NPEDL_REG_SIZE_WORD - regSize)) & mask;
/*
 * Write a logical NPE register of `regSize' at `regAddr' in context
 * `ctxtNum'.  A word-sized write is decomposed into two recursive
 * half-word (short) writes, upper half first, since NPE register
 * addressing is left-to-right.  Byte/short writes are performed by
 * single-stepping an immediate-form write instruction via the debug
 * ECS.  When `verify' is set, the value is read back with
 * npe_logical_reg_read() and compared.
 */
1259 npe_logical_reg_write(struct ixpnpe_softc *sc, uint32_t regAddr, uint32_t regVal,
1260 uint32_t regSize, uint32_t ctxtNum, int verify)
1264 DPRINTFn(4, sc->sc_dev, "%s(0x%x, 0x%x, %u, %u)\n",
1265 __func__, regAddr, regVal, regSize, ctxtNum);
1266 if (regSize == IX_NPEDL_REG_SIZE_WORD) {
1268 * NPE register addressing is left-to-right: e.g. |d0|d1|d2|d3|
1269 * Write upper half-word (short) to |d0|d1|
1271 error = npe_logical_reg_write(sc, regAddr,
1272 regVal >> IX_NPEDL_REG_SIZE_SHORT,
1273 IX_NPEDL_REG_SIZE_SHORT, ctxtNum, verify);
1277 /* Write lower half-word (short) to |d2|d3| */
1278 error = npe_logical_reg_write(sc,
1279 regAddr + sizeof(uint16_t),
1281 IX_NPEDL_REG_SIZE_SHORT, ctxtNum, verify);
1283 uint32_t npeInstruction;
/* Pick the NPE write opcode matching the requested operand size. */
1286 case IX_NPEDL_REG_SIZE_BYTE:
1287 npeInstruction = IX_NPEDL_INSTR_WR_REG_BYTE;
1290 case IX_NPEDL_REG_SIZE_SHORT:
1291 npeInstruction = IX_NPEDL_INSTR_WR_REG_SHORT;
1297 /* fill dest operand field of inst with dest reg addr */
1298 npeInstruction |= (regAddr << IX_NPEDL_OFFSET_INSTR_DEST);
1300 /* fill src operand field of inst with least-sig 5 bits of val*/
1302 ((regVal & IX_NPEDL_MASK_IMMED_INSTR_SRC_DATA) <<
1303 IX_NPEDL_OFFSET_INSTR_SRC);
1305 /* fill coprocessor field of inst with most-sig 11 bits of val*/
1307 ((regVal & IX_NPEDL_MASK_IMMED_INSTR_COPROC_DATA) <<
1308 IX_NPEDL_DISPLACE_IMMED_INSTR_COPROC_DATA);
1310 /* step execution of NPE instruction using Debug ECS */
1311 error = npe_cpu_step(sc, npeInstruction,
1312 ctxtNum, IX_NPEDL_WR_INSTR_LDUR);
1315 DPRINTF(sc->sc_dev, "%s(0x%x, 0x%x, %u, %u), error %u "
1316 "writing reg\n", __func__, regAddr, regVal, regSize,
/* Optional verify: read back and compare against what was written. */
1323 error = npe_logical_reg_read(sc, regAddr, regSize, ctxtNum,
1325 if (error == 0 && regVal != retRegVal)
1326 error = EIO; /* XXX ambiguous */
1332 * There are 32 physical registers used in an NPE. These are
1333 * treated as 16 pairs of 32-bit registers. To write one of the pair,
1334 * write the pair number (0-15) to the REGMAP for Context 0. Then write
1335 * the value to register 0 or 4 in the regfile, depending on which
1336 * register of the pair is to be written
1339 npe_physical_reg_write(struct ixpnpe_softc *sc,
1340 uint32_t regAddr, uint32_t regValue, int verify)
1345 * Set REGMAP for context 0 to (regAddr >> 1) to choose which pair
1346 * (0-15) of physical registers to write.
1348 error = npe_logical_reg_write(sc, IX_NPEDL_CTXT_REG_ADDR_REGMAP,
1349 (regAddr >> IX_NPEDL_OFFSET_PHYS_REG_ADDR_REGMAP),
1350 IX_NPEDL_REG_SIZE_SHORT, 0, verify);
1352 /* regAddr = 0 or 4 */
1353 regAddr = (regAddr & IX_NPEDL_MASK_PHYS_REG_ADDR_LOGICAL_ADDR) *
1355 error = npe_logical_reg_write(sc, regAddr, regValue,
1356 IX_NPEDL_REG_SIZE_WORD, 0, verify);
/*
 * Write a per-context register (STEVT, STARTPC, REGMAP or CINDEX) of
 * context `ctxtNum'.  Context 0 has no STARTPC; writing STARTPC for
 * context 0 instead updates the NEXTPC field of the Background ECS
 * level-0 register, which sets where the NPE begins executing code.
 * All other cases go through npe_logical_reg_write() using a small
 * table mapping each context register to its address and size.
 */
1362 npe_ctx_reg_write(struct ixpnpe_softc *sc, uint32_t ctxtNum,
1363 uint32_t ctxtReg, uint32_t ctxtRegVal, int verify)
1365 DPRINTFn(4, sc->sc_dev, "%s(%u, %u, %u)\n",
1366 __func__, ctxtNum, ctxtReg, ctxtRegVal);
1368 * Context 0 has no STARTPC. Instead, this value is used to set
1369 * NextPC for Background ECS, to set where NPE starts executing code
1371 if (ctxtNum == 0 && ctxtReg == IX_NPEDL_CTXT_REG_STARTPC) {
1372 /* read BG_CTXT_REG_0, update NEXTPC bits, & write back to reg*/
1373 uint32_t v = npe_ecs_reg_read(sc, IX_NPEDL_ECS_BG_CTXT_REG_0);
1374 v &= ~IX_NPEDL_MASK_ECS_REG_0_NEXTPC;
1375 v |= (ctxtRegVal << IX_NPEDL_OFFSET_ECS_REG_0_NEXTPC) &
1376 IX_NPEDL_MASK_ECS_REG_0_NEXTPC;
1378 npe_ecs_reg_write(sc, IX_NPEDL_ECS_BG_CTXT_REG_0, v);
/* Lookup table: logical address and operand size per context register. */
1381 static const struct {
1382 uint32_t regAddress;
1384 } regAccInfo[IX_NPEDL_CTXT_REG_MAX] = {
1385 { IX_NPEDL_CTXT_REG_ADDR_STEVT,
1386 IX_NPEDL_REG_SIZE_BYTE },
1387 { IX_NPEDL_CTXT_REG_ADDR_STARTPC,
1388 IX_NPEDL_REG_SIZE_SHORT },
1389 { IX_NPEDL_CTXT_REG_ADDR_REGMAP,
1390 IX_NPEDL_REG_SIZE_SHORT },
1391 { IX_NPEDL_CTXT_REG_ADDR_CINDEX,
1392 IX_NPEDL_REG_SIZE_BYTE }
1394 return npe_logical_reg_write(sc, regAccInfo[ctxtReg].regAddress,
1395 ctxtRegVal, regAccInfo[ctxtReg].regSize, ctxtNum, verify);
1400 * NPE Mailbox support.
1402 #define IX_NPEMH_MAXTRIES 100000
/*
 * Spin (up to IX_NPEMH_MAXTRIES polls) until the NPE output FIFO is
 * non-empty (OFNE status bit set).  On timeout, log the last status.
 * NOTE(review): the success/timeout return statements are elided in
 * this listing; callers treat non-zero as "data available".
 */
1405 ofifo_wait(struct ixpnpe_softc *sc)
1409 for (i = 0; i < IX_NPEMH_MAXTRIES; i++) {
1410 if (npe_reg_read(sc, IX_NPESTAT) & IX_NPESTAT_OFNE)
1414 device_printf(sc->sc_dev, "%s: timeout, last status 0x%x\n",
1415 __func__, npe_reg_read(sc, IX_NPESTAT));
/*
 * Read a two-word message from the NPE output FIFO into msg[0..1].
 * Each word is preceded by a wait for the FIFO to become non-empty;
 * the error path taken when ofifo_wait() times out is elided here.
 * Caller must hold sc_mtx.
 */
1420 getmsg(struct ixpnpe_softc *sc, uint32_t msg[2])
1422 mtx_assert(&sc->sc_mtx, MA_OWNED);
1424 if (!ofifo_wait(sc))
1426 msg[0] = npe_reg_read(sc, IX_NPEFIFO);
1427 DPRINTF(sc->sc_dev, "%s: msg0 0x%x\n", __func__, msg[0]);
1428 if (!ofifo_wait(sc))
1430 msg[1] = npe_reg_read(sc, IX_NPEFIFO);
1431 DPRINTF(sc->sc_dev, "%s: msg1 0x%x\n", __func__, msg[1]);
/*
 * NPE mailbox interrupt handler.  Verifies the output-FIFO interrupt
 * bit (OFINT) is set, then drains the pending two-word message into
 * sc->sc_msg and marks sc_msgwaiting so a later receive call can pick
 * it up.  Draining the FIFO is what silences the interrupt.
 */
1436 ixpnpe_intr(void *arg)
1438 struct ixpnpe_softc *sc = arg;
1441 mtx_lock(&sc->sc_mtx);
1442 status = npe_reg_read(sc, IX_NPESTAT);
1443 DPRINTF(sc->sc_dev, "%s: status 0x%x\n", __func__, status);
1444 if ((status & IX_NPESTAT_OFINT) == 0) {
1445 /* NB: should not happen */
1446 device_printf(sc->sc_dev, "%s: status 0x%x\n",
1448 /* XXX must silence interrupt? */
1449 mtx_unlock(&sc->sc_mtx);
1453 * A message is waiting in the output FIFO, copy it so
1454 * the interrupt will be silenced.
1456 if (getmsg(sc, sc->sc_msg) == 0)
1457 sc->sc_msgwaiting = 1;
1458 mtx_unlock(&sc->sc_mtx);
/*
 * Spin (up to IX_NPEMH_MAXTRIES polls) until the NPE input FIFO is
 * not full (IFNF status bit set), i.e. it can accept another word.
 * On timeout, log the last status.  NOTE(review): the success/timeout
 * return statements are elided in this listing.
 */
1462 ififo_wait(struct ixpnpe_softc *sc)
1466 for (i = 0; i < IX_NPEMH_MAXTRIES; i++) {
1467 if (npe_reg_read(sc, IX_NPESTAT) & IX_NPESTAT_IFNF)
1471 device_printf(sc->sc_dev, "%s: timeout, last status 0x%x\n",
1472 __func__, npe_reg_read(sc, IX_NPESTAT));
/*
 * Write a two-word message to the NPE input FIFO, waiting before each
 * word for the FIFO to have space; the error path taken when
 * ififo_wait() times out is elided here.  Caller must hold sc_mtx.
 */
1477 putmsg(struct ixpnpe_softc *sc, const uint32_t msg[2])
1479 mtx_assert(&sc->sc_mtx, MA_OWNED);
1481 DPRINTF(sc->sc_dev, "%s: msg 0x%x:0x%x\n", __func__, msg[0], msg[1]);
1482 if (!ififo_wait(sc))
1484 npe_reg_write(sc, IX_NPEFIFO, msg[0]);
1485 if (!ififo_wait(sc))
1487 npe_reg_write(sc, IX_NPEFIFO, msg[1]);
1493 * Send a msg to the NPE and wait for a reply. We spin as
1494 * we may be called early with interrupts not properly setup.
1497 ixpnpe_sendandrecvmsg_sync(struct ixpnpe_softc *sc,
1498 const uint32_t send[2], uint32_t recv[2])
/* Hold the mailbox mutex across the send+receive so they pair up. */
1502 mtx_lock(&sc->sc_mtx);
1503 error = putmsg(sc, send);
/* NOTE(review): getmsg presumably runs only if putmsg succeeded
 * (the intervening condition is elided in this listing). */
1505 error = getmsg(sc, recv);
1506 mtx_unlock(&sc->sc_mtx);
1512 * Send a msg to the NPE w/o waiting for a reply.
1515 ixpnpe_sendmsg_async(struct ixpnpe_softc *sc, const uint32_t msg[2])
/* Serialize FIFO access with the mailbox mutex; putmsg requires it. */
1519 mtx_lock(&sc->sc_mtx);
1520 error = putmsg(sc, msg);
1521 mtx_unlock(&sc->sc_mtx);
/*
 * Hand back a message previously captured by the interrupt handler
 * (sc_msg / sc_msgwaiting), if any, clearing the waiting flag.
 * Caller must hold sc_mtx.  The no-message return value (EAGAIN,
 * per the callers below) is elided in this listing.
 */
1527 recvmsg_locked(struct ixpnpe_softc *sc, uint32_t msg[2])
1529 mtx_assert(&sc->sc_mtx, MA_OWNED);
1531 DPRINTF(sc->sc_dev, "%s: msgwaiting %d\n", __func__, sc->sc_msgwaiting);
1532 if (sc->sc_msgwaiting) {
1533 msg[0] = sc->sc_msg[0];
1534 msg[1] = sc->sc_msg[1];
1535 sc->sc_msgwaiting = 0;
1542 * Receive any msg previously received from the NPE. If nothing
1543 * is available we return EAGAIN and the caller is required to
1544 * do a synchronous receive or try again later.
1547 ixpnpe_recvmsg_async(struct ixpnpe_softc *sc, uint32_t msg[2])
/* Non-blocking: only returns a message already captured by the ISR. */
1551 mtx_lock(&sc->sc_mtx);
1552 error = recvmsg_locked(sc, msg);
1553 mtx_unlock(&sc->sc_mtx);
1559 * Receive a msg from the NPE. If one was received asynchronously
1560 * then it's returned; otherwise we poll synchronously.
1563 ixpnpe_recvmsg_sync(struct ixpnpe_softc *sc, uint32_t msg[2])
1567 mtx_lock(&sc->sc_mtx);
1568 error = recvmsg_locked(sc, msg);
1569 if (error == EAGAIN)
1570 error = getmsg(sc, msg);
1571 mtx_unlock(&sc->sc_mtx);