/*-
 * Copyright (c) 2015 Landon Fuller <landon@landonf.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
35 #include <sys/kernel.h>
36 #include <sys/limits.h>
37 #include <sys/systm.h>
39 #include <machine/bus.h>
40 #include <machine/resource.h>
42 #include <dev/bhnd/cores/chipc/chipcreg.h>
44 #include "bcma_eromreg.h"
45 #include "bcma_eromvar.h"
/**
 * BCMA Enumeration ROM (EROM) Table
 *
 * Provides auto-discovery of BCMA cores on Broadcom's HND SoC.
 *
 * The EROM core address can be found at BCMA_CC_EROM_ADDR within the
 * ChipCommon registers. The table itself is comprised of 32-bit
 * type-tagged entries, organized into an array of variable-length
 * core descriptor records.
 *
 * The final core descriptor is followed by a 32-bit BCMA_EROM_TABLE_EOF (0xF)
 * marker.
 */
63 static const char *bcma_erom_entry_type_name (uint8_t entry);
65 static uint32_t bcma_eio_read4(struct bcma_erom_io *io,
68 static int bcma_erom_read32(struct bcma_erom *erom,
70 static int bcma_erom_skip32(struct bcma_erom *erom);
72 static int bcma_erom_skip_core(struct bcma_erom *erom);
73 static int bcma_erom_skip_mport(struct bcma_erom *erom);
74 static int bcma_erom_skip_sport_region(struct bcma_erom *erom);
76 static int bcma_erom_seek_next(struct bcma_erom *erom,
78 static int bcma_erom_region_to_port_type(struct bcma_erom *erom,
79 uint8_t region_type, bhnd_port_type *port_type);
82 static int bcma_erom_peek32(struct bcma_erom *erom,
85 static bus_size_t bcma_erom_tell(struct bcma_erom *erom);
86 static void bcma_erom_seek(struct bcma_erom *erom,
88 static void bcma_erom_reset(struct bcma_erom *erom);
90 static int bcma_erom_seek_matching_core(struct bcma_erom *sc,
91 const struct bhnd_core_match *desc,
92 struct bhnd_core_info *core);
94 static int bcma_erom_parse_core(struct bcma_erom *erom,
95 struct bcma_erom_core *core);
97 static int bcma_erom_parse_mport(struct bcma_erom *erom,
98 struct bcma_erom_mport *mport);
100 static int bcma_erom_parse_sport_region(struct bcma_erom *erom,
101 struct bcma_erom_sport_region *region);
103 static void bcma_erom_to_core_info(const struct bcma_erom_core *core,
104 u_int core_idx, int core_unit,
105 struct bhnd_core_info *info);
108 * BCMA EROM generic I/O context
110 struct bcma_erom_io {
111 struct bhnd_resource *res; /**< memory resource, or NULL if initialized
112 with bus space tag and handle */
113 int rid; /**< memory resource id, or -1 */
115 bus_space_tag_t bst; /**< bus space tag, if any */
116 bus_space_handle_t bsh; /**< bus space handle, if any */
118 bus_size_t start; /**< base read offset */
122 * BCMA EROM per-instance state.
125 struct bhnd_erom obj;
126 device_t dev; /**< parent device, or NULL if none. */
127 struct bcma_erom_io io; /**< I/O context */
128 bus_size_t offset; /**< current read offset */
/* Log a message prefixed with the current EROM table read offset; uses
 * device_printf() when a parent device is available, plain printf otherwise. */
#define	EROM_LOG(erom, fmt, ...)	do {				\
	if (erom->dev != NULL) {					\
		device_printf(erom->dev, "erom[0x%llx]: " fmt,		\
		    (unsigned long long) (erom->offset), ##__VA_ARGS__);\
	} else {							\
		printf("erom[0x%llx]: " fmt,				\
		    (unsigned long long) (erom->offset), ##__VA_ARGS__);\
	}								\
} while(0)
141 /** Return the type name for an EROM entry */
143 bcma_erom_entry_type_name (uint8_t entry)
145 switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
146 case BCMA_EROM_ENTRY_TYPE_CORE:
148 case BCMA_EROM_ENTRY_TYPE_MPORT:
150 case BCMA_EROM_ENTRY_TYPE_REGION:
159 * Read a 32-bit value from an EROM I/O context.
161 * @param io EROM I/O context.
162 * @param offset Read offset.
165 bcma_eio_read4(struct bcma_erom_io *io, bus_size_t offset)
169 read_off = io->start + offset;
171 return (bhnd_bus_read_4(io->res, read_off));
173 return (bus_space_read_4(io->bst, io->bsh, read_off));
176 /* Initialize bcma_erom resource I/O context */
178 bcma_eio_init(struct bcma_erom_io *io, struct bhnd_resource *res, int rid,
186 /* Initialize bcma_erom bus space I/O context */
188 bcma_eio_init_static(struct bcma_erom_io *io, bus_space_tag_t bst,
189 bus_space_handle_t bsh, bus_size_t offset)
198 /* BCMA implementation of BHND_EROM_INIT() */
200 bcma_erom_init(bhnd_erom_t *erom, const struct bhnd_chipid *cid,
201 device_t parent, int rid)
203 struct bcma_erom *sc;
204 struct bhnd_resource *res;
206 sc = (struct bcma_erom *)erom;
210 res = bhnd_alloc_resource(parent, SYS_RES_MEMORY, &rid, cid->enum_addr,
211 cid->enum_addr + BCMA_EROM_TABLE_SIZE - 1, BCMA_EROM_TABLE_SIZE,
212 RF_ACTIVE|RF_SHAREABLE);
217 bcma_eio_init(&sc->io, res, rid, BCMA_EROM_TABLE_START);
222 /* BCMA implementation of BHND_EROM_INIT_STATIC() */
224 bcma_erom_init_static(bhnd_erom_t *erom, const struct bhnd_chipid *cid,
225 bus_space_tag_t bst, bus_space_handle_t bsh)
227 struct bcma_erom *sc;
229 sc = (struct bcma_erom *)erom;
233 bcma_eio_init_static(&sc->io, bst, bsh, BCMA_EROM_TABLE_START);
238 /* Common implementation of BHND_EROM_PROBE/BHND_EROM_PROBE_STATIC */
240 bcma_erom_probe_common(struct bcma_erom_io *io, const struct bhnd_chipid *hint,
241 struct bhnd_chipid *cid)
243 uint32_t idreg, eromptr;
245 /* Hints aren't supported; all BCMA devices have a ChipCommon
250 /* Confirm CHIPC_EROMPTR availability */
251 idreg = bcma_eio_read4(io, CHIPC_ID);
252 if (!BHND_CHIPTYPE_HAS_EROM(CHIPC_GET_BITS(idreg, CHIPC_ID_BUS)))
255 /* Fetch EROM address */
256 eromptr = bcma_eio_read4(io, CHIPC_EROMPTR);
258 /* Parse chip identifier */
259 *cid = bhnd_parse_chipid(idreg, eromptr);
261 /* Verify chip type */
262 switch (cid->chip_type) {
263 case BHND_CHIPTYPE_BCMA:
264 return (BUS_PROBE_DEFAULT);
266 case BHND_CHIPTYPE_BCMA_ALT:
267 case BHND_CHIPTYPE_UBUS:
268 return (BUS_PROBE_GENERIC);
276 bcma_erom_probe(bhnd_erom_class_t *cls, struct bhnd_resource *res,
277 bus_size_t offset, const struct bhnd_chipid *hint, struct bhnd_chipid *cid)
279 struct bcma_erom_io io;
281 bcma_eio_init(&io, res, rman_get_rid(res->res),
282 offset + BCMA_EROM_TABLE_START);
284 return (bcma_erom_probe_common(&io, hint, cid));
288 bcma_erom_probe_static(bhnd_erom_class_t *cls, bus_space_tag_t bst,
289 bus_space_handle_t bsh, bus_addr_t paddr, const struct bhnd_chipid *hint,
290 struct bhnd_chipid *cid)
292 struct bcma_erom_io io;
294 bcma_eio_init_static(&io, bst, bsh, BCMA_EROM_TABLE_START);
295 return (bcma_erom_probe_common(&io, hint, cid));
300 bcma_erom_fini(bhnd_erom_t *erom)
302 struct bcma_erom *sc = (struct bcma_erom *)erom;
304 if (sc->io.res != NULL) {
305 bhnd_release_resource(sc->dev, SYS_RES_MEMORY, sc->io.rid,
314 bcma_erom_lookup_core(bhnd_erom_t *erom, const struct bhnd_core_match *desc,
315 struct bhnd_core_info *core)
317 struct bcma_erom *sc = (struct bcma_erom *)erom;
319 /* Search for the first matching core */
320 return (bcma_erom_seek_matching_core(sc, desc, core));
324 bcma_erom_lookup_core_addr(bhnd_erom_t *erom, const struct bhnd_core_match *desc,
325 bhnd_port_type port_type, u_int port_num, u_int region_num,
326 struct bhnd_core_info *core, bhnd_addr_t *addr, bhnd_size_t *size)
328 struct bcma_erom *sc;
329 struct bcma_erom_core ec;
331 uint8_t region_port, region_type;
335 sc = (struct bcma_erom *)erom;
337 /* Seek to the first matching core and provide the core info
339 if ((error = bcma_erom_seek_matching_core(sc, desc, core)))
342 if ((error = bcma_erom_parse_core(sc, &ec)))
345 /* Skip master ports */
346 for (u_long i = 0; i < ec.num_mport; i++) {
347 if ((error = bcma_erom_skip_mport(sc)))
351 /* Seek to the region block for the given port type */
354 bhnd_port_type p_type;
357 if ((error = bcma_erom_peek32(sc, &entry)))
360 if (!BCMA_EROM_ENTRY_IS(entry, REGION))
363 /* Expected region type? */
364 r_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
365 error = bcma_erom_region_to_port_type(sc, r_type, &p_type);
369 if (p_type == port_type) {
374 /* Skip to next entry */
375 if ((error = bcma_erom_skip_sport_region(sc)))
382 /* Found the appropriate port type block; now find the region records
383 * for the given port number */
385 for (u_int i = 0; i <= port_num; i++) {
386 bhnd_port_type p_type;
388 if ((error = bcma_erom_peek32(sc, &entry)))
391 if (!BCMA_EROM_ENTRY_IS(entry, REGION))
394 /* Fetch the type/port of the first region entry */
395 region_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
396 region_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
398 /* Have we found the region entries for the desired port? */
400 error = bcma_erom_region_to_port_type(sc, region_type,
405 if (p_type == port_type)
411 /* Otherwise, seek to next block of region records */
413 uint8_t next_type, next_port;
415 if ((error = bcma_erom_skip_sport_region(sc)))
418 if ((error = bcma_erom_peek32(sc, &entry)))
421 if (!BCMA_EROM_ENTRY_IS(entry, REGION))
424 next_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
425 next_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
427 if (next_type != region_type ||
428 next_port != region_port)
436 /* Finally, search for the requested region number */
437 for (u_int i = 0; i <= region_num; i++) {
438 struct bcma_erom_sport_region region;
439 uint8_t next_port, next_type;
441 if ((error = bcma_erom_peek32(sc, &entry)))
444 if (!BCMA_EROM_ENTRY_IS(entry, REGION))
447 /* Check for the end of the region block */
448 next_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
449 next_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
451 if (next_type != region_type ||
452 next_port != region_port)
455 /* Parse the region */
456 if ((error = bcma_erom_parse_sport_region(sc, ®ion)))
459 /* Is this our target region_num? */
460 if (i == region_num) {
462 *addr = region.base_addr;
473 bcma_erom_get_core_table(bhnd_erom_t *erom, struct bhnd_core_info **cores,
476 struct bcma_erom *sc;
477 struct bhnd_core_info *buffer;
478 bus_size_t initial_offset;
482 sc = (struct bcma_erom *)erom;
485 initial_offset = bcma_erom_tell(sc);
487 /* Determine the core count */
489 for (count = 0, error = 0; !error; count++) {
490 struct bcma_erom_core core;
492 /* Seek to the first readable core entry */
493 error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
499 /* Read past the core descriptor */
500 if ((error = bcma_erom_parse_core(sc, &core)))
504 /* Allocate our output buffer */
505 buffer = malloc(sizeof(struct bhnd_core_info) * count, M_BHND,
507 if (buffer == NULL) {
512 /* Parse all core descriptors */
514 for (u_int i = 0; i < count; i++) {
515 struct bcma_erom_core core;
519 error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
523 error = bcma_erom_parse_core(sc, &core);
527 /* Determine the unit number */
529 for (u_int j = 0; j < i; j++) {
530 if (buffer[i].vendor == buffer[j].vendor &&
531 buffer[i].device == buffer[j].device)
535 /* Convert to a bhnd info record */
536 bcma_erom_to_core_info(&core, i, unit, &buffer[i]);
545 free(buffer, M_BHND);
548 /* Restore the initial position */
549 bcma_erom_seek(sc, initial_offset);
554 bcma_erom_free_core_table(bhnd_erom_t *erom, struct bhnd_core_info *cores)
560 * Return the current read position.
563 bcma_erom_tell(struct bcma_erom *erom)
565 return (erom->offset);
569 * Seek to an absolute read position.
572 bcma_erom_seek(struct bcma_erom *erom, bus_size_t offset)
574 erom->offset = offset;
578 * Read a 32-bit entry value from the EROM table without advancing the
581 * @param erom EROM read state.
582 * @param entry Will contain the read result on success.
584 * @retval ENOENT The end of the EROM table was reached.
585 * @retval non-zero The read could not be completed.
588 bcma_erom_peek32(struct bcma_erom *erom, uint32_t *entry)
590 if (erom->offset >= (BCMA_EROM_TABLE_SIZE - sizeof(uint32_t))) {
591 EROM_LOG(erom, "BCMA EROM table missing terminating EOF\n");
595 *entry = bcma_eio_read4(&erom->io, erom->offset);
600 * Read a 32-bit entry value from the EROM table.
602 * @param erom EROM read state.
603 * @param entry Will contain the read result on success.
605 * @retval ENOENT The end of the EROM table was reached.
606 * @retval non-zero The read could not be completed.
609 bcma_erom_read32(struct bcma_erom *erom, uint32_t *entry)
613 if ((error = bcma_erom_peek32(erom, entry)) == 0)
620 * Read and discard 32-bit entry value from the EROM table.
622 * @param erom EROM read state.
624 * @retval ENOENT The end of the EROM table was reached.
625 * @retval non-zero The read could not be completed.
628 bcma_erom_skip32(struct bcma_erom *erom)
632 return bcma_erom_read32(erom, &entry);
636 * Read and discard a core descriptor from the EROM table.
638 * @param erom EROM read state.
640 * @retval ENOENT The end of the EROM table was reached.
641 * @retval non-zero The read could not be completed.
644 bcma_erom_skip_core(struct bcma_erom *erom)
646 struct bcma_erom_core core;
647 return (bcma_erom_parse_core(erom, &core));
651 * Read and discard a master port descriptor from the EROM table.
653 * @param erom EROM read state.
655 * @retval ENOENT The end of the EROM table was reached.
656 * @retval non-zero The read could not be completed.
659 bcma_erom_skip_mport(struct bcma_erom *erom)
661 struct bcma_erom_mport mp;
662 return (bcma_erom_parse_mport(erom, &mp));
666 * Read and discard a port region descriptor from the EROM table.
668 * @param erom EROM read state.
670 * @retval ENOENT The end of the EROM table was reached.
671 * @retval non-zero The read could not be completed.
674 bcma_erom_skip_sport_region(struct bcma_erom *erom)
676 struct bcma_erom_sport_region r;
677 return (bcma_erom_parse_sport_region(erom, &r));
681 * Seek to the next entry matching the given EROM entry type.
683 * @param erom EROM read state.
684 * @param etype One of BCMA_EROM_ENTRY_TYPE_CORE,
685 * BCMA_EROM_ENTRY_TYPE_MPORT, or BCMA_EROM_ENTRY_TYPE_REGION.
687 * @retval ENOENT The end of the EROM table was reached.
688 * @retval non-zero Reading or parsing the descriptor failed.
691 bcma_erom_seek_next(struct bcma_erom *erom, uint8_t etype)
696 /* Iterate until we hit an entry matching the requested type. */
697 while (!(error = bcma_erom_peek32(erom, &entry))) {
699 if (entry == BCMA_EROM_TABLE_EOF)
703 if (!BCMA_EROM_GET_ATTR(entry, ENTRY_ISVALID))
706 /* Entry type matches? */
707 if (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE) == etype)
710 /* Skip non-matching entry types. */
711 switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
712 case BCMA_EROM_ENTRY_TYPE_CORE:
713 if ((error = bcma_erom_skip_core(erom)))
718 case BCMA_EROM_ENTRY_TYPE_MPORT:
719 if ((error = bcma_erom_skip_mport(erom)))
724 case BCMA_EROM_ENTRY_TYPE_REGION:
725 if ((error = bcma_erom_skip_sport_region(erom)))
730 /* Unknown entry type! */
739 * Return the read position to the start of the EROM table.
741 * @param erom EROM read state.
744 bcma_erom_reset(struct bcma_erom *erom)
750 * Seek to the first core entry matching @p desc.
752 * @param erom EROM read state.
753 * @param desc The core match descriptor.
754 * @param[out] core On success, the matching core info. If the core info
755 * is not desired, a NULL pointer may be provided.
757 * @retval ENOENT The end of the EROM table was reached before @p index was
759 * @retval non-zero Reading or parsing failed.
762 bcma_erom_seek_matching_core(struct bcma_erom *sc,
763 const struct bhnd_core_match *desc, struct bhnd_core_info *core)
765 struct bhnd_core_match imatch;
766 bus_size_t core_offset, next_offset;
769 /* Seek to table start. */
772 /* We can't determine a core's unit number during the initial scan. */
774 imatch.m.match.core_unit = 0;
776 /* Locate the first matching core */
777 for (u_int i = 0; i < UINT_MAX; i++) {
778 struct bcma_erom_core ec;
779 struct bhnd_core_info ci;
781 /* Seek to the next core */
782 error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
786 /* Save the core offset */
787 core_offset = bcma_erom_tell(sc);
790 if ((error = bcma_erom_parse_core(sc, &ec)))
793 bcma_erom_to_core_info(&ec, i, 0, &ci);
795 /* Check for initial match */
796 if (!bhnd_core_matches(&ci, &imatch))
799 /* Re-scan preceding cores to determine the unit number. */
800 next_offset = bcma_erom_tell(sc);
802 for (u_int j = 0; j < i; j++) {
804 error = bcma_erom_seek_next(sc,
805 BCMA_EROM_ENTRY_TYPE_CORE);
809 if ((error = bcma_erom_parse_core(sc, &ec)))
812 /* Bump the unit number? */
813 if (ec.vendor == ci.vendor && ec.device == ci.device)
817 /* Check for full match against now-valid unit number */
818 if (!bhnd_core_matches(&ci, desc)) {
819 /* Reposition to allow reading the next core */
820 bcma_erom_seek(sc, next_offset);
824 /* Found; seek to the core's initial offset and provide
825 * the core info to the caller */
826 bcma_erom_seek(sc, core_offset);
833 /* Not found, or a parse error occured */
838 * Read the next core descriptor from the EROM table.
840 * @param erom EROM read state.
841 * @param[out] core On success, will be populated with the parsed core
844 * @retval ENOENT The end of the EROM table was reached.
845 * @retval non-zero Reading or parsing the core descriptor failed.
848 bcma_erom_parse_core(struct bcma_erom *erom, struct bcma_erom_core *core)
853 /* Parse CoreDescA */
854 if ((error = bcma_erom_read32(erom, &entry)))
858 if (entry == BCMA_EROM_TABLE_EOF)
861 if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
862 EROM_LOG(erom, "Unexpected EROM entry 0x%x (type=%s)\n",
863 entry, bcma_erom_entry_type_name(entry));
868 core->vendor = BCMA_EROM_GET_ATTR(entry, COREA_DESIGNER);
869 core->device = BCMA_EROM_GET_ATTR(entry, COREA_ID);
871 /* Parse CoreDescB */
872 if ((error = bcma_erom_read32(erom, &entry)))
875 if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
879 core->rev = BCMA_EROM_GET_ATTR(entry, COREB_REV);
880 core->num_mport = BCMA_EROM_GET_ATTR(entry, COREB_NUM_MP);
881 core->num_dport = BCMA_EROM_GET_ATTR(entry, COREB_NUM_DP);
882 core->num_mwrap = BCMA_EROM_GET_ATTR(entry, COREB_NUM_WMP);
883 core->num_swrap = BCMA_EROM_GET_ATTR(entry, COREB_NUM_WSP);
889 * Read the next master port descriptor from the EROM table.
891 * @param erom EROM read state.
892 * @param[out] mport On success, will be populated with the parsed
895 * @retval non-zero Reading or parsing the descriptor failed.
898 bcma_erom_parse_mport(struct bcma_erom *erom, struct bcma_erom_mport *mport)
903 /* Parse the master port descriptor */
904 if ((error = bcma_erom_read32(erom, &entry)))
907 if (!BCMA_EROM_ENTRY_IS(entry, MPORT))
910 mport->port_vid = BCMA_EROM_GET_ATTR(entry, MPORT_ID);
911 mport->port_num = BCMA_EROM_GET_ATTR(entry, MPORT_NUM);
917 * Read the next slave port region descriptor from the EROM table.
919 * @param erom EROM read state.
920 * @param[out] mport On success, will be populated with the parsed
923 * @retval ENOENT The end of the region descriptor table was reached.
924 * @retval non-zero Reading or parsing the descriptor failed.
927 bcma_erom_parse_sport_region(struct bcma_erom *erom,
928 struct bcma_erom_sport_region *region)
934 /* Peek at the region descriptor */
935 if (bcma_erom_peek32(erom, &entry))
938 /* A non-region entry signals the end of the region table */
939 if (!BCMA_EROM_ENTRY_IS(entry, REGION)) {
942 bcma_erom_skip32(erom);
945 region->base_addr = BCMA_EROM_GET_ATTR(entry, REGION_BASE);
946 region->region_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
947 region->region_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
948 size_type = BCMA_EROM_GET_ATTR(entry, REGION_SIZE);
950 /* If region address is 64-bit, fetch the high bits. */
951 if (BCMA_EROM_GET_ATTR(entry, REGION_64BIT)) {
952 if ((error = bcma_erom_read32(erom, &entry)))
955 region->base_addr |= ((bhnd_addr_t) entry << 32);
958 /* Parse the region size; it's either encoded as the binary logarithm
959 * of the number of 4K pages (i.e. log2 n), or its encoded as a
960 * 32-bit/64-bit literal value directly following the current entry. */
961 if (size_type == BCMA_EROM_REGION_SIZE_OTHER) {
962 if ((error = bcma_erom_read32(erom, &entry)))
965 region->size = BCMA_EROM_GET_ATTR(entry, RSIZE_VAL);
967 if (BCMA_EROM_GET_ATTR(entry, RSIZE_64BIT)) {
968 if ((error = bcma_erom_read32(erom, &entry)))
970 region->size |= ((bhnd_size_t) entry << 32);
973 region->size = BCMA_EROM_REGION_SIZE_BASE << size_type;
976 /* Verify that addr+size does not overflow. */
977 if (region->size != 0 &&
978 BHND_ADDR_MAX - (region->size - 1) < region->base_addr)
980 EROM_LOG(erom, "%s%u: invalid address map %llx:%llx\n",
981 bcma_erom_entry_type_name(region->region_type),
983 (unsigned long long) region->base_addr,
984 (unsigned long long) region->size);
993 * Convert a bcma_erom_core record to its bhnd_core_info representation.
995 * @param core EROM core record to convert.
996 * @param core_idx The core index of @p core.
997 * @param core_unit The core unit of @p core.
998 * @param[out] info The populated bhnd_core_info representation.
1001 bcma_erom_to_core_info(const struct bcma_erom_core *core, u_int core_idx,
1002 int core_unit, struct bhnd_core_info *info)
1004 info->vendor = core->vendor;
1005 info->device = core->device;
1006 info->hwrev = core->rev;
1007 info->core_idx = core_idx;
1008 info->unit = core_unit;
1012 * Map an EROM region type to its corresponding port type.
1014 * @param region_type Region type value.
1015 * @param[out] port_type On success, the corresponding port type.
1018 bcma_erom_region_to_port_type(struct bcma_erom *erom, uint8_t region_type,
1019 bhnd_port_type *port_type)
1021 switch (region_type) {
1022 case BCMA_EROM_REGION_TYPE_DEVICE:
1023 *port_type = BHND_PORT_DEVICE;
1025 case BCMA_EROM_REGION_TYPE_BRIDGE:
1026 *port_type = BHND_PORT_BRIDGE;
1028 case BCMA_EROM_REGION_TYPE_MWRAP:
1029 case BCMA_EROM_REGION_TYPE_SWRAP:
1030 *port_type = BHND_PORT_AGENT;
1033 EROM_LOG(erom, "unsupported region type %hhx\n",
1040 * Register all MMIO region descriptors for the given slave port.
1042 * @param erom EROM read state.
1043 * @param corecfg Core info to be populated with the scanned port regions.
1044 * @param port_num Port index for which regions will be parsed.
1045 * @param region_type The region type to be parsed.
1046 * @param[out] offset The offset at which to perform parsing. On success, this
1047 * will be updated to point to the next EROM table entry.
1050 bcma_erom_corecfg_fill_port_regions(struct bcma_erom *erom,
1051 struct bcma_corecfg *corecfg, bcma_pid_t port_num,
1052 uint8_t region_type)
1054 struct bcma_sport *sport;
1055 struct bcma_sport_list *sports;
1056 bus_size_t entry_offset;
1058 bhnd_port_type port_type;
1062 /* Determine the port type for this region type. */
1063 error = bcma_erom_region_to_port_type(erom, region_type, &port_type);
1067 /* Fetch the list to be populated */
1068 sports = bcma_corecfg_get_port_list(corecfg, port_type);
1070 /* Allocate a new port descriptor */
1071 sport = bcma_alloc_sport(port_num, port_type);
1075 /* Read all address regions defined for this port */
1076 for (bcma_rmid_t region_num = 0;; region_num++) {
1077 struct bcma_map *map;
1078 struct bcma_erom_sport_region spr;
1080 /* No valid port definition should come anywhere near
1082 if (region_num == BCMA_RMID_MAX) {
1083 EROM_LOG(erom, "core%u %s%u: region count reached "
1084 "upper limit of %u\n",
1085 corecfg->core_info.core_idx,
1086 bhnd_port_type_name(port_type),
1087 port_num, BCMA_RMID_MAX);
1093 /* Parse the next region entry. */
1094 entry_offset = bcma_erom_tell(erom);
1095 error = bcma_erom_parse_sport_region(erom, &spr);
1096 if (error && error != ENOENT) {
1097 EROM_LOG(erom, "core%u %s%u.%u: invalid slave port "
1099 corecfg->core_info.core_idx,
1100 bhnd_port_type_name(port_type),
1101 port_num, region_num);
1105 /* ENOENT signals no further region entries */
1106 if (error == ENOENT) {
1107 /* No further entries */
1112 /* A region or type mismatch also signals no further region
1114 if (spr.region_port != port_num ||
1115 spr.region_type != region_type)
1117 /* We don't want to consume this entry */
1118 bcma_erom_seek(erom, entry_offset);
1125 * Create the map entry.
1127 map = malloc(sizeof(struct bcma_map), M_BHND, M_NOWAIT);
1133 map->m_region_num = region_num;
1134 map->m_base = spr.base_addr;
1135 map->m_size = spr.size;
1138 /* Add the region map to the port */
1139 STAILQ_INSERT_TAIL(&sport->sp_maps, map, m_link);
1140 sport->sp_num_maps++;
1144 /* Append the new port descriptor on success, or deallocate the
1145 * partially parsed descriptor on failure. */
1147 STAILQ_INSERT_TAIL(sports, sport, sp_link);
1148 } else if (sport != NULL) {
1149 bcma_free_sport(sport);
1156 * Parse the next core entry from the EROM table and produce a bcma_corecfg
1157 * to be owned by the caller.
1159 * @param erom A bcma EROM instance.
1160 * @param[out] result On success, the core's device info. The caller inherits
1161 * ownership of this allocation.
1163 * @return If successful, returns 0. If the end of the EROM table is hit,
1164 * ENOENT will be returned. On error, returns a non-zero error value.
1167 bcma_erom_next_corecfg(struct bcma_erom *erom, struct bcma_corecfg **result)
1169 struct bcma_corecfg *cfg;
1170 struct bcma_erom_core core;
1171 uint8_t first_region_type;
1172 bus_size_t initial_offset;
1178 initial_offset = bcma_erom_tell(erom);
1180 /* Parse the next core entry */
1181 if ((error = bcma_erom_parse_core(erom, &core)))
1184 /* Determine the core's index and unit numbers */
1185 bcma_erom_reset(erom);
1188 for (; bcma_erom_tell(erom) != initial_offset; core_index++) {
1189 struct bcma_erom_core prev_core;
1191 /* Parse next core */
1192 error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE);
1196 if ((error = bcma_erom_parse_core(erom, &prev_core)))
1199 /* Is earlier unit? */
1200 if (core.vendor == prev_core.vendor &&
1201 core.device == prev_core.device)
1206 /* Seek to next core */
1207 error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE);
1212 /* We already parsed the core descriptor */
1213 if ((error = bcma_erom_skip_core(erom)))
1216 /* Allocate our corecfg */
1217 cfg = bcma_alloc_corecfg(core_index, core_unit, core.vendor,
1218 core.device, core.rev);
1222 /* These are 5-bit values in the EROM table, and should never be able
1223 * to overflow BCMA_PID_MAX. */
1224 KASSERT(core.num_mport <= BCMA_PID_MAX, ("unsupported mport count"));
1225 KASSERT(core.num_dport <= BCMA_PID_MAX, ("unsupported dport count"));
1226 KASSERT(core.num_mwrap + core.num_swrap <= BCMA_PID_MAX,
1227 ("unsupported wport count"));
1231 "core%u: %s %s (cid=%hx, rev=%hu, unit=%d)\n",
1233 bhnd_vendor_name(core.vendor),
1234 bhnd_find_core_name(core.vendor, core.device),
1235 core.device, core.rev, core_unit);
1238 cfg->num_master_ports = core.num_mport;
1239 cfg->num_dev_ports = 0; /* determined below */
1240 cfg->num_bridge_ports = 0; /* determined blow */
1241 cfg->num_wrapper_ports = core.num_mwrap + core.num_swrap;
1243 /* Parse Master Port Descriptors */
1244 for (uint8_t i = 0; i < core.num_mport; i++) {
1245 struct bcma_mport *mport;
1246 struct bcma_erom_mport mpd;
1248 /* Parse the master port descriptor */
1249 error = bcma_erom_parse_mport(erom, &mpd);
1253 /* Initialize a new bus mport structure */
1254 mport = malloc(sizeof(struct bcma_mport), M_BHND, M_NOWAIT);
1255 if (mport == NULL) {
1260 mport->mp_vid = mpd.port_vid;
1261 mport->mp_num = mpd.port_num;
1264 STAILQ_INSERT_TAIL(&cfg->master_ports, mport, mp_link);
1269 * Determine whether this is a bridge device; if so, we can
1270 * expect the first sequence of address region descriptors to
1271 * be of EROM_REGION_TYPE_BRIDGE instead of
1272 * BCMA_EROM_REGION_TYPE_DEVICE.
1274 * It's unclear whether this is the correct mechanism by which we
1275 * should detect/handle bridge devices, but this approach matches
1276 * that of (some of) Broadcom's published drivers.
1278 if (core.num_dport > 0) {
1281 if ((error = bcma_erom_peek32(erom, &entry)))
1284 if (BCMA_EROM_ENTRY_IS(entry, REGION) &&
1285 BCMA_EROM_GET_ATTR(entry, REGION_TYPE) == BCMA_EROM_REGION_TYPE_BRIDGE)
1287 first_region_type = BCMA_EROM_REGION_TYPE_BRIDGE;
1288 cfg->num_dev_ports = 0;
1289 cfg->num_bridge_ports = core.num_dport;
1291 first_region_type = BCMA_EROM_REGION_TYPE_DEVICE;
1292 cfg->num_dev_ports = core.num_dport;
1293 cfg->num_bridge_ports = 0;
1297 /* Device/bridge port descriptors */
1298 for (uint8_t sp_num = 0; sp_num < core.num_dport; sp_num++) {
1299 error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1306 /* Wrapper (aka device management) descriptors (for master ports). */
1307 for (uint8_t sp_num = 0; sp_num < core.num_mwrap; sp_num++) {
1308 error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1309 BCMA_EROM_REGION_TYPE_MWRAP);
1316 /* Wrapper (aka device management) descriptors (for slave ports). */
1317 for (uint8_t i = 0; i < core.num_swrap; i++) {
1318 /* Slave wrapper ports are not numbered distinctly from master
1322 * Broadcom DDR1/DDR2 Memory Controller
1323 * (cid=82e, rev=1, unit=0, d/mw/sw = 2/0/1 ) ->
1324 * bhnd0: erom[0xdc]: core6 agent0.0: mismatch got: 0x1 (0x2)
1326 * ARM BP135 AMBA3 AXI to APB Bridge
1327 * (cid=135, rev=0, unit=0, d/mw/sw = 1/0/1 ) ->
1328 * bhnd0: erom[0x124]: core9 agent1.0: mismatch got: 0x0 (0x2)
1332 * (core.num_mwrap > 0) ?
1334 * ((core.vendor == BHND_MFGID_BCM) ? 1 : 0)
1337 sp_num = (core.num_mwrap > 0) ?
1339 ((core.vendor == BHND_MFGID_BCM) ? 1 : 0) + i;
1340 error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1341 BCMA_EROM_REGION_TYPE_SWRAP);
1352 bcma_free_corecfg(cfg);
1357 static kobj_method_t bcma_erom_methods[] = {
1358 KOBJMETHOD(bhnd_erom_probe, bcma_erom_probe),
1359 KOBJMETHOD(bhnd_erom_probe_static, bcma_erom_probe_static),
1360 KOBJMETHOD(bhnd_erom_init, bcma_erom_init),
1361 KOBJMETHOD(bhnd_erom_init_static, bcma_erom_init_static),
1362 KOBJMETHOD(bhnd_erom_fini, bcma_erom_fini),
1363 KOBJMETHOD(bhnd_erom_get_core_table, bcma_erom_get_core_table),
1364 KOBJMETHOD(bhnd_erom_free_core_table, bcma_erom_free_core_table),
1365 KOBJMETHOD(bhnd_erom_lookup_core, bcma_erom_lookup_core),
1366 KOBJMETHOD(bhnd_erom_lookup_core_addr, bcma_erom_lookup_core_addr),
1371 BHND_EROM_DEFINE_CLASS(bcma_erom, bcma_erom_parser, bcma_erom_methods, sizeof(struct bcma_erom));