2 * Copyright (c) 2015-2017 Landon Fuller <landonf@landonf.org>
3 * Copyright (c) 2017 The FreeBSD Foundation
6 * Portions of this software were developed by Landon Fuller
7 * under sponsorship from the FreeBSD Foundation.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer,
14 * without modification.
15 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
16 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
17 * redistribution must be conditioned upon including a substantially
18 * similar Disclaimer requirement for further binary redistribution.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
24 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
25 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
26 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
29 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGES.
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
37 #include <sys/param.h>
39 #include <sys/kernel.h>
40 #include <sys/limits.h>
41 #include <sys/systm.h>
43 #include <machine/bus.h>
44 #include <machine/resource.h>
46 #include <dev/bhnd/cores/chipc/chipcreg.h>
48 #include "bcma_eromreg.h"
49 #include "bcma_eromvar.h"
52 * BCMA Enumeration ROM (EROM) Table
54 * Provides auto-discovery of BCMA cores on Broadcom's HND SoC.
56 * The EROM core address can be found at BCMA_CC_EROM_ADDR within the
57 * ChipCommon registers. The table itself is comprised of 32-bit
58 * type-tagged entries, organized into an array of variable-length
59 * core descriptor records.
61 * The final core descriptor is followed by a 32-bit BCMA_EROM_TABLE_EOF (0xF)
/*
 * Forward declarations for the static EROM parsing, seeking, and
 * conversion helpers defined later in this file.
 *
 * NOTE(review): this view of the file is missing lines (continuation
 * lines of several prototypes are absent); restore from upstream before
 * building.
 */
65 static const char *bcma_erom_entry_type_name (uint8_t entry);
67 static int bcma_erom_read32(struct bcma_erom *erom,
69 static int bcma_erom_skip32(struct bcma_erom *erom);
71 static int bcma_erom_skip_core(struct bcma_erom *erom);
72 static int bcma_erom_skip_mport(struct bcma_erom *erom);
73 static int bcma_erom_skip_sport_region(struct bcma_erom *erom);
75 static int bcma_erom_seek_next(struct bcma_erom *erom,
77 static int bcma_erom_region_to_port_type(struct bcma_erom *erom,
78 uint8_t region_type, bhnd_port_type *port_type);
81 static int bcma_erom_peek32(struct bcma_erom *erom,
84 static bus_size_t bcma_erom_tell(struct bcma_erom *erom);
85 static void bcma_erom_seek(struct bcma_erom *erom,
87 static void bcma_erom_reset(struct bcma_erom *erom);
89 static int bcma_erom_seek_matching_core(struct bcma_erom *sc,
90 const struct bhnd_core_match *desc,
91 struct bhnd_core_info *core);
93 static int bcma_erom_parse_core(struct bcma_erom *erom,
94 struct bcma_erom_core *core);
96 static int bcma_erom_parse_mport(struct bcma_erom *erom,
97 struct bcma_erom_mport *mport);
99 static int bcma_erom_parse_sport_region(struct bcma_erom *erom,
100 struct bcma_erom_sport_region *region);
102 static void bcma_erom_to_core_info(const struct bcma_erom_core *core,
103 u_int core_idx, int core_unit,
104 struct bhnd_core_info *info);
107 * BCMA EROM per-instance state.
/*
 * Embeds the generic bhnd_erom object as its first member so a
 * (bhnd_erom_t *) may be cast to (struct bcma_erom *), as the method
 * implementations below do.
 * NOTE(review): the "struct bcma_erom {" opener and closing "};" are
 * missing from this view.
 */
110 struct bhnd_erom obj;
111 device_t dev; /**< parent device, or NULL if none. */
112 struct bhnd_erom_io *eio; /**< bus I/O callbacks */
113 bhnd_size_t offset; /**< current read offset */
/*
 * Log a message prefixed with the function name and the EROM table
 * offset at which the condition was detected.
 * NOTE(review): the terminating "} while (0)" of this macro is missing
 * from this view.
 */
116 #define EROM_LOG(erom, fmt, ...) do { \
117 printf("%s erom[0x%llx]: " fmt, __FUNCTION__, \
118 (unsigned long long)(erom->offset), ##__VA_ARGS__); \
121 /** Return the type name for an EROM entry */
/* Maps the entry's ENTRY_TYPE attribute to a human-readable string;
 * NOTE(review): the return statements for each case (and the default
 * "unknown" case) are missing from this view. */
123 bcma_erom_entry_type_name (uint8_t entry)
125 switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
126 case BCMA_EROM_ENTRY_TYPE_CORE:
128 case BCMA_EROM_ENTRY_TYPE_MPORT:
130 case BCMA_EROM_ENTRY_TYPE_REGION:
137 /* BCMA implementation of BHND_EROM_INIT() */
/*
 * Initializes the per-instance state and maps the EROM table, which
 * lives at BCMA_EROM_TABLE_START past the chip's enumeration address.
 * Returns ENXIO if the table address would overflow, or the mapping
 * error from bhnd_erom_io_map().
 */
139 bcma_erom_init(bhnd_erom_t *erom, const struct bhnd_chipid *cid,
140 struct bhnd_erom_io *eio)
142 struct bcma_erom *sc;
143 bhnd_addr_t table_addr;
146 sc = (struct bcma_erom *)erom;
150 /* Determine erom table address */
151 if (BHND_ADDR_MAX - BCMA_EROM_TABLE_START < cid->enum_addr)
152 return (ENXIO); /* would overflow */
154 table_addr = cid->enum_addr + BCMA_EROM_TABLE_START;
156 /* Try to map the erom table */
157 error = bhnd_erom_io_map(sc->eio, table_addr, BCMA_EROM_TABLE_SIZE);
164 /* BCMA implementation of BHND_EROM_PROBE() */
/*
 * Reads the ChipCommon ID register to confirm an EROM pointer exists,
 * parses the chip identifier into *cid, and returns a bus probe
 * priority based on the detected chip type (or an error for chip types
 * this parser does not support).
 */
166 bcma_erom_probe(bhnd_erom_class_t *cls, struct bhnd_erom_io *eio,
167 const struct bhnd_chipid *hint, struct bhnd_chipid *cid)
169 uint32_t idreg, eromptr;
171 /* Hints aren't supported; all BCMA devices have a ChipCommon
176 /* Confirm CHIPC_EROMPTR availability */
177 idreg = bhnd_erom_io_read(eio, CHIPC_ID, 4);
178 if (!BHND_CHIPTYPE_HAS_EROM(CHIPC_GET_BITS(idreg, CHIPC_ID_BUS)))
181 /* Fetch EROM address */
182 eromptr = bhnd_erom_io_read(eio, CHIPC_EROMPTR, 4);
184 /* Parse chip identifier */
185 *cid = bhnd_parse_chipid(idreg, eromptr);
187 /* Verify chip type */
188 switch (cid->chip_type) {
189 case BHND_CHIPTYPE_BCMA:
190 return (BUS_PROBE_DEFAULT);
/* BCMA variants and UBUS are handled, but at a lower probe priority
 * so a more specific parser may claim them. */
192 case BHND_CHIPTYPE_BCMA_ALT:
193 case BHND_CHIPTYPE_UBUS:
194 return (BUS_PROBE_GENERIC);
/* BCMA implementation of BHND_EROM_FINI(); releases the bus I/O
 * context acquired in bcma_erom_init(). */
202 bcma_erom_fini(bhnd_erom_t *erom)
204 struct bcma_erom *sc = (struct bcma_erom *)erom;
206 bhnd_erom_io_fini(sc->eio);
/* BCMA implementation of BHND_EROM_LOOKUP_CORE(); thin wrapper over
 * bcma_erom_seek_matching_core(). */
210 bcma_erom_lookup_core(bhnd_erom_t *erom, const struct bhnd_core_match *desc,
211 struct bhnd_core_info *core)
213 struct bcma_erom *sc = (struct bcma_erom *)erom;
215 /* Search for the first matching core */
216 return (bcma_erom_seek_matching_core(sc, desc, core));
/*
 * BCMA implementation of BHND_EROM_LOOKUP_CORE_ADDR().
 *
 * Seeks to the first core matching @p desc, then walks that core's
 * slave-port region descriptors to locate the region identified by
 * (@p port_type, @p port_num, @p region_num), returning its base
 * address and size via @p addr and @p size.
 */
220 bcma_erom_lookup_core_addr(bhnd_erom_t *erom, const struct bhnd_core_match *desc,
221 bhnd_port_type port_type, u_int port_num, u_int region_num,
222 struct bhnd_core_info *core, bhnd_addr_t *addr, bhnd_size_t *size)
224 struct bcma_erom *sc;
225 struct bcma_erom_core ec;
227 uint8_t region_port, region_type;
231 sc = (struct bcma_erom *)erom;
233 /* Seek to the first matching core and provide the core info
235 if ((error = bcma_erom_seek_matching_core(sc, desc, core)))
238 if ((error = bcma_erom_parse_core(sc, &ec)))
241 /* Skip master ports */
242 for (u_long i = 0; i < ec.num_mport; i++) {
243 if ((error = bcma_erom_skip_mport(sc)))
247 /* Seek to the region block for the given port type */
250 bhnd_port_type p_type;
253 if ((error = bcma_erom_peek32(sc, &entry)))
256 if (!BCMA_EROM_ENTRY_IS(entry, REGION))
259 /* Expected region type? */
260 r_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
261 error = bcma_erom_region_to_port_type(sc, r_type, &p_type);
265 if (p_type == port_type) {
270 /* Skip to next entry */
271 if ((error = bcma_erom_skip_sport_region(sc)))
278 /* Found the appropriate port type block; now find the region records
279 * for the given port number */
281 for (u_int i = 0; i <= port_num; i++) {
282 bhnd_port_type p_type;
284 if ((error = bcma_erom_peek32(sc, &entry)))
287 if (!BCMA_EROM_ENTRY_IS(entry, REGION))
290 /* Fetch the type/port of the first region entry */
291 region_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
292 region_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
294 /* Have we found the region entries for the desired port? */
296 error = bcma_erom_region_to_port_type(sc, region_type,
301 if (p_type == port_type)
307 /* Otherwise, seek to next block of region records */
/* Consecutive entries sharing the same (type, port) pair belong to
 * one block; skip until either pair changes. */
309 uint8_t next_type, next_port;
311 if ((error = bcma_erom_skip_sport_region(sc)))
314 if ((error = bcma_erom_peek32(sc, &entry)))
317 if (!BCMA_EROM_ENTRY_IS(entry, REGION))
320 next_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
321 next_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
323 if (next_type != region_type ||
324 next_port != region_port)
332 /* Finally, search for the requested region number */
333 for (u_int i = 0; i <= region_num; i++) {
334 struct bcma_erom_sport_region region;
335 uint8_t next_port, next_type;
337 if ((error = bcma_erom_peek32(sc, &entry)))
340 if (!BCMA_EROM_ENTRY_IS(entry, REGION))
343 /* Check for the end of the region block */
344 next_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
345 next_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
347 if (next_type != region_type ||
348 next_port != region_port)
351 /* Parse the region */
/* NOTE(review): "®ion" below is mojibake for "&region" — a
 * character-encoding corruption that must be repaired for this line
 * to compile. */
352 if ((error = bcma_erom_parse_sport_region(sc, ®ion)))
355 /* Is this our target region_num? */
356 if (i == region_num) {
358 *addr = region.base_addr;
/*
 * BCMA implementation of BHND_EROM_GET_CORE_TABLE().
 *
 * Performs two passes over the EROM table: a first pass to count the
 * core descriptors, then (after allocating the output buffer) a second
 * pass that parses each descriptor into a bhnd_core_info record,
 * assigning unit numbers by counting prior cores with the same
 * vendor/device pair. The caller owns the returned buffer and frees it
 * via bcma_erom_free_core_table(). The read position is restored on
 * exit.
 */
369 bcma_erom_get_core_table(bhnd_erom_t *erom, struct bhnd_core_info **cores,
372 struct bcma_erom *sc;
373 struct bhnd_core_info *buffer;
374 bus_size_t initial_offset;
378 sc = (struct bcma_erom *)erom;
381 initial_offset = bcma_erom_tell(sc);
383 /* Determine the core count */
385 for (count = 0, error = 0; !error; count++) {
386 struct bcma_erom_core core;
388 /* Seek to the first readable core entry */
389 error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
395 /* Read past the core descriptor */
396 if ((error = bcma_erom_parse_core(sc, &core)))
400 /* Allocate our output buffer */
401 buffer = malloc(sizeof(struct bhnd_core_info) * count, M_BHND,
403 if (buffer == NULL) {
408 /* Parse all core descriptors */
410 for (u_int i = 0; i < count; i++) {
411 struct bcma_erom_core core;
415 error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
419 error = bcma_erom_parse_core(sc, &core);
423 /* Determine the unit number */
425 for (u_int j = 0; j < i; j++) {
426 if (buffer[i].vendor == buffer[j].vendor &&
427 buffer[i].device == buffer[j].device)
431 /* Convert to a bhnd info record */
432 bcma_erom_to_core_info(&core, i, unit, &buffer[i]);
/* On any error after allocation, the buffer is released before
 * returning. */
441 free(buffer, M_BHND);
444 /* Restore the initial position */
445 bcma_erom_seek(sc, initial_offset);
/* BCMA implementation of BHND_EROM_FREE_CORE_TABLE(); releases a table
 * allocated by bcma_erom_get_core_table(). */
450 bcma_erom_free_core_table(bhnd_erom_t *erom, struct bhnd_core_info *cores)
456 * Return the current read position.
/* Accessor for the instance's byte offset within the mapped table. */
459 bcma_erom_tell(struct bcma_erom *erom)
461 return (erom->offset);
465 * Seek to an absolute read position.
/* No bounds check here; bcma_erom_peek32() validates the offset on the
 * next read. */
468 bcma_erom_seek(struct bcma_erom *erom, bus_size_t offset)
470 erom->offset = offset;
474 * Read a 32-bit entry value from the EROM table without advancing the
477 * @param erom EROM read state.
478 * @param entry Will contain the read result on success.
480 * @retval ENOENT The end of the EROM table was reached.
481 * @retval non-zero The read could not be completed.
484 bcma_erom_peek32(struct bcma_erom *erom, uint32_t *entry)
/* Guard against running off the mapped table: a well-formed table must
 * terminate with an EOF entry before this limit. */
486 if (erom->offset >= (BCMA_EROM_TABLE_SIZE - sizeof(uint32_t))) {
487 EROM_LOG(erom, "BCMA EROM table missing terminating EOF\n");
491 *entry = bhnd_erom_io_read(erom->eio, erom->offset, 4);
496 * Read a 32-bit entry value from the EROM table.
498 * @param erom EROM read state.
499 * @param entry Will contain the read result on success.
501 * @retval ENOENT The end of the EROM table was reached.
502 * @retval non-zero The read could not be completed.
505 bcma_erom_read32(struct bcma_erom *erom, uint32_t *entry)
/* Peek, then advance the read offset only on success. */
509 if ((error = bcma_erom_peek32(erom, entry)) == 0)
516 * Read and discard 32-bit entry value from the EROM table.
518 * @param erom EROM read state.
520 * @retval ENOENT The end of the EROM table was reached.
521 * @retval non-zero The read could not be completed.
524 bcma_erom_skip32(struct bcma_erom *erom)
/* Read into a throwaway local purely to advance the offset. */
528 return bcma_erom_read32(erom, &entry);
532 * Read and discard a core descriptor from the EROM table.
534 * @param erom EROM read state.
536 * @retval ENOENT The end of the EROM table was reached.
537 * @retval non-zero The read could not be completed.
540 bcma_erom_skip_core(struct bcma_erom *erom)
/* Parsing into a discarded local advances past the full
 * (variable-length) descriptor. */
542 struct bcma_erom_core core;
543 return (bcma_erom_parse_core(erom, &core));
547 * Read and discard a master port descriptor from the EROM table.
549 * @param erom EROM read state.
551 * @retval ENOENT The end of the EROM table was reached.
552 * @retval non-zero The read could not be completed.
555 bcma_erom_skip_mport(struct bcma_erom *erom)
/* Same discard-by-parsing pattern as bcma_erom_skip_core(). */
557 struct bcma_erom_mport mp;
558 return (bcma_erom_parse_mport(erom, &mp));
562 * Read and discard a port region descriptor from the EROM table.
564 * @param erom EROM read state.
566 * @retval ENOENT The end of the EROM table was reached.
567 * @retval non-zero The read could not be completed.
570 bcma_erom_skip_sport_region(struct bcma_erom *erom)
/* Same discard-by-parsing pattern as bcma_erom_skip_core(). */
572 struct bcma_erom_sport_region r;
573 return (bcma_erom_parse_sport_region(erom, &r));
577 * Seek to the next entry matching the given EROM entry type.
579 * @param erom EROM read state.
580 * @param etype One of BCMA_EROM_ENTRY_TYPE_CORE,
581 * BCMA_EROM_ENTRY_TYPE_MPORT, or BCMA_EROM_ENTRY_TYPE_REGION.
583 * @retval ENOENT The end of the EROM table was reached.
584 * @retval non-zero Reading or parsing the descriptor failed.
587 bcma_erom_seek_next(struct bcma_erom *erom, uint8_t etype)
592 /* Iterate until we hit an entry matching the requested type. */
593 while (!(error = bcma_erom_peek32(erom, &entry))) {
595 if (entry == BCMA_EROM_TABLE_EOF)
/* Invalid (ISVALID-clear) entries are tolerated and stepped over
 * rather than treated as errors. */
599 if (!BCMA_EROM_GET_ATTR(entry, ENTRY_ISVALID))
602 /* Entry type matches? */
603 if (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE) == etype)
606 /* Skip non-matching entry types. */
607 switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
608 case BCMA_EROM_ENTRY_TYPE_CORE:
609 if ((error = bcma_erom_skip_core(erom)))
614 case BCMA_EROM_ENTRY_TYPE_MPORT:
615 if ((error = bcma_erom_skip_mport(erom)))
620 case BCMA_EROM_ENTRY_TYPE_REGION:
621 if ((error = bcma_erom_skip_sport_region(erom)))
626 /* Unknown entry type! */
635 * Return the read position to the start of the EROM table.
637 * @param erom EROM read state.
/* NOTE(review): the body (resetting erom->offset to the table start)
 * is missing from this view. */
640 bcma_erom_reset(struct bcma_erom *erom)
646 * Seek to the first core entry matching @p desc.
648 * @param erom EROM read state.
649 * @param desc The core match descriptor.
650 * @param[out] core On success, the matching core info. If the core info
651 * is not desired, a NULL pointer may be provided.
653 * @retval ENOENT The end of the EROM table was reached before @p index was
655 * @retval non-zero Reading or parsing failed.
658 bcma_erom_seek_matching_core(struct bcma_erom *sc,
659 const struct bhnd_core_match *desc, struct bhnd_core_info *core)
661 struct bhnd_core_match imatch;
662 bus_size_t core_offset, next_offset;
665 /* Seek to table start. */
668 /* We can't determine a core's unit number during the initial scan. */
/* imatch is a relaxed copy of *desc with the unit-number constraint
 * cleared; the true unit is computed by re-scanning below. */
670 imatch.m.match.core_unit = 0;
672 /* Locate the first matching core */
673 for (u_int i = 0; i < UINT_MAX; i++) {
674 struct bcma_erom_core ec;
675 struct bhnd_core_info ci;
677 /* Seek to the next core */
678 error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
682 /* Save the core offset */
683 core_offset = bcma_erom_tell(sc);
686 if ((error = bcma_erom_parse_core(sc, &ec)))
689 bcma_erom_to_core_info(&ec, i, 0, &ci);
691 /* Check for initial match */
692 if (!bhnd_core_matches(&ci, &imatch))
695 /* Re-scan preceding cores to determine the unit number. */
696 next_offset = bcma_erom_tell(sc);
698 for (u_int j = 0; j < i; j++) {
700 error = bcma_erom_seek_next(sc,
701 BCMA_EROM_ENTRY_TYPE_CORE);
705 if ((error = bcma_erom_parse_core(sc, &ec)))
708 /* Bump the unit number? */
709 if (ec.vendor == ci.vendor && ec.device == ci.device)
713 /* Check for full match against now-valid unit number */
714 if (!bhnd_core_matches(&ci, desc)) {
715 /* Reposition to allow reading the next core */
716 bcma_erom_seek(sc, next_offset);
720 /* Found; seek to the core's initial offset and provide
721 * the core info to the caller */
722 bcma_erom_seek(sc, core_offset);
729 /* Not found, or a parse error occured */
734 * Read the next core descriptor from the EROM table.
736 * @param erom EROM read state.
737 * @param[out] core On success, will be populated with the parsed core
740 * @retval ENOENT The end of the EROM table was reached.
741 * @retval non-zero Reading or parsing the core descriptor failed.
744 bcma_erom_parse_core(struct bcma_erom *erom, struct bcma_erom_core *core)
749 /* Parse CoreDescA */
750 if ((error = bcma_erom_read32(erom, &entry)))
/* An EOF marker in place of CoreDescA is the normal end-of-table
 * condition, not a parse error. */
754 if (entry == BCMA_EROM_TABLE_EOF)
757 if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
758 EROM_LOG(erom, "Unexpected EROM entry 0x%x (type=%s)\n",
759 entry, bcma_erom_entry_type_name(entry));
764 core->vendor = BCMA_EROM_GET_ATTR(entry, COREA_DESIGNER);
765 core->device = BCMA_EROM_GET_ATTR(entry, COREA_ID);
767 /* Parse CoreDescB */
768 if ((error = bcma_erom_read32(erom, &entry)))
771 if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
775 core->rev = BCMA_EROM_GET_ATTR(entry, COREB_REV);
776 core->num_mport = BCMA_EROM_GET_ATTR(entry, COREB_NUM_MP);
777 core->num_dport = BCMA_EROM_GET_ATTR(entry, COREB_NUM_DP);
778 core->num_mwrap = BCMA_EROM_GET_ATTR(entry, COREB_NUM_WMP);
779 core->num_swrap = BCMA_EROM_GET_ATTR(entry, COREB_NUM_WSP);
785 * Read the next master port descriptor from the EROM table.
787 * @param erom EROM read state.
788 * @param[out] mport On success, will be populated with the parsed
791 * @retval non-zero Reading or parsing the descriptor failed.
794 bcma_erom_parse_mport(struct bcma_erom *erom, struct bcma_erom_mport *mport)
799 /* Parse the master port descriptor */
800 if ((error = bcma_erom_read32(erom, &entry)))
/* A master port descriptor is a single 32-bit entry carrying the
 * port's vendor-assigned ID and port number. */
803 if (!BCMA_EROM_ENTRY_IS(entry, MPORT))
806 mport->port_vid = BCMA_EROM_GET_ATTR(entry, MPORT_ID);
807 mport->port_num = BCMA_EROM_GET_ATTR(entry, MPORT_NUM);
813 * Read the next slave port region descriptor from the EROM table.
815 * @param erom EROM read state.
816 * @param[out] mport On success, will be populated with the parsed
819 * @retval ENOENT The end of the region descriptor table was reached.
820 * @retval non-zero Reading or parsing the descriptor failed.
823 bcma_erom_parse_sport_region(struct bcma_erom *erom,
824 struct bcma_erom_sport_region *region)
830 /* Peek at the region descriptor */
831 if (bcma_erom_peek32(erom, &entry))
834 /* A non-region entry signals the end of the region table */
835 if (!BCMA_EROM_ENTRY_IS(entry, REGION)) {
/* Consume the peeked entry now that it has been validated. */
838 bcma_erom_skip32(erom);
841 region->base_addr = BCMA_EROM_GET_ATTR(entry, REGION_BASE);
842 region->region_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
843 region->region_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
844 size_type = BCMA_EROM_GET_ATTR(entry, REGION_SIZE);
846 /* If region address is 64-bit, fetch the high bits. */
847 if (BCMA_EROM_GET_ATTR(entry, REGION_64BIT)) {
848 if ((error = bcma_erom_read32(erom, &entry)))
851 region->base_addr |= ((bhnd_addr_t) entry << 32);
854 /* Parse the region size; it's either encoded as the binary logarithm
855 * of the number of 4K pages (i.e. log2 n), or its encoded as a
856 * 32-bit/64-bit literal value directly following the current entry. */
857 if (size_type == BCMA_EROM_REGION_SIZE_OTHER) {
858 if ((error = bcma_erom_read32(erom, &entry)))
861 region->size = BCMA_EROM_GET_ATTR(entry, RSIZE_VAL);
863 if (BCMA_EROM_GET_ATTR(entry, RSIZE_64BIT)) {
864 if ((error = bcma_erom_read32(erom, &entry)))
866 region->size |= ((bhnd_size_t) entry << 32);
869 region->size = BCMA_EROM_REGION_SIZE_BASE << size_type;
872 /* Verify that addr+size does not overflow. */
873 if (region->size != 0 &&
874 BHND_ADDR_MAX - (region->size - 1) < region->base_addr)
876 EROM_LOG(erom, "%s%u: invalid address map %llx:%llx\n",
877 bcma_erom_entry_type_name(region->region_type),
879 (unsigned long long) region->base_addr,
880 (unsigned long long) region->size);
889 * Convert a bcma_erom_core record to its bhnd_core_info representation.
891 * @param core EROM core record to convert.
892 * @param core_idx The core index of @p core.
893 * @param core_unit The core unit of @p core.
894 * @param[out] info The populated bhnd_core_info representation.
897 bcma_erom_to_core_info(const struct bcma_erom_core *core, u_int core_idx,
898 int core_unit, struct bhnd_core_info *info)
/* Straight field-by-field copy; index and unit are supplied by the
 * caller since they are not stored in the EROM record itself. */
900 info->vendor = core->vendor;
901 info->device = core->device;
902 info->hwrev = core->rev;
903 info->core_idx = core_idx;
904 info->unit = core_unit;
908 * Map an EROM region type to its corresponding port type.
910 * @param region_type Region type value.
911 * @param[out] port_type On success, the corresponding port type.
914 bcma_erom_region_to_port_type(struct bcma_erom *erom, uint8_t region_type,
915 bhnd_port_type *port_type)
917 switch (region_type) {
918 case BCMA_EROM_REGION_TYPE_DEVICE:
919 *port_type = BHND_PORT_DEVICE;
921 case BCMA_EROM_REGION_TYPE_BRIDGE:
922 *port_type = BHND_PORT_BRIDGE;
/* Both master- and slave-wrapper regions map to the generic agent
 * port type. */
924 case BCMA_EROM_REGION_TYPE_MWRAP:
925 case BCMA_EROM_REGION_TYPE_SWRAP:
926 *port_type = BHND_PORT_AGENT;
929 EROM_LOG(erom, "unsupported region type %hhx\n",
936 * Register all MMIO region descriptors for the given slave port.
938 * @param erom EROM read state.
939 * @param corecfg Core info to be populated with the scanned port regions.
940 * @param port_num Port index for which regions will be parsed.
941 * @param region_type The region type to be parsed.
942 * @param[out] offset The offset at which to perform parsing. On success, this
943 * will be updated to point to the next EROM table entry.
946 bcma_erom_corecfg_fill_port_regions(struct bcma_erom *erom,
947 struct bcma_corecfg *corecfg, bcma_pid_t port_num,
950 struct bcma_sport *sport;
951 struct bcma_sport_list *sports;
952 bus_size_t entry_offset;
954 bhnd_port_type port_type;
958 /* Determine the port type for this region type. */
959 error = bcma_erom_region_to_port_type(erom, region_type, &port_type);
963 /* Fetch the list to be populated */
964 sports = bcma_corecfg_get_port_list(corecfg, port_type);
966 /* Allocate a new port descriptor */
967 sport = bcma_alloc_sport(port_num, port_type);
971 /* Read all address regions defined for this port */
972 for (bcma_rmid_t region_num = 0;; region_num++) {
973 struct bcma_map *map;
974 struct bcma_erom_sport_region spr;
976 /* No valid port definition should come anywhere near
/* Defensive cap: a runaway/corrupt table would otherwise loop
 * forever. */
978 if (region_num == BCMA_RMID_MAX) {
979 EROM_LOG(erom, "core%u %s%u: region count reached "
980 "upper limit of %u\n",
981 corecfg->core_info.core_idx,
982 bhnd_port_type_name(port_type),
983 port_num, BCMA_RMID_MAX);
989 /* Parse the next region entry. */
990 entry_offset = bcma_erom_tell(erom);
991 error = bcma_erom_parse_sport_region(erom, &spr);
992 if (error && error != ENOENT) {
993 EROM_LOG(erom, "core%u %s%u.%u: invalid slave port "
995 corecfg->core_info.core_idx,
996 bhnd_port_type_name(port_type),
997 port_num, region_num);
1001 /* ENOENT signals no further region entries */
1002 if (error == ENOENT) {
1003 /* No further entries */
1008 /* A region or type mismatch also signals no further region
1010 if (spr.region_port != port_num ||
1011 spr.region_type != region_type)
1013 /* We don't want to consume this entry */
/* Rewind so the caller (or the next port's pass) re-reads this
 * entry from its saved offset. */
1014 bcma_erom_seek(erom, entry_offset);
1021 * Create the map entry.
1023 map = malloc(sizeof(struct bcma_map), M_BHND, M_NOWAIT);
1029 map->m_region_num = region_num;
1030 map->m_base = spr.base_addr;
1031 map->m_size = spr.size;
1034 /* Add the region map to the port */
1035 STAILQ_INSERT_TAIL(&sport->sp_maps, map, m_link);
1036 sport->sp_num_maps++;
1040 /* Append the new port descriptor on success, or deallocate the
1041 * partially parsed descriptor on failure. */
1043 STAILQ_INSERT_TAIL(sports, sport, sp_link);
1044 } else if (sport != NULL) {
1045 bcma_free_sport(sport);
1052 * Parse the next core entry from the EROM table and produce a bcma_corecfg
1053 * to be owned by the caller.
1055 * @param erom A bcma EROM instance.
1056 * @param[out] result On success, the core's device info. The caller inherits
1057 * ownership of this allocation.
1059 * @return If successful, returns 0. If the end of the EROM table is hit,
1060 * ENOENT will be returned. On error, returns a non-zero error value.
1063 bcma_erom_next_corecfg(struct bcma_erom *erom, struct bcma_corecfg **result)
1065 struct bcma_corecfg *cfg;
1066 struct bcma_erom_core core;
1067 uint8_t first_region_type;
1068 bus_size_t initial_offset;
1074 initial_offset = bcma_erom_tell(erom);
1076 /* Parse the next core entry */
1077 if ((error = bcma_erom_parse_core(erom, &core)))
1080 /* Determine the core's index and unit numbers */
/* Rewind to the table start and count cores preceding the current
 * position; cores with the same vendor/device bump the unit number. */
1081 bcma_erom_reset(erom);
1084 for (; bcma_erom_tell(erom) != initial_offset; core_index++) {
1085 struct bcma_erom_core prev_core;
1087 /* Parse next core */
1088 error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE);
1092 if ((error = bcma_erom_parse_core(erom, &prev_core)))
1095 /* Is earlier unit? */
1096 if (core.vendor == prev_core.vendor &&
1097 core.device == prev_core.device)
1102 /* Seek to next core */
1103 error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE);
1108 /* We already parsed the core descriptor */
1109 if ((error = bcma_erom_skip_core(erom)))
1112 /* Allocate our corecfg */
1113 cfg = bcma_alloc_corecfg(core_index, core_unit, core.vendor,
1114 core.device, core.rev);
1118 /* These are 5-bit values in the EROM table, and should never be able
1119 * to overflow BCMA_PID_MAX. */
1120 KASSERT(core.num_mport <= BCMA_PID_MAX, ("unsupported mport count"));
1121 KASSERT(core.num_dport <= BCMA_PID_MAX, ("unsupported dport count"));
1122 KASSERT(core.num_mwrap + core.num_swrap <= BCMA_PID_MAX,
1123 ("unsupported wport count"));
1127 "core%u: %s %s (cid=%hx, rev=%hu, unit=%d)\n",
1129 bhnd_vendor_name(core.vendor),
1130 bhnd_find_core_name(core.vendor, core.device),
1131 core.device, core.rev, core_unit);
1134 cfg->num_master_ports = core.num_mport;
1135 cfg->num_dev_ports = 0; /* determined below */
1136 cfg->num_bridge_ports = 0; /* determined blow */
1137 cfg->num_wrapper_ports = core.num_mwrap + core.num_swrap;
1139 /* Parse Master Port Descriptors */
1140 for (uint8_t i = 0; i < core.num_mport; i++) {
1141 struct bcma_mport *mport;
1142 struct bcma_erom_mport mpd;
1144 /* Parse the master port descriptor */
1145 error = bcma_erom_parse_mport(erom, &mpd);
1149 /* Initialize a new bus mport structure */
1150 mport = malloc(sizeof(struct bcma_mport), M_BHND, M_NOWAIT);
1151 if (mport == NULL) {
1156 mport->mp_vid = mpd.port_vid;
1157 mport->mp_num = mpd.port_num;
1160 STAILQ_INSERT_TAIL(&cfg->master_ports, mport, mp_link);
1165 * Determine whether this is a bridge device; if so, we can
1166 * expect the first sequence of address region descriptors to
1167 * be of EROM_REGION_TYPE_BRIDGE instead of
1168 * BCMA_EROM_REGION_TYPE_DEVICE.
1170 * It's unclear whether this is the correct mechanism by which we
1171 * should detect/handle bridge devices, but this approach matches
1172 * that of (some of) Broadcom's published drivers.
1174 if (core.num_dport > 0) {
1177 if ((error = bcma_erom_peek32(erom, &entry)))
1180 if (BCMA_EROM_ENTRY_IS(entry, REGION) &&
1181 BCMA_EROM_GET_ATTR(entry, REGION_TYPE) == BCMA_EROM_REGION_TYPE_BRIDGE)
1183 first_region_type = BCMA_EROM_REGION_TYPE_BRIDGE;
1184 cfg->num_dev_ports = 0;
1185 cfg->num_bridge_ports = core.num_dport;
1187 first_region_type = BCMA_EROM_REGION_TYPE_DEVICE;
1188 cfg->num_dev_ports = core.num_dport;
1189 cfg->num_bridge_ports = 0;
1193 /* Device/bridge port descriptors */
1194 for (uint8_t sp_num = 0; sp_num < core.num_dport; sp_num++) {
1195 error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1202 /* Wrapper (aka device management) descriptors (for master ports). */
1203 for (uint8_t sp_num = 0; sp_num < core.num_mwrap; sp_num++) {
1204 error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1205 BCMA_EROM_REGION_TYPE_MWRAP);
1212 /* Wrapper (aka device management) descriptors (for slave ports). */
1213 for (uint8_t i = 0; i < core.num_swrap; i++) {
1214 /* Slave wrapper ports are not numbered distinctly from master
1218 * Broadcom DDR1/DDR2 Memory Controller
1219 * (cid=82e, rev=1, unit=0, d/mw/sw = 2/0/1 ) ->
1220 * bhnd0: erom[0xdc]: core6 agent0.0: mismatch got: 0x1 (0x2)
1222 * ARM BP135 AMBA3 AXI to APB Bridge
1223 * (cid=135, rev=0, unit=0, d/mw/sw = 1/0/1 ) ->
1224 * bhnd0: erom[0x124]: core9 agent1.0: mismatch got: 0x0 (0x2)
1228 * (core.num_mwrap > 0) ?
1230 * ((core.vendor == BHND_MFGID_BCM) ? 1 : 0)
/* Slave wrapper port numbering continues after the master wrappers
 * (with a vendor-specific quirk for non-Broadcom cores); see the
 * observed-hardware notes above. */
1233 sp_num = (core.num_mwrap > 0) ?
1235 ((core.vendor == BHND_MFGID_BCM) ? 1 : 0) + i;
1236 error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1237 BCMA_EROM_REGION_TYPE_SWRAP);
1244 * Seek to the next core entry (if any), skipping any dangling/invalid
1247 * On the BCM4706, the EROM entry for the memory controller core
1248 * (0x4bf/0x52E) contains a dangling/unused slave wrapper port region
1251 if ((error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE))) {
1252 if (error != ENOENT)
/* Error path: release the partially-built corecfg before returning. */
1261 bcma_free_corecfg(cfg);
/*
 * BCMA implementation of BHND_EROM_DUMP().
 *
 * Walks the entire EROM table from the start, logging every entry in
 * human-readable form: core descriptors (CoreDescA/CoreDescB), master
 * port descriptors, and region descriptors (including 64-bit address
 * high words and extended size descriptors).
 */
1267 bcma_erom_dump(bhnd_erom_t *erom)
1269 struct bcma_erom *sc;
1273 sc = (struct bcma_erom *)erom;
1275 bcma_erom_reset(sc);
1277 while (!(error = bcma_erom_read32(sc, &entry))) {
1279 if (entry == BCMA_EROM_TABLE_EOF) {
1280 EROM_LOG(sc, "EOF\n");
1285 if (!BCMA_EROM_GET_ATTR(entry, ENTRY_ISVALID)) {
1286 EROM_LOG(sc, "invalid EROM entry %#x\n", entry);
1290 switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
1291 case BCMA_EROM_ENTRY_TYPE_CORE: {
1293 EROM_LOG(sc, "coreA (0x%x)\n", entry);
1294 EROM_LOG(sc, "\tdesigner:\t0x%x\n",
1295 BCMA_EROM_GET_ATTR(entry, COREA_DESIGNER));
1296 EROM_LOG(sc, "\tid:\t\t0x%x\n",
1297 BCMA_EROM_GET_ATTR(entry, COREA_ID));
1298 EROM_LOG(sc, "\tclass:\t\t0x%x\n",
1299 BCMA_EROM_GET_ATTR(entry, COREA_CLASS));
1302 if ((error = bcma_erom_read32(sc, &entry))) {
1303 EROM_LOG(sc, "error reading CoreDescB: %d\n",
1308 if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
1309 EROM_LOG(sc, "invalid core descriptor; found "
1310 "unexpected entry %#x (type=%s)\n",
1311 entry, bcma_erom_entry_type_name(entry));
1315 EROM_LOG(sc, "coreB (0x%x)\n", entry);
1316 EROM_LOG(sc, "\trev:\t0x%x\n",
1317 BCMA_EROM_GET_ATTR(entry, COREB_REV));
1318 EROM_LOG(sc, "\tnummp:\t0x%x\n",
1319 BCMA_EROM_GET_ATTR(entry, COREB_NUM_MP));
1320 EROM_LOG(sc, "\tnumdp:\t0x%x\n",
1321 BCMA_EROM_GET_ATTR(entry, COREB_NUM_DP));
1322 EROM_LOG(sc, "\tnumwmp:\t0x%x\n",
1323 BCMA_EROM_GET_ATTR(entry, COREB_NUM_WMP));
1324 EROM_LOG(sc, "\tnumwsp:\t0x%x\n",
/* NOTE(review): this prints COREB_NUM_WMP under the "numwsp" label;
 * it likely should be COREB_NUM_WSP — confirm against upstream. */
1325 BCMA_EROM_GET_ATTR(entry, COREB_NUM_WMP));
1329 case BCMA_EROM_ENTRY_TYPE_MPORT:
1330 EROM_LOG(sc, "\tmport 0x%x\n", entry);
1331 EROM_LOG(sc, "\t\tport:\t0x%x\n",
1332 BCMA_EROM_GET_ATTR(entry, MPORT_NUM));
1333 EROM_LOG(sc, "\t\tid:\t\t0x%x\n",
1334 BCMA_EROM_GET_ATTR(entry, MPORT_ID));
1337 case BCMA_EROM_ENTRY_TYPE_REGION: {
1341 addr64 = (BCMA_EROM_GET_ATTR(entry, REGION_64BIT) != 0);
1342 size_type = BCMA_EROM_GET_ATTR(entry, REGION_SIZE);
1344 EROM_LOG(sc, "\tregion 0x%x:\n", entry);
1345 EROM_LOG(sc, "\t\t%s:\t0x%x\n",
1346 addr64 ? "baselo" : "base",
1347 BCMA_EROM_GET_ATTR(entry, REGION_BASE));
1348 EROM_LOG(sc, "\t\tport:\t0x%x\n",
1349 BCMA_EROM_GET_ATTR(entry, REGION_PORT));
1350 EROM_LOG(sc, "\t\ttype:\t0x%x\n",
1351 BCMA_EROM_GET_ATTR(entry, REGION_TYPE));
1352 EROM_LOG(sc, "\t\tsztype:\t0x%hhx\n", size_type);
1354 /* Read the base address high bits */
1356 if ((error = bcma_erom_read32(sc, &entry))) {
1357 EROM_LOG(sc, "error reading region "
1358 "base address high bits %d\n",
1363 EROM_LOG(sc, "\t\tbasehi:\t0x%x\n", entry);
1366 /* Read extended size descriptor */
1367 if (size_type == BCMA_EROM_REGION_SIZE_OTHER) {
1370 if ((error = bcma_erom_read32(sc, &entry))) {
1371 EROM_LOG(sc, "error reading region "
1372 "size descriptor %d\n",
1377 if (BCMA_EROM_GET_ATTR(entry, RSIZE_64BIT))
1382 EROM_LOG(sc, "\t\t%s:\t0x%x\n",
1383 size64 ? "sizelo" : "size",
1384 BCMA_EROM_GET_ATTR(entry, RSIZE_VAL));
1387 error = bcma_erom_read32(sc, &entry);
1389 EROM_LOG(sc, "error reading "
1390 "region size high bits: "
1395 EROM_LOG(sc, "\t\tsizehi:\t0x%x\n",
1403 EROM_LOG(sc, "unknown EROM entry 0x%x (type=%s)\n",
1404 entry, bcma_erom_entry_type_name(entry));
/* The walk ends on ENOENT (table exhausted without EOF) or a read
 * failure; both are logged below. */
1409 if (error == ENOENT)
1410 EROM_LOG(sc, "BCMA EROM table missing terminating EOF\n");
1412 EROM_LOG(sc, "EROM read failed: %d\n", error);
/* KOBJ method table binding the bhnd_erom interface to this parser's
 * implementations, followed by the class registration. */
1417 static kobj_method_t bcma_erom_methods[] = {
1418 KOBJMETHOD(bhnd_erom_probe, bcma_erom_probe),
1419 KOBJMETHOD(bhnd_erom_init, bcma_erom_init),
1420 KOBJMETHOD(bhnd_erom_fini, bcma_erom_fini),
1421 KOBJMETHOD(bhnd_erom_get_core_table, bcma_erom_get_core_table),
1422 KOBJMETHOD(bhnd_erom_free_core_table, bcma_erom_free_core_table),
1423 KOBJMETHOD(bhnd_erom_lookup_core, bcma_erom_lookup_core),
1424 KOBJMETHOD(bhnd_erom_lookup_core_addr, bcma_erom_lookup_core_addr),
1425 KOBJMETHOD(bhnd_erom_dump, bcma_erom_dump),
1430 BHND_EROM_DEFINE_CLASS(bcma_erom, bcma_erom_parser, bcma_erom_methods, sizeof(struct bcma_erom));