2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2015-2017 Landon Fuller <landonf@landonf.org>
5 * Copyright (c) 2017 The FreeBSD Foundation
8 * Portions of this software were developed by Landon Fuller
9 * under sponsorship from the FreeBSD Foundation.
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
19 * redistribution must be conditioned upon including a substantially
20 * similar Disclaimer requirement for further binary redistribution.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
26 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
27 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
28 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
31 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33 * THE POSSIBILITY OF SUCH DAMAGES.
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
39 #include <sys/param.h>
41 #include <sys/kernel.h>
42 #include <sys/limits.h>
43 #include <sys/systm.h>
45 #include <machine/bus.h>
46 #include <machine/resource.h>
48 #include <dev/bhnd/cores/chipc/chipcreg.h>
50 #include "bcma_eromreg.h"
51 #include "bcma_eromvar.h"
/**
 * BCMA Enumeration ROM (EROM) Table
 *
 * Provides auto-discovery of BCMA cores on Broadcom's HND SoC.
 *
 * The EROM core address can be found at BCMA_CC_EROM_ADDR within the
 * ChipCommon registers. The table itself is comprised of 32-bit
 * type-tagged entries, organized into an array of variable-length
 * core descriptor records.
 *
 * The final core descriptor is followed by a 32-bit BCMA_EROM_TABLE_EOF (0xF)
 * marker.
 */
67 static const char *bcma_erom_entry_type_name (uint8_t entry);
69 static int bcma_erom_read32(struct bcma_erom *erom,
71 static int bcma_erom_skip32(struct bcma_erom *erom);
73 static int bcma_erom_skip_core(struct bcma_erom *erom);
74 static int bcma_erom_skip_mport(struct bcma_erom *erom);
75 static int bcma_erom_skip_sport_region(struct bcma_erom *erom);
77 static int bcma_erom_seek_next(struct bcma_erom *erom,
79 static int bcma_erom_region_to_port_type(struct bcma_erom *erom,
80 uint8_t region_type, bhnd_port_type *port_type);
83 static int bcma_erom_peek32(struct bcma_erom *erom,
86 static bus_size_t bcma_erom_tell(struct bcma_erom *erom);
87 static void bcma_erom_seek(struct bcma_erom *erom,
89 static void bcma_erom_reset(struct bcma_erom *erom);
91 static int bcma_erom_seek_matching_core(struct bcma_erom *sc,
92 const struct bhnd_core_match *desc,
93 struct bhnd_core_info *core);
95 static int bcma_erom_parse_core(struct bcma_erom *erom,
96 struct bcma_erom_core *core);
98 static int bcma_erom_parse_mport(struct bcma_erom *erom,
99 struct bcma_erom_mport *mport);
101 static int bcma_erom_parse_sport_region(struct bcma_erom *erom,
102 struct bcma_erom_sport_region *region);
104 static void bcma_erom_to_core_info(const struct bcma_erom_core *core,
105 u_int core_idx, int core_unit,
106 struct bhnd_core_info *info);
109 * BCMA EROM per-instance state.
112 struct bhnd_erom obj;
113 device_t dev; /**< parent device, or NULL if none. */
114 struct bhnd_erom_io *eio; /**< bus I/O callbacks */
115 bhnd_size_t offset; /**< current read offset */
/**
 * Log a message prefixed with the calling function's name and the
 * current EROM read offset.
 */
#define	EROM_LOG(erom, fmt, ...)	do {			\
	printf("%s erom[0x%llx]: " fmt, __FUNCTION__,		\
	    (unsigned long long)(erom->offset), ##__VA_ARGS__);	\
} while(0)
123 /** Return the type name for an EROM entry */
125 bcma_erom_entry_type_name (uint8_t entry)
127 switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
128 case BCMA_EROM_ENTRY_TYPE_CORE:
130 case BCMA_EROM_ENTRY_TYPE_MPORT:
132 case BCMA_EROM_ENTRY_TYPE_REGION:
139 /* BCMA implementation of BHND_EROM_INIT() */
141 bcma_erom_init(bhnd_erom_t *erom, const struct bhnd_chipid *cid,
142 struct bhnd_erom_io *eio)
144 struct bcma_erom *sc;
145 bhnd_addr_t table_addr;
148 sc = (struct bcma_erom *)erom;
152 /* Determine erom table address */
153 if (BHND_ADDR_MAX - BCMA_EROM_TABLE_START < cid->enum_addr)
154 return (ENXIO); /* would overflow */
156 table_addr = cid->enum_addr + BCMA_EROM_TABLE_START;
158 /* Try to map the erom table */
159 error = bhnd_erom_io_map(sc->eio, table_addr, BCMA_EROM_TABLE_SIZE);
166 /* BCMA implementation of BHND_EROM_PROBE() */
168 bcma_erom_probe(bhnd_erom_class_t *cls, struct bhnd_erom_io *eio,
169 const struct bhnd_chipid *hint, struct bhnd_chipid *cid)
171 uint32_t idreg, eromptr;
173 /* Hints aren't supported; all BCMA devices have a ChipCommon
178 /* Confirm CHIPC_EROMPTR availability */
179 idreg = bhnd_erom_io_read(eio, CHIPC_ID, 4);
180 if (!BHND_CHIPTYPE_HAS_EROM(CHIPC_GET_BITS(idreg, CHIPC_ID_BUS)))
183 /* Fetch EROM address */
184 eromptr = bhnd_erom_io_read(eio, CHIPC_EROMPTR, 4);
186 /* Parse chip identifier */
187 *cid = bhnd_parse_chipid(idreg, eromptr);
189 /* Verify chip type */
190 switch (cid->chip_type) {
191 case BHND_CHIPTYPE_BCMA:
192 return (BUS_PROBE_DEFAULT);
194 case BHND_CHIPTYPE_BCMA_ALT:
195 case BHND_CHIPTYPE_UBUS:
196 return (BUS_PROBE_GENERIC);
204 bcma_erom_fini(bhnd_erom_t *erom)
206 struct bcma_erom *sc = (struct bcma_erom *)erom;
208 bhnd_erom_io_fini(sc->eio);
212 bcma_erom_lookup_core(bhnd_erom_t *erom, const struct bhnd_core_match *desc,
213 struct bhnd_core_info *core)
215 struct bcma_erom *sc = (struct bcma_erom *)erom;
217 /* Search for the first matching core */
218 return (bcma_erom_seek_matching_core(sc, desc, core));
222 bcma_erom_lookup_core_addr(bhnd_erom_t *erom, const struct bhnd_core_match *desc,
223 bhnd_port_type port_type, u_int port_num, u_int region_num,
224 struct bhnd_core_info *core, bhnd_addr_t *addr, bhnd_size_t *size)
226 struct bcma_erom *sc;
227 struct bcma_erom_core ec;
229 uint8_t region_port, region_type;
233 sc = (struct bcma_erom *)erom;
235 /* Seek to the first matching core and provide the core info
237 if ((error = bcma_erom_seek_matching_core(sc, desc, core)))
240 if ((error = bcma_erom_parse_core(sc, &ec)))
243 /* Skip master ports */
244 for (u_long i = 0; i < ec.num_mport; i++) {
245 if ((error = bcma_erom_skip_mport(sc)))
249 /* Seek to the region block for the given port type */
252 bhnd_port_type p_type;
255 if ((error = bcma_erom_peek32(sc, &entry)))
258 if (!BCMA_EROM_ENTRY_IS(entry, REGION))
261 /* Expected region type? */
262 r_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
263 error = bcma_erom_region_to_port_type(sc, r_type, &p_type);
267 if (p_type == port_type) {
272 /* Skip to next entry */
273 if ((error = bcma_erom_skip_sport_region(sc)))
280 /* Found the appropriate port type block; now find the region records
281 * for the given port number */
283 for (u_int i = 0; i <= port_num; i++) {
284 bhnd_port_type p_type;
286 if ((error = bcma_erom_peek32(sc, &entry)))
289 if (!BCMA_EROM_ENTRY_IS(entry, REGION))
292 /* Fetch the type/port of the first region entry */
293 region_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
294 region_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
296 /* Have we found the region entries for the desired port? */
298 error = bcma_erom_region_to_port_type(sc, region_type,
303 if (p_type == port_type)
309 /* Otherwise, seek to next block of region records */
311 uint8_t next_type, next_port;
313 if ((error = bcma_erom_skip_sport_region(sc)))
316 if ((error = bcma_erom_peek32(sc, &entry)))
319 if (!BCMA_EROM_ENTRY_IS(entry, REGION))
322 next_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
323 next_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
325 if (next_type != region_type ||
326 next_port != region_port)
334 /* Finally, search for the requested region number */
335 for (u_int i = 0; i <= region_num; i++) {
336 struct bcma_erom_sport_region region;
337 uint8_t next_port, next_type;
339 if ((error = bcma_erom_peek32(sc, &entry)))
342 if (!BCMA_EROM_ENTRY_IS(entry, REGION))
345 /* Check for the end of the region block */
346 next_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
347 next_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
349 if (next_type != region_type ||
350 next_port != region_port)
353 /* Parse the region */
354 if ((error = bcma_erom_parse_sport_region(sc, ®ion)))
357 /* Is this our target region_num? */
358 if (i == region_num) {
360 *addr = region.base_addr;
371 bcma_erom_get_core_table(bhnd_erom_t *erom, struct bhnd_core_info **cores,
374 struct bcma_erom *sc;
375 struct bhnd_core_info *buffer;
376 bus_size_t initial_offset;
380 sc = (struct bcma_erom *)erom;
383 initial_offset = bcma_erom_tell(sc);
385 /* Determine the core count */
387 for (count = 0, error = 0; !error; count++) {
388 struct bcma_erom_core core;
390 /* Seek to the first readable core entry */
391 error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
397 /* Read past the core descriptor */
398 if ((error = bcma_erom_parse_core(sc, &core)))
402 /* Allocate our output buffer */
403 buffer = malloc(sizeof(struct bhnd_core_info) * count, M_BHND,
405 if (buffer == NULL) {
410 /* Parse all core descriptors */
412 for (u_int i = 0; i < count; i++) {
413 struct bcma_erom_core core;
417 error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
421 error = bcma_erom_parse_core(sc, &core);
425 /* Determine the unit number */
427 for (u_int j = 0; j < i; j++) {
428 if (buffer[i].vendor == buffer[j].vendor &&
429 buffer[i].device == buffer[j].device)
433 /* Convert to a bhnd info record */
434 bcma_erom_to_core_info(&core, i, unit, &buffer[i]);
443 free(buffer, M_BHND);
446 /* Restore the initial position */
447 bcma_erom_seek(sc, initial_offset);
452 bcma_erom_free_core_table(bhnd_erom_t *erom, struct bhnd_core_info *cores)
458 * Return the current read position.
461 bcma_erom_tell(struct bcma_erom *erom)
463 return (erom->offset);
467 * Seek to an absolute read position.
470 bcma_erom_seek(struct bcma_erom *erom, bus_size_t offset)
472 erom->offset = offset;
476 * Read a 32-bit entry value from the EROM table without advancing the
479 * @param erom EROM read state.
480 * @param entry Will contain the read result on success.
482 * @retval ENOENT The end of the EROM table was reached.
483 * @retval non-zero The read could not be completed.
486 bcma_erom_peek32(struct bcma_erom *erom, uint32_t *entry)
488 if (erom->offset >= (BCMA_EROM_TABLE_SIZE - sizeof(uint32_t))) {
489 EROM_LOG(erom, "BCMA EROM table missing terminating EOF\n");
493 *entry = bhnd_erom_io_read(erom->eio, erom->offset, 4);
498 * Read a 32-bit entry value from the EROM table.
500 * @param erom EROM read state.
501 * @param entry Will contain the read result on success.
503 * @retval ENOENT The end of the EROM table was reached.
504 * @retval non-zero The read could not be completed.
507 bcma_erom_read32(struct bcma_erom *erom, uint32_t *entry)
511 if ((error = bcma_erom_peek32(erom, entry)) == 0)
518 * Read and discard 32-bit entry value from the EROM table.
520 * @param erom EROM read state.
522 * @retval ENOENT The end of the EROM table was reached.
523 * @retval non-zero The read could not be completed.
526 bcma_erom_skip32(struct bcma_erom *erom)
530 return bcma_erom_read32(erom, &entry);
534 * Read and discard a core descriptor from the EROM table.
536 * @param erom EROM read state.
538 * @retval ENOENT The end of the EROM table was reached.
539 * @retval non-zero The read could not be completed.
542 bcma_erom_skip_core(struct bcma_erom *erom)
544 struct bcma_erom_core core;
545 return (bcma_erom_parse_core(erom, &core));
549 * Read and discard a master port descriptor from the EROM table.
551 * @param erom EROM read state.
553 * @retval ENOENT The end of the EROM table was reached.
554 * @retval non-zero The read could not be completed.
557 bcma_erom_skip_mport(struct bcma_erom *erom)
559 struct bcma_erom_mport mp;
560 return (bcma_erom_parse_mport(erom, &mp));
564 * Read and discard a port region descriptor from the EROM table.
566 * @param erom EROM read state.
568 * @retval ENOENT The end of the EROM table was reached.
569 * @retval non-zero The read could not be completed.
572 bcma_erom_skip_sport_region(struct bcma_erom *erom)
574 struct bcma_erom_sport_region r;
575 return (bcma_erom_parse_sport_region(erom, &r));
579 * Seek to the next entry matching the given EROM entry type.
581 * @param erom EROM read state.
582 * @param etype One of BCMA_EROM_ENTRY_TYPE_CORE,
583 * BCMA_EROM_ENTRY_TYPE_MPORT, or BCMA_EROM_ENTRY_TYPE_REGION.
585 * @retval ENOENT The end of the EROM table was reached.
586 * @retval non-zero Reading or parsing the descriptor failed.
589 bcma_erom_seek_next(struct bcma_erom *erom, uint8_t etype)
594 /* Iterate until we hit an entry matching the requested type. */
595 while (!(error = bcma_erom_peek32(erom, &entry))) {
597 if (entry == BCMA_EROM_TABLE_EOF)
601 if (!BCMA_EROM_GET_ATTR(entry, ENTRY_ISVALID))
604 /* Entry type matches? */
605 if (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE) == etype)
608 /* Skip non-matching entry types. */
609 switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
610 case BCMA_EROM_ENTRY_TYPE_CORE:
611 if ((error = bcma_erom_skip_core(erom)))
616 case BCMA_EROM_ENTRY_TYPE_MPORT:
617 if ((error = bcma_erom_skip_mport(erom)))
622 case BCMA_EROM_ENTRY_TYPE_REGION:
623 if ((error = bcma_erom_skip_sport_region(erom)))
628 /* Unknown entry type! */
637 * Return the read position to the start of the EROM table.
639 * @param erom EROM read state.
642 bcma_erom_reset(struct bcma_erom *erom)
648 * Seek to the first core entry matching @p desc.
650 * @param erom EROM read state.
651 * @param desc The core match descriptor.
652 * @param[out] core On success, the matching core info. If the core info
653 * is not desired, a NULL pointer may be provided.
655 * @retval ENOENT The end of the EROM table was reached before @p index was
657 * @retval non-zero Reading or parsing failed.
660 bcma_erom_seek_matching_core(struct bcma_erom *sc,
661 const struct bhnd_core_match *desc, struct bhnd_core_info *core)
663 struct bhnd_core_match imatch;
664 bus_size_t core_offset, next_offset;
667 /* Seek to table start. */
670 /* We can't determine a core's unit number during the initial scan. */
672 imatch.m.match.core_unit = 0;
674 /* Locate the first matching core */
675 for (u_int i = 0; i < UINT_MAX; i++) {
676 struct bcma_erom_core ec;
677 struct bhnd_core_info ci;
679 /* Seek to the next core */
680 error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
684 /* Save the core offset */
685 core_offset = bcma_erom_tell(sc);
688 if ((error = bcma_erom_parse_core(sc, &ec)))
691 bcma_erom_to_core_info(&ec, i, 0, &ci);
693 /* Check for initial match */
694 if (!bhnd_core_matches(&ci, &imatch))
697 /* Re-scan preceding cores to determine the unit number. */
698 next_offset = bcma_erom_tell(sc);
700 for (u_int j = 0; j < i; j++) {
702 error = bcma_erom_seek_next(sc,
703 BCMA_EROM_ENTRY_TYPE_CORE);
707 if ((error = bcma_erom_parse_core(sc, &ec)))
710 /* Bump the unit number? */
711 if (ec.vendor == ci.vendor && ec.device == ci.device)
715 /* Check for full match against now-valid unit number */
716 if (!bhnd_core_matches(&ci, desc)) {
717 /* Reposition to allow reading the next core */
718 bcma_erom_seek(sc, next_offset);
722 /* Found; seek to the core's initial offset and provide
723 * the core info to the caller */
724 bcma_erom_seek(sc, core_offset);
731 /* Not found, or a parse error occured */
736 * Read the next core descriptor from the EROM table.
738 * @param erom EROM read state.
739 * @param[out] core On success, will be populated with the parsed core
742 * @retval ENOENT The end of the EROM table was reached.
743 * @retval non-zero Reading or parsing the core descriptor failed.
746 bcma_erom_parse_core(struct bcma_erom *erom, struct bcma_erom_core *core)
751 /* Parse CoreDescA */
752 if ((error = bcma_erom_read32(erom, &entry)))
756 if (entry == BCMA_EROM_TABLE_EOF)
759 if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
760 EROM_LOG(erom, "Unexpected EROM entry 0x%x (type=%s)\n",
761 entry, bcma_erom_entry_type_name(entry));
766 core->vendor = BCMA_EROM_GET_ATTR(entry, COREA_DESIGNER);
767 core->device = BCMA_EROM_GET_ATTR(entry, COREA_ID);
769 /* Parse CoreDescB */
770 if ((error = bcma_erom_read32(erom, &entry)))
773 if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
777 core->rev = BCMA_EROM_GET_ATTR(entry, COREB_REV);
778 core->num_mport = BCMA_EROM_GET_ATTR(entry, COREB_NUM_MP);
779 core->num_dport = BCMA_EROM_GET_ATTR(entry, COREB_NUM_DP);
780 core->num_mwrap = BCMA_EROM_GET_ATTR(entry, COREB_NUM_WMP);
781 core->num_swrap = BCMA_EROM_GET_ATTR(entry, COREB_NUM_WSP);
787 * Read the next master port descriptor from the EROM table.
789 * @param erom EROM read state.
790 * @param[out] mport On success, will be populated with the parsed
793 * @retval non-zero Reading or parsing the descriptor failed.
796 bcma_erom_parse_mport(struct bcma_erom *erom, struct bcma_erom_mport *mport)
801 /* Parse the master port descriptor */
802 if ((error = bcma_erom_read32(erom, &entry)))
805 if (!BCMA_EROM_ENTRY_IS(entry, MPORT))
808 mport->port_vid = BCMA_EROM_GET_ATTR(entry, MPORT_ID);
809 mport->port_num = BCMA_EROM_GET_ATTR(entry, MPORT_NUM);
815 * Read the next slave port region descriptor from the EROM table.
817 * @param erom EROM read state.
818 * @param[out] mport On success, will be populated with the parsed
821 * @retval ENOENT The end of the region descriptor table was reached.
822 * @retval non-zero Reading or parsing the descriptor failed.
825 bcma_erom_parse_sport_region(struct bcma_erom *erom,
826 struct bcma_erom_sport_region *region)
832 /* Peek at the region descriptor */
833 if (bcma_erom_peek32(erom, &entry))
836 /* A non-region entry signals the end of the region table */
837 if (!BCMA_EROM_ENTRY_IS(entry, REGION)) {
840 bcma_erom_skip32(erom);
843 region->base_addr = BCMA_EROM_GET_ATTR(entry, REGION_BASE);
844 region->region_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
845 region->region_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
846 size_type = BCMA_EROM_GET_ATTR(entry, REGION_SIZE);
848 /* If region address is 64-bit, fetch the high bits. */
849 if (BCMA_EROM_GET_ATTR(entry, REGION_64BIT)) {
850 if ((error = bcma_erom_read32(erom, &entry)))
853 region->base_addr |= ((bhnd_addr_t) entry << 32);
856 /* Parse the region size; it's either encoded as the binary logarithm
857 * of the number of 4K pages (i.e. log2 n), or its encoded as a
858 * 32-bit/64-bit literal value directly following the current entry. */
859 if (size_type == BCMA_EROM_REGION_SIZE_OTHER) {
860 if ((error = bcma_erom_read32(erom, &entry)))
863 region->size = BCMA_EROM_GET_ATTR(entry, RSIZE_VAL);
865 if (BCMA_EROM_GET_ATTR(entry, RSIZE_64BIT)) {
866 if ((error = bcma_erom_read32(erom, &entry)))
868 region->size |= ((bhnd_size_t) entry << 32);
871 region->size = BCMA_EROM_REGION_SIZE_BASE << size_type;
874 /* Verify that addr+size does not overflow. */
875 if (region->size != 0 &&
876 BHND_ADDR_MAX - (region->size - 1) < region->base_addr)
878 EROM_LOG(erom, "%s%u: invalid address map %llx:%llx\n",
879 bcma_erom_entry_type_name(region->region_type),
881 (unsigned long long) region->base_addr,
882 (unsigned long long) region->size);
891 * Convert a bcma_erom_core record to its bhnd_core_info representation.
893 * @param core EROM core record to convert.
894 * @param core_idx The core index of @p core.
895 * @param core_unit The core unit of @p core.
896 * @param[out] info The populated bhnd_core_info representation.
899 bcma_erom_to_core_info(const struct bcma_erom_core *core, u_int core_idx,
900 int core_unit, struct bhnd_core_info *info)
902 info->vendor = core->vendor;
903 info->device = core->device;
904 info->hwrev = core->rev;
905 info->core_idx = core_idx;
906 info->unit = core_unit;
910 * Map an EROM region type to its corresponding port type.
912 * @param region_type Region type value.
913 * @param[out] port_type On success, the corresponding port type.
916 bcma_erom_region_to_port_type(struct bcma_erom *erom, uint8_t region_type,
917 bhnd_port_type *port_type)
919 switch (region_type) {
920 case BCMA_EROM_REGION_TYPE_DEVICE:
921 *port_type = BHND_PORT_DEVICE;
923 case BCMA_EROM_REGION_TYPE_BRIDGE:
924 *port_type = BHND_PORT_BRIDGE;
926 case BCMA_EROM_REGION_TYPE_MWRAP:
927 case BCMA_EROM_REGION_TYPE_SWRAP:
928 *port_type = BHND_PORT_AGENT;
931 EROM_LOG(erom, "unsupported region type %hhx\n",
938 * Register all MMIO region descriptors for the given slave port.
940 * @param erom EROM read state.
941 * @param corecfg Core info to be populated with the scanned port regions.
942 * @param port_num Port index for which regions will be parsed.
943 * @param region_type The region type to be parsed.
944 * @param[out] offset The offset at which to perform parsing. On success, this
945 * will be updated to point to the next EROM table entry.
948 bcma_erom_corecfg_fill_port_regions(struct bcma_erom *erom,
949 struct bcma_corecfg *corecfg, bcma_pid_t port_num,
952 struct bcma_sport *sport;
953 struct bcma_sport_list *sports;
954 bus_size_t entry_offset;
956 bhnd_port_type port_type;
960 /* Determine the port type for this region type. */
961 error = bcma_erom_region_to_port_type(erom, region_type, &port_type);
965 /* Fetch the list to be populated */
966 sports = bcma_corecfg_get_port_list(corecfg, port_type);
968 /* Allocate a new port descriptor */
969 sport = bcma_alloc_sport(port_num, port_type);
973 /* Read all address regions defined for this port */
974 for (bcma_rmid_t region_num = 0;; region_num++) {
975 struct bcma_map *map;
976 struct bcma_erom_sport_region spr;
978 /* No valid port definition should come anywhere near
980 if (region_num == BCMA_RMID_MAX) {
981 EROM_LOG(erom, "core%u %s%u: region count reached "
982 "upper limit of %u\n",
983 corecfg->core_info.core_idx,
984 bhnd_port_type_name(port_type),
985 port_num, BCMA_RMID_MAX);
991 /* Parse the next region entry. */
992 entry_offset = bcma_erom_tell(erom);
993 error = bcma_erom_parse_sport_region(erom, &spr);
994 if (error && error != ENOENT) {
995 EROM_LOG(erom, "core%u %s%u.%u: invalid slave port "
997 corecfg->core_info.core_idx,
998 bhnd_port_type_name(port_type),
999 port_num, region_num);
1003 /* ENOENT signals no further region entries */
1004 if (error == ENOENT) {
1005 /* No further entries */
1010 /* A region or type mismatch also signals no further region
1012 if (spr.region_port != port_num ||
1013 spr.region_type != region_type)
1015 /* We don't want to consume this entry */
1016 bcma_erom_seek(erom, entry_offset);
1023 * Create the map entry.
1025 map = malloc(sizeof(struct bcma_map), M_BHND, M_NOWAIT);
1031 map->m_region_num = region_num;
1032 map->m_base = spr.base_addr;
1033 map->m_size = spr.size;
1036 /* Add the region map to the port */
1037 STAILQ_INSERT_TAIL(&sport->sp_maps, map, m_link);
1038 sport->sp_num_maps++;
1042 /* Append the new port descriptor on success, or deallocate the
1043 * partially parsed descriptor on failure. */
1045 STAILQ_INSERT_TAIL(sports, sport, sp_link);
1046 } else if (sport != NULL) {
1047 bcma_free_sport(sport);
1054 * Parse the next core entry from the EROM table and produce a bcma_corecfg
1055 * to be owned by the caller.
1057 * @param erom A bcma EROM instance.
1058 * @param[out] result On success, the core's device info. The caller inherits
1059 * ownership of this allocation.
1061 * @return If successful, returns 0. If the end of the EROM table is hit,
1062 * ENOENT will be returned. On error, returns a non-zero error value.
1065 bcma_erom_next_corecfg(struct bcma_erom *erom, struct bcma_corecfg **result)
1067 struct bcma_corecfg *cfg;
1068 struct bcma_erom_core core;
1069 uint8_t first_region_type;
1070 bus_size_t initial_offset;
1076 initial_offset = bcma_erom_tell(erom);
1078 /* Parse the next core entry */
1079 if ((error = bcma_erom_parse_core(erom, &core)))
1082 /* Determine the core's index and unit numbers */
1083 bcma_erom_reset(erom);
1086 for (; bcma_erom_tell(erom) != initial_offset; core_index++) {
1087 struct bcma_erom_core prev_core;
1089 /* Parse next core */
1090 error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE);
1094 if ((error = bcma_erom_parse_core(erom, &prev_core)))
1097 /* Is earlier unit? */
1098 if (core.vendor == prev_core.vendor &&
1099 core.device == prev_core.device)
1104 /* Seek to next core */
1105 error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE);
1110 /* We already parsed the core descriptor */
1111 if ((error = bcma_erom_skip_core(erom)))
1114 /* Allocate our corecfg */
1115 cfg = bcma_alloc_corecfg(core_index, core_unit, core.vendor,
1116 core.device, core.rev);
1120 /* These are 5-bit values in the EROM table, and should never be able
1121 * to overflow BCMA_PID_MAX. */
1122 KASSERT(core.num_mport <= BCMA_PID_MAX, ("unsupported mport count"));
1123 KASSERT(core.num_dport <= BCMA_PID_MAX, ("unsupported dport count"));
1124 KASSERT(core.num_mwrap + core.num_swrap <= BCMA_PID_MAX,
1125 ("unsupported wport count"));
1129 "core%u: %s %s (cid=%hx, rev=%hu, unit=%d)\n",
1131 bhnd_vendor_name(core.vendor),
1132 bhnd_find_core_name(core.vendor, core.device),
1133 core.device, core.rev, core_unit);
1136 cfg->num_master_ports = core.num_mport;
1137 cfg->num_dev_ports = 0; /* determined below */
1138 cfg->num_bridge_ports = 0; /* determined blow */
1139 cfg->num_wrapper_ports = core.num_mwrap + core.num_swrap;
1141 /* Parse Master Port Descriptors */
1142 for (uint8_t i = 0; i < core.num_mport; i++) {
1143 struct bcma_mport *mport;
1144 struct bcma_erom_mport mpd;
1146 /* Parse the master port descriptor */
1147 error = bcma_erom_parse_mport(erom, &mpd);
1151 /* Initialize a new bus mport structure */
1152 mport = malloc(sizeof(struct bcma_mport), M_BHND, M_NOWAIT);
1153 if (mport == NULL) {
1158 mport->mp_vid = mpd.port_vid;
1159 mport->mp_num = mpd.port_num;
1162 STAILQ_INSERT_TAIL(&cfg->master_ports, mport, mp_link);
1167 * Determine whether this is a bridge device; if so, we can
1168 * expect the first sequence of address region descriptors to
1169 * be of EROM_REGION_TYPE_BRIDGE instead of
1170 * BCMA_EROM_REGION_TYPE_DEVICE.
1172 * It's unclear whether this is the correct mechanism by which we
1173 * should detect/handle bridge devices, but this approach matches
1174 * that of (some of) Broadcom's published drivers.
1176 if (core.num_dport > 0) {
1179 if ((error = bcma_erom_peek32(erom, &entry)))
1182 if (BCMA_EROM_ENTRY_IS(entry, REGION) &&
1183 BCMA_EROM_GET_ATTR(entry, REGION_TYPE) == BCMA_EROM_REGION_TYPE_BRIDGE)
1185 first_region_type = BCMA_EROM_REGION_TYPE_BRIDGE;
1186 cfg->num_dev_ports = 0;
1187 cfg->num_bridge_ports = core.num_dport;
1189 first_region_type = BCMA_EROM_REGION_TYPE_DEVICE;
1190 cfg->num_dev_ports = core.num_dport;
1191 cfg->num_bridge_ports = 0;
1195 /* Device/bridge port descriptors */
1196 for (uint8_t sp_num = 0; sp_num < core.num_dport; sp_num++) {
1197 error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1204 /* Wrapper (aka device management) descriptors (for master ports). */
1205 for (uint8_t sp_num = 0; sp_num < core.num_mwrap; sp_num++) {
1206 error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1207 BCMA_EROM_REGION_TYPE_MWRAP);
1214 /* Wrapper (aka device management) descriptors (for slave ports). */
1215 for (uint8_t i = 0; i < core.num_swrap; i++) {
1216 /* Slave wrapper ports are not numbered distinctly from master
1220 * Broadcom DDR1/DDR2 Memory Controller
1221 * (cid=82e, rev=1, unit=0, d/mw/sw = 2/0/1 ) ->
1222 * bhnd0: erom[0xdc]: core6 agent0.0: mismatch got: 0x1 (0x2)
1224 * ARM BP135 AMBA3 AXI to APB Bridge
1225 * (cid=135, rev=0, unit=0, d/mw/sw = 1/0/1 ) ->
1226 * bhnd0: erom[0x124]: core9 agent1.0: mismatch got: 0x0 (0x2)
1230 * (core.num_mwrap > 0) ?
1232 * ((core.vendor == BHND_MFGID_BCM) ? 1 : 0)
1235 sp_num = (core.num_mwrap > 0) ?
1237 ((core.vendor == BHND_MFGID_BCM) ? 1 : 0) + i;
1238 error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1239 BCMA_EROM_REGION_TYPE_SWRAP);
1246 * Seek to the next core entry (if any), skipping any dangling/invalid
1249 * On the BCM4706, the EROM entry for the memory controller core
1250 * (0x4bf/0x52E) contains a dangling/unused slave wrapper port region
1253 if ((error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE))) {
1254 if (error != ENOENT)
1263 bcma_free_corecfg(cfg);
1269 bcma_erom_dump(bhnd_erom_t *erom)
1271 struct bcma_erom *sc;
1275 sc = (struct bcma_erom *)erom;
1277 bcma_erom_reset(sc);
1279 while (!(error = bcma_erom_read32(sc, &entry))) {
1281 if (entry == BCMA_EROM_TABLE_EOF) {
1282 EROM_LOG(sc, "EOF\n");
1287 if (!BCMA_EROM_GET_ATTR(entry, ENTRY_ISVALID)) {
1288 EROM_LOG(sc, "invalid EROM entry %#x\n", entry);
1292 switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
1293 case BCMA_EROM_ENTRY_TYPE_CORE: {
1295 EROM_LOG(sc, "coreA (0x%x)\n", entry);
1296 EROM_LOG(sc, "\tdesigner:\t0x%x\n",
1297 BCMA_EROM_GET_ATTR(entry, COREA_DESIGNER));
1298 EROM_LOG(sc, "\tid:\t\t0x%x\n",
1299 BCMA_EROM_GET_ATTR(entry, COREA_ID));
1300 EROM_LOG(sc, "\tclass:\t\t0x%x\n",
1301 BCMA_EROM_GET_ATTR(entry, COREA_CLASS));
1304 if ((error = bcma_erom_read32(sc, &entry))) {
1305 EROM_LOG(sc, "error reading CoreDescB: %d\n",
1310 if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
1311 EROM_LOG(sc, "invalid core descriptor; found "
1312 "unexpected entry %#x (type=%s)\n",
1313 entry, bcma_erom_entry_type_name(entry));
1317 EROM_LOG(sc, "coreB (0x%x)\n", entry);
1318 EROM_LOG(sc, "\trev:\t0x%x\n",
1319 BCMA_EROM_GET_ATTR(entry, COREB_REV));
1320 EROM_LOG(sc, "\tnummp:\t0x%x\n",
1321 BCMA_EROM_GET_ATTR(entry, COREB_NUM_MP));
1322 EROM_LOG(sc, "\tnumdp:\t0x%x\n",
1323 BCMA_EROM_GET_ATTR(entry, COREB_NUM_DP));
1324 EROM_LOG(sc, "\tnumwmp:\t0x%x\n",
1325 BCMA_EROM_GET_ATTR(entry, COREB_NUM_WMP));
1326 EROM_LOG(sc, "\tnumwsp:\t0x%x\n",
1327 BCMA_EROM_GET_ATTR(entry, COREB_NUM_WMP));
1331 case BCMA_EROM_ENTRY_TYPE_MPORT:
1332 EROM_LOG(sc, "\tmport 0x%x\n", entry);
1333 EROM_LOG(sc, "\t\tport:\t0x%x\n",
1334 BCMA_EROM_GET_ATTR(entry, MPORT_NUM));
1335 EROM_LOG(sc, "\t\tid:\t\t0x%x\n",
1336 BCMA_EROM_GET_ATTR(entry, MPORT_ID));
1339 case BCMA_EROM_ENTRY_TYPE_REGION: {
1343 addr64 = (BCMA_EROM_GET_ATTR(entry, REGION_64BIT) != 0);
1344 size_type = BCMA_EROM_GET_ATTR(entry, REGION_SIZE);
1346 EROM_LOG(sc, "\tregion 0x%x:\n", entry);
1347 EROM_LOG(sc, "\t\t%s:\t0x%x\n",
1348 addr64 ? "baselo" : "base",
1349 BCMA_EROM_GET_ATTR(entry, REGION_BASE));
1350 EROM_LOG(sc, "\t\tport:\t0x%x\n",
1351 BCMA_EROM_GET_ATTR(entry, REGION_PORT));
1352 EROM_LOG(sc, "\t\ttype:\t0x%x\n",
1353 BCMA_EROM_GET_ATTR(entry, REGION_TYPE));
1354 EROM_LOG(sc, "\t\tsztype:\t0x%hhx\n", size_type);
1356 /* Read the base address high bits */
1358 if ((error = bcma_erom_read32(sc, &entry))) {
1359 EROM_LOG(sc, "error reading region "
1360 "base address high bits %d\n",
1365 EROM_LOG(sc, "\t\tbasehi:\t0x%x\n", entry);
1368 /* Read extended size descriptor */
1369 if (size_type == BCMA_EROM_REGION_SIZE_OTHER) {
1372 if ((error = bcma_erom_read32(sc, &entry))) {
1373 EROM_LOG(sc, "error reading region "
1374 "size descriptor %d\n",
1379 if (BCMA_EROM_GET_ATTR(entry, RSIZE_64BIT))
1384 EROM_LOG(sc, "\t\t%s:\t0x%x\n",
1385 size64 ? "sizelo" : "size",
1386 BCMA_EROM_GET_ATTR(entry, RSIZE_VAL));
1389 error = bcma_erom_read32(sc, &entry);
1391 EROM_LOG(sc, "error reading "
1392 "region size high bits: "
1397 EROM_LOG(sc, "\t\tsizehi:\t0x%x\n",
1405 EROM_LOG(sc, "unknown EROM entry 0x%x (type=%s)\n",
1406 entry, bcma_erom_entry_type_name(entry));
1411 if (error == ENOENT)
1412 EROM_LOG(sc, "BCMA EROM table missing terminating EOF\n");
1414 EROM_LOG(sc, "EROM read failed: %d\n", error);
1419 static kobj_method_t bcma_erom_methods[] = {
1420 KOBJMETHOD(bhnd_erom_probe, bcma_erom_probe),
1421 KOBJMETHOD(bhnd_erom_init, bcma_erom_init),
1422 KOBJMETHOD(bhnd_erom_fini, bcma_erom_fini),
1423 KOBJMETHOD(bhnd_erom_get_core_table, bcma_erom_get_core_table),
1424 KOBJMETHOD(bhnd_erom_free_core_table, bcma_erom_free_core_table),
1425 KOBJMETHOD(bhnd_erom_lookup_core, bcma_erom_lookup_core),
1426 KOBJMETHOD(bhnd_erom_lookup_core_addr, bcma_erom_lookup_core_addr),
1427 KOBJMETHOD(bhnd_erom_dump, bcma_erom_dump),
1432 BHND_EROM_DEFINE_CLASS(bcma_erom, bcma_erom_parser, bcma_erom_methods, sizeof(struct bcma_erom));