2 * Copyright (c) 2015-2016 Landon Fuller <landon@landonf.org>
3 * Copyright (c) 2017 The FreeBSD Foundation
6 * Portions of this software were developed by Landon Fuller
7 * under sponsorship from the FreeBSD Foundation.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer,
14 * without modification.
15 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
16 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
17 * redistribution must be conditioned upon including a substantially
18 * similar Disclaimer requirement for further binary redistribution.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
24 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
25 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
26 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
29 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGES.
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
37 #include <sys/param.h>
39 #include <sys/kernel.h>
40 #include <sys/malloc.h>
41 #include <sys/module.h>
42 #include <sys/refcount.h>
43 #include <sys/systm.h>
45 #include <machine/bus.h>
47 #include <dev/bhnd/cores/chipc/chipc.h>
48 #include <dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl.h>
53 static bhnd_erom_class_t *
54 siba_get_erom_class(driver_t *driver)
56 return (&siba_erom_parser);
60 siba_probe(device_t dev)
62 device_set_desc(dev, "SIBA BHND bus");
63 return (BUS_PROBE_DEFAULT);
/* NOTE(review): this extraction drops lines (embedded numbering skips
 * 72-73, 75, 77-78, 80-83, 88+): the `error` declaration, lock
 * initialization, and all return paths are not visible. Code kept
 * byte-identical; confirm against the canonical source before editing. */
67 * Default siba(4) bus driver implementation of DEVICE_ATTACH().
69 * This implementation initializes internal siba(4) state and performs
70 * bus enumeration, and must be called by subclassing drivers in
71 * DEVICE_ATTACH() before any other bus methods.
74 siba_attach(device_t dev)
76 	struct siba_softc *sc;
79 	sc = device_get_softc(dev);
84 	/* Enumerate children */
85 	if ((error = siba_add_children(dev))) {
/* On enumeration failure: delete partially-added children and tear down
 * the bus lock before (presumably) returning `error`. */
86 		device_delete_children(dev);
87 		SIBA_LOCK_DESTROY(sc);
/* DEVICE_DETACH(): delegate to the generic bhnd(4) detach, then destroy
 * the bus lock.
 * NOTE(review): extraction drops lines (signature/return paths missing);
 * kept byte-identical. */
95 siba_detach(device_t dev)
97 	struct siba_softc *sc;
100 	sc = device_get_softc(dev);
102 	if ((error = bhnd_generic_detach(dev)))
105 	SIBA_LOCK_DESTROY(sc);
111 siba_resume(device_t dev)
113 return (bhnd_generic_resume(dev));
117 siba_suspend(device_t dev)
119 return (bhnd_generic_suspend(dev));
/* BUS_READ_IVAR(): report bhnd(4) instance variables for @p child from
 * its cached core-identification info.
 * NOTE(review): extraction drops lines — `break`/`return` statements
 * after each case, the enclosing `switch (index)`, the CORE_UNIT body,
 * and several PMU sub-cases are missing. Kept byte-identical. */
123 siba_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
125 	struct siba_softc *sc;
126 	const struct siba_devinfo *dinfo;
127 	const struct bhnd_core_info *cfg;
129 	sc = device_get_softc(dev);
130 	dinfo = device_get_ivars(child);
131 	cfg = &dinfo->core_id.core_info;
134 	case BHND_IVAR_VENDOR:
135 		*result = cfg->vendor;
137 	case BHND_IVAR_DEVICE:
138 		*result = cfg->device;
140 	case BHND_IVAR_HWREV:
141 		*result = cfg->hwrev;
143 	case BHND_IVAR_DEVICE_CLASS:
144 		*result = bhnd_core_class(cfg);
146 	case BHND_IVAR_VENDOR_NAME:
147 		*result = (uintptr_t) bhnd_vendor_name(cfg->vendor);
149 	case BHND_IVAR_DEVICE_NAME:
150 		*result = (uintptr_t) bhnd_core_name(cfg);
152 	case BHND_IVAR_CORE_INDEX:
153 		*result = cfg->core_idx;
155 	case BHND_IVAR_CORE_UNIT:
158 	case BHND_IVAR_PMU_INFO:
/* PMU info is only meaningful once a PMU has been allocated; the state
 * machine below panics on states that should be unreachable here. */
160 		switch (dinfo->pmu_state) {
162 			*result = (uintptr_t)NULL;
167 			*result = (uintptr_t)dinfo->pmu.bhnd_info;
171 		case SIBA_PMU_PWRCTL:
173 			panic("bhnd_get_pmu_info() called with siba PMU state "
174 			"%d", dinfo->pmu_state);
178 			panic("invalid PMU state: %d", dinfo->pmu_state);
/* BUS_WRITE_IVAR(): most bhnd(4) ivars are read-only; only
 * BHND_IVAR_PMU_INFO may be written (by the bhnd PMU implementation).
 * NOTE(review): extraction drops lines — the enclosing `switch (index)`,
 * the EINVAL/ENOENT returns for read-only ivars, and break/return
 * statements are missing. Kept byte-identical. */
189 	struct siba_softc *sc;
190 	struct siba_devinfo *dinfo;
192 	sc = device_get_softc(dev);
193 	dinfo = device_get_ivars(child);
196 	case BHND_IVAR_VENDOR:
197 	case BHND_IVAR_DEVICE:
198 	case BHND_IVAR_HWREV:
199 	case BHND_IVAR_DEVICE_CLASS:
200 	case BHND_IVAR_VENDOR_NAME:
201 	case BHND_IVAR_DEVICE_NAME:
202 	case BHND_IVAR_CORE_INDEX:
203 	case BHND_IVAR_CORE_UNIT:
205 	case BHND_IVAR_PMU_INFO:
207 		switch (dinfo->pmu_state) {
/* Transition NONE/BHND -> BHND, recording the bhnd-managed PMU info. */
210 			dinfo->pmu.bhnd_info = (void *)value;
211 			dinfo->pmu_state = SIBA_PMU_BHND;
215 		case SIBA_PMU_PWRCTL:
217 			panic("bhnd_set_pmu_info() called with siba PMU state "
218 			"%d", dinfo->pmu_state);
222 			panic("invalid PMU state: %d", dinfo->pmu_state);
230 static struct resource_list *
231 siba_get_resource_list(device_t dev, device_t child)
233 struct siba_devinfo *dinfo = device_get_ivars(child);
234 return (&dinfo->resources);
/* NOTE(review): extraction drops many lines — the function signature's
 * return type, `chipc`/`pwrctl`/`error` declarations, the chipc NULL
 * check, the PMU-capability branch that calls bhnd_generic_alloc_pmu(),
 * lock acquisition/release, and returns are missing. Kept byte-identical. */
237 /* BHND_BUS_ALLOC_PMU() */
239 siba_alloc_pmu(device_t dev, device_t child)
241 	struct siba_softc *sc;
242 	struct siba_devinfo *dinfo;
245 	struct chipc_caps ccaps;
246 	siba_pmu_state pmu_state;
/* Only directly-attached children carry siba PMU state. */
249 	if (device_get_parent(child) != dev)
252 	sc = device_get_softc(dev);
253 	dinfo = device_get_ivars(child);
256 	/* Fetch ChipCommon capability flags */
257 	chipc = bhnd_retain_provider(child, BHND_SERVICE_CHIPC);
259 	ccaps = *BHND_CHIPC_GET_CAPS(chipc);
260 	bhnd_release_provider(child, chipc, BHND_SERVICE_CHIPC);
/* No ChipCommon provider: assume no capabilities. */
262 	memset(&ccaps, 0, sizeof(ccaps));
265 	/* Defer to bhnd(4)'s PMU implementation if ChipCommon exists and
266 	* advertises PMU support */
268 	if ((error = bhnd_generic_alloc_pmu(dev, child)))
271 	KASSERT(dinfo->pmu_state == SIBA_PMU_BHND,
272 	("unexpected PMU state: %d", dinfo->pmu_state));
278 	* This is either a legacy PWRCTL chipset, or the device does not
279 	* support dynamic clock control.
281 	* We need to map all bhnd(4) bus PMU to PWRCTL or no-op operations.
283 	if (ccaps.pwr_ctrl) {
284 		pmu_state = SIBA_PMU_PWRCTL;
285 		pwrctl = bhnd_retain_provider(child, BHND_SERVICE_PWRCTL);
286 		if (pwrctl == NULL) {
287 			device_printf(dev, "PWRCTL not found\n");
/* No PWRCTL capability: clocks are fixed; PMU ops become no-ops. */
291 		pmu_state = SIBA_PMU_FIXED;
297 	/* Per-core PMU state already allocated? */
298 	if (dinfo->pmu_state != SIBA_PMU_NONE) {
299 		panic("duplicate PMU allocation for %s",
300 		device_get_nameunit(child));
303 	/* Update the child's PMU allocation state, and transfer ownership of
304 	* the PWRCTL provider reference (if any) */
305 	dinfo->pmu_state = pmu_state;
306 	dinfo->pmu.pwrctl = pwrctl;
/* NOTE(review): extraction drops lines — `pwrctl`/`error` declarations,
 * the SIBA_PMU_NONE/SIBA_PMU_BHND case labels, lock handling, error
 * checks, and returns are missing. Kept byte-identical. */
313 /* BHND_BUS_RELEASE_PMU() */
315 siba_release_pmu(device_t dev, device_t child)
317 	struct siba_softc *sc;
318 	struct siba_devinfo *dinfo;
322 	if (device_get_parent(child) != dev)
325 	sc = device_get_softc(dev);
326 	dinfo = device_get_ivars(child);
329 	switch(dinfo->pmu_state) {
331 		panic("pmu over-release for %s", device_get_nameunit(child));
337 		return (bhnd_generic_release_pmu(dev, child));
339 	case SIBA_PMU_PWRCTL:
340 		/* Requesting BHND_CLOCK_DYN releases any outstanding clock
342 		pwrctl = dinfo->pmu.pwrctl;
343 		error = bhnd_pwrctl_request_clock(pwrctl, child,
350 		/* Clean up the child's PMU state */
351 		dinfo->pmu_state = SIBA_PMU_NONE;
352 		dinfo->pmu.pwrctl = NULL;
355 		/* Release the provider reference */
356 		bhnd_release_provider(child, pwrctl, BHND_SERVICE_PWRCTL);
/* FIXED state: no provider reference should exist. */
360 		/* Clean up the child's PMU state */
361 		KASSERT(dinfo->pmu.pwrctl == NULL,
362 		("PWRCTL reference with FIXED state"));
364 		dinfo->pmu_state = SIBA_PMU_NONE;
365 		dinfo->pmu.pwrctl = NULL;
369 		panic("invalid PMU state: %d", dinfo->pmu_state);
/* NOTE(review): extraction drops lines — the `latency` out-parameter,
 * `error` declaration, case labels (NONE/BHND/FIXED), lock handling,
 * and the FIXED-state zero-latency return are missing. Kept
 * byte-identical. */
372 /* BHND_BUS_GET_CLOCK_LATENCY() */
374 siba_get_clock_latency(device_t dev, device_t child, bhnd_clock clock,
377 	struct siba_softc *sc;
378 	struct siba_devinfo *dinfo;
381 	if (device_get_parent(child) != dev)
384 	sc = device_get_softc(dev);
385 	dinfo = device_get_ivars(child);
388 	switch(dinfo->pmu_state) {
390 		panic("no active PMU request state");
397 		return (bhnd_generic_get_clock_latency(dev, child, clock,
400 	case SIBA_PMU_PWRCTL:
401 		error = bhnd_pwrctl_get_clock_latency(dinfo->pmu.pwrctl, clock,
410 		/* HT clock is always available, and incurs no transition
424 		panic("invalid PMU state: %d", dinfo->pmu_state);
/* NOTE(review): extraction drops lines — the `freq` out-parameter in the
 * signature is truncated, `error` declaration, case labels, lock
 * handling, and the FIXED-state case are missing. Kept byte-identical. */
427 /* BHND_BUS_GET_CLOCK_FREQ() */
429 siba_get_clock_freq(device_t dev, device_t child, bhnd_clock clock,
432 	struct siba_softc *sc;
433 	struct siba_devinfo *dinfo;
436 	if (device_get_parent(child) != dev)
439 	sc = device_get_softc(dev);
440 	dinfo = device_get_ivars(child);
443 	switch(dinfo->pmu_state) {
445 		panic("no active PMU request state");
452 		return (bhnd_generic_get_clock_freq(dev, child, clock, freq));
454 	case SIBA_PMU_PWRCTL:
455 		error = bhnd_pwrctl_get_clock_freq(dinfo->pmu.pwrctl, clock,
467 		panic("invalid PMU state: %d", dinfo->pmu_state);
/* NOTE(review): extraction drops lines — case labels, lock handling,
 * and the ENODEV return for the PWRCTL/FIXED cases are missing. Kept
 * byte-identical. */
470 /* BHND_BUS_REQUEST_EXT_RSRC() */
472 siba_request_ext_rsrc(device_t dev, device_t child, u_int rsrc)
474 	struct siba_softc *sc;
475 	struct siba_devinfo *dinfo;
477 	if (device_get_parent(child) != dev)
480 	sc = device_get_softc(dev);
481 	dinfo = device_get_ivars(child);
484 	switch(dinfo->pmu_state) {
486 		panic("no active PMU request state");
493 		return (bhnd_generic_request_ext_rsrc(dev, child, rsrc));
495 	case SIBA_PMU_PWRCTL:
497 		/* HW does not support per-core external resources */
502 		panic("invalid PMU state: %d", dinfo->pmu_state);
/* NOTE(review): mirror of siba_request_ext_rsrc(); same lines missing
 * from the extraction. Kept byte-identical. */
505 /* BHND_BUS_RELEASE_EXT_RSRC() */
507 siba_release_ext_rsrc(device_t dev, device_t child, u_int rsrc)
509 	struct siba_softc *sc;
510 	struct siba_devinfo *dinfo;
512 	if (device_get_parent(child) != dev)
515 	sc = device_get_softc(dev);
516 	dinfo = device_get_ivars(child);
519 	switch(dinfo->pmu_state) {
521 		panic("no active PMU request state");
528 		return (bhnd_generic_release_ext_rsrc(dev, child, rsrc));
530 	case SIBA_PMU_PWRCTL:
532 		/* HW does not support per-core external resources */
537 		panic("invalid PMU state: %d", dinfo->pmu_state);
/* NOTE(review): extraction drops lines — `error` declaration, case
 * labels, lock handling, the PWRCTL error return, and the FIXED-state
 * clock-availability checks are missing. Kept byte-identical. */
540 /* BHND_BUS_REQUEST_CLOCK() */
542 siba_request_clock(device_t dev, device_t child, bhnd_clock clock)
544 	struct siba_softc *sc;
545 	struct siba_devinfo *dinfo;
548 	if (device_get_parent(child) != dev)
551 	sc = device_get_softc(dev);
552 	dinfo = device_get_ivars(child);
555 	switch(dinfo->pmu_state) {
557 		panic("no active PMU request state");
564 		return (bhnd_generic_request_clock(dev, child, clock));
566 	case SIBA_PMU_PWRCTL:
567 		error = bhnd_pwrctl_request_clock(dinfo->pmu.pwrctl, child,
576 		/* HT clock is always available, and fulfills any of the
577 		* following clock requests */
590 		panic("invalid PMU state: %d", dinfo->pmu_state);
/* NOTE(review): extraction drops lines — case labels, the rest of the
 * clock mask expression at the original line 623 (the ALP/ILP/HT bits),
 * and the ENODEV/0 returns are missing. Kept byte-identical. */
593 /* BHND_BUS_ENABLE_CLOCKS() */
595 siba_enable_clocks(device_t dev, device_t child, uint32_t clocks)
597 	struct siba_softc *sc;
598 	struct siba_devinfo *dinfo;
600 	if (device_get_parent(child) != dev)
603 	sc = device_get_softc(dev);
604 	dinfo = device_get_ivars(child);
607 	switch(dinfo->pmu_state) {
609 		panic("no active PMU request state");
616 		return (bhnd_generic_enable_clocks(dev, child, clocks));
618 	case SIBA_PMU_PWRCTL:
622 		/* All (supported) clocks are already enabled by default */
623 		clocks &= ~(BHND_CLOCK_DYN |
/* Any bits left set after masking are clocks this hardware cannot
 * provide; report them to the caller. */
629 		device_printf(dev, "%s requested unknown clocks: %#x\n",
630 		device_get_nameunit(child), clocks);
637 		panic("invalid PMU state: %d", dinfo->pmu_state);
641 siba_read_iost(device_t dev, device_t child, uint16_t *iost)
646 error = bhnd_read_config(child, SIBA_CFG0_TMSTATEHIGH, &tmhigh, 4);
650 *iost = (SIBA_REG_GET(tmhigh, TMH_SISF));
655 siba_read_ioctl(device_t dev, device_t child, uint16_t *ioctl)
660 if ((error = bhnd_read_config(child, SIBA_CFG0_TMSTATELOW, &ts_low, 4)))
663 *ioctl = (SIBA_REG_GET(ts_low, TML_SICF));
/* BHND_BUS_WRITE_IOCTL(): update the masked SICF bits of the child's
 * TMSTATELOW register.
 * NOTE(review): extraction drops lines — the EINVAL/ENODEV returns, the
 * mask argument of siba_write_target_state(), and the final return are
 * missing. Kept byte-identical. */
668 siba_write_ioctl(device_t dev, device_t child, uint16_t value, uint16_t mask)
670 	struct siba_devinfo *dinfo;
671 	struct bhnd_resource *r;
672 	uint32_t ts_low, ts_mask;
674 	if (device_get_parent(child) != dev)
677 	/* Fetch CFG0 mapping */
678 	dinfo = device_get_ivars(child);
679 	if ((r = dinfo->cfg_res[0]) == NULL)
682 	/* Mask and set TMSTATELOW core flag bits */
683 	ts_mask = (mask << SIBA_TML_SICF_SHIFT) & SIBA_TML_SICF_MASK;
684 	ts_low = (value << SIBA_TML_SICF_SHIFT) & ts_mask;
686 	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
/* BHND_BUS_IS_HW_SUSPENDED(): the core is considered suspended if it is
 * held in RESET, target-reject is enabled, or its clock is not enabled.
 * NOTE(review): extraction drops lines — local declarations, the error
 * check's surrounding if/return, and the true/false returns are missing.
 * Kept byte-identical. */
692 siba_is_hw_suspended(device_t dev, device_t child)
698 	/* Fetch target state */
699 	error = bhnd_read_config(child, SIBA_CFG0_TMSTATELOW, &ts_low, 4);
701 	device_printf(child, "error reading HW reset state: %d\n",
706 	/* Is core held in RESET? */
707 	if (ts_low & SIBA_TML_RESET)
710 	/* Is target reject enabled? */
711 	if (ts_low & SIBA_TML_REJ_MASK)
714 	/* Is core clocked? */
715 	ioctl = SIBA_REG_GET(ts_low, TML_SICF);
716 	if (!(ioctl & BHND_IOCTL_CLK_EN))
/* BHND_BUS_RESET_HW(): place the core in RESET, clear target/initiator
 * errors, then release RESET and clock forcing so the core self-manages
 * clock gating.
 * NOTE(review): extraction drops lines — `error`/`clkflags`
 * declarations, several returns, mask arguments to
 * siba_write_target_state(), and DELAY() settle delays are missing.
 * Kept byte-identical. */
723 siba_reset_hw(device_t dev, device_t child, uint16_t ioctl,
724 	uint16_t reset_ioctl)
726 	struct siba_devinfo *dinfo;
727 	struct bhnd_resource *r;
728 	uint32_t ts_low, imstate;
732 	if (device_get_parent(child) != dev)
735 	dinfo = device_get_ivars(child);
737 	/* Can't suspend the core without access to the CFG0 registers */
738 	if ((r = dinfo->cfg_res[0]) == NULL)
741 	/* We require exclusive control over BHND_IOCTL_CLK_(EN|FORCE) */
742 	clkflags = BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE;
743 	if (ioctl & clkflags)
746 	/* Place core into known RESET state */
747 	if ((error = bhnd_suspend_hw(child, reset_ioctl)))
750 	/* Set RESET, clear REJ, set the caller's IOCTL flags, and
751 	* force clocks to ensure the signal propagates throughout the
753 	ts_low = SIBA_TML_RESET |
754 	(ioctl << SIBA_TML_SICF_SHIFT) |
755 	(BHND_IOCTL_CLK_EN << SIBA_TML_SICF_SHIFT) |
756 	(BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT);
758 	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
761 	/* Clear any target errors */
762 	if (bhnd_bus_read_4(r, SIBA_CFG0_TMSTATEHIGH) & SIBA_TMH_SERR) {
763 		siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATEHIGH,
767 	/* Clear any initiator errors */
768 	imstate = bhnd_bus_read_4(r, SIBA_CFG0_IMSTATE);
769 	if (imstate & (SIBA_IM_IBE|SIBA_IM_TO)) {
770 		siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE, 0x0,
771 		SIBA_IM_IBE|SIBA_IM_TO);
774 	/* Release from RESET while leaving clocks forced, ensuring the
775 	* signal propagates throughout the core */
776 	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0,
779 	/* The core should now be active; we can clear the BHND_IOCTL_CLK_FORCE
780 	* bit and allow the core to manage clock gating. */
781 	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0,
782 	(BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT));
/* BHND_BUS_SUSPEND_HW(): quiesce target (and, for initiator cores,
 * initiator) transactions, place the core in RESET with the caller's
 * IOCTL flags, disable clocks, and release any PWRCTL clock
 * reservations held by the core.
 * NOTE(review): extraction drops lines — `error` declaration, early
 * returns, siba_wait_target_state() error handling, mask expressions,
 * DELAY() calls, and the final return are missing. Kept byte-identical. */
788 siba_suspend_hw(device_t dev, device_t child, uint16_t ioctl)
790 	struct siba_softc *sc;
791 	struct siba_devinfo *dinfo;
792 	struct bhnd_resource *r;
793 	uint32_t idl, ts_low, ts_mask;
794 	uint16_t cflags, clkflags;
797 	if (device_get_parent(child) != dev)
800 	sc = device_get_softc(dev);
801 	dinfo = device_get_ivars(child);
803 	/* Can't suspend the core without access to the CFG0 registers */
804 	if ((r = dinfo->cfg_res[0]) == NULL)
807 	/* We require exclusive control over BHND_IOCTL_CLK_(EN|FORCE) */
808 	clkflags = BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE;
809 	if (ioctl & clkflags)
812 	/* Already in RESET? */
813 	ts_low = bhnd_bus_read_4(r, SIBA_CFG0_TMSTATELOW);
814 	if (ts_low & SIBA_TML_RESET)
817 	/* If clocks are already disabled, we can place the core directly
818 	* into RESET|REJ while setting the caller's IOCTL flags. */
819 	cflags = SIBA_REG_GET(ts_low, TML_SICF);
820 	if (!(cflags & BHND_IOCTL_CLK_EN)) {
821 		ts_low = SIBA_TML_RESET | SIBA_TML_REJ |
822 		(ioctl << SIBA_TML_SICF_SHIFT);
823 		ts_mask = SIBA_TML_RESET | SIBA_TML_REJ | SIBA_TML_SICF_MASK;
825 		siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
830 	/* Reject further transactions reaching this core */
831 	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
832 	SIBA_TML_REJ, SIBA_TML_REJ);
834 	/* Wait for transaction busy flag to clear for all transactions
835 	* initiated by this core */
836 	error = siba_wait_target_state(child, dinfo, SIBA_CFG0_TMSTATEHIGH,
837 	0x0, SIBA_TMH_BUSY, 100000);
841 	/* If this is an initiator core, we need to reject initiator
842 	* transactions too. */
843 	idl = bhnd_bus_read_4(r, SIBA_CFG0_IDLOW);
844 	if (idl & SIBA_IDL_INIT) {
845 		/* Reject further initiator transactions */
846 		siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE,
847 		SIBA_IM_RJ, SIBA_IM_RJ);
849 		/* Wait for initiator busy flag to clear */
850 		error = siba_wait_target_state(child, dinfo, SIBA_CFG0_IMSTATE,
851 		0x0, SIBA_IM_BY, 100000);
856 	/* Put the core into RESET, set the caller's IOCTL flags, and
857 	* force clocks to ensure the RESET signal propagates throughout the
859 	ts_low = SIBA_TML_RESET |
860 	(ioctl << SIBA_TML_SICF_SHIFT) |
861 	(BHND_IOCTL_CLK_EN << SIBA_TML_SICF_SHIFT) |
862 	(BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT);
863 	ts_mask = SIBA_TML_RESET |
866 	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, ts_low,
869 	/* Give RESET ample time */
872 	/* Clear previously asserted initiator reject */
873 	if (idl & SIBA_IDL_INIT) {
874 		siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE, 0x0,
878 	/* Disable all clocks, leaving RESET and REJ asserted */
879 	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0,
880 	(BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE) << SIBA_TML_SICF_SHIFT);
883 	* Core is now in RESET.
885 	* If the core holds any PWRCTL clock reservations, we need to release
886 	* those now. This emulates the standard bhnd(4) PMU behavior of RESET
887 	* automatically clearing clkctl
890 	if (dinfo->pmu_state == SIBA_PMU_PWRCTL) {
891 		error = bhnd_pwrctl_request_clock(dinfo->pmu.pwrctl, child,
896 		device_printf(child, "failed to release clock request: "
/* BHND_BUS_READ_CONFIG(): read @p width bytes from the child's CFG0
 * register block at @p offset into @p value.
 * NOTE(review): extraction drops lines — `r_size` declaration, the
 * EINVAL/ENODEV/EFAULT returns, the `switch (width)` header, the offset
 * argument closing each read call, and the default/return lines are
 * missing. Kept byte-identical. */
909 siba_read_config(device_t dev, device_t child, bus_size_t offset, void *value,
912 	struct siba_devinfo *dinfo;
915 	/* Must be directly attached */
916 	if (device_get_parent(child) != dev)
919 	/* CFG0 registers must be available */
920 	dinfo = device_get_ivars(child);
921 	if (dinfo->cfg_res[0] == NULL)
924 	/* Offset must fall within CFG0 */
925 	r_size = rman_get_size(dinfo->cfg_res[0]->res);
926 	if (r_size < offset || r_size - offset < width)
931 	*((uint8_t *)value) = bhnd_bus_read_1(dinfo->cfg_res[0],
935 	*((uint16_t *)value) = bhnd_bus_read_2(dinfo->cfg_res[0],
939 	*((uint32_t *)value) = bhnd_bus_read_4(dinfo->cfg_res[0],
948 siba_write_config(device_t dev, device_t child, bus_size_t offset,
949 const void *value, u_int width)
951 struct siba_devinfo *dinfo;
952 struct bhnd_resource *r;
955 /* Must be directly attached */
956 if (device_get_parent(child) != dev)
959 /* CFG0 registers must be available */
960 dinfo = device_get_ivars(child);
961 if ((r = dinfo->cfg_res[0]) == NULL)
964 /* Offset must fall within CFG0 */
965 r_size = rman_get_size(r->res);
966 if (r_size < offset || r_size - offset < width)
971 bhnd_bus_write_1(r, offset, *(const uint8_t *)value);
974 bhnd_bus_write_2(r, offset, *(const uint8_t *)value);
977 bhnd_bus_write_4(r, offset, *(const uint8_t *)value);
985 siba_get_port_count(device_t dev, device_t child, bhnd_port_type type)
987 struct siba_devinfo *dinfo;
989 /* delegate non-bus-attached devices to our parent */
990 if (device_get_parent(child) != dev)
991 return (BHND_BUS_GET_PORT_COUNT(device_get_parent(dev), child,
994 dinfo = device_get_ivars(child);
995 return (siba_port_count(&dinfo->core_id, type));
999 siba_get_region_count(device_t dev, device_t child, bhnd_port_type type,
1002 struct siba_devinfo *dinfo;
1004 /* delegate non-bus-attached devices to our parent */
1005 if (device_get_parent(child) != dev)
1006 return (BHND_BUS_GET_REGION_COUNT(device_get_parent(dev), child,
1009 dinfo = device_get_ivars(child);
1010 return (siba_port_region_count(&dinfo->core_id, type, port));
/* BHND_BUS_GET_PORT_RID(): map a (port type, port, region) tuple to the
 * corresponding SYS_RES_MEMORY resource ID.
 * NOTE(review): extraction drops lines — the `if (cfg != NULL)` guard
 * before the cb_rid return and the final not-found return are missing.
 * Kept byte-identical. */
1014 siba_get_port_rid(device_t dev, device_t child, bhnd_port_type port_type,
1015 	u_int port_num, u_int region_num)
1017 	struct siba_devinfo *dinfo;
1018 	struct siba_addrspace *addrspace;
1019 	struct siba_cfg_block *cfg;
1021 	/* delegate non-bus-attached devices to our parent */
1022 	if (device_get_parent(child) != dev)
1023 		return (BHND_BUS_GET_PORT_RID(device_get_parent(dev), child,
1024 		port_type, port_num, region_num));
1026 	dinfo = device_get_ivars(child);
1028 	/* Look for a matching addrspace entry */
1029 	addrspace = siba_find_addrspace(dinfo, port_type, port_num, region_num);
1030 	if (addrspace != NULL)
1031 		return (addrspace->sa_rid);
1033 	/* Try the config blocks */
1034 	cfg = siba_find_cfg_block(dinfo, port_type, port_num, region_num);
1036 	return (cfg->cb_rid);
/* BHND_BUS_DECODE_PORT_RID(): inverse of siba_get_port_rid() — map a
 * resource ID back to its (port type, port, region) tuple.
 * NOTE(review): extraction drops lines — the `continue` in each loop,
 * the success/ENOENT returns, and the EINVAL return for non-memory
 * resources are missing. Kept byte-identical. */
1043 siba_decode_port_rid(device_t dev, device_t child, int type, int rid,
1044 	bhnd_port_type *port_type, u_int *port_num, u_int *region_num)
1046 	struct siba_devinfo *dinfo;
1048 	/* delegate non-bus-attached devices to our parent */
1049 	if (device_get_parent(child) != dev)
1050 		return (BHND_BUS_DECODE_PORT_RID(device_get_parent(dev), child,
1051 		type, rid, port_type, port_num, region_num));
1053 	dinfo = device_get_ivars(child);
1055 	/* Ports are always memory mapped */
1056 	if (type != SYS_RES_MEMORY)
1059 	/* Look for a matching addrspace entry */
1060 	for (u_int i = 0; i < dinfo->core_id.num_addrspace; i++) {
1061 		if (dinfo->addrspace[i].sa_rid != rid)
1064 		*port_type = BHND_PORT_DEVICE;
1065 		*port_num = siba_addrspace_device_port(i);
1066 		*region_num = siba_addrspace_device_region(i);
1070 	/* Try the config blocks */
1071 	for (u_int i = 0; i < dinfo->core_id.num_cfg_blocks; i++) {
1072 		if (dinfo->cfg[i].cb_rid != rid)
1075 		*port_type = BHND_PORT_AGENT;
1076 		*port_num = siba_cfg_agent_port(i);
1077 		*region_num = siba_cfg_agent_region(i);
/* BHND_BUS_GET_REGION_ADDR(): report the base address and size of the
 * given port region. The device addrspace size excludes any
 * bus-reserved space (the Sonics config blocks).
 * NOTE(review): extraction drops lines — the success returns, the
 * `if (cfg != NULL)` guard, and the final ENOENT return are missing.
 * Kept byte-identical. */
1086 siba_get_region_addr(device_t dev, device_t child, bhnd_port_type port_type,
1087 	u_int port_num, u_int region_num, bhnd_addr_t *addr, bhnd_size_t *size)
1089 	struct siba_devinfo *dinfo;
1090 	struct siba_addrspace *addrspace;
1091 	struct siba_cfg_block *cfg;
1093 	/* delegate non-bus-attached devices to our parent */
1094 	if (device_get_parent(child) != dev) {
1095 		return (BHND_BUS_GET_REGION_ADDR(device_get_parent(dev), child,
1096 		port_type, port_num, region_num, addr, size));
1099 	dinfo = device_get_ivars(child);
1101 	/* Look for a matching addrspace */
1102 	addrspace = siba_find_addrspace(dinfo, port_type, port_num, region_num);
1103 	if (addrspace != NULL) {
1104 		*addr = addrspace->sa_base;
1105 		*size = addrspace->sa_size - addrspace->sa_bus_reserved;
1109 	/* Look for a matching cfg block */
1110 	cfg = siba_find_cfg_block(dinfo, port_type, port_num, region_num);
1112 	*addr = cfg->cb_base;
1113 	*size = cfg->cb_size;
/* NOTE(review): extraction drops lines — the return-0 branch for
 * cores without an assigned interrupt and the final `return (1)` are
 * missing. Kept byte-identical. */
1122 * Default siba(4) bus driver implementation of BHND_BUS_GET_INTR_COUNT().
1125 siba_get_intr_count(device_t dev, device_t child)
1127 	struct siba_devinfo *dinfo;
1129 	/* delegate non-bus-attached devices to our parent */
1130 	if (device_get_parent(child) != dev)
1131 		return (BHND_BUS_GET_INTR_COUNT(device_get_parent(dev), child));
1133 	dinfo = device_get_ivars(child);
1134 	if (!dinfo->intr_en) {
1138 	/* One assigned interrupt */
/* NOTE(review): extraction drops lines — the `intr, ivec` trailing
 * arguments of the delegated call, the EINVAL return for invalid
 * interrupt IDs, and the final return are missing. Kept byte-identical. */
1144 * Default siba(4) bus driver implementation of BHND_BUS_GET_INTR_IVEC().
1147 siba_get_intr_ivec(device_t dev, device_t child, u_int intr, u_int *ivec)
1149 	struct siba_devinfo *dinfo;
1151 	/* delegate non-bus-attached devices to our parent */
1152 	if (device_get_parent(child) != dev)
1153 		return (BHND_BUS_GET_INTR_IVEC(device_get_parent(dev), child,
1156 	/* Must be a valid interrupt ID */
1157 	if (intr >= siba_get_intr_count(dev, child))
1160 	KASSERT(intr == 0, ("invalid ivec %u", intr));
1162 	dinfo = device_get_ivars(child);
1164 	KASSERT(dinfo->intr_en, ("core does not have an interrupt assigned"));
1165 	*ivec = dinfo->intr.flag;
/* NOTE(review): extraction drops lines — function return type, local
 * declarations (`cid`, `error`, `addr`, `size`, `adm_offset`, `adm`),
 * the `cid = &di->core_id` assignment, the default bus_reserved value,
 * error returns, and the trailing arguments of
 * siba_append_dinfo_region() are missing. Kept byte-identical. */
1170 * Register all address space mappings for @p di.
1172 * @param dev The siba bus device.
1173 * @param di The device info instance on which to register all address
1175 * @param r A resource mapping the enumeration table block for @p di.
1178 siba_register_addrspaces(device_t dev, struct siba_devinfo *di,
1179 	struct bhnd_resource *r)
1181 	struct siba_core_id *cid;
1189 	/* Register the device address space entries */
1190 	for (uint8_t i = 0; i < di->core_id.num_addrspace; i++) {
1193 		uint32_t bus_reserved;
1195 		/* Determine the register offset */
1196 		adm_offset = siba_admatch_offset(i);
1197 		if (adm_offset == 0) {
1198 			device_printf(dev, "addrspace %hhu is unsupported", i);
1202 		/* Fetch the address match register value */
1203 		adm = bhnd_bus_read_4(r, adm_offset);
1205 		/* Parse the value */
1206 		if ((error = siba_parse_admatch(adm, &addr, &size))) {
1207 			device_printf(dev, "failed to decode address "
1208 			" match register value 0x%x\n", adm);
1212 		/* If this is the device's core/enumeration addrespace,
1213 		* reserve the Sonics configuration register blocks for the
1214 		* use of our bus. */
1216 		if (i == SIBA_CORE_ADDRSPACE)
1217 			bus_reserved = cid->num_cfg_blocks * SIBA_CFG_SIZE;
1219 		/* Append the region info */
1220 		error = siba_append_dinfo_region(di, i, addr, size,
/* NOTE(review): extraction drops lines — function return type,
 * `tpsflag`/`error` declarations, the `return (0)` paths, the
 * `&dinfo->intr.irq` argument to BHND_BUS_MAP_INTR(), and the error
 * return after the mapping failure are missing. Kept byte-identical. */
1231 * Register all interrupt descriptors for @p dinfo. Must be called after
1232 * configuration blocks have been mapped.
1234 * @param dev The siba bus device.
1235 * @param child The siba child device.
1236 * @param dinfo The device info instance on which to register all interrupt
1237 * descriptor entries.
1238 * @param r A resource mapping the enumeration table block for @p di.
1241 siba_register_interrupts(device_t dev, device_t child,
1242 	struct siba_devinfo *dinfo, struct bhnd_resource *r)
1247 	/* Is backplane interrupt distribution enabled for this core? */
1248 	tpsflag = bhnd_bus_read_4(r, SB0_REG_ABS(SIBA_CFG0_TPSFLAG));
1249 	if ((tpsflag & SIBA_TPS_F0EN0) == 0) {
1250 		dinfo->intr_en = false;
1254 	/* Have one interrupt */
1255 	dinfo->intr_en = true;
1256 	dinfo->intr.flag = SIBA_REG_GET(tpsflag, TPS_NUM0);
1257 	dinfo->intr.mapped = false;
1258 	dinfo->intr.irq = 0;
1259 	dinfo->intr.rid = -1;
1261 	/* Map the interrupt */
1262 	error = BHND_BUS_MAP_INTR(dev, child, 0 /* single intr is always 0 */,
1265 	device_printf(dev, "failed mapping interrupt line for core %u: "
1266 	"%d\n", dinfo->core_id.core_info.core_idx, error);
1269 	dinfo->intr.mapped = true;
1271 	/* Update the resource list */
1272 	dinfo->intr.rid = resource_list_add_next(&dinfo->resources, SYS_RES_IRQ,
1273 	dinfo->intr.irq, dinfo->intr.irq, 1);
/* NOTE(review): extraction drops lines — function return type,
 * `num_cfg`/`rid` declarations, ERANGE/ENXIO/ENOMEM returns, the
 * `.cb_base`/`.cb_rid` initializers of the config block descriptor,
 * and the final return are missing. Kept byte-identical. */
1279 * Map per-core configuration blocks for @p dinfo.
1281 * @param dev The siba bus device.
1282 * @param dinfo The device info instance on which to map all per-core
1283 * configuration blocks.
1286 siba_map_cfg_resources(device_t dev, struct siba_devinfo *dinfo)
1288 	struct siba_addrspace *addrspace;
1289 	rman_res_t r_start, r_count, r_end;
1293 	num_cfg = dinfo->core_id.num_cfg_blocks;
1294 	if (num_cfg > SIBA_MAX_CFG) {
1295 		device_printf(dev, "config block count %hhu out of range\n",
1300 	/* Fetch the core register address space */
1301 	addrspace = siba_find_addrspace(dinfo, BHND_PORT_DEVICE, 0, 0);
1302 	if (addrspace == NULL) {
1303 		device_printf(dev, "missing device registers\n");
1308 	* Map the per-core configuration blocks
1310 	for (uint8_t i = 0; i < num_cfg; i++) {
1311 		/* Add to child's resource list */
1312 		r_start = addrspace->sa_base + SIBA_CFG_OFFSET(i);
1313 		r_count = SIBA_CFG_SIZE;
1314 		r_end = r_start + r_count - 1;
1316 		rid = resource_list_add_next(&dinfo->resources, SYS_RES_MEMORY,
1317 		r_start, r_end, r_count);
1319 		/* Initialize config block descriptor */
1320 		dinfo->cfg[i] = ((struct siba_cfg_block) {
1322 		.cb_size = SIBA_CFG_SIZE,
1326 		/* Map the config resource for bus-level access */
1327 		dinfo->cfg_rid[i] = SIBA_CFG_RID(dinfo, i);
1328 		dinfo->cfg_res[i] = BHND_BUS_ALLOC_RESOURCE(dev, dev,
1329 		SYS_RES_MEMORY, &dinfo->cfg_rid[i], r_start, r_end,
1330 		r_count, RF_ACTIVE|RF_SHAREABLE);
1332 		if (dinfo->cfg_res[i] == NULL) {
1333 			device_printf(dev, "failed to allocate SIBA_CFG%hhu\n",
1343 siba_add_child(device_t dev, u_int order, const char *name, int unit)
1345 struct siba_devinfo *dinfo;
1348 child = device_add_child_ordered(dev, order, name, unit);
1352 if ((dinfo = siba_alloc_dinfo(dev)) == NULL) {
1353 device_delete_child(dev, child);
1357 device_set_ivars(child, dinfo);
1363 siba_child_deleted(device_t dev, device_t child)
1365 struct bhnd_softc *sc;
1366 struct siba_devinfo *dinfo;
1368 sc = device_get_softc(dev);
1370 /* Call required bhnd(4) implementation */
1371 bhnd_generic_child_deleted(dev, child);
1373 /* Free siba device info */
1374 if ((dinfo = device_get_ivars(child)) != NULL)
1375 siba_free_dinfo(dev, child, dinfo);
1377 device_set_ivars(child, NULL);
/* NOTE(review): extraction drops many lines — `children`/`child`/`rid`/
 * `error` declarations, ENXIO handling after resource allocation, the
 * unit-increment inside the unit-numbering loop, goto-based `failed:`
 * cleanup labels, and the success return are missing. Kept
 * byte-identical. */
1381 * Scan the core table and add all valid discovered cores to
1384 * @param dev The siba bus device.
1387 siba_add_children(device_t dev)
1389 	const struct bhnd_chipid *chipid;
1390 	struct siba_core_id *cores;
1391 	struct bhnd_resource *r;
1399 	chipid = BHND_BUS_GET_CHIPID(dev, dev);
1401 	/* Allocate our temporary core and device table */
1402 	cores = malloc(sizeof(*cores) * chipid->ncores, M_BHND, M_WAITOK);
1403 	children = malloc(sizeof(*children) * chipid->ncores, M_BHND,
1407 	* Add child devices for all discovered cores.
1409 	* On bridged devices, we'll exhaust our available register windows if
1410 	* we map config blocks on unpopulated/disabled cores. To avoid this, we
1411 	* defer mapping of the per-core siba(4) config blocks until all cores
1412 	* have been enumerated and otherwise configured.
1414 	for (u_int i = 0; i < chipid->ncores; i++) {
1415 		struct siba_devinfo *dinfo;
1417 		uint32_t idhigh, idlow;
1418 		rman_res_t r_count, r_end, r_start;
1420 		/* Map the core's register block */
1422 		r_start = SIBA_CORE_ADDR(i);
1423 		r_count = SIBA_CORE_SIZE;
1424 		r_end = r_start + SIBA_CORE_SIZE - 1;
1425 		r = bhnd_alloc_resource(dev, SYS_RES_MEMORY, &rid, r_start,
1426 		r_end, r_count, RF_ACTIVE);
1432 		/* Read the core info */
1433 		idhigh = bhnd_bus_read_4(r, SB0_REG_ABS(SIBA_CFG0_IDHIGH));
1434 		idlow = bhnd_bus_read_4(r, SB0_REG_ABS(SIBA_CFG0_IDLOW));
1436 		cores[i] = siba_parse_core_id(idhigh, idlow, i, 0);
1438 		/* Determine and set unit number */
1439 		for (u_int j = 0; j < i; j++) {
1440 			struct bhnd_core_info *cur = &cores[i].core_info;
1441 			struct bhnd_core_info *prev = &cores[j].core_info;
1443 			if (prev->vendor == cur->vendor &&
1444 			prev->device == cur->device)
1448 		/* Add the child device */
1449 		child = BUS_ADD_CHILD(dev, 0, NULL, -1);
1450 		if (child == NULL) {
1455 		children[i] = child;
1457 		/* Initialize per-device bus info */
1458 		if ((dinfo = device_get_ivars(child)) == NULL) {
1463 		if ((error = siba_init_dinfo(dev, dinfo, &cores[i])))
1466 		/* Register the core's address space(s). */
1467 		if ((error = siba_register_addrspaces(dev, dinfo, r)))
1470 		/* Register the core's interrupts */
1471 		if ((error = siba_register_interrupts(dev, child, dinfo, r)))
1474 		/* Unmap the core's register block */
1475 		bhnd_release_resource(dev, SYS_RES_MEMORY, rid, r);
1478 		/* If pins are floating or the hardware is otherwise
1479 		* unpopulated, the device shouldn't be used. */
1480 		if (bhnd_is_hw_disabled(child))
1481 			device_disable(child);
1484 	/* Map all valid core's config register blocks and perform interrupt
1486 	for (u_int i = 0; i < chipid->ncores; i++) {
1487 		struct siba_devinfo *dinfo;
1490 		child = children[i];
1492 		/* Skip if core is disabled */
1493 		if (bhnd_is_hw_disabled(child))
1496 		dinfo = device_get_ivars(child);
1498 		/* Map the core's config blocks */
1499 		if ((error = siba_map_cfg_resources(dev, dinfo)))
1502 		/* Issue bus callback for fully initialized child. */
1503 		BHND_BUS_CHILD_ADDED(dev, child);
/* Success path: free the temporary tables (children themselves persist). */
1506 	free(cores, M_BHND);
1507 	free(children, M_BHND);
/* Failure path: delete any children added so far, free the tables, and
 * release a still-held core register resource. */
1512 	for (u_int i = 0; i < chipid->ncores; i++) {
1513 		if (children[i] == NULL)
1516 		device_delete_child(dev, children[i]);
1519 	free(cores, M_BHND);
1520 	free(children, M_BHND);
1523 	bhnd_release_resource(dev, SYS_RES_MEMORY, rid, r);
/* Device method table, driver class, and module declarations.
 * NOTE(review): extraction drops lines — several bus-interface methods
 * (resource alloc/release, intr setup), the DEVMETHOD_END terminator,
 * and the "Bus interface" section comment are missing. Kept
 * byte-identical. */
1528 static device_method_t siba_methods[] = {
1529 	/* Device interface */
1530 	DEVMETHOD(device_probe, siba_probe),
1531 	DEVMETHOD(device_attach, siba_attach),
1532 	DEVMETHOD(device_detach, siba_detach),
1533 	DEVMETHOD(device_resume, siba_resume),
1534 	DEVMETHOD(device_suspend, siba_suspend),
1537 	DEVMETHOD(bus_add_child, siba_add_child),
1538 	DEVMETHOD(bus_child_deleted, siba_child_deleted),
1539 	DEVMETHOD(bus_read_ivar, siba_read_ivar),
1540 	DEVMETHOD(bus_write_ivar, siba_write_ivar),
1541 	DEVMETHOD(bus_get_resource_list, siba_get_resource_list),
1543 	/* BHND interface */
1544 	DEVMETHOD(bhnd_bus_get_erom_class, siba_get_erom_class),
1545 	DEVMETHOD(bhnd_bus_alloc_pmu, siba_alloc_pmu),
1546 	DEVMETHOD(bhnd_bus_release_pmu, siba_release_pmu),
1547 	DEVMETHOD(bhnd_bus_request_clock, siba_request_clock),
1548 	DEVMETHOD(bhnd_bus_enable_clocks, siba_enable_clocks),
1549 	DEVMETHOD(bhnd_bus_request_ext_rsrc, siba_request_ext_rsrc),
1550 	DEVMETHOD(bhnd_bus_release_ext_rsrc, siba_release_ext_rsrc),
1551 	DEVMETHOD(bhnd_bus_get_clock_freq, siba_get_clock_freq),
1552 	DEVMETHOD(bhnd_bus_get_clock_latency, siba_get_clock_latency),
1553 	DEVMETHOD(bhnd_bus_read_ioctl, siba_read_ioctl),
1554 	DEVMETHOD(bhnd_bus_write_ioctl, siba_write_ioctl),
1555 	DEVMETHOD(bhnd_bus_read_iost, siba_read_iost),
1556 	DEVMETHOD(bhnd_bus_is_hw_suspended, siba_is_hw_suspended),
1557 	DEVMETHOD(bhnd_bus_reset_hw, siba_reset_hw),
1558 	DEVMETHOD(bhnd_bus_suspend_hw, siba_suspend_hw),
1559 	DEVMETHOD(bhnd_bus_read_config, siba_read_config),
1560 	DEVMETHOD(bhnd_bus_write_config, siba_write_config),
1561 	DEVMETHOD(bhnd_bus_get_port_count, siba_get_port_count),
1562 	DEVMETHOD(bhnd_bus_get_region_count, siba_get_region_count),
1563 	DEVMETHOD(bhnd_bus_get_port_rid, siba_get_port_rid),
1564 	DEVMETHOD(bhnd_bus_decode_port_rid, siba_decode_port_rid),
1565 	DEVMETHOD(bhnd_bus_get_region_addr, siba_get_region_addr),
1566 	DEVMETHOD(bhnd_bus_get_intr_count, siba_get_intr_count),
1567 	DEVMETHOD(bhnd_bus_get_intr_ivec, siba_get_intr_ivec),
1572 DEFINE_CLASS_1(bhnd, siba_driver, siba_methods, sizeof(struct siba_softc), bhnd_driver);
1574 MODULE_VERSION(siba, 1);
1575 MODULE_DEPEND(siba, bhnd, 1, 1, 1);