2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2006 Semihalf, Rafal Jaworowski <raj@semihalf.com>
5 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
10 * NASA Ames Research Center.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
40 #include <sys/param.h>
41 #include <sys/systm.h>
46 #include <sys/endian.h>
48 #include <machine/bus.h>
49 #include <machine/pio.h>
50 #include <machine/md_var.h>
/* Stub used by the bus-space methods that are not yet implemented. */
52 #define TODO panic("%s: not implemented", __func__)
/*
 * Mappings requested via bs_gen_map() before the MMU is bootstrapped are
 * recorded in this fixed-size table so bs_remap_earlyboot() can re-enter
 * them once paging is up.  NOTE(review): the struct declaration head is
 * not visible in this excerpt.
 */
54 #define MAX_EARLYBOOT_MAPPINGS 6
61 } earlyboot_mappings[MAX_EARLYBOOT_MAPPINGS];
/* Index of the next free slot in earlyboot_mappings[]. */
62 static int earlyboot_map_idx = 0;
64 void bs_remap_earlyboot(void);
/*
 * Turn a bus-space handle plus a byte offset into a CPU pointer.  On this
 * platform a handle is simply the (virtual) base address of the mapped
 * region, so the access address is bsh + ofs.
 */
66 static __inline void *
67 __ppc_ba(bus_space_handle_t bsh, bus_size_t ofs)
69 return ((void *)(bsh + ofs));
/*
 * Generic bus-space map method.  Before pmap is bootstrapped the request
 * is satisfied with pmap_early_io_map() and recorded in
 * earlyboot_mappings[] so it can be redone by bs_remap_earlyboot();
 * afterwards the region is mapped with pmap_mapdev_attr() using a memory
 * attribute derived from the BUS_SPACE_MAP_* flags.
 */
73 bs_gen_map(bus_addr_t addr, bus_size_t size, int flags,
74 bus_space_handle_t *bshp)
79 * Record what we did if we haven't enabled the MMU yet. We
80 * will need to remap it as soon as the MMU comes up.
82 if (!pmap_bootstrapped) {
83 KASSERT(earlyboot_map_idx < MAX_EARLYBOOT_MAPPINGS,
84 ("%s: too many early boot mapping requests", __func__));
85 earlyboot_mappings[earlyboot_map_idx].addr = addr;
86 earlyboot_mappings[earlyboot_map_idx].virt =
87 pmap_early_io_map(addr, size);
88 earlyboot_mappings[earlyboot_map_idx].size = size;
89 earlyboot_mappings[earlyboot_map_idx].flags = flags;
90 *bshp = earlyboot_mappings[earlyboot_map_idx].virt;
/* Normal path: translate BUS_SPACE_MAP_* flags into a VM memory attribute. */
93 ma = VM_MEMATTR_DEFAULT;
95 case BUS_SPACE_MAP_CACHEABLE:
96 ma = VM_MEMATTR_CACHEABLE;
98 case BUS_SPACE_MAP_PREFETCHABLE:
99 ma = VM_MEMATTR_PREFETCHABLE;
/* The device mapping's virtual address becomes the bus-space handle. */
102 *bshp = (bus_space_handle_t)pmap_mapdev_attr(addr, size, ma);
/*
 * Replay every mapping recorded by bs_gen_map() before the MMU came up,
 * re-entering it page by page with pmap_kenter_attr().  Identity mappings
 * that the pmap does not already direct-map are skipped (the
 * pmap_dev_direct_mapped() == 0 test on the spa == virt case).
 */
109 bs_remap_earlyboot(void)
116 for (i = 0; i < earlyboot_map_idx; i++) {
117 spa = earlyboot_mappings[i].addr;
118 if (spa == earlyboot_mappings[i].virt &&
119 pmap_dev_direct_mapped(spa, earlyboot_mappings[i].size) == 0)
/* Recompute the memory attribute from the recorded map flags. */
122 ma = VM_MEMATTR_DEFAULT;
123 switch (earlyboot_mappings[i].flags) {
124 case BUS_SPACE_MAP_CACHEABLE:
125 ma = VM_MEMATTR_CACHEABLE;
127 case BUS_SPACE_MAP_PREFETCHABLE:
128 ma = VM_MEMATTR_PREFETCHABLE;
/* Walk the region a page at a time, starting from the page-aligned base. */
132 pa = trunc_page(spa);
133 va = trunc_page(earlyboot_mappings[i].virt);
134 while (pa < spa + earlyboot_mappings[i].size) {
135 pmap_kenter_attr(va, pa, ma);
/* Generic unmap method; the size argument is unused in this implementation. */
143 bs_gen_unmap(bus_size_t size __unused)
/*
 * Generic subregion method: derive a new handle for a sub-range of an
 * existing mapping.  Since a handle is just a base address, this only
 * needs bsh and ofs; size is unused.
 */
148 bs_gen_subregion(bus_space_handle_t bsh, bus_size_t ofs,
149 bus_size_t size __unused, bus_space_handle_t *nbshp)
/*
 * Generic allocation method.  All parameters are marked __unused;
 * NOTE(review): the body is not visible here — presumably a stub (TODO).
 */
156 bs_gen_alloc(bus_addr_t rstart __unused, bus_addr_t rend __unused,
157 bus_size_t size __unused, bus_size_t alignment __unused,
158 bus_size_t boundary __unused, int flags __unused,
159 bus_addr_t *bpap __unused, bus_space_handle_t *bshp __unused)
/* Generic free method; both parameters unused in this implementation. */
165 bs_gen_free(bus_space_handle_t bsh __unused, bus_size_t size __unused)
/*
 * Generic barrier method.  All parameters are unused; NOTE(review): the
 * body is not visible in this excerpt — confirm what ordering primitive
 * (if any) it issues against the full source.
 */
171 bs_gen_barrier(bus_space_handle_t bsh __unused, bus_size_t ofs __unused,
172 bus_size_t size __unused, int flags __unused)
179 * Big-endian access functions
/*
 * bs_be_rs_{1,2,4,8}: single big-endian reads of 1/2/4/8 bytes at
 * bsh + ofs.  The access address is formed with __ppc_ba() and reads are
 * traced via CTR4(KTR_BE_IO, ...).  NOTE(review): the actual load (and
 * any ordering/barrier) statements are elided from this excerpt.
 */
182 bs_be_rs_1(bus_space_handle_t bsh, bus_size_t ofs)
184 volatile uint8_t *addr;
187 addr = __ppc_ba(bsh, ofs);
190 CTR4(KTR_BE_IO, "%s(bsh=%#x, ofs=%#x) = %#x", __func__, bsh, ofs, res);
195 bs_be_rs_2(bus_space_handle_t bsh, bus_size_t ofs)
197 volatile uint16_t *addr;
200 addr = __ppc_ba(bsh, ofs);
203 CTR4(KTR_BE_IO, "%s(bsh=%#x, ofs=%#x) = %#x", __func__, bsh, ofs, res);
208 bs_be_rs_4(bus_space_handle_t bsh, bus_size_t ofs)
210 volatile uint32_t *addr;
213 addr = __ppc_ba(bsh, ofs);
216 CTR4(KTR_BE_IO, "%s(bsh=%#x, ofs=%#x) = %#x", __func__, bsh, ofs, res);
221 bs_be_rs_8(bus_space_handle_t bsh, bus_size_t ofs)
223 volatile uint64_t *addr;
226 addr = __ppc_ba(bsh, ofs);
/*
 * bs_be_rm_{1,2,4,8}: big-endian multi reads — read cnt items from the
 * single location bsh + ofs into addr[], delegating to the ins{8,16,32,64}
 * string-input primitives.
 */
233 bs_be_rm_1(bus_space_handle_t bsh, bus_size_t ofs, uint8_t *addr, size_t cnt)
235 ins8(__ppc_ba(bsh, ofs), addr, cnt);
239 bs_be_rm_2(bus_space_handle_t bsh, bus_size_t ofs, uint16_t *addr, size_t cnt)
241 ins16(__ppc_ba(bsh, ofs), addr, cnt);
245 bs_be_rm_4(bus_space_handle_t bsh, bus_size_t ofs, uint32_t *addr, size_t cnt)
247 ins32(__ppc_ba(bsh, ofs), addr, cnt);
251 bs_be_rm_8(bus_space_handle_t bsh, bus_size_t ofs, uint64_t *addr, size_t cnt)
253 ins64(__ppc_ba(bsh, ofs), addr, cnt);
/*
 * bs_be_rr_{1,2,4,8}: big-endian region reads — copy cnt consecutive
 * items starting at bsh + ofs into addr[].  NOTE(review): the copy loops
 * are elided from this excerpt; only the source-pointer setup is visible.
 */
257 bs_be_rr_1(bus_space_handle_t bsh, bus_size_t ofs, uint8_t *addr, size_t cnt)
259 volatile uint8_t *s = __ppc_ba(bsh, ofs);
267 bs_be_rr_2(bus_space_handle_t bsh, bus_size_t ofs, uint16_t *addr, size_t cnt)
269 volatile uint16_t *s = __ppc_ba(bsh, ofs);
277 bs_be_rr_4(bus_space_handle_t bsh, bus_size_t ofs, uint32_t *addr, size_t cnt)
279 volatile uint32_t *s = __ppc_ba(bsh, ofs);
287 bs_be_rr_8(bus_space_handle_t bsh, bus_size_t ofs, uint64_t *addr, size_t cnt)
289 volatile uint64_t *s = __ppc_ba(bsh, ofs);
/*
 * bs_be_ws_{1,2,4,8}: single big-endian writes of val to bsh + ofs.
 * Writes are traced via CTR4(KTR_BE_IO, ...).  NOTE(review): the actual
 * store (and any barrier) statements are elided from this excerpt.
 */
297 bs_be_ws_1(bus_space_handle_t bsh, bus_size_t ofs, uint8_t val)
299 volatile uint8_t *addr;
301 addr = __ppc_ba(bsh, ofs);
304 CTR4(KTR_BE_IO, "%s(bsh=%#x, ofs=%#x, val=%#x)", __func__, bsh, ofs, val);
308 bs_be_ws_2(bus_space_handle_t bsh, bus_size_t ofs, uint16_t val)
310 volatile uint16_t *addr;
312 addr = __ppc_ba(bsh, ofs);
315 CTR4(KTR_BE_IO, "%s(bsh=%#x, ofs=%#x, val=%#x)", __func__, bsh, ofs, val);
319 bs_be_ws_4(bus_space_handle_t bsh, bus_size_t ofs, uint32_t val)
321 volatile uint32_t *addr;
323 addr = __ppc_ba(bsh, ofs);
326 CTR4(KTR_BE_IO, "%s(bsh=%#x, ofs=%#x, val=%#x)", __func__, bsh, ofs, val);
330 bs_be_ws_8(bus_space_handle_t bsh, bus_size_t ofs, uint64_t val)
332 volatile uint64_t *addr;
334 addr = __ppc_ba(bsh, ofs);
337 CTR4(KTR_BE_IO, "%s(bsh=%#x, ofs=%#x, val=%#x)", __func__, bsh, ofs, val);
/*
 * bs_be_wm_{1,2,4,8}: big-endian multi writes — write cnt items from
 * addr[] to the single location bsh + ofs, delegating to the
 * outs{b,w,l,ll} string-output primitives.
 */
341 bs_be_wm_1(bus_space_handle_t bsh, bus_size_t ofs, const uint8_t *addr,
344 outsb(__ppc_ba(bsh, ofs), addr, cnt);
348 bs_be_wm_2(bus_space_handle_t bsh, bus_size_t ofs, const uint16_t *addr,
351 outsw(__ppc_ba(bsh, ofs), addr, cnt);
355 bs_be_wm_4(bus_space_handle_t bsh, bus_size_t ofs, const uint32_t *addr,
358 outsl(__ppc_ba(bsh, ofs), addr, cnt);
362 bs_be_wm_8(bus_space_handle_t bsh, bus_size_t ofs, const uint64_t *addr,
365 outsll(__ppc_ba(bsh, ofs), addr, cnt);
/*
 * bs_be_wr_{1,2,4,8}: big-endian region writes — copy cnt consecutive
 * items from addr[] to the region starting at bsh + ofs.  NOTE(review):
 * the copy loops are elided; only the destination-pointer setup is
 * visible here.
 */
369 bs_be_wr_1(bus_space_handle_t bsh, bus_size_t ofs, const uint8_t *addr,
372 volatile uint8_t *d = __ppc_ba(bsh, ofs);
380 bs_be_wr_2(bus_space_handle_t bsh, bus_size_t ofs, const uint16_t *addr,
383 volatile uint16_t *d = __ppc_ba(bsh, ofs);
391 bs_be_wr_4(bus_space_handle_t bsh, bus_size_t ofs, const uint32_t *addr,
394 volatile uint32_t *d = __ppc_ba(bsh, ofs);
402 bs_be_wr_8(bus_space_handle_t bsh, bus_size_t ofs, const uint64_t *addr,
405 volatile uint64_t *d = __ppc_ba(bsh, ofs);
/*
 * bs_be_sm_{1,2,4,8}: big-endian "set multi" — write val cnt times to
 * the single location bsh + ofs.  NOTE(review): the store loops are
 * elided; only the destination-pointer setup is visible here.
 */
413 bs_be_sm_1(bus_space_handle_t bsh, bus_size_t ofs, uint8_t val, size_t cnt)
415 volatile uint8_t *d = __ppc_ba(bsh, ofs);
423 bs_be_sm_2(bus_space_handle_t bsh, bus_size_t ofs, uint16_t val, size_t cnt)
425 volatile uint16_t *d = __ppc_ba(bsh, ofs);
433 bs_be_sm_4(bus_space_handle_t bsh, bus_size_t ofs, uint32_t val, size_t cnt)
435 volatile uint32_t *d = __ppc_ba(bsh, ofs);
443 bs_be_sm_8(bus_space_handle_t bsh, bus_size_t ofs, uint64_t val, size_t cnt)
445 volatile uint64_t *d = __ppc_ba(bsh, ofs);
/*
 * bs_be_sr_{1,2,4,8}: big-endian "set region" — fill cnt consecutive
 * locations starting at bsh + ofs with val.  NOTE(review): the fill
 * loops are elided; only the destination-pointer setup is visible here.
 */
453 bs_be_sr_1(bus_space_handle_t bsh, bus_size_t ofs, uint8_t val, size_t cnt)
455 volatile uint8_t *d = __ppc_ba(bsh, ofs);
463 bs_be_sr_2(bus_space_handle_t bsh, bus_size_t ofs, uint16_t val, size_t cnt)
465 volatile uint16_t *d = __ppc_ba(bsh, ofs);
473 bs_be_sr_4(bus_space_handle_t bsh, bus_size_t ofs, uint32_t val, size_t cnt)
475 volatile uint32_t *d = __ppc_ba(bsh, ofs);
483 bs_be_sr_8(bus_space_handle_t bsh, bus_size_t ofs, uint64_t val, size_t cnt)
485 volatile uint64_t *d = __ppc_ba(bsh, ofs);
493 * Little-endian access functions
/*
 * bs_le_rs_{1,2,4,8}: single little-endian reads at bsh + ofs.  The
 * 2- and 4-byte variants use the PowerPC byte-reversed load instructions
 * (lhbrx/lwbrx); the 8-byte variant converts with le64toh().  All are
 * traced via CTR4(KTR_LE_IO, ...).
 */
496 bs_le_rs_1(bus_space_handle_t bsh, bus_size_t ofs)
498 volatile uint8_t *addr;
501 addr = __ppc_ba(bsh, ofs);
504 CTR4(KTR_LE_IO, "%s(bsh=%#x, ofs=%#x) = %#x", __func__, bsh, ofs, res);
509 bs_le_rs_2(bus_space_handle_t bsh, bus_size_t ofs)
511 volatile uint16_t *addr;
514 addr = __ppc_ba(bsh, ofs);
515 __asm __volatile("lhbrx %0, 0, %1" : "=r"(res) : "r"(addr));
517 CTR4(KTR_LE_IO, "%s(bsh=%#x, ofs=%#x) = %#x", __func__, bsh, ofs, res);
522 bs_le_rs_4(bus_space_handle_t bsh, bus_size_t ofs)
524 volatile uint32_t *addr;
527 addr = __ppc_ba(bsh, ofs);
528 __asm __volatile("lwbrx %0, 0, %1" : "=r"(res) : "r"(addr));
530 CTR4(KTR_LE_IO, "%s(bsh=%#x, ofs=%#x) = %#x", __func__, bsh, ofs, res);
535 bs_le_rs_8(bus_space_handle_t bsh, bus_size_t ofs)
537 volatile uint64_t *addr;
540 addr = __ppc_ba(bsh, ofs);
541 res = le64toh(*addr);
543 CTR4(KTR_LE_IO, "%s(bsh=%#x, ofs=%#x) = %#x", __func__, bsh, ofs, res);
/*
 * bs_le_rm_{1,2,4,8}: little-endian multi reads from the single location
 * bsh + ofs, using the byte-reversing ins{16,32}rb primitives for the
 * 2- and 4-byte cases (byte order is irrelevant for single bytes).
 */
548 bs_le_rm_1(bus_space_handle_t bsh, bus_size_t ofs, uint8_t *addr, size_t cnt)
550 ins8(__ppc_ba(bsh, ofs), addr, cnt);
554 bs_le_rm_2(bus_space_handle_t bsh, bus_size_t ofs, uint16_t *addr, size_t cnt)
556 ins16rb(__ppc_ba(bsh, ofs), addr, cnt);
560 bs_le_rm_4(bus_space_handle_t bsh, bus_size_t ofs, uint32_t *addr, size_t cnt)
562 ins32rb(__ppc_ba(bsh, ofs), addr, cnt);
/*
 * NOTE(review): parameter is spelled "bshh" (likely a typo for "bsh");
 * the body is not visible here — presumably TODO, so the name is unused.
 */
566 bs_le_rm_8(bus_space_handle_t bshh, bus_size_t ofs, uint64_t *addr, size_t cnt)
/*
 * bs_le_rr_{1,2,4,8}: little-endian region reads — copy cnt consecutive
 * items starting at bsh + ofs into addr[].  The 2- and 4-byte variants
 * read through the byte-reversing in16rb()/in32rb() accessors.
 * NOTE(review): the _8 body is not visible — presumably TODO.
 */
572 bs_le_rr_1(bus_space_handle_t bsh, bus_size_t ofs, uint8_t *addr, size_t cnt)
574 volatile uint8_t *s = __ppc_ba(bsh, ofs);
582 bs_le_rr_2(bus_space_handle_t bsh, bus_size_t ofs, uint16_t *addr, size_t cnt)
584 volatile uint16_t *s = __ppc_ba(bsh, ofs);
587 *addr++ = in16rb(s++);
592 bs_le_rr_4(bus_space_handle_t bsh, bus_size_t ofs, uint32_t *addr, size_t cnt)
594 volatile uint32_t *s = __ppc_ba(bsh, ofs);
597 *addr++ = in32rb(s++);
602 bs_le_rr_8(bus_space_handle_t bsh, bus_size_t ofs, uint64_t *addr, size_t cnt)
/*
 * bs_le_ws_{1,2,4,8}: single little-endian writes of val to bsh + ofs.
 * The 2- and 4-byte variants use the PowerPC byte-reversed store
 * instructions (sthbrx/stwbrx); the 8-byte variant stores htole64(val).
 * All are traced via CTR4(KTR_LE_IO, ...).
 */
608 bs_le_ws_1(bus_space_handle_t bsh, bus_size_t ofs, uint8_t val)
610 volatile uint8_t *addr;
612 addr = __ppc_ba(bsh, ofs);
615 CTR4(KTR_LE_IO, "%s(bsh=%#x, ofs=%#x, val=%#x)", __func__, bsh, ofs, val);
619 bs_le_ws_2(bus_space_handle_t bsh, bus_size_t ofs, uint16_t val)
621 volatile uint16_t *addr;
623 addr = __ppc_ba(bsh, ofs);
624 __asm __volatile("sthbrx %0, 0, %1" :: "r"(val), "r"(addr));
626 CTR4(KTR_LE_IO, "%s(bsh=%#x, ofs=%#x, val=%#x)", __func__, bsh, ofs, val);
630 bs_le_ws_4(bus_space_handle_t bsh, bus_size_t ofs, uint32_t val)
632 volatile uint32_t *addr;
634 addr = __ppc_ba(bsh, ofs);
635 __asm __volatile("stwbrx %0, 0, %1" :: "r"(val), "r"(addr));
637 CTR4(KTR_LE_IO, "%s(bsh=%#x, ofs=%#x, val=%#x)", __func__, bsh, ofs, val);
641 bs_le_ws_8(bus_space_handle_t bsh, bus_size_t ofs, uint64_t val)
643 volatile uint64_t *addr;
645 addr = __ppc_ba(bsh, ofs);
646 *addr = htole64(val);
648 CTR4(KTR_LE_IO, "%s(bsh=%#x, ofs=%#x, val=%#x)", __func__, bsh, ofs, val);
/*
 * bs_le_wm_{1,2,4,8}: little-endian multi writes to the single location
 * bsh + ofs, using the byte-reversing outs{16,32}rb primitives for the
 * 2- and 4-byte cases.  NOTE(review): the _8 body is not visible —
 * presumably TODO.
 */
652 bs_le_wm_1(bus_space_handle_t bsh, bus_size_t ofs, const uint8_t *addr,
655 outs8(__ppc_ba(bsh, ofs), addr, cnt);
659 bs_le_wm_2(bus_space_handle_t bsh, bus_size_t ofs, const uint16_t *addr,
662 outs16rb(__ppc_ba(bsh, ofs), addr, cnt);
666 bs_le_wm_4(bus_space_handle_t bsh, bus_size_t ofs, const uint32_t *addr,
669 outs32rb(__ppc_ba(bsh, ofs), addr, cnt);
673 bs_le_wm_8(bus_space_handle_t bsh, bus_size_t ofs, const uint64_t *addr,
/*
 * bs_le_wr_{1,2,4,8}: little-endian region writes — copy cnt consecutive
 * items from addr[] to the region starting at bsh + ofs.  The 2- and
 * 4-byte variants store through the byte-reversing out16rb()/out32rb()
 * accessors.  NOTE(review): the _8 body is not visible — presumably TODO.
 */
680 bs_le_wr_1(bus_space_handle_t bsh, bus_size_t ofs, const uint8_t *addr,
683 volatile uint8_t *d = __ppc_ba(bsh, ofs);
691 bs_le_wr_2(bus_space_handle_t bsh, bus_size_t ofs, const uint16_t *addr,
694 volatile uint16_t *d = __ppc_ba(bsh, ofs);
697 out16rb(d++, *addr++);
702 bs_le_wr_4(bus_space_handle_t bsh, bus_size_t ofs, const uint32_t *addr,
705 volatile uint32_t *d = __ppc_ba(bsh, ofs);
708 out32rb(d++, *addr++);
713 bs_le_wr_8(bus_space_handle_t bsh, bus_size_t ofs, const uint64_t *addr,
/*
 * bs_le_sm_{1,2,4,8}: little-endian "set multi" — write val cnt times to
 * the single location bsh + ofs.  NOTE(review): the store loops and the
 * _8 body are not visible in this excerpt.
 */
720 bs_le_sm_1(bus_space_handle_t bsh, bus_size_t ofs, uint8_t val, size_t cnt)
722 volatile uint8_t *d = __ppc_ba(bsh, ofs);
730 bs_le_sm_2(bus_space_handle_t bsh, bus_size_t ofs, uint16_t val, size_t cnt)
732 volatile uint16_t *d = __ppc_ba(bsh, ofs);
740 bs_le_sm_4(bus_space_handle_t bsh, bus_size_t ofs, uint32_t val, size_t cnt)
742 volatile uint32_t *d = __ppc_ba(bsh, ofs);
750 bs_le_sm_8(bus_space_handle_t bsh, bus_size_t ofs, uint64_t val, size_t cnt)
/*
 * bs_le_sr_{1,2,4,8}: little-endian "set region" — fill cnt consecutive
 * locations starting at bsh + ofs with val.  NOTE(review): the fill
 * loops and the _8 body are not visible in this excerpt.
 */
756 bs_le_sr_1(bus_space_handle_t bsh, bus_size_t ofs, uint8_t val, size_t cnt)
758 volatile uint8_t *d = __ppc_ba(bsh, ofs);
766 bs_le_sr_2(bus_space_handle_t bsh, bus_size_t ofs, uint16_t val, size_t cnt)
768 volatile uint16_t *d = __ppc_ba(bsh, ofs);
776 bs_le_sr_4(bus_space_handle_t bsh, bus_size_t ofs, uint32_t val, size_t cnt)
778 volatile uint32_t *d = __ppc_ba(bsh, ofs);
786 bs_le_sr_8(bus_space_handle_t bsh, bus_size_t ofs, uint64_t val, size_t cnt)
/*
 * Method table wiring the big-endian accessors above into a bus_space
 * tag for consumers of this bus-space implementation.  NOTE(review):
 * most of the initializer is not visible in this excerpt.
 */
791 struct bus_space bs_be_tag = {
792 /* mapping/unmapping */
797 /* allocation/deallocation */
885 struct bus_space bs_le_tag = {
886 /* mapping/unmapping */
891 /* allocation/deallocation */