/*-
 * Copyright (c) 2004 Marcel Moolenaar
 * Copyright (c) 2001 Doug Rabson
 * Copyright (c) 2016 The FreeBSD Foundation
 * Copyright (c) 2017 Andrew Turner
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/efi.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <machine/pte.h>
#include <machine/vmparam.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
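
/*
 * State for the EFI 1:1 (VA == PA) map: page-table pages are grabbed
 * from obj_1t1_pt at the next free pindex (efi_1t1_idx), efi_l0 points
 * at the root (level 0) table, and efi_ttbr0 is the value loaded into
 * TTBR0_EL1 (root table PA plus the ASID reserved for EFI).
 */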
static vm_object_t obj_1t1_pt;
static vm_pindex_t efi_1t1_idx;
static pd_entry_t *efi_l0;
static uint64_t efi_ttbr0;

void
efi_destroy_1t1_map(void)
{
	vm_page_t m;

	if (obj_1t1_pt != NULL) {
		VM_OBJECT_RLOCK(obj_1t1_pt);
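		/*
		 * The table pages were grabbed wired and unbusied; drop
		 * each page's reference count back to the bare object
		 * reference and fix up the global wired-page count so
		 * that deallocating the object frees them.
		 */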
		TAILQ_FOREACH(m, &obj_1t1_pt->memq, listq)
			m->ref_count = VPRC_OBJREF;
		vm_wire_sub(obj_1t1_pt->resident_page_count);
		VM_OBJECT_RUNLOCK(obj_1t1_pt);
		vm_object_deallocate(obj_1t1_pt);
	}

	obj_1t1_pt = NULL;
	efi_1t1_idx = 0;
	efi_l0 = NULL;
	efi_ttbr0 = 0;
}
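
/*
 * Grab the next page-table page: zero-filled, wired, and left unbusied;
 * efi_1t1_idx hands out object indices sequentially.
 */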
static vm_page_t
efi_1t1_page(void)
{
	return (vm_page_grab(obj_1t1_pt, efi_1t1_idx++, VM_ALLOC_NOBUSY |
	    VM_ALLOC_WIRED | VM_ALLOC_ZERO));
}
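
/*
 * Walk the EFI page tables for va, allocating the intermediate table
 * pages on demand, and return a pointer to the L3 entry that maps it.
 */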
static pt_entry_t *
efi_1t1_l3(vm_offset_t va)
{
	pd_entry_t *l0, *l1, *l2;
	pt_entry_t *l3;
	vm_pindex_t l0_idx, l1_idx, l2_idx;
	vm_page_t m;
	vm_paddr_t mphys;

	l0_idx = pmap_l0_index(va);
	l0 = &efi_l0[l0_idx];
	if (*l0 == 0) {
		m = efi_1t1_page();
		mphys = VM_PAGE_TO_PHYS(m);
		*l0 = PHYS_TO_PTE(mphys) | L0_TABLE;
	} else {
		mphys = PTE_TO_PHYS(*l0);
	}

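	/* Lower-level tables are written through the direct map. */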
	l1 = (pd_entry_t *)PHYS_TO_DMAP(mphys);
	l1_idx = pmap_l1_index(va);
	l1 += l1_idx;
	if (*l1 == 0) {
		m = efi_1t1_page();
		mphys = VM_PAGE_TO_PHYS(m);
		*l1 = PHYS_TO_PTE(mphys) | L1_TABLE;
	} else {
		mphys = PTE_TO_PHYS(*l1);
	}

	l2 = (pd_entry_t *)PHYS_TO_DMAP(mphys);
	l2_idx = pmap_l2_index(va);
	l2 += l2_idx;
	if (*l2 == 0) {
		m = efi_1t1_page();
		mphys = VM_PAGE_TO_PHYS(m);
		*l2 = PHYS_TO_PTE(mphys) | L2_TABLE;
	} else {
		mphys = PTE_TO_PHYS(*l2);
	}

	l3 = (pt_entry_t *)PHYS_TO_DMAP(mphys);
	l3 += pmap_l3_index(va);
	KASSERT(*l3 == 0, ("%s: Already mapped: va %#jx *pt %#jx", __func__,
	    va, *l3));

	return (l3);
}

/*
 * Map a physical address from EFI runtime space into KVA space.  Returns 0
 * to indicate a failed mapping so that the caller may handle the error.
 */
vm_offset_t
efi_phys_to_kva(vm_paddr_t paddr)
{
	if (PHYS_IN_DMAP(paddr))
		return (PHYS_TO_DMAP(paddr));

	/* TODO: Map memory not in the DMAP */
	return (0);
}

/*
 * Create the 1:1 virtual to physical map for EFI.
 */
bool
efi_create_1t1_map(struct efi_md *map, int ndesc, int descsz)
{
	struct efi_md *p;
	pt_entry_t *l3, l3_attr;
	vm_offset_t va;
	vm_page_t efi_l0_page;
	uint64_t idx;
	int i, mode;

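	/*
	 * The object size is a generous upper bound on the number of
	 * page-table pages needed: the L0 table plus up to one page for
	 * every possible L1, L2, and L3 table.
	 */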
	obj_1t1_pt = vm_pager_allocate(OBJT_PHYS, NULL, L0_ENTRIES +
	    L0_ENTRIES * Ln_ENTRIES + L0_ENTRIES * Ln_ENTRIES * Ln_ENTRIES +
	    L0_ENTRIES * Ln_ENTRIES * Ln_ENTRIES * Ln_ENTRIES,
	    VM_PROT_ALL, 0, NULL);
	VM_OBJECT_WLOCK(obj_1t1_pt);
	efi_l0_page = efi_1t1_page();
	VM_OBJECT_WUNLOCK(obj_1t1_pt);
	efi_l0 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(efi_l0_page));
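	/* TTBR0 combines the reserved EFI ASID with the root table's PA. */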
	efi_ttbr0 = ASID_TO_OPERAND(ASID_RESERVED_FOR_EFI) |
	    VM_PAGE_TO_PHYS(efi_l0_page);

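	/*
	 * Walk the memory map, validating each runtime descriptor and
	 * installing an identity (VA == PA) mapping for it.
	 */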
	for (i = 0, p = map; i < ndesc; i++, p = efi_next_descriptor(p,
	    descsz)) {
		if ((p->md_attr & EFI_MD_ATTR_RT) == 0)
			continue;
		if (p->md_virt != 0 && p->md_virt != p->md_phys) {
			if (bootverbose)
				printf("EFI Runtime entry %d is mapped\n", i);
			goto fail;
		}
		if ((p->md_phys & EFI_PAGE_MASK) != 0) {
			if (bootverbose)
				printf("EFI Runtime entry %d is not aligned\n",
				    i);
			goto fail;
		}
		if (p->md_phys + p->md_pages * EFI_PAGE_SIZE < p->md_phys ||
		    p->md_phys + p->md_pages * EFI_PAGE_SIZE >=
		    VM_MAXUSER_ADDRESS) {
			printf("EFI Runtime entry %d is not mappable for RT: "
			    "base %#016jx %#jx pages\n",
			    i, (uintmax_t)p->md_phys,
			    (uintmax_t)p->md_pages);
			goto fail;
		}
		if ((p->md_attr & EFI_MD_ATTR_WB) != 0)
			mode = VM_MEMATTR_WRITE_BACK;
		else if ((p->md_attr & EFI_MD_ATTR_WT) != 0)
			mode = VM_MEMATTR_WRITE_THROUGH;
		else if ((p->md_attr & EFI_MD_ATTR_WC) != 0)
			mode = VM_MEMATTR_WRITE_COMBINING;
		else
			mode = VM_MEMATTR_DEVICE;

		if (bootverbose) {
			printf("MAP %lx mode %x pages %lu\n",
			    p->md_phys, mode, p->md_pages);
		}

		l3_attr = ATTR_DEFAULT | ATTR_S1_IDX(mode) |
		    ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_nG | L3_PAGE;
		if (mode == VM_MEMATTR_DEVICE ||
		    (p->md_attr & EFI_MD_ATTR_XP) != 0)
			l3_attr |= ATTR_S1_XN;

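		/*
		 * EFI pages are 4K; with a larger kernel PAGE_SIZE one
		 * L3 entry covers PAGE_SIZE / EFI_PAGE_SIZE EFI pages.
		 */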
		VM_OBJECT_WLOCK(obj_1t1_pt);
		for (va = p->md_phys, idx = 0; idx < p->md_pages;
		    idx += (PAGE_SIZE / EFI_PAGE_SIZE), va += PAGE_SIZE) {
			l3 = efi_1t1_l3(va);
			*l3 = va | l3_attr;
		}
		VM_OBJECT_WUNLOCK(obj_1t1_pt);
	}

	return (true);

fail:
	efi_destroy_1t1_map();
	return (false);
}
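
/*
 * Expected call sequence, as a sketch (the MI code in
 * sys/dev/efidev/efirt.c drives this; "rt" and "tm" below are
 * illustrative, not part of this file):
 *
 *	if (efi_create_1t1_map(map, ndesc, descsz)) {
 *		critical_enter();
 *		efi_arch_enter();
 *		status = rt->rt_gettime(&tm, NULL);	// runtime call
 *		efi_arch_leave();
 *		critical_exit();
 *	}
 */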

int
efi_arch_enter(void)
{
	CRITICAL_ASSERT(curthread);

	/*
	 * Temporarily switch to EFI's page table.  However, we leave curpmap
	 * unchanged in order to prevent its ASID from being reclaimed before
	 * we switch back to its page table in efi_arch_leave().
	 */
	set_ttbr0(efi_ttbr0);
	/* Cores with the broadcast TLBI erratum need a local I$ flush. */
	if (PCPU_GET(bcast_tlbi_workaround) != 0)
		invalidate_local_icache();

	return (0);
}

void
efi_arch_leave(void)
{
	/*
	 * Restore the pcpu pointer.  Some UEFI implementations trash it and
	 * we don't store it before calling into them.  To fix this we need
	 * to restore it after returning to the kernel context.  As reading
	 * curpmap will access x18 we need to restore it before loading
	 * the pmap pointer.
	 */
	__asm __volatile(
	    "mrs x18, tpidr_el1 \n"
	);
	set_ttbr0(pmap_to_ttbr0(PCPU_GET(curpmap)));
	if (PCPU_GET(bcast_tlbi_workaround) != 0)
		invalidate_local_icache();
}

int
efi_rt_arch_call(struct efirt_callinfo *ec)
{
	panic("not implemented");
}