3 * Copyright (c) 2004 Christian Limpach.
4 * Copyright (c) 2004,2005 Kip Macy
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Christian Limpach.
18 * 4. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef _XEN_XENPMAP_H_
#define _XEN_XENPMAP_H_

/*
 * Prototypes for the Xen MMU helpers implemented in the xen machdep
 * code: queued page-table updates, page-directory/page-table pin and
 * unpin operations, and debug hooks.  _xen_queue_pt_update() takes the
 * caller's file/line for diagnostics; callers should use the
 * xen_queue_pt_update() wrapper macro below instead.
 */
void _xen_queue_pt_update(vm_paddr_t, vm_paddr_t, char *, int);
void xen_pt_switch(vm_paddr_t);
void xen_set_ldt(vm_paddr_t, unsigned long);
void xen_pgdpt_pin(vm_paddr_t);
void xen_pgd_pin(vm_paddr_t);
void xen_pgd_unpin(vm_paddr_t);
void xen_pt_pin(vm_paddr_t);
void xen_pt_unpin(vm_paddr_t);
void xen_flush_queue(void);
void pmap_ref(pt_entry_t *pte, vm_paddr_t ma);
void xen_check_queue(void);
/*
 * Queue a page-table update.  INVARIANTS kernels record the call site
 * so a failing update can be traced; otherwise no origin is passed.
 * (The two definitions were unconditional — a macro redefinition —
 * so the lost conditional is restored here.)
 */
#ifdef INVARIANTS
#define xen_queue_pt_update(a, b) _xen_queue_pt_update((a), (b), __FILE__, __LINE__)
#else
#define xen_queue_pt_update(a, b) _xen_queue_pt_update((a), (b), NULL, 0)
#endif
57 #include <sys/param.h>
/*
 * Reference/privilege tracking hooks.  With PMAP_DEBUG the hooks call
 * the real tracking functions; otherwise they expand to nothing so
 * production kernels pay no cost.  (The debug and no-op definitions
 * were both unconditional — conflicting redefinitions — so the lost
 * #ifdef PMAP_DEBUG / #else / #endif structure is restored.)
 */
#ifdef PMAP_DEBUG
#define PMAP_REF pmap_ref
#define PMAP_DEC_REF_PAGE pmap_dec_ref_page
#define PMAP_MARK_PRIV pmap_mark_privileged
#define PMAP_MARK_UNPRIV pmap_mark_unprivileged
#else
#define PMAP_MARK_PRIV(a)
#define PMAP_MARK_UNPRIV(a)
#define PMAP_REF(a, b)
#define PMAP_DEC_REF_PAGE(a)
#endif
75 #define PT_LOG() printk("WP PT_SET %s:%d\n", __FILE__, __LINE__)
/* Marker for an unmapped slot in the phys-to-machine table. */
#define INVALID_P2M_ENTRY (~0UL)
#define pmap_valid_entry(E) ((E) & PG_V) /* is PDE or PTE valid? */

/* Operation selector for pd_set() (see the PD_SET_VA*/PD_CLEAR_VA macros). */
#define SH_PD_SET_VA 1
#define SH_PD_SET_VA_MA 2
#define SH_PD_SET_VA_CLEAR 3
/*
 * Update page-directory entry 'ptepindex' of 'pmap' to 'val'; 'type'
 * is one of the SH_PD_SET_* codes above and selects whether 'val' is
 * a physical address, a machine address, or ignored (clear).
 */
void pd_set(struct pmap *pmap, int ptepindex, vm_paddr_t val, int type);
92 vptetomachpte(vm_paddr_t *pte)
94 vm_offset_t offset, ppte;
95 vm_paddr_t pgoffset, retval, *pdir_shadow_ptr;
98 ppte = (vm_offset_t)pte;
99 pgoffset = (ppte & PAGE_MASK);
100 offset = ppte - (vm_offset_t)PTmap;
101 pgindex = ppte >> PDRSHIFT;
103 pdir_shadow_ptr = (vm_paddr_t *)PCPU_GET(pdir_shadow);
104 retval = (pdir_shadow_ptr[pgindex] & ~PAGE_MASK) + pgoffset;
/*
 * Read a PTE/PDE through a virtual pointer: a valid entry is converted
 * from machine to physical address; an invalid entry reads as 0.
 */
#define PT_GET(_ptp) \
(pmap_valid_entry(*(_ptp)) ? xpmap_mtop(*(_ptp)) : (0))
111 #ifdef WRITABLE_PAGETABLES
113 #define PT_SET_VA(_ptp,_npte,sync) do { \
114 PMAP_REF((_ptp), xpmap_ptom(_npte)); \
116 *(_ptp) = xpmap_ptom((_npte)); \
117 } while (/*CONSTCOND*/0)
118 #define PT_SET_VA_MA(_ptp,_npte,sync) do { \
119 PMAP_REF((_ptp), (_npte)); \
122 } while (/*CONSTCOND*/0)
123 #define PT_CLEAR_VA(_ptp, sync) do { \
124 PMAP_REF((pt_entry_t *)(_ptp), 0); \
127 } while (/*CONSTCOND*/0)
/*
 * Writable-pagetable flavour of the page-directory update macros.
 * Updates still go through pd_set() (keeping the shadow directory
 * coherent); 'sync' or ALWAYS_SYNC forces an immediate flush of any
 * queued hypervisor updates.
 */
#define PD_SET_VA(_pmap, _ptp, _npte, sync) do { \
	PMAP_REF((_ptp), xpmap_ptom(_npte)); \
	pd_set((_pmap),(_ptp),(_npte), SH_PD_SET_VA); \
	if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
#define PD_SET_VA_MA(_pmap, _ptp, _npte, sync) do { \
	PMAP_REF((_ptp), (_npte)); \
	pd_set((_pmap),(_ptp),(_npte), SH_PD_SET_VA_MA); \
	if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
#define PD_CLEAR_VA(_pmap, _ptp, sync) do { \
	PMAP_REF((pt_entry_t *)(_ptp), 0); \
	pd_set((_pmap),(_ptp), 0, SH_PD_SET_VA_CLEAR); \
	if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
#else /* !WRITABLE_PAGETABLES */

/*
 * Queued flavour: PTE updates are batched via xen_queue_pt_update()
 * and pushed to the hypervisor when 'sync' (or ALWAYS_SYNC) is set,
 * or at the next explicit xen_flush_queue().
 */
#define PT_SET_VA(_ptp,_npte,sync) do { \
	PMAP_REF((_ptp), xpmap_ptom(_npte)); \
	xen_queue_pt_update(vtomach(_ptp), \
	xpmap_ptom(_npte)); \
	if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
#define PT_SET_VA_MA(_ptp,_npte,sync) do { \
	PMAP_REF((_ptp), (_npte)); \
	xen_queue_pt_update(vtomach(_ptp), _npte); \
	if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
/*
 * Queue a clear of the PTE at virtual address _ptp, flushing when
 * requested.  (The 'if (sync || ALWAYS_SYNC)' had lost its statement —
 * the xen_flush_queue() call is restored; without it the 'if' consumed
 * the do/while terminator and the macro was malformed.)
 */
#define PT_CLEAR_VA(_ptp, sync) do {				\
	PMAP_REF((pt_entry_t *)(_ptp), 0);			\
	xen_queue_pt_update(vtomach(_ptp), 0);			\
	if (sync || ALWAYS_SYNC)				\
		xen_flush_queue();				\
} while (/*CONSTCOND*/0)
165 #define PD_SET_VA(_pmap, _ptepindex,_npte,sync) do { \
166 PMAP_REF((_ptp), xpmap_ptom(_npte)); \
167 pd_set((_pmap),(_ptepindex),(_npte), SH_PD_SET_VA); \
168 if (sync || ALWAYS_SYNC) xen_flush_queue(); \
169 } while (/*CONSTCOND*/0)
170 #define PD_SET_VA_MA(_pmap, _ptepindex,_npte,sync) do { \
171 PMAP_REF((_ptp), (_npte)); \
172 pd_set((_pmap),(_ptepindex),(_npte), SH_PD_SET_VA_MA); \
173 if (sync || ALWAYS_SYNC) xen_flush_queue(); \
174 } while (/*CONSTCOND*/0)
175 #define PD_CLEAR_VA(_pmap, _ptepindex, sync) do { \
176 PMAP_REF((pt_entry_t *)(_ptp), 0); \
177 pd_set((_pmap),(_ptepindex), 0, SH_PD_SET_VA_CLEAR); \
178 if (sync || ALWAYS_SYNC) xen_flush_queue(); \
179 } while (/*CONSTCOND*/0)
/*
 * Map virtual address _va directly to machine address _ma via the
 * update_va_mapping hypercall, invalidating the TLB entry on all
 * vcpus; panics if the hypercall fails.  (The dropped 'do {' and the
 * '(_ma),' argument line are restored — without them the macro was
 * malformed and the hypercall was missing its second argument.)
 */
#define PT_SET_MA(_va, _ma)						\
do {									\
	PANIC_IF(HYPERVISOR_update_va_mapping(((unsigned long)(_va)),	\
	    (_ma),							\
	    UVMF_INVLPG| UVMF_ALL) < 0);				\
} while (/*CONSTCOND*/0)
/*
 * Push any queued page-table updates to the hypervisor now.  (The body
 * had been lost, turning the macro into a no-op that silently dropped
 * pending updates; the xen_flush_queue() call is restored.)
 */
#define PT_UPDATES_FLUSH() do {					\
	xen_flush_queue();					\
} while (/*CONSTCOND*/0)
194 static __inline vm_paddr_t
195 xpmap_mtop(vm_paddr_t mpa)
197 vm_paddr_t tmp = (mpa & PG_FRAME);
199 return machtophys(tmp) | (mpa & ~PG_FRAME);
202 static __inline vm_paddr_t
203 xpmap_ptom(vm_paddr_t ppa)
205 vm_paddr_t tmp = (ppa & PG_FRAME);
207 return phystomach(tmp) | (ppa & ~PG_FRAME);
211 set_phys_to_machine(unsigned long pfn, unsigned long mfn)
214 PANIC_IF(max_mapnr && pfn >= max_mapnr);
216 if (xen_feature(XENFEAT_auto_translated_physmap)) {
218 PANIC_IF((pfn != mfn && mfn != INVALID_P2M_ENTRY));
222 xen_phys_machine[pfn] = mfn;
228 #endif /* _XEN_XENPMAP_H_ */