3 * Copyright (c) 2004 Christian Limpach.
4 * Copyright (c) 2004,2005 Kip Macy
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Christian Limpach.
18 * 4. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #ifndef _XEN_XENPMAP_H_
35 #define _XEN_XENPMAP_H_
37 #include <machine/xen/features.h>
/*
 * Prototypes for the Xen MMU-update helpers implemented elsewhere in the
 * pmap code.  Addresses are machine (hypervisor) physical addresses
 * unless noted otherwise.
 */
/* Queue a single page-table update (ptr -> val); file/line identify the
 * call site for diagnostics (may be NULL/0). */
void _xen_queue_pt_update(vm_paddr_t, vm_paddr_t, char *, int);
/* Switch to the page table rooted at the given machine address. */
void xen_pt_switch(vm_paddr_t);
/* Install an LDT located at the given address with the given entry count. */
void xen_set_ldt(vm_paddr_t, unsigned long);
/* Pin/unpin page-directory-pointer tables, page directories and page
 * tables so the hypervisor validates and write-protects them. */
void xen_pgdpt_pin(vm_paddr_t);
void xen_pgd_pin(vm_paddr_t);
void xen_pgd_unpin(vm_paddr_t);
void xen_pt_pin(vm_paddr_t);
void xen_pt_unpin(vm_paddr_t);
/* Flush all queued page-table updates to the hypervisor. */
void xen_flush_queue(void);
/* Sanity-check the pending update queue (diagnostic builds). */
void xen_check_queue(void);
/* Reference-tracking hook for a PTE and its machine address
 * (PMAP_DEBUG builds). */
void pmap_ref(pt_entry_t *pte, vm_paddr_t ma);
/*
 * xen_queue_pt_update() wrapper: in diagnostic builds, tag each queued
 * page-table update with its call site so _xen_queue_pt_update() can
 * report who queued it; in normal builds pass no location information.
 *
 * NOTE(review): the #ifdef/#else/#endif structure was missing here,
 * leaving two conflicting definitions of the same macro (a redefinition
 * error).  The conditional is reconstructed; confirm the guard symbol
 * (INVARIANTS vs. PMAP_DEBUG) against the project's original header.
 */
#ifdef INVARIANTS
#define xen_queue_pt_update(a, b) _xen_queue_pt_update((a), (b), __FILE__, __LINE__)
#else
#define xen_queue_pt_update(a, b) _xen_queue_pt_update((a), (b), NULL, 0)
#endif
/*
 * Page-table reference accounting hooks.  With PMAP_DEBUG defined they
 * map onto real tracking functions; otherwise they expand to nothing so
 * the calls compile away entirely.
 *
 * NOTE(review): the #ifdef PMAP_DEBUG/#else/#endif structure was missing
 * here, leaving conflicting redefinitions of each macro; it is restored.
 */
#ifdef PMAP_DEBUG
#define PMAP_REF pmap_ref
#define PMAP_DEC_REF_PAGE pmap_dec_ref_page
#define PMAP_MARK_PRIV pmap_mark_privileged
#define PMAP_MARK_UNPRIV pmap_mark_unprivileged
#else
#define PMAP_MARK_PRIV(a)
#define PMAP_MARK_UNPRIV(a)
#define PMAP_REF(a, b)
#define PMAP_DEC_REF_PAGE(a)
#endif
74 #define PT_LOG() printk("WP PT_SET %s:%d\n", __FILE__, __LINE__)
/* Sentinel stored in the phys-to-machine table for an unmapped frame. */
#define INVALID_P2M_ENTRY (~0UL)

#define pmap_valid_entry(E) ((E) & PG_V) /* is PDE or PTE valid? */

/*
 * Operation selectors for pd_set(): install a page-directory entry from a
 * physical address, from a machine address, or clear it.
 */
#define SH_PD_SET_VA 1
#define SH_PD_SET_VA_MA 2
#define SH_PD_SET_VA_CLEAR 3

/* Update page-directory slot `ptepindex` of `pmap` to `val`, applying one
 * of the SH_PD_SET_* operations above (also maintains the directory
 * shadow -- see the PD_* macros below). */
void pd_set(struct pmap *pmap, int ptepindex, vm_paddr_t val, int type);
91 vptetomachpte(vm_paddr_t *pte)
93 vm_offset_t offset, ppte;
94 vm_paddr_t pgoffset, retval, *pdir_shadow_ptr;
97 ppte = (vm_offset_t)pte;
98 pgoffset = (ppte & PAGE_MASK);
99 offset = ppte - (vm_offset_t)PTmap;
100 pgindex = ppte >> PDRSHIFT;
102 pdir_shadow_ptr = (vm_paddr_t *)PCPU_GET(pdir_shadow);
103 retval = (pdir_shadow_ptr[pgindex] & ~PAGE_MASK) + pgoffset;
/* Read a PTE, translating a valid entry's machine address back to a
 * physical address; invalid entries read as 0. */
#define PT_GET(_ptp) \
(pmap_valid_entry(*(_ptp)) ? xpmap_mtop(*(_ptp)) : (0))
110 #ifdef WRITABLE_PAGETABLES
112 #define PT_SET_VA(_ptp,_npte,sync) do { \
113 PMAP_REF((_ptp), xpmap_ptom(_npte)); \
115 *(_ptp) = xpmap_ptom((_npte)); \
116 } while (/*CONSTCOND*/0)
117 #define PT_SET_VA_MA(_ptp,_npte,sync) do { \
118 PMAP_REF((_ptp), (_npte)); \
121 } while (/*CONSTCOND*/0)
122 #define PT_CLEAR_VA(_ptp, sync) do { \
123 PMAP_REF((pt_entry_t *)(_ptp), 0); \
126 } while (/*CONSTCOND*/0)
/*
 * Page-directory updates (writable-pagetables flavour): directory entries
 * still go through pd_set() so the per-CPU directory shadow stays in
 * sync, and the update queue is flushed when `sync` (or ALWAYS_SYNC)
 * requests it.
 */
/* Install a PDE from a physical address. */
#define PD_SET_VA(_pmap, _ptp, _npte, sync) do { \
	PMAP_REF((_ptp), xpmap_ptom(_npte)); \
	pd_set((_pmap),(_ptp),(_npte), SH_PD_SET_VA); \
	if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
/* Install a PDE from a machine address. */
#define PD_SET_VA_MA(_pmap, _ptp, _npte, sync) do { \
	PMAP_REF((_ptp), (_npte)); \
	pd_set((_pmap),(_ptp),(_npte), SH_PD_SET_VA_MA); \
	if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
/* Clear a PDE. */
#define PD_CLEAR_VA(_pmap, _ptp, sync) do { \
	PMAP_REF((pt_entry_t *)(_ptp), 0); \
	pd_set((_pmap),(_ptp), 0, SH_PD_SET_VA_CLEAR); \
	if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
#else /* !WRITABLE_PAGETABLES */

/*
 * Queued flavour of the PTE update macros: instead of storing to the PTE
 * directly, batch the update through the hypervisor update queue and
 * flush when the caller asks for synchronous behaviour (or ALWAYS_SYNC
 * forces it).
 */
/* Set a PTE from a physical address (converted to machine first). */
#define PT_SET_VA(_ptp,_npte,sync) do { \
	PMAP_REF((_ptp), xpmap_ptom(_npte)); \
	xen_queue_pt_update(vtomach(_ptp), \
	    xpmap_ptom(_npte)); \
	if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
/* Set a PTE from an already-machine address (no conversion). */
#define PT_SET_VA_MA(_ptp,_npte,sync) do { \
	PMAP_REF((_ptp), (_npte)); \
	xen_queue_pt_update(vtomach(_ptp), _npte); \
	if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
/*
 * Clear a PTE via the hypervisor update queue.
 *
 * NOTE(review): the `xen_flush_queue();` body of the trailing `if` was
 * missing from this elided chunk -- as written, the `if` would swallow
 * the macro's closing `}` and break every user.  Restored to match the
 * sibling PT_SET_VA/PT_SET_VA_MA macros.
 */
#define PT_CLEAR_VA(_ptp, sync) do { \
	PMAP_REF((pt_entry_t *)(_ptp), 0); \
	xen_queue_pt_update(vtomach(_ptp), 0); \
	if (sync || ALWAYS_SYNC) \
		xen_flush_queue(); \
} while (/*CONSTCOND*/0)
/*
 * Page-directory updates (queued flavour): changes go through pd_set(),
 * which also maintains the per-CPU directory shadow; the update queue is
 * flushed when `sync` (or ALWAYS_SYNC) requests it.
 *
 * NOTE(review): the PMAP_REF() invocations below reference `_ptp`, which
 * is NOT a parameter of these macros (the parameter is `_ptepindex`).
 * This only compiles because PMAP_REF() expands to nothing in
 * non-PMAP_DEBUG builds; with PMAP_DEBUG defined these macros would fail
 * to compile at any use site.  Flagged rather than changed, since the
 * intended debug argument (a pt_entry_t *, not an index) is unclear.
 */
/* Install a PDE from a physical address. */
#define PD_SET_VA(_pmap, _ptepindex,_npte,sync) do { \
	PMAP_REF((_ptp), xpmap_ptom(_npte)); \
	pd_set((_pmap),(_ptepindex),(_npte), SH_PD_SET_VA); \
	if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
/* Install a PDE from a machine address. */
#define PD_SET_VA_MA(_pmap, _ptepindex,_npte,sync) do { \
	PMAP_REF((_ptp), (_npte)); \
	pd_set((_pmap),(_ptepindex),(_npte), SH_PD_SET_VA_MA); \
	if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
/* Clear a PDE. */
#define PD_CLEAR_VA(_pmap, _ptepindex, sync) do { \
	PMAP_REF((pt_entry_t *)(_ptp), 0); \
	pd_set((_pmap),(_ptepindex), 0, SH_PD_SET_VA_CLEAR); \
	if (sync || ALWAYS_SYNC) xen_flush_queue(); \
} while (/*CONSTCOND*/0)
/*
 * PT_SET_MA(): synchronously remap virtual address `_va` to machine
 * address/pte `_ma` via the HYPERVISOR_update_va_mapping hypercall,
 * invalidating the TLB entry on all CPUs.  Panics if the hypercall fails.
 *
 * NOTE(review): the `do {` opener and the `(_ma),` argument line were
 * missing from this elided chunk and have been reconstructed.
 */
#define PT_SET_MA(_va, _ma)					\
do {								\
	PANIC_IF(HYPERVISOR_update_va_mapping(((unsigned long)(_va)),\
	    (_ma),						\
	    UVMF_INVLPG| UVMF_ALL) < 0);			\
} while (/*CONSTCOND*/0)
/*
 * Push any batched page-table updates out to the hypervisor.
 *
 * NOTE(review): the `xen_flush_queue();` body was missing from this
 * elided chunk (leaving an empty do/while) and has been restored.
 */
#define PT_UPDATES_FLUSH() do { \
	xen_flush_queue(); \
} while (/*CONSTCOND*/0)
193 static __inline vm_paddr_t
194 xpmap_mtop(vm_paddr_t mpa)
196 vm_paddr_t tmp = (mpa & PG_FRAME);
198 return machtophys(tmp) | (mpa & ~PG_FRAME);
201 static __inline vm_paddr_t
202 xpmap_ptom(vm_paddr_t ppa)
204 vm_paddr_t tmp = (ppa & PG_FRAME);
206 return phystomach(tmp) | (ppa & ~PG_FRAME);
210 set_phys_to_machine(unsigned long pfn, unsigned long mfn)
213 PANIC_IF(max_mapnr && pfn >= max_mapnr);
215 if (xen_feature(XENFEAT_auto_translated_physmap)) {
217 PANIC_IF((pfn != mfn && mfn != INVALID_P2M_ENTRY));
221 xen_phys_machine[pfn] = mfn;
227 #endif /* _XEN_XENPMAP_H_ */