/******************************************************************************
 * random collection of macros and definitions
 */
#define CONFIG_X86_PAE
#if !defined(__XEN_INTERFACE_VERSION__)
/* Can update to a more recent version when we implement the hypercall page. */
#define __XEN_INTERFACE_VERSION__ 0x00030204
#endif
#include <xen/interface/xen.h>

/* Force a proper event-channel callback from Xen. */
void force_evtchn_callback(void);

extern shared_info_t *HYPERVISOR_shared_info;
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
        __asm__ __volatile__ ( "rep;nop" : : : "memory" );
}

#define cpu_relax() rep_nop()
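/*
 * Illustrative sketch (not part of the original header): cpu_relax() is
 * meant to sit in the body of a busy-wait loop, for example while polling a
 * flag that another VCPU or an interrupt handler will set.  'done' is a
 * hypothetical volatile variable.
 *
 *      while (!done)
 *              cpu_relax();
 */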
/* Crude memory allocator for memory allocations made early in boot. */
void *bootmem_alloc(unsigned int size);
void bootmem_free(void *ptr, unsigned int size);
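/*
 * Illustrative sketch: the boot-time allocator is used before the regular
 * kernel allocators are available.  The size passed to bootmem_free() must
 * match the size passed to bootmem_alloc().  'nbytes' is a hypothetical size.
 *
 *      unsigned int nbytes = 4096;
 *      void *p = bootmem_alloc(nbytes);
 *      ...
 *      bootmem_free(p, nbytes);
 */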
/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__

void printk(const char *fmt, ...);
/* some function prototypes */

#define likely(x)   __builtin_expect((x),1)
#define unlikely(x) __builtin_expect((x),0)
/*
 * STI/CLI equivalents. These basically set and clear the virtual
 * event_enable flag in the shared_info structure. Note that when
 * the enable bit is set, there may be pending events to be handled.
 * We may therefore call into do_hypervisor_callback() directly.
 */
#define __cli()                                                         \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[PCPU_GET(cpuid)];    \
        _vcpu->evtchn_upcall_mask = 1;                                  \
        barrier();                                                      \
} while (0)

#define __sti()                                                         \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[PCPU_GET(cpuid)];    \
        _vcpu->evtchn_upcall_mask = 0;                                  \
        barrier(); /* unmask then check (avoid races) */                \
        if ( unlikely(_vcpu->evtchn_upcall_pending) )                   \
                force_evtchn_callback();                                \
} while (0)
#define __restore_flags(x)                                              \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[PCPU_GET(cpuid)];    \
        if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {                   \
                barrier(); /* unmask then check (avoid races) */        \
                if ( unlikely(_vcpu->evtchn_upcall_pending) )           \
                        force_evtchn_callback();                        \
        }                                                               \
} while (0)
/* Add critical_{enter, exit}? */
#define __save_and_cli(x)                                               \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[PCPU_GET(cpuid)];    \
        (x) = _vcpu->evtchn_upcall_mask;                                \
        _vcpu->evtchn_upcall_mask = 1;                                  \
        barrier();                                                      \
} while (0)
#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(x) __save_and_cli(x)

#define local_irq_save(x) __save_and_cli(x)
#define local_irq_restore(x) __restore_flags(x)
#define local_irq_disable() __cli()
#define local_irq_enable() __sti()
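/*
 * Illustrative sketch: a typical short critical section with virtual event
 * delivery masked.  Because unmasking may reveal pending events, the restore
 * path (see __restore_flags() above) calls force_evtchn_callback() itself;
 * callers only need the save/restore pair.  'flags' is a hypothetical local.
 *
 *      unsigned long flags;
 *
 *      local_irq_save(flags);
 *      ... touch per-VCPU state that the event handler also touches ...
 *      local_irq_restore(flags);
 */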
#define mtx_lock_irqsave(lock, x)      {local_irq_save((x)); mtx_lock_spin((lock));}
#define mtx_unlock_irqrestore(lock, x) {mtx_unlock_spin((lock)); local_irq_restore((x)); }
#define spin_lock_irqsave mtx_lock_irqsave
#define spin_unlock_irqrestore mtx_unlock_irqrestore
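/*
 * Illustrative sketch: the spin_lock_irqsave()/spin_unlock_irqrestore()
 * aliases pair event masking with a FreeBSD spin mutex.  'example_mtx' is a
 * hypothetical mutex initialized elsewhere with mtx_init() as a spin lock.
 *
 *      unsigned long flags;
 *
 *      spin_lock_irqsave(&example_mtx, flags);
 *      ... update state shared with the event-channel upcall ...
 *      spin_unlock_irqrestore(&example_mtx, flags);
 */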
#define mb()  __asm__ __volatile__("mfence":::"memory")
#define rmb() __asm__ __volatile__("lfence":::"memory")
#define wmb() barrier()
#ifdef SMP
#define smp_mb()  mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#else
#define smp_mb()  barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
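/*
 * Illustrative sketch: wmb() is typically used on the producer side to make
 * a payload visible before the flag that advertises it, and rmb() on the
 * consumer side before reading the payload.  'req' and 'req_ready' are
 * hypothetical fields in memory shared with another CPU or domain.
 *
 *      producer:
 *              req = value;
 *              wmb();                  (payload before flag)
 *              req_ready = 1;
 *
 *      consumer:
 *              while (!req_ready)
 *                      cpu_relax();
 *              rmb();                  (flag before payload)
 *              use(req);
 */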
/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")

#define LOCK_PREFIX ""
#define ADDR (*(volatile long *) addr)
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;
#define xen_xchg(ptr,v) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

static __inline unsigned long __xchg(unsigned long x, volatile void * ptr,
                                     int size)
{
        switch (size) {
        case 1:
                __asm__ __volatile__("xchgb %b0,%1"
                        :"=q" (x) :"m" (*__xg(ptr)), "0" (x) :"memory");
                break;
        case 2:
                __asm__ __volatile__("xchgw %w0,%1"
                        :"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory");
                break;
        case 4:
                __asm__ __volatile__("xchgl %0,%1"
                        :"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory");
                break;
        }
        return x;
}
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline int test_and_clear_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__( LOCK_PREFIX
                "btrl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"Ir" (nr) : "memory");
        return oldbit;
}
static __inline int constant_test_bit(int nr, const volatile void * addr)
{
        return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline int variable_test_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__(
                "btl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit)
                :"m" (ADDR),"Ir" (nr));
        return oldbit;
}

#define test_bit(nr,addr) \
        (__builtin_constant_p(nr) ? \
         constant_test_bit((nr),(addr)) : \
         variable_test_bit((nr),(addr)))
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btsl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btrl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}
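/*
 * Illustrative sketch: set_bit()/test_bit()/clear_bit() manipulating a
 * shared mask.  As noted above, clear_bit() is not a memory barrier, so a
 * lock-style hand-off built on it needs the smp_mb__before_clear_bit()/
 * smp_mb__after_clear_bit() calls mentioned in the comment.  'feature_mask'
 * and FEATURE_FOO are hypothetical.
 *
 *      set_bit(FEATURE_FOO, &feature_mask);
 *      ...
 *      if (test_bit(FEATURE_FOO, &feature_mask))
 *              clear_bit(FEATURE_FOO, &feature_mask);
 */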
/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
        __asm__ __volatile__(
                LOCK_PREFIX "incl %0"
                :"=m" (v->counter)
                :"m" (v->counter));
}
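/*
 * Illustrative sketch: an atomic_t should only be manipulated through the
 * atomic_* helpers rather than by writing the counter field directly.
 * 'event_count' is a hypothetical statistics counter.
 *
 *      static atomic_t event_count = { 0 };
 *      ...
 *      atomic_inc(&event_count);
 */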
#define rdtscll(val) \
        __asm__ __volatile__("rdtsc" : "=A" (val))
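/*
 * Illustrative sketch: rdtscll() reads the 64-bit time-stamp counter into a
 * 64-bit lvalue via the edx:eax pair (the "=A" constraint), which is handy
 * for cheap cycle-level timing.  'before' and 'after' are hypothetical.
 *
 *      uint64_t before, after;
 *
 *      rdtscll(before);
 *      ... code being timed ...
 *      rdtscll(after);
 *      printk("cycles: %llu\n", (unsigned long long)(after - before));
 */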
#endif /* !__ASSEMBLY__ */