/******************************************************************************
 * random collection of macros and definitions
 */

#ifndef _XEN_OS_H_
#define _XEN_OS_H_
#include <machine/param.h>
#ifdef PAE
#define CONFIG_X86_PAE
#endif
#if defined(XEN) && !defined(__XEN_INTERFACE_VERSION__)
/*
 * Can update to a more recent version when we implement
 * the hypercall page.
 */
#define __XEN_INTERFACE_VERSION__ 0x00030204
#endif
#include <xen/interface/xen.h>

/* Force a proper event-channel callback from Xen. */
void force_evtchn_callback(void);
#include <vm/vm_param.h>
extern int gdtset;
#ifdef SMP
#include <sys/time.h> /* XXX for pcpu.h */
#include <sys/pcpu.h> /* XXX for PCPU_GET */
static inline int
smp_processor_id(void)
{
    if (__predict_true(gdtset))
        return (PCPU_GET(cpuid));
    return (0);
}
#else
#define smp_processor_id() 0
#endif
#ifndef NULL
#define NULL (void *)0
#endif
#ifndef PANIC_IF
#define PANIC_IF(exp) do {                                               \
    if (unlikely(exp)) {                                                 \
        printk("panic - %s: %s:%d\n", #exp, __FILE__, __LINE__);         \
        panic("%s: %s:%d", #exp, __FILE__, __LINE__);                    \
    }                                                                    \
} while (0)
#endif
extern shared_info_t *HYPERVISOR_shared_info;
/* Somewhere in the middle of the GCC 2.96 development cycle, we implemented
   a mechanism by which the user can annotate likely branch directions and
   expect the blocks to be reordered appropriately.  Define __builtin_expect
   to nothing for earlier compilers. */
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
    __asm__ __volatile__ ( "rep;nop" : : : "memory" );
}
#define cpu_relax() rep_nop()
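
/*
 * Illustrative sketch only (not part of the original header): a typical
 * busy-wait loop that inserts cpu_relax() while spinning; the flag and
 * function name are hypothetical.
 */
static inline void example_spin_until_set(volatile int *flag)
{
    while (*flag == 0)
        cpu_relax();        /* PAUSE: eases pipeline/power pressure while waiting */
}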
#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
#define __builtin_expect(x, expected_value) (x)
#endif
#define per_cpu(var, cpu) (pcpu_find((cpu))->pc_ ## var)
/* crude memory allocator for memory allocation early in
 * boot
 */
void *bootmem_alloc(unsigned int size);
void bootmem_free(void *ptr, unsigned int size);
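
/*
 * Illustrative sketch only: the intended pairing of the early-boot
 * allocator calls above.  The PAGE_SIZE-sized allocation is a
 * hypothetical example, not a requirement of the interface.
 */
static inline void example_bootmem_usage(void)
{
    void *buf = bootmem_alloc(PAGE_SIZE);    /* PAGE_SIZE: machine/param.h */

    if (buf != NULL)
        bootmem_free(buf, PAGE_SIZE);        /* caller remembers the size */
}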
/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__
#include <sys/types.h>
void printk(const char *fmt, ...);

/* some function prototypes */
/*
 * STI/CLI equivalents. These basically set and clear the virtual
 * event_enable flag in the shared_info structure. Note that when
 * the enable bit is set, there may be pending events to be handled.
 * We may therefore call into do_hypervisor_callback() directly.
 */
#define likely(x)   __builtin_expect((x),1)
#define unlikely(x) __builtin_expect((x),0)
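
/*
 * Illustrative sketch only: annotating the expected branch direction so
 * the compiler lays out the common path first.  The check itself is
 * hypothetical.
 */
static inline void example_check(void *p)
{
    if (unlikely(p == NULL))        /* error path: expected to be rare */
        printk("example_check: NULL pointer\n");
}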
#define __cli() do {                                                     \
    vcpu_info_t *_vcpu;                                                  \
    _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];      \
    _vcpu->evtchn_upcall_mask = 1;                                       \
    barrier();                                                           \
} while (0)
#define __sti() do {                                                     \
    vcpu_info_t *_vcpu;                                                  \
    barrier();                                                           \
    _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];      \
    _vcpu->evtchn_upcall_mask = 0;                                       \
    barrier(); /* unmask then check (avoid races) */                     \
    if ( unlikely(_vcpu->evtchn_upcall_pending) )                        \
        force_evtchn_callback();                                         \
} while (0)
#define __restore_flags(x) do {                                          \
    vcpu_info_t *_vcpu;                                                  \
    barrier();                                                           \
    _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];      \
    if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {                        \
        barrier(); /* unmask then check (avoid races) */                 \
        if ( unlikely(_vcpu->evtchn_upcall_pending) )                    \
            force_evtchn_callback();                                     \
    }                                                                    \
} while (0)
/* Add critical_{enter, exit}? */
#define __save_and_cli(x) do {                                           \
    vcpu_info_t *_vcpu;                                                  \
    _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];      \
    (x) = _vcpu->evtchn_upcall_mask;                                     \
    _vcpu->evtchn_upcall_mask = 1;                                       \
    barrier();                                                           \
} while (0)

#define __save_flags(x) do {                                             \
    vcpu_info_t *_vcpu;                                                  \
    _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];      \
    (x) = _vcpu->evtchn_upcall_mask;                                     \
} while (0)
#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(x) __save_and_cli(x)

#define local_irq_save(x)    __save_and_cli(x)
#define local_irq_restore(x) __restore_flags(x)
#define local_irq_disable()  __cli()
#define local_irq_enable()   __sti()
#define mtx_lock_irqsave(lock, x)      do {local_irq_save((x)); mtx_lock_spin((lock));} while (0)
#define mtx_unlock_irqrestore(lock, x) do {mtx_unlock_spin((lock)); local_irq_restore((x));} while (0)
#define spin_lock_irqsave mtx_lock_irqsave
#define spin_unlock_irqrestore mtx_unlock_irqrestore
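
/*
 * Illustrative sketch only: the usual save/disable ... restore pattern
 * built from the wrappers above (the work in the middle is hypothetical):
 *
 *      unsigned long flags;
 *
 *      local_irq_save(flags);      (mask upcalls, remember the old mask)
 *      ... work that must not race with event delivery ...
 *      local_irq_restore(flags);   (unmask; deliver any pending events)
 */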
#define mb()  __asm__ __volatile__("lock; addl $0, 0(%%esp)": : :"memory")
#define rmb() mb()
#define wmb() barrier()
#ifdef SMP
#define smp_mb()  mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#else
#define smp_mb()  barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")
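
/*
 * Illustrative sketch only: the classic publish/consume pairing for the
 * barriers above.  The data/ready pair is hypothetical; on UP builds
 * smp_wmb() degrades to the compiler barrier, which is all that is needed.
 */
static __inline void example_publish(volatile int *data, volatile int *ready)
{
    *data = 42;     /* payload first ... */
    smp_wmb();      /* ... ordered before the flag that announces it */
    *ready = 1;
}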
#define LOCK_PREFIX ""
#define LOCK ""
#define ADDR (*(volatile long *) addr)
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;
#define xen_xchg(ptr,v) \
    ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
static __inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
    switch (size) {
    case 1:
        __asm__ __volatile__("xchgb %b0,%1"
            :"=q" (x) :"m" (*__xg(ptr)), "0" (x) :"memory");
        break;
    case 2:
        __asm__ __volatile__("xchgw %w0,%1"
            :"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory");
        break;
    case 4:
        __asm__ __volatile__("xchgl %0,%1"
            :"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory");
        break;
    }
    return (x);
}
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline int test_and_clear_bit(int nr, volatile void *addr)
{
    int oldbit;

    __asm__ __volatile__( LOCK_PREFIX
        "btrl %2,%1\n\tsbbl %0,%0"
        :"=r" (oldbit),"=m" (ADDR)
        :"Ir" (nr) : "memory");
    return (oldbit);
}
static __inline int constant_test_bit(int nr, const volatile void *addr)
{
    return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}
static __inline int variable_test_bit(int nr, volatile void *addr)
{
    int oldbit;

    __asm__ __volatile__(
        "btl %2,%1\n\tsbbl %0,%0"
        :"=r" (oldbit) :"m" (ADDR),"Ir" (nr));
    return (oldbit);
}
#define test_bit(nr,addr) \
    (__builtin_constant_p(nr) ? \
     constant_test_bit((nr),(addr)) : \
     variable_test_bit((nr),(addr)))
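
/*
 * Illustrative sketch only: draining a 32-bit pending mask with the
 * helpers above.  A constant nr compiles to constant_test_bit(); a
 * variable nr, as here, goes through variable_test_bit().  The mask is
 * hypothetical.
 */
static __inline void example_drain_pending(volatile unsigned long *pending)
{
    int bit;

    for (bit = 0; bit < 32; bit++)
        if (test_bit(bit, pending) && test_and_clear_bit(bit, pending))
            printk("bit %d was pending\n", bit);
}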
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void *addr)
{
    __asm__ __volatile__( LOCK_PREFIX
        "btsl %1,%0" :"=m" (ADDR) :"Ir" (nr));
}
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void *addr)
{
    __asm__ __volatile__( LOCK_PREFIX
        "btrl %1,%0" :"=m" (ADDR) :"Ir" (nr));
}
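
/*
 * Illustrative sketch only: clear_bit() carries no barrier of its own,
 * so an unlock-style use pairs it with an explicit smp_mb() (this header
 * does not provide the smp_mb__before_clear_bit() helper the comment
 * above mentions).  The word/bit arguments are hypothetical.
 */
static __inline void example_unlock_bit(volatile unsigned long *word, int bit)
{
    smp_mb();               /* make the critical section's stores visible */
    clear_bit(bit, word);   /* then release the bit */
}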
/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
    __asm__ __volatile__(
        LOCK "incl %0"
        :"=m" (v->counter) :"m" (v->counter));
}
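
/*
 * Illustrative sketch only: a hypothetical event counter bumped with
 * atomic_inc(); the value is read back through the .counter member.
 */
static __inline int example_count_event(atomic_t *counter)
{
    atomic_inc(counter);        /* LOCKed increment, safe across CPUs */
    return (counter->counter);  /* snapshot; may already be stale */
}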
#define rdtscll(val) \
    __asm__ __volatile__("rdtsc" : "=A" (val))
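
/*
 * Illustrative sketch only: measuring an elapsed-cycle delta with
 * rdtscll().  The "=A" constraint binds the 64-bit result to EDX:EAX,
 * so val must be a 64-bit variable.  The timed region is hypothetical.
 */
static __inline uint64_t example_cycles_elapsed(void)
{
    uint64_t start, end;

    rdtscll(start);
    cpu_relax();        /* stand-in for the code being timed */
    rdtscll(end);
    return (end - start);
}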
/*
 * Kernel pointers have redundant information, so we can use a
 * scheme where we can return either an error code or a dentry
 * pointer with the same return value.
 *
 * This should be a per-architecture thing, to allow different
 * error and pointer decisions.
 */
#define IS_ERR_VALUE(x) unlikely((x) > (unsigned long)-1000L)

static inline void *ERR_PTR(long error)
{
    return (void *) error;
}

static inline long PTR_ERR(const void *ptr)
{
    return (long) ptr;
}

static inline long IS_ERR(const void *ptr)
{
    return IS_ERR_VALUE((unsigned long)ptr);
}
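
/*
 * Illustrative sketch only: the usual calling pattern for the helpers
 * above.  example_lookup() and its failure code are hypothetical.
 */
static inline void *example_lookup(int ok)
{
    if (!ok)
        return ERR_PTR(-2);     /* encode a negative errno-style code */
    return (void *)HYPERVISOR_shared_info;
}

static inline long example_use_lookup(void)
{
    void *p = example_lookup(1);

    if (IS_ERR(p))
        return PTR_ERR(p);      /* recover the negative error code */
    return 0;
}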
#endif /* !__ASSEMBLY__ */

#endif /* _XEN_OS_H_ */