/******************************************************************************
 * Random collection of macros and definitions
 *
 * Copyright (c) 2003, 2004 Keir Fraser (on behalf of the Xen team)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#ifndef _MACHINE_XEN_XEN_OS_H_
#define _MACHINE_XEN_XEN_OS_H_

#define CONFIG_X86_PAE
/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
        __asm__ __volatile__ ( "rep;nop" : : : "memory" );
}
#define cpu_relax() rep_nop()
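
/*
 * Illustrative sketch (not part of the original header): a typical
 * busy-wait loop that uses cpu_relax() to hint the CPU that it is
 * spinning, which eases pressure on the pipeline and a hyper-threaded
 * sibling. The 'ready' flag is a hypothetical example variable.
 *
 *     static volatile int ready;
 *
 *     while (!ready)
 *             cpu_relax();
 */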
/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")

#define LOCK_PREFIX ""

#define ADDR (*(volatile long *) addr)
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline int test_and_clear_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__( LOCK_PREFIX
                "btrl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"Ir" (nr) : "memory");
        return oldbit;
}
static __inline int constant_test_bit(int nr, const volatile void * addr)
{
        return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}
static __inline int variable_test_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__(
                "btl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit)
                :"m" (ADDR),"Ir" (nr));
        return oldbit;
}
#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))
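
/*
 * Illustrative sketch: test_bit() dispatches to the plain C version
 * when the bit number is a compile-time constant and to the BTL-based
 * version otherwise. 'flags' and 'nr' are hypothetical.
 *
 *     static volatile unsigned long flags;
 *
 *     if (test_bit(3, &flags))       // constant -> constant_test_bit()
 *             do_constant_case();
 *     if (test_bit(nr, &flags))      // variable -> variable_test_bit()
 *             do_variable_case();
 */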
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btsl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}
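
/*
 * Illustrative sketch: because BTS indexes bits relative to the base
 * address, @nr may exceed 31 and the operation lands in the correct
 * word of a multi-word bitmap. 'bitmap' is a hypothetical array.
 *
 *     static volatile unsigned long bitmap[4];   // 128 bits on i386
 *
 *     set_bit(70, bitmap);   // sets bit 6 of bitmap[2]
 */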
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btrl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}
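
/*
 * Illustrative sketch: releasing a hypothetical lock bit. clear_bit()
 * is not a memory barrier, and smp_mb__before_clear_bit() is not
 * defined in this header, so the compiler-only barrier() above is used
 * here to keep the compiler from sinking critical-section stores past
 * the release. 'lock_word' and LOCK_BIT are hypothetical.
 *
 *     // ... critical section ...
 *     barrier();
 *     clear_bit(LOCK_BIT, &lock_word);
 */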
#endif /* !__ASSEMBLY__ */

#endif /* _MACHINE_XEN_XEN_OS_H_ */