/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef INFINIBAND_ARCH_H
#define INFINIBAND_ARCH_H

#include <infiniband/endian.h>
#include <infiniband/byteswap.h>
/*
 * 64-bit network <-> host byte-order conversion, mirroring
 * htonl()/ntohl() for 64-bit quantities: a byte swap on little-endian
 * hosts, a no-op on big-endian hosts.
 *
 * Fix: the conditional was unterminated — the #else introducing the
 * #error branch and the closing #endif were missing, so the
 * preprocessor block never closed.
 */
#if __BYTE_ORDER == __LITTLE_ENDIAN
static inline uint64_t htonll(uint64_t x) { return bswap_64(x); }
static inline uint64_t ntohll(uint64_t x) { return bswap_64(x); }
#elif __BYTE_ORDER == __BIG_ENDIAN
static inline uint64_t htonll(uint64_t x) { return x; }
static inline uint64_t ntohll(uint64_t x) { return x; }
#else
#error __BYTE_ORDER is neither __LITTLE_ENDIAN nor __BIG_ENDIAN
#endif
51 * Architecture-specific defines. Currently, an architecture is
52 * required to implement the following operations:
54 * mb() - memory barrier. No loads or stores may be reordered across
55 * this macro by either the compiler or the CPU.
56 * rmb() - read memory barrier. No loads may be reordered across this
57 * macro by either the compiler or the CPU.
58 * wmb() - write memory barrier. No stores may be reordered across
59 * this macro by either the compiler or the CPU.
60 * wc_wmb() - flush write combine buffers. No write-combined writes
61 * will be reordered across this macro by either the compiler or
67 #define mb() asm volatile("lock; addl $0,0(%%esp) " ::: "memory")
69 #define wmb() asm volatile("" ::: "memory")
72 #elif defined(__x86_64__)
75 * Only use lfence for mb() and rmb() because we don't care about
76 * ordering against non-temporal stores (for now at least).
78 #define mb() asm volatile("lfence" ::: "memory")
80 #define wmb() asm volatile("" ::: "memory")
81 #define wc_wmb() asm volatile("sfence" ::: "memory")
83 #elif defined(__PPC64__)
85 #define mb() asm volatile("sync" ::: "memory")
86 #define rmb() asm volatile("lwsync" ::: "memory")
88 #define wc_wmb() wmb()
90 #elif defined(__ia64__)
92 #define mb() asm volatile("mf" ::: "memory")
95 #define wc_wmb() asm volatile("fwb" ::: "memory")
97 #elif defined(__PPC__)
99 #define mb() asm volatile("sync" ::: "memory")
102 #define wc_wmb() wmb()
104 #elif defined(__sparc_v9__)
106 #define mb() asm volatile("membar #LoadLoad | #LoadStore | #StoreStore | #StoreLoad" ::: "memory")
107 #define rmb() asm volatile("membar #LoadLoad" ::: "memory")
108 #define wmb() asm volatile("membar #StoreStore" ::: "memory")
109 #define wc_wmb() wmb()
111 #elif defined(__sparc__)
113 #define mb() asm volatile("" ::: "memory")
116 #define wc_wmb() wmb()
120 #warning No architecture specific defines found. Using generic implementation.
122 #define mb() asm volatile("" ::: "memory")
125 #define wc_wmb() wmb()
#endif /* INFINIBAND_ARCH_H */