/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
34 #include <machine/vm.h>
35 #include <sys/endian.h>
36 #include <sys/types.h>
38 #include <linux/compiler.h>
39 #include <linux/types.h>
/* XXX This is all x86 specific.  It should be bus space access. */
/* rmb and wmb are declared in machine/atomic.h, so should be included first. */

/*
 * I/O access memory barriers.  Platforms that provide rmb()/wmb() get real
 * read/write barriers; otherwise fall back to a compiler barrier, which is
 * sufficient on strongly-ordered architectures such as x86.
 */
#ifndef	__io_br
#define	__io_br()	__compiler_membar()	/* before a read */
#endif

#ifndef	__io_ar
#ifdef	rmb
#define	__io_ar()	rmb()			/* after a read */
#else
#define	__io_ar()	__compiler_membar()
#endif
#endif

#ifndef	__io_bw
#ifdef	wmb
#define	__io_bw()	wmb()			/* before a write */
#else
#define	__io_bw()	__compiler_membar()
#endif
#endif

#ifndef	__io_aw
#define	__io_aw()	__compiler_membar()	/* after a write */
#endif
/* Access MMIO registers atomically without barriers and byte swapping. */
/* Read one byte from MMIO with no barrier and no byte swapping. */
static inline uint8_t
__raw_readb(const volatile void *addr)
{
	return (*(const volatile uint8_t *)addr);
}
#define	__raw_readb(addr)	__raw_readb(addr)
/* Write one byte to MMIO with no barrier and no byte swapping. */
static inline void
__raw_writeb(uint8_t v, volatile void *addr)
{
	*(volatile uint8_t *)addr = v;
}
#define	__raw_writeb(v, addr)	__raw_writeb(v, addr)
/* Read a 16-bit word from MMIO with no barrier and no byte swapping. */
static inline uint16_t
__raw_readw(const volatile void *addr)
{
	return (*(const volatile uint16_t *)addr);
}
#define	__raw_readw(addr)	__raw_readw(addr)
/* Write a 16-bit word to MMIO with no barrier and no byte swapping. */
static inline void
__raw_writew(uint16_t v, volatile void *addr)
{
	*(volatile uint16_t *)addr = v;
}
#define	__raw_writew(v, addr)	__raw_writew(v, addr)
/* Read a 32-bit word from MMIO with no barrier and no byte swapping. */
static inline uint32_t
__raw_readl(const volatile void *addr)
{
	return (*(const volatile uint32_t *)addr);
}
#define	__raw_readl(addr)	__raw_readl(addr)
/* Write a 32-bit word to MMIO with no barrier and no byte swapping. */
static inline void
__raw_writel(uint32_t v, volatile void *addr)
{
	*(volatile uint32_t *)addr = v;
}
#define	__raw_writel(v, addr)	__raw_writel(v, addr)
/* Read a 64-bit word from MMIO with no barrier and no byte swapping. */
static inline uint64_t
__raw_readq(const volatile void *addr)
{
	return (*(const volatile uint64_t *)addr);
}
#define	__raw_readq(addr)	__raw_readq(addr)
/* Write a 64-bit word to MMIO with no barrier and no byte swapping. */
static inline void
__raw_writeq(uint64_t v, volatile void *addr)
{
	*(volatile uint64_t *)addr = v;
}
#define	__raw_writeq(v, addr)	__raw_writeq(v, addr)
/* MMIO write ordering barrier; a compiler barrier suffices here. */
#define	mmiowb()	barrier()
/* Access little-endian MMIO registers atomically with memory barriers. */
/* Read one byte from little-endian MMIO, bracketed by read barriers. */
static inline uint8_t
readb(const volatile void *addr)
{
	uint8_t v;

	__io_br();
	v = *(const volatile uint8_t *)addr;
	__io_ar();
	return (v);
}
#define	readb(addr)		readb(addr)
/* Write one byte to little-endian MMIO, bracketed by write barriers. */
static inline void
writeb(uint8_t v, volatile void *addr)
{
	__io_bw();
	*(volatile uint8_t *)addr = v;
	__io_aw();
}
#define	writeb(v, addr)		writeb(v, addr)
/* Read a 16-bit little-endian MMIO register with barriers. */
static inline uint16_t
readw(const volatile void *addr)
{
	uint16_t v;

	__io_br();
	v = le16toh(__raw_readw(addr));
	__io_ar();
	return (v);
}
#define	readw(addr)		readw(addr)
/* Write a 16-bit little-endian MMIO register with barriers. */
static inline void
writew(uint16_t v, volatile void *addr)
{
	__io_bw();
	__raw_writew(htole16(v), addr);
	__io_aw();
}
#define	writew(v, addr)		writew(v, addr)
/* Read a 32-bit little-endian MMIO register with barriers. */
static inline uint32_t
readl(const volatile void *addr)
{
	uint32_t v;

	__io_br();
	v = le32toh(__raw_readl(addr));
	__io_ar();
	return (v);
}
#define	readl(addr)		readl(addr)
/* Write a 32-bit little-endian MMIO register with barriers. */
static inline void
writel(uint32_t v, volatile void *addr)
{
	__io_bw();
	__raw_writel(htole32(v), addr);
	__io_aw();
}
#define	writel(v, addr)		writel(v, addr)
/* Read a 64-bit little-endian MMIO register with barriers. */
static inline uint64_t
readq(const volatile void *addr)
{
	uint64_t v;

	__io_br();
	v = le64toh(__raw_readq(addr));
	__io_ar();
	return (v);
}
#define	readq(addr)		readq(addr)
/* Write a 64-bit little-endian MMIO register with barriers. */
static inline void
writeq(uint64_t v, volatile void *addr)
{
	__io_bw();
	__raw_writeq(htole64(v), addr);
	__io_aw();
}
#define	writeq(v, addr)		writeq(v, addr)
/* Access little-endian MMIO registers atomically without memory barriers. */
/* Read one byte from MMIO without any barrier. */
static inline uint8_t
readb_relaxed(const volatile void *addr)
{
	return (__raw_readb(addr));
}
#define	readb_relaxed(addr)	readb_relaxed(addr)
#undef writeb_relaxed
/* Write one byte to MMIO without any barrier. */
static inline void
writeb_relaxed(uint8_t v, volatile void *addr)
{
	__raw_writeb(v, addr);
}
#define	writeb_relaxed(v, addr)	writeb_relaxed(v, addr)
/* Read a 16-bit little-endian MMIO register without any barrier. */
static inline uint16_t
readw_relaxed(const volatile void *addr)
{
	return (le16toh(__raw_readw(addr)));
}
#define	readw_relaxed(addr)	readw_relaxed(addr)
#undef writew_relaxed
/* Write a 16-bit little-endian MMIO register without any barrier. */
static inline void
writew_relaxed(uint16_t v, volatile void *addr)
{
	__raw_writew(htole16(v), addr);
}
#define	writew_relaxed(v, addr)	writew_relaxed(v, addr)
/* Read a 32-bit little-endian MMIO register without any barrier. */
static inline uint32_t
readl_relaxed(const volatile void *addr)
{
	return (le32toh(__raw_readl(addr)));
}
#define	readl_relaxed(addr)	readl_relaxed(addr)
#undef writel_relaxed
/* Write a 32-bit little-endian MMIO register without any barrier. */
static inline void
writel_relaxed(uint32_t v, volatile void *addr)
{
	__raw_writel(htole32(v), addr);
}
#define	writel_relaxed(v, addr)	writel_relaxed(v, addr)
#undef writeq_relaxed

/* Read a 64-bit little-endian MMIO register without any barrier. */
static inline uint64_t
readq_relaxed(const volatile void *addr)
{
	return (le64toh(__raw_readq(addr)));
}
#define	readq_relaxed(addr)	readq_relaxed(addr)

/* Write a 64-bit little-endian MMIO register without any barrier. */
static inline void
writeq_relaxed(uint64_t v, volatile void *addr)
{
	__raw_writeq(htole64(v), addr);
}
#define	writeq_relaxed(v, addr)	writeq_relaxed(v, addr)
/* XXX On Linux ioread and iowrite handle both MMIO and port IO. */
/* Linux ioread8(): byte read with barriers; MMIO only in this port. */
static inline uint8_t
ioread8(const volatile void *addr)
{
	return (readb(addr));
}
#define	ioread8(addr)		ioread8(addr)
/* Linux ioread16(): little-endian 16-bit read with barriers. */
static inline uint16_t
ioread16(const volatile void *addr)
{
	return (readw(addr));
}
#define	ioread16(addr)		ioread16(addr)
/* Linux ioread16be(): big-endian 16-bit read with barriers. */
static inline uint16_t
ioread16be(const volatile void *addr)
{
	uint16_t v;

	__io_br();
	v = (be16toh(__raw_readw(addr)));
	__io_ar();

	return (v);
}
#define	ioread16be(addr)	ioread16be(addr)
/* Linux ioread32(): little-endian 32-bit read with barriers. */
static inline uint32_t
ioread32(const volatile void *addr)
{
	return (readl(addr));
}
#define	ioread32(addr)		ioread32(addr)
/* Linux ioread32be(): big-endian 32-bit read with barriers. */
static inline uint32_t
ioread32be(const volatile void *addr)
{
	uint32_t v;

	__io_br();
	v = (be32toh(__raw_readl(addr)));
	__io_ar();

	return (v);
}
#define	ioread32be(addr)	ioread32be(addr)
/* Linux iowrite8(): byte write with barriers. */
static inline void
iowrite8(uint8_t v, volatile void *addr)
{
	writeb(v, addr);
}
#define	iowrite8(v, addr)	iowrite8(v, addr)
/* Linux iowrite16(): little-endian 16-bit write with barriers. */
static inline void
iowrite16(uint16_t v, volatile void *addr)
{
	writew(v, addr);
}
/*
 * Use the argumented form for consistency with the other iowriteN()
 * self-referential defines (the object-like form behaved the same but
 * was the odd one out).
 */
#define	iowrite16(v, addr)	iowrite16(v, addr)
/* Linux iowrite32(): little-endian 32-bit write with barriers. */
static inline void
iowrite32(uint32_t v, volatile void *addr)
{
	writel(v, addr);
}
#define	iowrite32(v, addr)	iowrite32(v, addr)
/* Linux iowrite32be(): big-endian 32-bit write with barriers. */
static inline void
iowrite32be(uint32_t v, volatile void *addr)
{
	__io_bw();
	__raw_writel(htobe32(v), addr);
	__io_aw();
}
#define	iowrite32be(v, addr)	iowrite32be(v, addr)
385 #if defined(__i386__) || defined(__amd64__)
387 _outb(u_char data, u_int port)
389 __asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
393 #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__)
394 void *_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr);
396 #define _ioremap_attr(...) NULL
399 #ifdef VM_MEMATTR_DEVICE
400 #define ioremap_nocache(addr, size) \
401 _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
402 #define ioremap_wt(addr, size) \
403 _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
404 #define ioremap(addr, size) \
405 _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
407 #define ioremap_nocache(addr, size) \
408 _ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE)
409 #define ioremap_wt(addr, size) \
410 _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_THROUGH)
411 #define ioremap(addr, size) \
412 _ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE)
414 #define ioremap_wc(addr, size) \
415 _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_COMBINING)
416 #define ioremap_wb(addr, size) \
417 _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_BACK)
418 void iounmap(void *addr);
/* MMIO buffer helpers; plain memory ops suffice on these platforms. */
#define	memset_io(a, b, c)	memset((a), (b), (c))
#define	memcpy_fromio(a, b, c)	memcpy((a), (b), (c))
#define	memcpy_toio(a, b, c)	memcpy((a), (b), (c))
/*
 * Copy `count` 32-bit words from `from` to MMIO at `to`, one raw write at a
 * time.  Loop counter is size_t to match `count` and avoid a signed/unsigned
 * comparison (and truncation for very large counts).
 */
static inline void
__iowrite32_copy(void *to, void *from, size_t count)
{
	uint32_t *src;
	uint32_t *dst;
	size_t i;

	for (i = 0, src = from, dst = to; i < count; i++, src++, dst++)
		__raw_writel(*src, dst);
}
/*
 * Copy `count` 64-bit words from `from` to MMIO at `to`.  On 32-bit
 * platforms this degrades to twice as many 32-bit writes.
 */
static inline void
__iowrite64_copy(void *to, void *from, size_t count)
{
#ifdef __LP64__
	uint64_t *src;
	uint64_t *dst;
	size_t i;

	for (i = 0, src = from, dst = to; i < count; i++, src++, dst++)
		__raw_writeq(*src, dst);
#else
	__iowrite32_copy(to, from, count * 2);
#endif
}
/* memremap() cache-attribute request flags; tried in WB, WT, WC order. */
enum {
	MEMREMAP_WB = 1 << 0,
	MEMREMAP_WT = 1 << 1,
	MEMREMAP_WC = 1 << 2,
};
457 memremap(resource_size_t offset, size_t size, unsigned long flags)
461 if ((flags & MEMREMAP_WB) &&
462 (addr = ioremap_wb(offset, size)) != NULL)
464 if ((flags & MEMREMAP_WT) &&
465 (addr = ioremap_wt(offset, size)) != NULL)
467 if ((flags & MEMREMAP_WC) &&
468 (addr = ioremap_wc(offset, size)) != NULL)
/* XXX May need to check if this is RAM */
481 #endif /* _LINUX_IO_H_ */