/*
 * Based on arch/arm/include/asm/io.h
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_IO_H
#define __ASM_IO_H

#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/blk_types.h>

#include <asm/byteorder.h>
#include <asm/barrier.h>
#include <asm/memory.h>
#include <asm/pgtable.h>
#include <asm/early_ioremap.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>

/* xen_domain() / xen_biovec_phys_mergeable(), used by BIOVEC_PHYS_MERGEABLE */
#include <xen/xen.h>
  32. /*
  33. * Generic IO read/write. These perform native-endian accesses.
  34. */
  35. #define __raw_writeb __raw_writeb
  36. static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
  37. {
  38. asm volatile("strb %w0, [%1]" : : "r" (val), "r" (addr));
  39. }
#define __raw_writew __raw_writew
/* Store one 16-bit half-word to device memory.  No barriers, no swapping. */
static inline void __raw_writew(u16 val, volatile void __iomem *addr)
{
	asm volatile("strh %w0, [%1]" : : "r" (val), "r" (addr));
}
#define __raw_writel __raw_writel
/* Store one 32-bit word to device memory ("%w0" selects the 32-bit W reg). */
static inline void __raw_writel(u32 val, volatile void __iomem *addr)
{
	asm volatile("str %w0, [%1]" : : "r" (val), "r" (addr));
}
#define __raw_writeq __raw_writeq
/* Store one 64-bit double-word to device memory ("%0" is the full X reg). */
static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
{
	asm volatile("str %0, [%1]" : : "r" (val), "r" (addr));
}
#define __raw_readb __raw_readb
/*
 * Load one byte from device memory.  On CPUs flagged with
 * ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE the plain load is patched by the
 * alternatives framework into a load-acquire (ldarb) as an erratum
 * workaround.
 */
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	u8 val;

	asm volatile(ALTERNATIVE("ldrb %w0, [%1]",
				 "ldarb %w0, [%1]",
				 ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
		     : "=r" (val) : "r" (addr));
	return val;
}
#define __raw_readw __raw_readw
/*
 * Load one 16-bit half-word from device memory; ldrh is patched to a
 * load-acquire (ldarh) on cores with the DEVICE_LOAD_ACQUIRE erratum.
 */
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	u16 val;

	asm volatile(ALTERNATIVE("ldrh %w0, [%1]",
				 "ldarh %w0, [%1]",
				 ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
		     : "=r" (val) : "r" (addr));
	return val;
}
#define __raw_readl __raw_readl
/*
 * Load one 32-bit word from device memory; ldr is patched to a
 * load-acquire (ldar) on cores with the DEVICE_LOAD_ACQUIRE erratum.
 */
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	u32 val;

	asm volatile(ALTERNATIVE("ldr %w0, [%1]",
				 "ldar %w0, [%1]",
				 ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
		     : "=r" (val) : "r" (addr));
	return val;
}
#define __raw_readq __raw_readq
/*
 * Load one 64-bit double-word from device memory; ldr is patched to a
 * load-acquire (ldar) on cores with the DEVICE_LOAD_ACQUIRE erratum.
 */
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	u64 val;

	asm volatile(ALTERNATIVE("ldr %0, [%1]",
				 "ldar %0, [%1]",
				 ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
		     : "=r" (val) : "r" (addr));
	return val;
}
/* IO barriers */
#define __iormb()		rmb()
#define __iowmb()		wmb()

/* No-op on this architecture. */
#define mmiowb()		do { } while (0)

/*
 * Relaxed I/O memory access primitives. These follow the Device memory
 * ordering rules but do not guarantee any ordering relative to Normal memory
 * accesses.
 *
 * MMIO registers are little-endian; the le*_to_cpu()/cpu_to_le*()
 * conversions are no-ops on an LE kernel and byte-swap on a BE kernel.
 */
#define readb_relaxed(c)	({ u8 __r = __raw_readb(c); __r; })
#define readw_relaxed(c)	({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
#define readl_relaxed(c)	({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
#define readq_relaxed(c)	({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })

#define writeb_relaxed(v,c)	((void)__raw_writeb((v),(c)))
#define writew_relaxed(v,c)	((void)__raw_writew((__force u16)cpu_to_le16(v),(c)))
#define writel_relaxed(v,c)	((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
#define writeq_relaxed(v,c)	((void)__raw_writeq((__force u64)cpu_to_le64(v),(c)))
/*
 * I/O memory access primitives. Reads are ordered relative to any
 * following Normal memory access. Writes are ordered relative to any prior
 * Normal memory access.
 *
 * Implemented as the relaxed accessor plus a read barrier after the read
 * (resp. a write barrier before the write).
 */
#define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(); __v; })
#define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(); __v; })
#define readq(c)		({ u64 __v = readq_relaxed(c); __iormb(); __v; })

#define writeb(v,c)		({ __iowmb(); writeb_relaxed((v),(c)); })
#define writew(v,c)		({ __iowmb(); writew_relaxed((v),(c)); })
#define writel(v,c)		({ __iowmb(); writel_relaxed((v),(c)); })
#define writeq(v,c)		({ __iowmb(); writeq_relaxed((v),(c)); })
/*
 * I/O port access primitives.  Port I/O is emulated through a fixed
 * virtual window at PCI_IOBASE.
 */
#define arch_has_dev_port()	(1)
#define IO_SPACE_LIMIT		(PCI_IO_SIZE - 1)
#define PCI_IOBASE		((void __iomem *)PCI_IO_START)

/*
 * String version of I/O memory access operations.
 */
extern void __memcpy_fromio(void *, const volatile void __iomem *, size_t);
extern void __memcpy_toio(volatile void __iomem *, const void *, size_t);
extern void __memset_io(volatile void __iomem *, int, size_t);

#define memset_io(c,v,l)	__memset_io((c),(v),(l))
#define memcpy_fromio(a,c,l)	__memcpy_fromio((a),(c),(l))
#define memcpy_toio(c,a,l)	__memcpy_toio((c),(a),(l))
/*
 * I/O memory mapping functions.
 */
extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot);
extern void __iounmap(volatile void __iomem *addr);
extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);

/* Default and _nocache mappings use Device-nGnRE memory attributes. */
#define ioremap(addr, size)		__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_nocache(addr, size)	__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
/* Write-combining maps as Normal Non-Cacheable. */
#define ioremap_wc(addr, size)		__ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
/* No write-through attribute is used; _wt falls back to Device-nGnRE. */
#define ioremap_wt(addr, size)		__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define iounmap				__iounmap
/*
 * io{read,write}{16,32}be() macros
 *
 * Big-endian variants: byte-swap (on an LE kernel) around the raw access,
 * with the same read/write barrier placement as readX()/writeX().
 */
#define ioread16be(p)		({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
#define ioread32be(p)		({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })

#define iowrite16be(v,p)	({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
#define iowrite32be(v,p)	({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

/* Pick up default definitions for everything not overridden above. */
#include <asm-generic/io.h>

/*
 * More restrictive address range checking than the default implementation
 * (PHYS_OFFSET and PHYS_MASK taken into account).
 */
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);

extern int devmem_is_allowed(unsigned long pfn);

struct bio_vec;
extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
				      const struct bio_vec *vec2);
/*
 * Two biovecs may only be merged when not running under Xen, or when Xen
 * confirms the underlying machine frames are contiguous.
 */
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)				\
	(__BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&				\
	 (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))

#endif	/* __KERNEL__ */
#endif	/* __ASM_IO_H */