/*
 * Provide common bits of early_ioremap() support for architectures needing
 * temporary mappings during boot before ioremap() is available.
 *
 * This is mostly a direct copy of the x86 early_ioremap implementation.
 *
 * (C) Copyright 1995 1996, 2014 Linus Torvalds
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/fixmap.h>
#include <asm/early_ioremap.h>

#ifdef CONFIG_MMU
  20. static int early_ioremap_debug __initdata;
  21. static int __init early_ioremap_debug_setup(char *str)
  22. {
  23. early_ioremap_debug = 1;
  24. return 0;
  25. }
  26. early_param("early_ioremap_debug", early_ioremap_debug_setup);
  27. static int after_paging_init __initdata;
  28. void __init __weak early_ioremap_shutdown(void)
  29. {
  30. }
  31. void __init early_ioremap_reset(void)
  32. {
  33. early_ioremap_shutdown();
  34. after_paging_init = 1;
  35. }
  36. /*
  37. * Generally, ioremap() is available after paging_init() has been called.
  38. * Architectures wanting to allow early_ioremap after paging_init() can
  39. * define __late_set_fixmap and __late_clear_fixmap to do the right thing.
  40. */
  41. #ifndef __late_set_fixmap
  42. static inline void __init __late_set_fixmap(enum fixed_addresses idx,
  43. phys_addr_t phys, pgprot_t prot)
  44. {
  45. BUG();
  46. }
  47. #endif
  48. #ifndef __late_clear_fixmap
  49. static inline void __init __late_clear_fixmap(enum fixed_addresses idx)
  50. {
  51. BUG();
  52. }
  53. #endif
  54. static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
  55. static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
  56. static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;
  57. void __init early_ioremap_setup(void)
  58. {
  59. int i;
  60. for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
  61. if (WARN_ON(prev_map[i]))
  62. break;
  63. for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
  64. slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
  65. }
  66. static int __init check_early_ioremap_leak(void)
  67. {
  68. int count = 0;
  69. int i;
  70. for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
  71. if (prev_map[i])
  72. count++;
  73. if (WARN(count, KERN_WARNING
  74. "Debug warning: early ioremap leak of %d areas detected.\n"
  75. "please boot with early_ioremap_debug and report the dmesg.\n",
  76. count))
  77. return 1;
  78. return 0;
  79. }
  80. late_initcall(check_early_ioremap_leak);
  81. static void __init __iomem *
  82. __early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
  83. {
  84. unsigned long offset;
  85. resource_size_t last_addr;
  86. unsigned int nrpages;
  87. enum fixed_addresses idx;
  88. int i, slot;
  89. WARN_ON(system_state >= SYSTEM_RUNNING);
  90. slot = -1;
  91. for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
  92. if (!prev_map[i]) {
  93. slot = i;
  94. break;
  95. }
  96. }
  97. if (WARN(slot < 0, "%s(%08llx, %08lx) not found slot\n",
  98. __func__, (u64)phys_addr, size))
  99. return NULL;
  100. /* Don't allow wraparound or zero size */
  101. last_addr = phys_addr + size - 1;
  102. if (WARN_ON(!size || last_addr < phys_addr))
  103. return NULL;
  104. prev_size[slot] = size;
  105. /*
  106. * Mappings have to be page-aligned
  107. */
  108. offset = offset_in_page(phys_addr);
  109. phys_addr &= PAGE_MASK;
  110. size = PAGE_ALIGN(last_addr + 1) - phys_addr;
  111. /*
  112. * Mappings have to fit in the FIX_BTMAP area.
  113. */
  114. nrpages = size >> PAGE_SHIFT;
  115. if (WARN_ON(nrpages > NR_FIX_BTMAPS))
  116. return NULL;
  117. /*
  118. * Ok, go for it..
  119. */
  120. idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
  121. while (nrpages > 0) {
  122. if (after_paging_init)
  123. __late_set_fixmap(idx, phys_addr, prot);
  124. else
  125. __early_set_fixmap(idx, phys_addr, prot);
  126. phys_addr += PAGE_SIZE;
  127. --idx;
  128. --nrpages;
  129. }
  130. WARN(early_ioremap_debug, "%s(%08llx, %08lx) [%d] => %08lx + %08lx\n",
  131. __func__, (u64)phys_addr, size, slot, offset, slot_virt[slot]);
  132. prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
  133. return prev_map[slot];
  134. }
  135. void __init early_iounmap(void __iomem *addr, unsigned long size)
  136. {
  137. unsigned long virt_addr;
  138. unsigned long offset;
  139. unsigned int nrpages;
  140. enum fixed_addresses idx;
  141. int i, slot;
  142. slot = -1;
  143. for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
  144. if (prev_map[i] == addr) {
  145. slot = i;
  146. break;
  147. }
  148. }
  149. if (WARN(slot < 0, "early_iounmap(%p, %08lx) not found slot\n",
  150. addr, size))
  151. return;
  152. if (WARN(prev_size[slot] != size,
  153. "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
  154. addr, size, slot, prev_size[slot]))
  155. return;
  156. WARN(early_ioremap_debug, "early_iounmap(%p, %08lx) [%d]\n",
  157. addr, size, slot);
  158. virt_addr = (unsigned long)addr;
  159. if (WARN_ON(virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)))
  160. return;
  161. offset = offset_in_page(virt_addr);
  162. nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;
  163. idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
  164. while (nrpages > 0) {
  165. if (after_paging_init)
  166. __late_clear_fixmap(idx);
  167. else
  168. __early_set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR);
  169. --idx;
  170. --nrpages;
  171. }
  172. prev_map[slot] = NULL;
  173. }
  174. /* Remap an IO device */
  175. void __init __iomem *
  176. early_ioremap(resource_size_t phys_addr, unsigned long size)
  177. {
  178. return __early_ioremap(phys_addr, size, FIXMAP_PAGE_IO);
  179. }
  180. /* Remap memory */
  181. void __init *
  182. early_memremap(resource_size_t phys_addr, unsigned long size)
  183. {
  184. return (__force void *)__early_ioremap(phys_addr, size,
  185. FIXMAP_PAGE_NORMAL);
  186. }
  187. #ifdef FIXMAP_PAGE_RO
  188. void __init *
  189. early_memremap_ro(resource_size_t phys_addr, unsigned long size)
  190. {
  191. return (__force void *)__early_ioremap(phys_addr, size, FIXMAP_PAGE_RO);
  192. }
  193. #endif
  194. #define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT)
  195. void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size)
  196. {
  197. unsigned long slop, clen;
  198. char *p;
  199. while (size) {
  200. slop = offset_in_page(src);
  201. clen = size;
  202. if (clen > MAX_MAP_CHUNK - slop)
  203. clen = MAX_MAP_CHUNK - slop;
  204. p = early_memremap(src & PAGE_MASK, clen + slop);
  205. memcpy(dest, p + slop, clen);
  206. early_memunmap(p, clen + slop);
  207. dest += clen;
  208. src += clen;
  209. size -= clen;
  210. }
  211. }
  212. #else /* CONFIG_MMU */
  213. void __init __iomem *
  214. early_ioremap(resource_size_t phys_addr, unsigned long size)
  215. {
  216. return (__force void __iomem *)phys_addr;
  217. }
  218. /* Remap memory */
  219. void __init *
  220. early_memremap(resource_size_t phys_addr, unsigned long size)
  221. {
  222. return (void *)phys_addr;
  223. }
  224. void __init *
  225. early_memremap_ro(resource_size_t phys_addr, unsigned long size)
  226. {
  227. return (void *)phys_addr;
  228. }
  229. void __init early_iounmap(void __iomem *addr, unsigned long size)
  230. {
  231. }
  232. #endif /* CONFIG_MMU */
  233. void __init early_memunmap(void *addr, unsigned long size)
  234. {
  235. early_iounmap((__force void __iomem *)addr, size);
  236. }