memremap.c

/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/device.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
	return ioremap(offset, size);
}
#endif

static void *try_ram_remap(resource_size_t offset, size_t size)
{
	struct page *page = pfn_to_page(offset >> PAGE_SHIFT);

	/* In the simple case just return the existing linear address */
	if (!PageHighMem(page))
		return __va(offset);
	return NULL; /* fallback to ioremap_cache */
}
/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: either MEMREMAP_WB or MEMREMAP_WT
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable.
 *
 * MEMREMAP_WB - matches the default mapping for "System RAM" on
 * the architecture. This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is
 * RAM, memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility. Attempts to
 * map "System RAM" with this mapping type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	int is_ram = region_intersects(offset, size, "System RAM");
	void *addr = NULL;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	/* Try all mapping types requested until one returns non-NULL */
	if (flags & MEMREMAP_WB) {
		flags &= ~MEMREMAP_WB;
		/*
		 * MEMREMAP_WB is special in that it can be satisfied
		 * from the direct map. Some archs depend on the
		 * capability of memremap() to autodetect cases where
		 * the requested range is potentially in "System RAM"
		 */
		if (is_ram == REGION_INTERSECTS)
			addr = try_ram_remap(offset, size);
		if (!addr)
			addr = ioremap_cache(offset, size);
	}

	/*
	 * If we don't have a mapping yet and more request flags are
	 * pending then we will be attempting to establish a new virtual
	 * address mapping. Enforce that this mapping is not aliasing
	 * "System RAM"
	 */
	if (!addr && is_ram == REGION_INTERSECTS && flags) {
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	if (!addr && (flags & MEMREMAP_WT)) {
		flags &= ~MEMREMAP_WT;
		addr = ioremap_wt(offset, size);
	}

	return addr;
}
EXPORT_SYMBOL(memremap);
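
/*
 * Illustrative usage sketch (not part of the original file): a driver
 * with a firmware-described, side-effect-free memory range might use
 * memremap()/memunmap() roughly as below. The function name and the
 * pr_err() message are hypothetical placeholders.
 */
static int __maybe_unused example_memremap_usage(resource_size_t start,
		size_t len)
{
	void *base;

	/* ask for a cacheable mapping; may return a direct-map pointer */
	base = memremap(start, len, MEMREMAP_WB);
	if (!base) {
		pr_err("example: memremap of %pa failed\n", &start);
		return -ENOMEM;
	}

	/* plain loads/stores are valid here, no __iomem accessors needed */
	memset(base, 0, len);

	memunmap(base);
	return 0;
}
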
void memunmap(void *addr)
{
	if (is_vmalloc_addr(addr))
		iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);

static void devm_memremap_release(struct device *dev, void *res)
{
	memunmap(*(void **)res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

void *devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
			dev_to_node(dev));
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
		return ERR_PTR(-ENXIO);
	}

	return addr;
}
EXPORT_SYMBOL(devm_memremap);

void devm_memunmap(struct device *dev, void *addr)
{
	WARN_ON(devres_release(dev, devm_memremap_release,
				devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);
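
/*
 * Illustrative usage sketch (not part of the original file): the
 * device-managed variant as it might appear in a driver probe path.
 * The struct and function names are hypothetical; the mapping is torn
 * down by devres when the device is unbound, so no explicit
 * devm_memunmap() call is needed on the success path.
 */
struct example_drvdata {
	void *base;
};

static int __maybe_unused example_probe_mapping(struct device *dev,
		struct resource *res, struct example_drvdata *drv)
{
	drv->base = devm_memremap(dev, res->start, resource_size(res),
			MEMREMAP_WB);
	if (IS_ERR(drv->base))
		return PTR_ERR(drv->base);

	return 0;
}
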
#ifdef CONFIG_ZONE_DEVICE
struct page_map {
	struct resource res;
};

static void devm_memremap_pages_release(struct device *dev, void *res)
{
	struct page_map *page_map = res;

	/* pages are dead and unused, undo the arch mapping */
	mem_hotplug_begin();
	arch_remove_memory(page_map->res.start, resource_size(&page_map->res));
	mem_hotplug_done();
}

void *devm_memremap_pages(struct device *dev, struct resource *res)
{
	int is_ram = region_intersects(res->start, resource_size(res),
			"System RAM");
	struct page_map *page_map;
	int error, nid;

	if (is_ram != REGION_DISJOINT) {
		WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
				is_ram == REGION_MIXED ? "mixed" : "ram", res);
		return ERR_PTR(-ENXIO);
	}

	page_map = devres_alloc_node(devm_memremap_pages_release,
			sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
	if (!page_map)
		return ERR_PTR(-ENOMEM);

	memcpy(&page_map->res, res, sizeof(*res));

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	mem_hotplug_begin();
	error = arch_add_memory(nid, res->start, resource_size(res), true);
	mem_hotplug_done();
	if (error) {
		devres_free(page_map);
		return ERR_PTR(error);
	}

	devres_add(dev, page_map);
	return __va(res->start);
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);
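
/*
 * Illustrative usage sketch (not part of the original file): how a
 * caller such as a persistent-memory driver might hand a device-owned
 * range to devm_memremap_pages(). The resource is assumed to be
 * already claimed by the caller and to not overlap "System RAM"; the
 * function name is hypothetical.
 */
static void * __maybe_unused example_map_device_pages(struct device *dev,
		struct resource *res)
{
	void *addr;

	/*
	 * On success the range gains struct pages via arch_add_memory()
	 * (without being onlined as regular RAM) and addr points into
	 * the linear map at res->start.
	 */
	addr = devm_memremap_pages(dev, res);
	if (IS_ERR(addr))
		dev_err(dev, "failed to map device pages: %ld\n",
				PTR_ERR(addr));

	return addr;
}
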
#endif /* CONFIG_ZONE_DEVICE */