/*
 * devres.c - managed (devres) I/O mapping helpers
 */
  1. #include <linux/err.h>
  2. #include <linux/pci.h>
  3. #include <linux/io.h>
  4. #include <linux/gfp.h>
  5. #include <linux/export.h>
  6. void devm_ioremap_release(struct device *dev, void *res)
  7. {
  8. iounmap(*(void __iomem **)res);
  9. }
  10. static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
  11. {
  12. return *(void **)res == match_data;
  13. }
  14. /**
  15. * devm_ioremap - Managed ioremap()
  16. * @dev: Generic device to remap IO address for
  17. * @offset: BUS offset to map
  18. * @size: Size of map
  19. *
  20. * Managed ioremap(). Map is automatically unmapped on driver detach.
  21. */
  22. void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
  23. resource_size_t size)
  24. {
  25. void __iomem **ptr, *addr;
  26. ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
  27. if (!ptr)
  28. return NULL;
  29. addr = ioremap(offset, size);
  30. if (addr) {
  31. *ptr = addr;
  32. devres_add(dev, ptr);
  33. } else
  34. devres_free(ptr);
  35. return addr;
  36. }
  37. EXPORT_SYMBOL(devm_ioremap);
  38. /**
  39. * devm_ioremap_nocache - Managed ioremap_nocache()
  40. * @dev: Generic device to remap IO address for
  41. * @offset: BUS offset to map
  42. * @size: Size of map
  43. *
  44. * Managed ioremap_nocache(). Map is automatically unmapped on driver
  45. * detach.
  46. */
  47. void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
  48. resource_size_t size)
  49. {
  50. void __iomem **ptr, *addr;
  51. ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
  52. if (!ptr)
  53. return NULL;
  54. addr = ioremap_nocache(offset, size);
  55. if (addr) {
  56. *ptr = addr;
  57. devres_add(dev, ptr);
  58. } else
  59. devres_free(ptr);
  60. return addr;
  61. }
  62. EXPORT_SYMBOL(devm_ioremap_nocache);
  63. /**
  64. * devm_ioremap_wc - Managed ioremap_wc()
  65. * @dev: Generic device to remap IO address for
  66. * @offset: BUS offset to map
  67. * @size: Size of map
  68. *
  69. * Managed ioremap_wc(). Map is automatically unmapped on driver detach.
  70. */
  71. void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
  72. resource_size_t size)
  73. {
  74. void __iomem **ptr, *addr;
  75. ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
  76. if (!ptr)
  77. return NULL;
  78. addr = ioremap_wc(offset, size);
  79. if (addr) {
  80. *ptr = addr;
  81. devres_add(dev, ptr);
  82. } else
  83. devres_free(ptr);
  84. return addr;
  85. }
  86. EXPORT_SYMBOL(devm_ioremap_wc);
/**
 * devm_iounmap - Managed iounmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed iounmap(). @addr must have been mapped using devm_ioremap*().
 */
void devm_iounmap(struct device *dev, void __iomem *addr)
{
	/*
	 * Drop the tracking entry first, then do the actual unmap; WARN
	 * if no matching entry existed (i.e. @addr was not obtained from
	 * devm_ioremap*() on this device).
	 */
	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
			       (__force void *)addr));
	iounmap(addr);
}
EXPORT_SYMBOL(devm_iounmap);
/**
 * devm_ioremap_resource() - check, request region, and ioremap resource
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it. All operations are managed and will be undone
 * on driver detach.
 *
 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure. Usage example:
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_ioremap_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */
void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
{
	resource_size_t size;
	const char *name;
	void __iomem *dest_ptr;

	/* a NULL device is a driver bug, not a runtime condition */
	BUG_ON(!dev);

	if (!res || resource_type(res) != IORESOURCE_MEM) {
		dev_err(dev, "invalid resource\n");
		return IOMEM_ERR_PTR(-EINVAL);
	}

	size = resource_size(res);
	/* fall back to the device name when the resource is unnamed */
	name = res->name ?: dev_name(dev);

	if (!devm_request_mem_region(dev, res->start, size, name)) {
		dev_err(dev, "can't request region for resource %pR\n", res);
		return IOMEM_ERR_PTR(-EBUSY);
	}

	dest_ptr = devm_ioremap(dev, res->start, size);
	if (!dest_ptr) {
		dev_err(dev, "ioremap failed for resource %pR\n", res);
		/* undo the region request taken above before erroring out */
		devm_release_mem_region(dev, res->start, size);
		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
	}

	return dest_ptr;
}
EXPORT_SYMBOL(devm_ioremap_resource);
#ifdef CONFIG_HAS_IOPORT_MAP
/*
 * Generic iomap devres
 */
/* devres release callback: undo a mapping made by devm_ioport_map() */
static void devm_ioport_map_release(struct device *dev, void *res)
{
	ioport_unmap(*(void __iomem **)res);
}
/* devres match callback: true when the stored mapping equals @match_data */
static int devm_ioport_map_match(struct device *dev, void *res,
				 void *match_data)
{
	return *(void **)res == match_data;
}
  156. /**
  157. * devm_ioport_map - Managed ioport_map()
  158. * @dev: Generic device to map ioport for
  159. * @port: Port to map
  160. * @nr: Number of ports to map
  161. *
  162. * Managed ioport_map(). Map is automatically unmapped on driver
  163. * detach.
  164. */
  165. void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
  166. unsigned int nr)
  167. {
  168. void __iomem **ptr, *addr;
  169. ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
  170. if (!ptr)
  171. return NULL;
  172. addr = ioport_map(port, nr);
  173. if (addr) {
  174. *ptr = addr;
  175. devres_add(dev, ptr);
  176. } else
  177. devres_free(ptr);
  178. return addr;
  179. }
  180. EXPORT_SYMBOL(devm_ioport_map);
/**
 * devm_ioport_unmap - Managed ioport_unmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed ioport_unmap(). @addr must have been mapped using
 * devm_ioport_map().
 */
void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
	/*
	 * NOTE(review): this unmaps before removing the devres entry,
	 * the opposite order of devm_iounmap() above — confirm whether
	 * the asymmetry is intentional.
	 */
	ioport_unmap(addr);
	WARN_ON(devres_destroy(dev, devm_ioport_map_release,
			       devm_ioport_map_match, (__force void *)addr));
}
EXPORT_SYMBOL(devm_ioport_unmap);
#endif /* CONFIG_HAS_IOPORT_MAP */
#ifdef CONFIG_PCI
/*
 * PCI iomap devres
 */
/* Number of table slots: the iomap table covers BAR indices
 * 0..PCIM_IOMAP_MAX-1 (bounded by PCI_ROM_RESOURCE; the ROM resource
 * itself is not tracked here). */
#define PCIM_IOMAP_MAX	PCI_ROM_RESOURCE

/* Per-device devres payload: iomapped address of each BAR, or NULL. */
struct pcim_iomap_devres {
	void __iomem *table[PCIM_IOMAP_MAX];
};
/* devres release callback: unmap every BAR still recorded in the table */
static void pcim_iomap_release(struct device *gendev, void *res)
{
	/* devres hands us the generic device; recover the pci_dev */
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pcim_iomap_devres *this = res;
	int i;

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (this->table[i])
			pci_iounmap(dev, this->table[i]);
}
/**
 * pcim_iomap_table - access iomap allocation table
 * @pdev: PCI device to access iomap table for
 *
 * Access iomap allocation table for @dev. If iomap table doesn't
 * exist and @pdev is managed, it will be allocated. All iomaps
 * recorded in the iomap table are automatically unmapped on driver
 * detach.
 *
 * This function might sleep when the table is first allocated but can
 * be safely called without context and guaranteed to succeed once
 * allocated.
 */
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr, *new_dr;

	/* fast path: table already allocated for this device */
	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
	if (dr)
		return dr->table;

	new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;

	/*
	 * devres_get() registers @new_dr, or frees it and returns the
	 * already-registered entry if one appeared meanwhile — either
	 * way @dr is valid here and nothing leaks.
	 */
	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
	return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);
  240. /**
  241. * pcim_iomap - Managed pcim_iomap()
  242. * @pdev: PCI device to iomap for
  243. * @bar: BAR to iomap
  244. * @maxlen: Maximum length of iomap
  245. *
  246. * Managed pci_iomap(). Map is automatically unmapped on driver
  247. * detach.
  248. */
  249. void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
  250. {
  251. void __iomem **tbl;
  252. BUG_ON(bar >= PCIM_IOMAP_MAX);
  253. tbl = (void __iomem **)pcim_iomap_table(pdev);
  254. if (!tbl || tbl[bar]) /* duplicate mappings not allowed */
  255. return NULL;
  256. tbl[bar] = pci_iomap(pdev, bar, maxlen);
  257. return tbl[bar];
  258. }
  259. EXPORT_SYMBOL(pcim_iomap);
/**
 * pcim_iounmap - Managed pci_iounmap()
 * @pdev: PCI device to iounmap for
 * @addr: Address to unmap
 *
 * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
 */
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	void __iomem **tbl;
	int i;

	pci_iounmap(pdev, addr);

	/* clear the table slot so pcim_iomap_release() won't unmap again */
	tbl = (void __iomem **)pcim_iomap_table(pdev);
	BUG_ON(!tbl);

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (tbl[i] == addr) {
			tbl[i] = NULL;
			return;
		}
	/* @addr was not in the table: it didn't come from pcim_iomap() */
	WARN_ON(1);
}
EXPORT_SYMBOL(pcim_iounmap);
/**
 * pcim_iomap_regions - Request and iomap PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to request and iomap
 * @name: Name used when requesting regions
 *
 * Request and iomap regions specified by @mask.  On failure everything
 * acquired by this call is undone before returning.
 *
 * Returns 0 on success, -EINVAL for a zero-length BAR, -ENOMEM when
 * the iomap table or a mapping can't be set up, or the error from
 * pci_request_region().
 */
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
{
	void __iomem * const *iomap;
	int i, rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	/*
	 * NOTE(review): the loop runs to DEVICE_COUNT_RESOURCE, but
	 * pcim_iomap() BUGs on bar >= PCIM_IOMAP_MAX — confirm callers
	 * only set mask bits for standard BARs.
	 */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long len;

		if (!(mask & (1 << i)))
			continue;

		rc = -EINVAL;
		len = pci_resource_len(pdev, i);
		if (!len)	/* zero-sized BARs can't be mapped */
			goto err_inval;

		rc = pci_request_region(pdev, i, name);
		if (rc)
			goto err_inval;

		rc = -ENOMEM;
		if (!pcim_iomap(pdev, i, 0))
			goto err_region;
	}

	return 0;

 err_region:
	/* region @i was requested but not mapped: release just the region */
	pci_release_region(pdev, i);
 err_inval:
	/* unwind every earlier BAR in @mask that was mapped and requested */
	while (--i >= 0) {
		if (!(mask & (1 << i)))
			continue;
		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}

	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);
  325. /**
  326. * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
  327. * @pdev: PCI device to map IO resources for
  328. * @mask: Mask of BARs to iomap
  329. * @name: Name used when requesting regions
  330. *
  331. * Request all PCI BARs and iomap regions specified by @mask.
  332. */
  333. int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
  334. const char *name)
  335. {
  336. int request_mask = ((1 << 6) - 1) & ~mask;
  337. int rc;
  338. rc = pci_request_selected_regions(pdev, request_mask, name);
  339. if (rc)
  340. return rc;
  341. rc = pcim_iomap_regions(pdev, mask, name);
  342. if (rc)
  343. pci_release_selected_regions(pdev, request_mask);
  344. return rc;
  345. }
  346. EXPORT_SYMBOL(pcim_iomap_regions_request_all);
  347. /**
  348. * pcim_iounmap_regions - Unmap and release PCI BARs
  349. * @pdev: PCI device to map IO resources for
  350. * @mask: Mask of BARs to unmap and release
  351. *
  352. * Unmap and release regions specified by @mask.
  353. */
  354. void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
  355. {
  356. void __iomem * const *iomap;
  357. int i;
  358. iomap = pcim_iomap_table(pdev);
  359. if (!iomap)
  360. return;
  361. for (i = 0; i < PCIM_IOMAP_MAX; i++) {
  362. if (!(mask & (1 << i)))
  363. continue;
  364. pcim_iounmap(pdev, iomap[i]);
  365. pci_release_region(pdev, i);
  366. }
  367. }
  368. EXPORT_SYMBOL(pcim_iounmap_regions);
  369. #endif /* CONFIG_PCI */