/* ioremap.c */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/cache.h>
  15. void __iomem *ioremap(unsigned long paddr, unsigned long size)
  16. {
  17. unsigned long end;
  18. /* Don't allow wraparound or zero size */
  19. end = paddr + size - 1;
  20. if (!size || (end < paddr))
  21. return NULL;
  22. /* If the region is h/w uncached, avoid MMU mappings */
  23. if (paddr >= ARC_UNCACHED_ADDR_SPACE)
  24. return (void __iomem *)paddr;
  25. return ioremap_prot(paddr, size, PAGE_KERNEL_NO_CACHE);
  26. }
  27. EXPORT_SYMBOL(ioremap);
  28. /*
  29. * ioremap with access flags
  30. * Cache semantics wise it is same as ioremap - "forced" uncached.
  31. * However unline vanilla ioremap which bypasses ARC MMU for addresses in
  32. * ARC hardware uncached region, this one still goes thru the MMU as caller
  33. * might need finer access control (R/W/X)
  34. */
  35. void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
  36. unsigned long flags)
  37. {
  38. void __iomem *vaddr;
  39. struct vm_struct *area;
  40. unsigned long off, end;
  41. pgprot_t prot = __pgprot(flags);
  42. /* Don't allow wraparound, zero size */
  43. end = paddr + size - 1;
  44. if ((!size) || (end < paddr))
  45. return NULL;
  46. /* An early platform driver might end up here */
  47. if (!slab_is_available())
  48. return NULL;
  49. /* force uncached */
  50. prot = pgprot_noncached(prot);
  51. /* Mappings have to be page-aligned */
  52. off = paddr & ~PAGE_MASK;
  53. paddr &= PAGE_MASK;
  54. size = PAGE_ALIGN(end + 1) - paddr;
  55. /*
  56. * Ok, go for it..
  57. */
  58. area = get_vm_area(size, VM_IOREMAP);
  59. if (!area)
  60. return NULL;
  61. area->phys_addr = paddr;
  62. vaddr = (void __iomem *)area->addr;
  63. if (ioremap_page_range((unsigned long)vaddr,
  64. (unsigned long)vaddr + size, paddr, prot)) {
  65. vunmap((void __force *)vaddr);
  66. return NULL;
  67. }
  68. return (void __iomem *)(off + (char __iomem *)vaddr);
  69. }
  70. EXPORT_SYMBOL(ioremap_prot);
  71. void iounmap(const void __iomem *addr)
  72. {
  73. if (addr >= (void __force __iomem *)ARC_UNCACHED_ADDR_SPACE)
  74. return;
  75. vfree((void *)(PAGE_MASK & (unsigned long __force)addr));
  76. }
  77. EXPORT_SYMBOL(iounmap);