  1. /* Flush dcache and invalidate icache when the dcache is in writeback mode
  2. *
  3. * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public Licence
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the Licence, or (at your option) any later version.
  10. */
  11. #include <linux/module.h>
  12. #include <linux/mm.h>
  13. #include <asm/cacheflush.h>
  14. #include <asm/smp.h>
  15. #include "cache-smp.h"
  16. /**
  17. * flush_icache_page - Flush a page from the dcache and invalidate the icache
  18. * @vma: The VMA the page is part of.
  19. * @page: The page to be flushed.
  20. *
  21. * Write a page back from the dcache and invalidate the icache so that we can
  22. * run code from it that we've just written into it
  23. */
  24. void flush_icache_page(struct vm_area_struct *vma, struct page *page)
  25. {
  26. unsigned long start = page_to_phys(page);
  27. unsigned long flags;
  28. flags = smp_lock_cache();
  29. mn10300_local_dcache_flush_page(start);
  30. mn10300_local_icache_inv_page(start);
  31. smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, start + PAGE_SIZE);
  32. smp_unlock_cache(flags);
  33. }
  34. EXPORT_SYMBOL(flush_icache_page);
  35. /**
  36. * flush_icache_page_range - Flush dcache and invalidate icache for part of a
  37. * single page
  38. * @start: The starting virtual address of the page part.
  39. * @end: The ending virtual address of the page part.
  40. *
  41. * Flush the dcache and invalidate the icache for part of a single page, as
  42. * determined by the virtual addresses given. The page must be in the paged
  43. * area.
  44. */
/**
 * flush_icache_page_range - Flush dcache and invalidate icache for part of a
 *			     single page
 * @start: The starting virtual address of the page part.
 * @end: The ending virtual address of the page part.
 *
 * Flush the dcache and invalidate the icache for part of a single page, as
 * determined by the virtual addresses given.  The page must be in the paged
 * area.
 *
 * NOTE(review): the walk goes through current->mm, so this assumes @start
 * lies in the current process's user mappings — confirm callers never pass
 * kernel vmalloc addresses here.  If any level of the page tables is not
 * present, the function silently does nothing.
 */
static void flush_icache_page_range(unsigned long start, unsigned long end)
{
	unsigned long addr, size, off;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ppte, pte;

	/* work out how much of the page to flush: offset of @start within
	 * its page, and the byte length of the [start, end) span */
	off = start & ~PAGE_MASK;
	size = end - start;

	/* get the physical address the page is mapped to from the page
	 * tables, walking pgd -> pud -> pmd -> pte; bail out silently at
	 * any level that is absent */
	pgd = pgd_offset(current->mm, start);
	if (!pgd || !pgd_val(*pgd))
		return;

	pud = pud_offset(pgd, start);
	if (!pud || !pud_val(*pud))
		return;

	pmd = pmd_offset(pud, start);
	if (!pmd || !pmd_val(*pmd))
		return;

	/* map the PTE, take a copy of it, and unmap it again before using
	 * the value — the mapping must not be held across the cache ops */
	ppte = pte_offset_map(pmd, start);
	if (!ppte)
		return;
	pte = *ppte;
	pte_unmap(ppte);

	if (pte_none(pte))
		return;

	page = pte_page(pte);
	if (!page)
		return;

	addr = page_to_phys(page);

	/* flush the dcache and invalidate the icache coverage on that
	 * region: local CPUs operate on the physical address, while the
	 * cross-CPU broadcast is given the original virtual range */
	mn10300_local_dcache_flush_range2(addr + off, size);
	mn10300_local_icache_inv_range2(addr + off, size);
	smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, end);
}
  84. /**
  85. * flush_icache_range - Globally flush dcache and invalidate icache for region
  86. * @start: The starting virtual address of the region.
  87. * @end: The ending virtual address of the region.
  88. *
  89. * This is used by the kernel to globally flush some code it has just written
  90. * from the dcache back to RAM and then to globally invalidate the icache over
  91. * that region so that that code can be run on all CPUs in the system.
  92. */
  93. void flush_icache_range(unsigned long start, unsigned long end)
  94. {
  95. unsigned long start_page, end_page;
  96. unsigned long flags;
  97. flags = smp_lock_cache();
  98. if (end > 0x80000000UL) {
  99. /* addresses above 0xa0000000 do not go through the cache */
  100. if (end > 0xa0000000UL) {
  101. end = 0xa0000000UL;
  102. if (start >= end)
  103. goto done;
  104. }
  105. /* kernel addresses between 0x80000000 and 0x9fffffff do not
  106. * require page tables, so we just map such addresses
  107. * directly */
  108. start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
  109. mn10300_local_dcache_flush_range(start_page, end);
  110. mn10300_local_icache_inv_range(start_page, end);
  111. smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start_page, end);
  112. if (start_page == start)
  113. goto done;
  114. end = start_page;
  115. }
  116. start_page = start & PAGE_MASK;
  117. end_page = (end - 1) & PAGE_MASK;
  118. if (start_page == end_page) {
  119. /* the first and last bytes are on the same page */
  120. flush_icache_page_range(start, end);
  121. } else if (start_page + 1 == end_page) {
  122. /* split over two virtually contiguous pages */
  123. flush_icache_page_range(start, end_page);
  124. flush_icache_page_range(end_page, end);
  125. } else {
  126. /* more than 2 pages; just flush the entire cache */
  127. mn10300_dcache_flush();
  128. mn10300_icache_inv();
  129. smp_cache_call(SMP_IDCACHE_INV_FLUSH, 0, 0);
  130. }
  131. done:
  132. smp_unlock_cache(flags);
  133. }
  134. EXPORT_SYMBOL(flush_icache_range);