/*
 * linux/arch/unicore32/include/asm/tlbflush.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __UNICORE_TLBFLUSH_H__
#define __UNICORE_TLBFLUSH_H__

#ifndef __ASSEMBLY__

#include <linux/sched.h>

extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long,
				       struct vm_area_struct *);
extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);

/*
 *	TLB Management
 *	==============
 *
 *	The arch/unicore32/mm/tlb-*.S files implement these methods.
 *
 *	The TLB specific code is expected to perform whatever tests it
 *	needs to determine if it should invalidate the TLB for each
 *	call.  Start addresses are inclusive and end addresses are
 *	exclusive; it is safe to round these addresses down.
 *
 *	flush_tlb_all()
 *
 *		Invalidate the entire TLB.
 *
 *	flush_tlb_mm(mm)
 *
 *		Invalidate all TLB entries in a particular address
 *		space.
 *		- mm	- mm_struct describing address space
 *
 *	flush_tlb_range(vma,start,end)
 *
 *		Invalidate a range of TLB entries in the specified
 *		address space.
 *		- vma	- vm_area_struct describing the address range
 *		- start	- start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 *
 *	flush_tlb_page(vaddr,vma)
 *
 *		Invalidate the specified page in the specified address range.
 *		- vaddr	- virtual address (may not be aligned)
 *		- vma	- vma_struct describing address range
 *
 *	flush_kern_tlb_page(kaddr)
 *
 *		Invalidate the TLB entry for the specified page.  The address
 *		will be in the kernel's virtual memory space.  Current uses
 *		only require the D-TLB to be invalidated.
 *		- kaddr	- kernel virtual memory address
 */

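/*
 * Illustrative only (vma, start and end are placeholders, not defined
 * here): after changing the PTEs backing a user mapping, a caller would
 * typically invalidate just that window rather than the whole TLB:
 *
 *	flush_tlb_range(vma, start, end);
 *
 * start is inclusive and end is exclusive, per the rules above.
 */
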
static inline void local_flush_tlb_all(void)
{
	const int zero = 0;

	/* TLB invalidate all */
	asm("movc p0.c6, %0, #6; nop; nop; nop; nop; nop; nop; nop; nop"
		: : "r" (zero) : "cc");
}

static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	const int zero = 0;

	/* get_cpu()/put_cpu() pin us to this CPU while we test the mask */
	if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
		/* TLB invalidate all */
		asm("movc p0.c6, %0, #6; nop; nop; nop; nop; nop; nop; nop; nop"
			: : "r" (zero) : "cc");
	}
	put_cpu();
}

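/*
 * Note that this header provides no per-ASID invalidate operation, so
 * an mm-wide flush above falls back to invalidating the entire TLB
 * whenever the current CPU appears in the mm's cpumask.
 */
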
static inline void
local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
#ifndef CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE
		/* iTLB invalidate page */
		asm("movc p0.c6, %0, #5; nop; nop; nop; nop; nop; nop; nop; nop"
			: : "r" (uaddr & PAGE_MASK) : "cc");
		/* dTLB invalidate page */
		asm("movc p0.c6, %0, #3; nop; nop; nop; nop; nop; nop; nop; nop"
			: : "r" (uaddr & PAGE_MASK) : "cc");
#else
		/* TLB invalidate all */
		asm("movc p0.c6, %0, #6; nop; nop; nop; nop; nop; nop; nop; nop"
			: : "r" (uaddr & PAGE_MASK) : "cc");
#endif
	}
}

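/*
 * A minimal sketch (the fault-handling context is assumed, not shown
 * here): after fixing up a PTE for a faulting user address, dropping
 * the single stale translation looks like
 *
 *	local_flush_tlb_page(vma, address);
 *
 * which masks the address down to a page boundary internally.
 */
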
static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
{
#ifndef CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE
	/* iTLB invalidate page */
	asm("movc p0.c6, %0, #5; nop; nop; nop; nop; nop; nop; nop; nop"
		: : "r" (kaddr & PAGE_MASK) : "cc");
	/* dTLB invalidate page */
	asm("movc p0.c6, %0, #3; nop; nop; nop; nop; nop; nop; nop; nop"
		: : "r" (kaddr & PAGE_MASK) : "cc");
#else
	/* TLB invalidate all */
	asm("movc p0.c6, %0, #6; nop; nop; nop; nop; nop; nop; nop; nop"
		: : "r" (kaddr & PAGE_MASK) : "cc");
#endif
}

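/*
 * Sketch only: after changing a kernel mapping (say, in a hypothetical
 * remap helper), a single entry can be dropped without a full flush:
 *
 *	local_flush_tlb_kernel_page(kaddr);
 *
 * kaddr need not be page aligned; it is masked with PAGE_MASK above.
 */
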
/*
 *	flush_pmd_entry
 *
 *	Flush a PMD entry (word aligned, or double-word aligned) to
 *	RAM if the TLB for the CPU we are running on requires this.
 *	This is typically used when we are creating PMD entries.
 *
 *	clean_pmd_entry
 *
 *	Clean (but don't drain the write buffer) if the CPU requires
 *	these operations.  This is typically used when we are removing
 *	PMD entries.
 */
static inline void flush_pmd_entry(pmd_t *pmd)
{
#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
	/*
	 * Flush the dcache line holding the PMD by loading four
	 * kernel-text addresses that index the same cache set
	 * (see dcacheline_flush in proc-macros.S).
	 */
	asm("mov	r1, %0 << #20\n"
	    "ldw	r2, =_stext\n"
	    "add	r2, r2, r1 >> #20\n"
	    "ldw	r1, [r2+], #0x0000\n"
	    "ldw	r1, [r2+], #0x1000\n"
	    "ldw	r1, [r2+], #0x2000\n"
	    "ldw	r1, [r2+], #0x3000\n"
	    : : "r" (pmd) : "r1", "r2");
#else
	/* flush dcache all */
	asm("movc p0.c5, %0, #14; nop; nop; nop; nop; nop; nop; nop; nop"
		: : "r" (pmd) : "cc");
#endif
}

static inline void clean_pmd_entry(pmd_t *pmd)
{
#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
	/* clean dcache line */
	asm("movc p0.c5, %0, #11; nop; nop; nop; nop; nop; nop; nop; nop"
		: : "r" (__pa(pmd) & ~(L1_CACHE_BYTES - 1)) : "cc");
#else
	/* clean dcache all */
	asm("movc p0.c5, %0, #10; nop; nop; nop; nop; nop; nop; nop; nop"
		: : "r" (pmd) : "cc");
#endif
}

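/*
 * Typical pattern (a sketch; the direct store below stands in for a
 * set_pmd-style helper, which is an assumption here): write the entry
 * first, then make it visible to the hardware page-table walker:
 *
 *	*pmdp = __pmd(phys | prot);
 *	flush_pmd_entry(pmdp);
 */
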
/*
 * Convert calls to our calling convention.
 */
#define local_flush_tlb_range(vma, start, end)	\
	__cpu_flush_user_tlb_range(start, end, vma)
#define local_flush_tlb_kernel_range(s, e)	\
	__cpu_flush_kern_tlb_range(s, e)

#define flush_tlb_all		local_flush_tlb_all
#define flush_tlb_mm		local_flush_tlb_mm
#define flush_tlb_page		local_flush_tlb_page
#define flush_tlb_kernel_page	local_flush_tlb_kernel_page
#define flush_tlb_range		local_flush_tlb_range
#define flush_tlb_kernel_range	local_flush_tlb_kernel_range

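/*
 * With the definitions above, a generic call such as
 *
 *	flush_tlb_range(vma, start, end);
 *
 * expands to __cpu_flush_user_tlb_range(start, end, vma): the assembly
 * helpers take the range first and the vma last.
 */
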
/*
 * If PG_dcache_clean is not set for the page, we need to ensure that
 * any cache entries for the kernel's virtual memory range are written
 * back to the page.
 */
extern void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep);

extern void do_bad_area(unsigned long addr, unsigned int fsr,
		struct pt_regs *regs);

#endif /* __ASSEMBLY__ */

#endif /* __UNICORE_TLBFLUSH_H__ */