/*
 *  linux/arch/arm/mm/tlb-v6.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  ARM architecture version 6 TLB handling functions.
 *  These assume a split I/D TLB.
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"

#define HARVARD_TLB
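
/*
 * HARVARD_TLB selects the split I/D path below: each page is invalidated
 * in the D-TLB and, for executable mappings, in the I-TLB as well.  When
 * it is not defined, a single unified invalidate-by-MVA is issued instead.
 */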

/*
 *	v6wbi_flush_user_tlb_range(start, end, vma)
 *
 *	Invalidate a range of TLB entries in the specified address space.
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 *	- vma   - vma_struct describing address range
 *
 *	It is assumed that:
 *	- the "Invalidate single entry" instruction will invalidate
 *	  both the I and the D TLBs on Harvard-style TLBs
 *
 *	(See the call-path sketch after this routine.)
 */
ENTRY(v6wbi_flush_user_tlb_range)
        vma_vm_mm r3, r2                        @ get vma->vm_mm
        mov     ip, #0
        mmid    r3, r3                          @ get vm_mm->context.id
        mcr     p15, 0, ip, c7, c10, 4          @ drain write buffer
        mov     r0, r0, lsr #PAGE_SHIFT         @ align address
        mov     r1, r1, lsr #PAGE_SHIFT
        asid    r3, r3                          @ mask ASID
        orr     r0, r3, r0, lsl #PAGE_SHIFT     @ Create initial MVA
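        @ r0 is now in the MVA format the ARMv6 invalidate-by-MVA ops
        @ expect: page-aligned VA in the upper bits, 8-bit ASID in
        @ bits [7:0].  For example, with 4K pages (PAGE_SHIFT = 12),
        @ VA 0x40001234 and ASID 0x05 give MVA 0x40001005.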
        mov     r1, r1, lsl #PAGE_SHIFT
        vma_vm_flags r2, r2                     @ get vma->vm_flags
1:
#ifdef HARVARD_TLB
        mcr     p15, 0, r0, c8, c6, 1           @ TLB invalidate D MVA (was 1)
        tst     r2, #VM_EXEC                    @ Executable area ?
        mcrne   p15, 0, r0, c8, c5, 1           @ TLB invalidate I MVA (was 1)
#else
        mcr     p15, 0, r0, c8, c7, 1           @ TLB invalidate MVA (was 1)
#endif
        add     r0, r0, #PAGE_SZ
        cmp     r0, r1
        blo     1b
        mcr     p15, 0, ip, c7, c10, 4          @ data synchronization barrier
        ret     lr
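
/*
 * For illustration only: a minimal sketch of how this entry point is
 * reached.  The generic code dispatches through struct cpu_tlb_fns
 * (see <asm/tlbflush.h>); the exact caller shape below is an assumption,
 * not a quote from the header:
 *
 *	static inline void local_flush_tlb_range(struct vm_area_struct *vma,
 *						 unsigned long start,
 *						 unsigned long end)
 *	{
 *		// cpu_tlb is the ops table built from the descriptor that
 *		// define_tlb_functions emits at the end of this file
 *		cpu_tlb.flush_user_range(start, end, vma);
 *	}
 */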

/*
 *	v6wbi_flush_kern_tlb_range(start,end)
 *
 *	Invalidate a range of kernel TLB entries
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 *
 *	(See the note after this routine on its extra prefetch flush.)
 */
ENTRY(v6wbi_flush_kern_tlb_range)
        mov     r2, #0
        mcr     p15, 0, r2, c7, c10, 4          @ drain write buffer
        mov     r0, r0, lsr #PAGE_SHIFT         @ align address
        mov     r1, r1, lsr #PAGE_SHIFT
        mov     r0, r0, lsl #PAGE_SHIFT
        mov     r1, r1, lsl #PAGE_SHIFT
1:
#ifdef HARVARD_TLB
        mcr     p15, 0, r0, c8, c6, 1           @ TLB invalidate D MVA
        mcr     p15, 0, r0, c8, c5, 1           @ TLB invalidate I MVA
#else
        mcr     p15, 0, r0, c8, c7, 1           @ TLB invalidate MVA
#endif
        add     r0, r0, #PAGE_SZ
        cmp     r0, r1
        blo     1b
        mcr     p15, 0, r2, c7, c10, 4          @ data synchronization barrier
        mcr     p15, 0, r2, c7, c5, 4           @ prefetch flush (isb)
        ret     lr
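
/*
 * Note the extra c7, c5, 4 (prefetch flush / ISB) relative to the user
 * variant; the likely rationale is that freshly mapped kernel code may be
 * executed immediately after this returns, so the prefetch stream must be
 * resynchronized here, whereas a user-space flush can rely on the
 * exception return to user mode for that synchronization.
 */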

        __INIT

        /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
        define_tlb_functions v6wbi, v6wbi_tlb_flags
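
/*
 * For reference, the table that define_tlb_functions fills in looks
 * roughly like this (reconstructed from <asm/tlbflush.h> of this era;
 * exact field order and types are an assumption):
 *
 *	struct cpu_tlb_fns {
 *		void (*flush_user_range)(unsigned long, unsigned long,
 *					 struct vm_area_struct *);
 *		void (*flush_kern_range)(unsigned long, unsigned long);
 *		unsigned long tlb_flags;
 *	};
 *
 * i.e. the macro binds v6wbi_flush_user_tlb_range,
 * v6wbi_flush_kern_tlb_range and v6wbi_tlb_flags into one descriptor.
 */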