tlbex_64.c

/*
 * The SH64 TLB miss.
 *
 * Original code from fault.c
 * Copyright (C) 2000, 2001  Paolo Alberelli
 *
 * Fast PTE->TLB refill path
 * Copyright (C) 2003 Richard.Curnow@superh.com
 *
 * IMPORTANT NOTES:
 * The do_fast_page_fault function is called from a context in entry.S
 * where very few registers have been saved.  In particular, the code in
 * this file must be compiled not to use ANY caller-save registers that
 * are not part of the restricted save set.  Also, it means that code in
 * this file must not make calls to functions elsewhere in the kernel, or
 * else the excepting context will see corruption in its caller-save
 * registers.  Plus, the entry.S save area is non-reentrant, so this code
 * has to run with SR.BL==1, i.e. no interrupts taken inside it and panic
 * on any exception.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
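
/*
 * Concretely, even an innocuous printk() from here would be a call into
 * the rest of the kernel and would clobber caller-save registers that the
 * excepting context has not saved.
 */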
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <asm/tlb.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>

static int handle_tlbmiss(unsigned long long protection_flags,
                          unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        pte_t entry;
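
        /*
         * Pick the right set of page tables to walk: kernel vmalloc
         * addresses resolve against the kernel's pgd, while anything
         * else must be a user address backed by a live mm, or we punt
         * to the general handler.
         */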
        if (is_vmalloc_addr((void *)address)) {
                pgd = pgd_offset_k(address);
        } else {
                if (unlikely(address >= TASK_SIZE || !current->mm))
                        return 1;

                pgd = pgd_offset(current->mm, address);
        }

        pud = pud_offset(pgd, address);
        if (pud_none(*pud) || !pud_present(*pud))
                return 1;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd) || !pmd_present(*pmd))
                return 1;

        pte = pte_offset_kernel(pmd, address);
        entry = *pte;
        if (pte_none(entry) || !pte_present(entry))
                return 1;

        /*
         * If the page doesn't have sufficient protection bits set to
         * service the kind of fault being handled, there's not much
         * point doing the TLB refill.  Punt the fault to the general
         * handler.
         */
        if ((pte_val(entry) & protection_flags) != protection_flags)
                return 1;
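
        /*
         * Worked example: a write miss is serviced with protection_flags
         * == PRW.  A PTE that maps the page read-only has PRR set but
         * PRW clear, so the test above fails and the fault is punted to
         * the general handler in fault.c.
         */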
        update_mmu_cache(NULL, address, pte);

        return 0;
}

/*
 * Put all this information into one structure so that everything is just
 * arithmetic relative to a single base address.  This reduces the number
 * of movi/shori pairs needed just to load addresses of static data.
 */
struct expevt_lookup {
        unsigned short protection_flags[8];
        unsigned char  is_text_access[8];
        unsigned char  is_write_access[8];
};

#define PRU (1<<9)
#define PRW (1<<8)
#define PRX (1<<7)
#define PRR (1<<6)

/* Sized as 8 rather than 4 to allow checking the PTE's PRU bit against whether
   the fault happened in user mode or privileged mode. */
static struct expevt_lookup expevt_lookup_table = {
        .protection_flags = {PRX, PRX, 0, 0, PRR, PRR, PRW, PRW},
        .is_text_access   = {1,   1,   0, 0, 0,   0,   0,   0}
};
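
/*
 * Each protection value appears twice (slots 0/1, 4/5, 6/7).  The hash in
 * do_fast_page_fault() below lands ITLBMISS on slot 1, RTLBMISS on slot 4
 * and WTLBMISS on slot 6, so the duplicated neighbouring slots line up
 * with the TODO below: once SSR.MD is xored into the hash, bit 0 of the
 * index would select between the user and privileged variants.
 */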

static inline unsigned int
expevt_to_fault_code(unsigned long expevt)
{
        if (expevt == 0xa40)
                return FAULT_CODE_ITLB;
        else if (expevt == 0x060)
                return FAULT_CODE_WRITE;

        return 0;
}
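
/*
 * Note that RTLBMISS (EXPEVT == 0x040) takes the fall-through path: a
 * plain read miss contributes no ITLB/WRITE bits to the fault code.
 */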

/*
 * This routine handles page faults that can be serviced just by refilling a
 * TLB entry from an existing page table entry.  (This case represents a very
 * large majority of page faults.)  Return 0 if the fault was successfully
 * handled.  Return 1 if the fault could not be handled.  (This leads into the
 * general fault handling in fault.c which deals with mapping file-backed
 * pages, stack growth, segmentation faults, swapping, etc.)
 */
asmlinkage int __kprobes
do_fast_page_fault(unsigned long long ssr_md, unsigned long long expevt,
                   unsigned long address)
{
        unsigned long long protection_flags;
        unsigned long long index;
        unsigned long long expevt4;
        unsigned int fault_code;

        /* The next few lines implement a way of hashing EXPEVT into a
         * small array index which can be used to look up parameters
         * specific to the type of TLBMISS being handled.
         *
         * Note:
         *      ITLBMISS has EXPEVT==0xa40
         *      RTLBMISS has EXPEVT==0x040
         *      WTLBMISS has EXPEVT==0x060
         */
        expevt4 = (expevt >> 4);
        /* TODO: xor ssr_md into this expression too.  Then we can check
         * that PRU is set when it needs to be. */
        index = expevt4 ^ (expevt4 >> 5);
        index &= 7;
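
        /*
         * Worked through: 0xa40 >> 4 == 0xa4, and 0xa4 ^ (0xa4 >> 5) ==
         * 0xa4 ^ 0x05 == 0xa1, so ITLBMISS hashes to index 1.  0x040 and
         * 0x060 shift down to 0x04 and 0x06, which the xor leaves
         * unchanged, giving indices 4 and 6.  These are exactly the
         * PRX/PRR/PRW slots in expevt_lookup_table.
         */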

        fault_code = expevt_to_fault_code(expevt);

        protection_flags = expevt_lookup_table.protection_flags[index];

        if (expevt_lookup_table.is_text_access[index])
                fault_code |= FAULT_CODE_ITLB;
        if (!ssr_md)
                fault_code |= FAULT_CODE_USER;

        set_thread_fault_code(fault_code);

        return handle_tlbmiss(protection_flags, address);
}
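
/*
 * The caller is the TLB-miss fast path in entry.S (see the header notes);
 * a nonzero return there presumably falls through to the full
 * do_page_fault() slow path in fault.c.
 */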