/*
 * Meta page fault handling.
 *
 * Copyright (C) 2005-2012 Imagination Technologies Ltd.
 */

#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>

#include <asm/tlbflush.h>
#include <asm/mmu.h>
#include <asm/traps.h>
  15. /* Clear any pending catch buffer state. */
  16. static void clear_cbuf_entry(struct pt_regs *regs, unsigned long addr,
  17. unsigned int trapno)
  18. {
  19. PTBICTXEXTCB0 cbuf = regs->extcb0;
  20. switch (trapno) {
  21. /* Instruction fetch faults leave no catch buffer state. */
  22. case TBIXXF_SIGNUM_IGF:
  23. case TBIXXF_SIGNUM_IPF:
  24. return;
  25. default:
  26. if (cbuf[0].CBAddr == addr) {
  27. cbuf[0].CBAddr = 0;
  28. cbuf[0].CBFlags &= ~TXCATCH0_FAULT_BITS;
  29. /* And, as this is the ONLY catch entry, we
  30. * need to clear the cbuf bit from the context!
  31. */
  32. regs->ctx.SaveMask &= ~(TBICTX_CBUF_BIT |
  33. TBICTX_XCBF_BIT);
  34. return;
  35. }
  36. pr_err("Failed to clear cbuf entry!\n");
  37. }
  38. }
/* When set, unhandled user segfaults are logged (rate-limited) below. */
int show_unhandled_signals = 1;

/*
 * do_page_fault() - handle an MMU fault at @address.
 * @regs:         register state saved at the fault
 * @address:      faulting virtual address
 * @write_access: non-zero if the faulting access was a write
 * @trapno:       TBI trap number; reported via siginfo and diagnostics
 *
 * Returns 0 when the fault was handled (or the caller should simply
 * return, e.g. vmalloc sync or pending fatal signal), 1 when a signal
 * was delivered or a kernel exception fixup was applied.  Does not
 * return at all if the kernel fault has no fixup (die/do_exit).
 */
int do_page_fault(struct pt_regs *regs, unsigned long address,
		  unsigned int write_access, unsigned int trapno)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *prev_vma;
	siginfo_t info;
	int fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	tsk = current;

	/*
	 * Faults in the vmalloc area are resolved without taking mmap_sem
	 * by copying the relevant entries from the reference page table.
	 */
	if ((address >= VMALLOC_START) && (address < VMALLOC_END)) {
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = ((pgd_t *)mmu_get_base()) + offset;
		pgd_k = swapper_pg_dir + offset;

		/* This will never happen with the folded page table. */
		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			set_pgd(pgd, *pgd_k);
			return 0;
		}

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto bad_area_nosemaphore;
		set_pud(pud, *pud_k);

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto bad_area_nosemaphore;

		/* May only be needed on Chorus2 */
		flush_tlb_all();
		return 0;
	}

	mm = tsk->mm;

	/*
	 * No user context (kernel thread) or fault handling disabled
	 * (atomic context): we cannot take mmap_sem, so go straight to
	 * the kernel-fixup path.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
retry:
	down_read(&mm->mmap_sem);

	vma = find_vma_prev(mm, address, &prev_vma);

	if (!vma || address < vma->vm_start)
		goto check_expansion;

good_area:
	/* Check access permissions of the VMA against the fault type. */
	if (write_access) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	/*
	 * On VM_FAULT_RETRY, mmap_sem has already been dropped in
	 * __lock_page_or_retry, so a bare return here is lock-safe.
	 *
	 * NOTE(review): returning 0 here even for a kernel-mode fault
	 * skips the exception-fixup path; other architectures do
	 * "if (!user_mode(regs)) goto no_context;" in this case —
	 * verify against upstream before relying on this path.
	 */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return 0;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		/* No other error bits are expected from handle_mm_fault. */
		BUG();
	}
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/* Account the fault before a possible retry. */
		if (fault & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			/* Retry at most once: drop ALLOW_RETRY, mark TRIED. */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return 0;

check_expansion:
	/* Address below the nearest VMA: maybe a stack that can grow down. */
	vma = prev_vma;
	if (vma && (expand_stack(vma, address) == 0))
		goto good_area;

bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User-mode accesses to unmapped/forbidden addresses get SIGSEGV. */
	if (user_mode(regs)) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = SEGV_MAPERR;
		info.si_addr = (__force void __user *)address;
		info.si_trapno = trapno;

		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
		    printk_ratelimit()) {
			pr_info("%s%s[%d]: segfault at %lx pc %08x sp %08x write %d trap %#x (%s)",
				task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
				tsk->comm, task_pid_nr(tsk), address,
				regs->ctx.CurrPC, regs->ctx.AX[0].U0,
				write_access, trapno, trap_name(trapno));
			print_vma_addr(" in ", regs->ctx.CurrPC);
			print_vma_addr(" rtp in ", regs->ctx.DX[4].U1);
			printk("\n");
			show_regs(regs);
		}
		force_sig_info(SIGSEGV, &info, tsk);
		return 1;
	}
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (__force void __user *)address;
	info.si_trapno = trapno;
	force_sig_info(SIGBUS, &info, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;

	return 1;

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (user_mode(regs)) {
		/* Let the OOM killer pick a victim, then retry from userspace. */
		pagefault_out_of_memory();
		return 1;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs)) {
		/* Fixed up: discard stale catch-buffer state for this address. */
		clear_cbuf_entry(regs, address, trapno);
		return 1;
	}

	/* Unrecoverable kernel fault: oops and kill the task. */
	die("Oops", regs, (write_access << 15) | trapno, address);
	do_exit(SIGKILL);
}