pagewalk.c

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pte_t *pte;
        int err = 0;

        pte = pte_offset_map(pmd, addr);
        for (;;) {
                err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
                if (err)
                        break;
                addr += PAGE_SIZE;
                if (addr == end)
                        break;
                pte++;
        }

        pte_unmap(pte);
        return err;
}
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pmd_t *pmd;
        unsigned long next;
        int err = 0;

        pmd = pmd_offset(pud, addr);
        do {
again:
                next = pmd_addr_end(addr, end);
                if (pmd_none(*pmd) || !walk->vma) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                /*
                 * This implies that each ->pmd_entry() handler
                 * needs to know about pmd_trans_huge() pmds
                 */
                if (walk->pmd_entry)
                        err = walk->pmd_entry(pmd, addr, next, walk);
                if (err)
                        break;

                /*
                 * Check this here so we only break down trans_huge
                 * pages when we _need_ to
                 */
                if (!walk->pte_entry)
                        continue;

                split_huge_page_pmd_mm(walk->mm, addr, pmd);
                if (pmd_trans_unstable(pmd))
                        goto again;
                err = walk_pte_range(pmd, addr, next, walk);
                if (err)
                        break;
        } while (pmd++, addr = next, addr != end);

        return err;
}
static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pud_t *pud;
        unsigned long next;
        int err = 0;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                if (walk->pmd_entry || walk->pte_entry)
                        err = walk_pmd_range(pud, addr, next, walk);
                if (err)
                        break;
        } while (pud++, addr = next, addr != end);

        return err;
}
static int walk_pgd_range(unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pgd_t *pgd;
        unsigned long next;
        int err = 0;

        pgd = pgd_offset(walk->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                if (walk->pmd_entry || walk->pte_entry)
                        err = walk_pud_range(pgd, addr, next, walk);
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);

        return err;
}
#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
                                       unsigned long end)
{
        unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
        return boundary < end ? boundary : end;
}

static int walk_hugetlb_range(unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        struct hstate *h = hstate_vma(vma);
        unsigned long next;
        unsigned long hmask = huge_page_mask(h);
        pte_t *pte;
        int err = 0;

        do {
                next = hugetlb_entry_end(h, addr, end);
                pte = huge_pte_offset(walk->mm, addr & hmask);
                if (pte)
                        err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
                else if (walk->pte_hole)
                        err = walk->pte_hole(addr, next, walk);
                if (err)
                        break;
        } while (addr = next, addr != end);

        return err;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
{
        return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */
/*
 * Decide whether we really walk over the current vma on [@start, @end)
 * or skip it via the returned value. Return 0 if we do walk over the
 * current vma, and return 1 if we skip the vma. A negative value means
 * an error, where we abort the current walk.
 */
static int walk_page_test(unsigned long start, unsigned long end,
                          struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;

        if (walk->test_walk)
                return walk->test_walk(start, end, walk);

        /*
         * vma(VM_PFNMAP) doesn't have any valid struct pages behind the
         * VM_PFNMAP range, so we don't walk over it as we do for normal
         * vmas. However, some callers are interested in handling hole
         * ranges and don't want to just ignore any single address range.
         * Such users certainly define their ->pte_hole() callbacks, so
         * let's delegate to them to handle vma(VM_PFNMAP).
         */
        if (vma->vm_flags & VM_PFNMAP) {
                int err = 1;

                if (walk->pte_hole)
                        err = walk->pte_hole(start, end, walk);
                return err ? err : 1;
        }
        return 0;
}
static int __walk_page_range(unsigned long start, unsigned long end,
                             struct mm_walk *walk)
{
        int err = 0;
        struct vm_area_struct *vma = walk->vma;

        if (vma && is_vm_hugetlb_page(vma)) {
                if (walk->hugetlb_entry)
                        err = walk_hugetlb_range(start, end, walk);
        } else
                err = walk_pgd_range(start, end, walk);

        return err;
}
/**
 * walk_page_range - walk page table with caller specific callbacks
 *
 * Recursively walk the page table tree of the process represented by
 * @walk->mm within the virtual address range [@start, @end). During
 * walking, we can do some caller-specific work for each entry by setting
 * up pmd_entry(), pte_entry(), and/or hugetlb_entry(). If you don't set
 * up one of these callbacks, the associated entries/pages are just
 * ignored.
 * The return values of these callbacks are commonly defined as follows:
 *
 *  - 0  : succeeded to handle the current entry, and if you don't reach
 *         the end address yet, continue to walk.
 *  - >0 : succeeded to handle the current entry, and return to the caller
 *         with a caller-specific value.
 *  - <0 : failed to handle the current entry, and return to the caller
 *         with an error code.
 *
 * Before starting to walk the page table, some callers want to check
 * whether they really want to walk over the current vma, typically by
 * checking its vm_flags. walk_page_test() and @walk->test_walk() are
 * used for this purpose.
 *
 * struct mm_walk keeps current values of some common data like vma and
 * pmd, which are useful for access from the callbacks. If you want to
 * pass some caller-specific data to the callbacks, @walk->private should
 * be helpful.
 *
 * Locking:
 *   Callers of walk_page_range() and walk_page_vma() should hold
 *   @walk->mm->mmap_sem, because these functions traverse the vma list
 *   and/or access the vma's data.
 */
int walk_page_range(unsigned long start, unsigned long end,
                    struct mm_walk *walk)
{
        int err = 0;
        unsigned long next;
        struct vm_area_struct *vma;

        if (start >= end)
                return -EINVAL;

        if (!walk->mm)
                return -EINVAL;

        VM_BUG_ON_MM(!rwsem_is_locked(&walk->mm->mmap_sem), walk->mm);

        vma = find_vma(walk->mm, start);
        do {
                if (!vma) { /* after the last vma */
                        walk->vma = NULL;
                        next = end;
                } else if (start < vma->vm_start) { /* outside vma */
                        walk->vma = NULL;
                        next = min(end, vma->vm_start);
                } else { /* inside vma */
                        walk->vma = vma;
                        next = min(end, vma->vm_end);
                        vma = vma->vm_next;

                        err = walk_page_test(start, next, walk);
                        if (err > 0) {
                                /*
                                 * positive return values are purely for
                                 * controlling the pagewalk, so should never
                                 * be passed to the callers.
                                 */
                                err = 0;
                                continue;
                        }
                        if (err < 0)
                                break;
                }
                if (walk->vma || walk->pte_hole)
                        err = __walk_page_range(start, next, walk);
                if (err)
                        break;
        } while (start = next, start < end);

        return err;
}
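
/*
 * Usage sketch (not part of the original file): an illustration of how the
 * callbacks documented above fit together for a caller of walk_page_range().
 * The names count_present_pte(), count_present() and struct present_count
 * are hypothetical; only struct mm_walk, its fields, and walk_page_range()
 * itself come from this API. Assumes the mmap_sem locking rule stated in
 * the kerneldoc above.
 */
#if 0	/* illustration only, not compiled as part of pagewalk.c */
struct present_count {
        unsigned long pages;            /* present ptes seen so far */
};

static int count_present_pte(pte_t *pte, unsigned long addr,
                             unsigned long next, struct mm_walk *walk)
{
        struct present_count *pc = walk->private;

        if (pte_present(*pte))
                pc->pages++;
        return 0;                       /* 0 keeps the walk going until @end */
}

static unsigned long count_present(struct mm_struct *mm,
                                   unsigned long start, unsigned long end)
{
        struct present_count pc = { 0 };
        struct mm_walk walk = {
                .pte_entry      = count_present_pte,
                .mm             = mm,
                .private        = &pc,
        };

        down_read(&mm->mmap_sem);
        walk_page_range(start, end, &walk);
        up_read(&mm->mmap_sem);
        return pc.pages;
}
#endif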
int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk)
{
        int err;

        if (!walk->mm)
                return -EINVAL;

        VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
        VM_BUG_ON(!vma);

        walk->vma = vma;
        err = walk_page_test(vma->vm_start, vma->vm_end, walk);
        if (err > 0)
                return 0;
        if (err < 0)
                return err;
        return __walk_page_range(vma->vm_start, vma->vm_end, walk);
}
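
/*
 * Usage sketch (not part of the original file): walking a single vma and
 * using ->test_walk() to decide whether the walk runs at all, following the
 * return-value convention described at walk_page_test() above (0 = walk,
 * 1 = skip, <0 = abort). The names skip_locked_test() and walk_one_vma()
 * are hypothetical. Assumes the caller already holds mmap_sem.
 */
#if 0	/* illustration only, not compiled as part of pagewalk.c */
static int skip_locked_test(unsigned long start, unsigned long end,
                            struct mm_walk *walk)
{
        /* Skip VM_LOCKED vmas entirely, walk everything else. */
        return (walk->vma->vm_flags & VM_LOCKED) ? 1 : 0;
}

static int walk_one_vma(struct vm_area_struct *vma,
                        int (*pte_cb)(pte_t *, unsigned long,
                                      unsigned long, struct mm_walk *))
{
        struct mm_walk walk = {
                .pte_entry      = pte_cb,
                .test_walk      = skip_locked_test,
                .mm             = vma->vm_mm,
        };

        return walk_page_vma(vma, &walk);
}
#endif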