  1. /*
  2. * arch/metag/mm/hugetlbpage.c
  3. *
  4. * METAG HugeTLB page support.
  5. *
  6. * Cloned from SuperH
  7. *
  8. * Cloned from sparc64 by Paul Mundt.
  9. *
  10. * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
  11. */
  12. #include <linux/init.h>
  13. #include <linux/fs.h>
  14. #include <linux/mm.h>
  15. #include <linux/hugetlb.h>
  16. #include <linux/pagemap.h>
  17. #include <linux/sysctl.h>
  18. #include <asm/mman.h>
  19. #include <asm/pgalloc.h>
  20. #include <asm/tlb.h>
  21. #include <asm/tlbflush.h>
  22. #include <asm/cacheflush.h>
  23. /*
  24. * If the arch doesn't supply something else, assume that hugepage
  25. * size aligned regions are ok without further preparation.
  26. */
  27. int prepare_hugepage_range(struct file *file, unsigned long addr,
  28. unsigned long len)
  29. {
  30. struct mm_struct *mm = current->mm;
  31. struct hstate *h = hstate_file(file);
  32. struct vm_area_struct *vma;
  33. if (len & ~huge_page_mask(h))
  34. return -EINVAL;
  35. if (addr & ~huge_page_mask(h))
  36. return -EINVAL;
  37. if (TASK_SIZE - len < addr)
  38. return -EINVAL;
  39. vma = find_vma(mm, ALIGN_HUGEPT(addr));
  40. if (vma && !(vma->vm_flags & MAP_HUGETLB))
  41. return -EINVAL;
  42. vma = find_vma(mm, addr);
  43. if (vma) {
  44. if (addr + len > vma->vm_start)
  45. return -EINVAL;
  46. if (!(vma->vm_flags & MAP_HUGETLB) &&
  47. (ALIGN_HUGEPT(addr + len) > vma->vm_start))
  48. return -EINVAL;
  49. }
  50. return 0;
  51. }
/*
 * huge_pte_alloc() - allocate/find the pte for a huge page at @addr.
 * @mm:   address space being populated
 * @addr: virtual address the huge page will map
 * @sz:   huge page size (unused here; this arch has one huge page size)
 *
 * Walks pgd/pud/pmd and allocates the pte page if needed, then marks the
 * pgd entry with the huge page-size bits so the hardware walker uses the
 * huge size for this 1st-level region.
 *
 * Returns the (mapped) pte pointer, or NULL if allocation failed.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(mm, addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	pte = pte_alloc_map(mm, NULL, pmd, addr);

	/* replace the page-size field of the pgd entry with the huge size */
	pgd->pgd &= ~_PAGE_SZ_MASK;
	pgd->pgd |= _PAGE_SZHUGE;

	return pte;
}
  67. pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
  68. {
  69. pgd_t *pgd;
  70. pud_t *pud;
  71. pmd_t *pmd;
  72. pte_t *pte = NULL;
  73. pgd = pgd_offset(mm, addr);
  74. pud = pud_offset(pgd, addr);
  75. pmd = pmd_offset(pud, addr);
  76. pte = pte_offset_kernel(pmd, addr);
  77. return pte;
  78. }
  79. int pmd_huge(pmd_t pmd)
  80. {
  81. return pmd_page_shift(pmd) > PAGE_SHIFT;
  82. }
/*
 * pud_huge() - report whether @pud maps a huge page.
 *
 * Huge pages are never mapped at the pud level on this arch, so this is
 * always false.
 */
int pud_huge(pud_t pud)
{
	return 0;
}
/*
 * follow_huge_pmd() - get the page for a huge pmd during follow_page().
 *
 * Not supported on this arch (huge pages are tracked via ptes, not huge
 * pmds), so always returns NULL and lets the generic code take the normal
 * path.
 */
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}
  92. #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
  93. /*
  94. * Look for an unmapped area starting after another hugetlb vma.
  95. * There are guaranteed to be no huge pte's spare if all the huge pages are
  96. * full size (4MB), so in that case compile out this search.
  97. */
  98. #if HPAGE_SHIFT == HUGEPT_SHIFT
/*
 * When huge pages exactly fill a 2nd-level page table block
 * (HPAGE_SHIFT == HUGEPT_SHIFT) there can never be spare huge ptes after
 * an existing mapping, so the "reuse space after a huge vma" search is
 * compiled out; returning 0 means "no existing area found".
 */
static inline unsigned long
hugetlb_get_unmapped_area_existing(unsigned long len)
{
	return 0;
}
  104. #else
  105. static unsigned long
  106. hugetlb_get_unmapped_area_existing(unsigned long len)
  107. {
  108. struct mm_struct *mm = current->mm;
  109. struct vm_area_struct *vma;
  110. unsigned long start_addr, addr;
  111. int after_huge;
  112. if (mm->context.part_huge) {
  113. start_addr = mm->context.part_huge;
  114. after_huge = 1;
  115. } else {
  116. start_addr = TASK_UNMAPPED_BASE;
  117. after_huge = 0;
  118. }
  119. new_search:
  120. addr = start_addr;
  121. for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
  122. if ((!vma && !after_huge) || TASK_SIZE - len < addr) {
  123. /*
  124. * Start a new search - just in case we missed
  125. * some holes.
  126. */
  127. if (start_addr != TASK_UNMAPPED_BASE) {
  128. start_addr = TASK_UNMAPPED_BASE;
  129. goto new_search;
  130. }
  131. return 0;
  132. }
  133. /* skip ahead if we've aligned right over some vmas */
  134. if (vma && vma->vm_end <= addr)
  135. continue;
  136. /* space before the next vma? */
  137. if (after_huge && (!vma || ALIGN_HUGEPT(addr + len)
  138. <= vma->vm_start)) {
  139. unsigned long end = addr + len;
  140. if (end & HUGEPT_MASK)
  141. mm->context.part_huge = end;
  142. else if (addr == mm->context.part_huge)
  143. mm->context.part_huge = 0;
  144. return addr;
  145. }
  146. if (vma->vm_flags & MAP_HUGETLB) {
  147. /* space after a huge vma in 2nd level page table? */
  148. if (vma->vm_end & HUGEPT_MASK) {
  149. after_huge = 1;
  150. /* no need to align to the next PT block */
  151. addr = vma->vm_end;
  152. continue;
  153. }
  154. }
  155. after_huge = 0;
  156. addr = ALIGN_HUGEPT(vma->vm_end);
  157. }
  158. }
  159. #endif
  160. /* Do a full search to find an area without any nearby normal pages. */
  161. static unsigned long
  162. hugetlb_get_unmapped_area_new_pmd(unsigned long len)
  163. {
  164. struct vm_unmapped_area_info info;
  165. info.flags = 0;
  166. info.length = len;
  167. info.low_limit = TASK_UNMAPPED_BASE;
  168. info.high_limit = TASK_SIZE;
  169. info.align_mask = PAGE_MASK & HUGEPT_MASK;
  170. info.align_offset = 0;
  171. return vm_unmapped_area(&info);
  172. }
/*
 * hugetlb_get_unmapped_area() - arch hook choosing an address for a new
 * hugetlb mapping.
 * @file:  hugetlbfs file (supplies the hstate/page size)
 * @addr:  caller-suggested address (hint), may be 0
 * @len:   length of the mapping
 * @pgoff: file offset in pages (unused here)
 * @flags: mmap flags; MAP_FIXED makes @addr mandatory
 *
 * Returns the chosen address, or -EINVAL/-ENOMEM on failure.
 */
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
			  unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	/* MAP_FIXED: only validate the caller's exact address */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	/* honour the hint if the aligned-up address checks out */
	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		if (!prepare_hugepage_range(file, addr, len))
			return addr;
	}

	/*
	 * Look for an existing hugetlb vma with space after it (this is to
	 * minimise fragmentation caused by huge pages).
	 */
	addr = hugetlb_get_unmapped_area_existing(len);
	if (addr)
		return addr;

	/*
	 * Find an unmapped naturally aligned set of 4MB blocks that we can use
	 * for huge pages.
	 */
	return hugetlb_get_unmapped_area_new_pmd(len);
}
  205. #endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
  206. /* necessary for boot time 4MB huge page allocation */
  207. static __init int setup_hugepagesz(char *opt)
  208. {
  209. unsigned long ps = memparse(opt, &opt);
  210. if (ps == (1 << HPAGE_SHIFT)) {
  211. hugetlb_add_hstate(HPAGE_SHIFT - PAGE_SHIFT);
  212. } else {
  213. pr_err("hugepagesz: Unsupported page size %lu M\n",
  214. ps >> 20);
  215. return 0;
  216. }
  217. return 1;
  218. }
  219. __setup("hugepagesz=", setup_hugepagesz);