hugetlbpage.c

/*
 * PARISC64 Huge TLB page support.
 *
 * This parisc implementation is heavily based on the SPARC and x86 code.
 *
 * Copyright (C) 2015 Helge Deller <deller@gmx.de>
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
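
/*
 * Reject lengths that are not a multiple of the huge page size or that
 * exceed TASK_SIZE, then let arch_get_unmapped_area() pick a placement
 * that satisfies the parisc cache-colouring requirements.
 */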
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;

	if (addr)
		addr = ALIGN(addr, huge_page_size(h));

	/* we need to make sure the colouring is OK */
	return arch_get_unmapped_area(file, addr, len, pgoff, flags);
}
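
/*
 * Allocate any missing PUD/PMD/PTE levels for @addr and return a
 * pointer to the first sub-PTE of the huge page range, or NULL if a
 * page table allocation failed.
 */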
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range.  So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, NULL, pmd, addr);
	}
	return pte;
}
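
/*
 * Lookup-only counterpart of huge_pte_alloc(): walk the existing page
 * tables and return the PTE for @addr, or NULL if any level is not yet
 * populated.
 */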
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}
	return pte;
}
/* Purge data and instruction TLB entries.  Must be called holding
 * the pa_tlb_lock.  The TLB purge instructions are slow on SMP
 * machines since the purge must be broadcast to all CPUs.
 */
static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
{
	int i;

	/* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
	 * Linux standard huge pages (e.g. 2 MB). */
	BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);

	addr &= HPAGE_MASK;
	addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;

	for (i = 0; i < (1 << (HPAGE_SHIFT - REAL_HPAGE_SHIFT)); i++) {
		purge_tlb_entries(mm, addr);
		addr += (1UL << REAL_HPAGE_SHIFT);
	}
}
/* __set_huge_pte_at() must be called holding the pa_tlb_lock. */
static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	unsigned long addr_start;
	int i;

	addr &= HPAGE_MASK;
	addr_start = addr;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		/* Write one sub-PTE per base page of the huge page; each
		 * iteration advances both the pointer and the address
		 * encoded in the entry by one PAGE_SIZE step. */
		set_pte(ptep, entry);
		ptep++;

		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}

	purge_tlb_entries_huge(mm, addr_start);
}
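
/*
 * Locked wrapper around __set_huge_pte_at(): purge_tlb_start()/
 * purge_tlb_end() bracket the update with the pa_tlb_lock held, as
 * required by the helpers above.
 */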
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned long flags;

	purge_tlb_start(flags);
	__set_huge_pte_at(mm, addr, ptep, entry);
	purge_tlb_end(flags);
}
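
/*
 * Under the pa_tlb_lock, fetch the old entry and clear the whole huge
 * page range, returning the previous PTE to the caller.
 */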
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long flags;
	pte_t entry;

	purge_tlb_start(flags);
	entry = *ptep;
	__set_huge_pte_at(mm, addr, ptep, __pte(0));
	purge_tlb_end(flags);

	return entry;
}
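
/*
 * Rewrite the huge page range with the write bit removed from every
 * sub-PTE.
 */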
void huge_ptep_set_wrprotect(struct mm_struct *mm,
			     unsigned long addr, pte_t *ptep)
{
	unsigned long flags;
	pte_t old_pte;

	purge_tlb_start(flags);
	old_pte = *ptep;
	__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
	purge_tlb_end(flags);
}
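
/*
 * Update the access flags of a huge PTE.  The range is only rewritten
 * when the new entry differs from the current one; the return value
 * tells the caller whether an update took place.
 */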
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
	unsigned long flags;
	int changed;

	purge_tlb_start(flags);
	changed = !pte_same(*ptep, pte);
	if (changed) {
		__set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
	}
	purge_tlb_end(flags);

	return changed;
}
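
/*
 * Huge pages on parisc are built from runs of contiguous PTE-level
 * entries rather than leaf entries at the PMD or PUD level, so neither
 * level ever holds a huge mapping.
 */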
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}