hugetlb.h

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_HUGETLB_H
#define _ASM_TILE_HUGETLB_H

#include <asm/page.h>
#include <asm-generic/hugetlb.h>
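
/*
 * tile defines no address ranges that are usable only for huge pages,
 * so this check is always false.
 */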
static inline int is_hugepage_only_range(struct mm_struct *mm,
                                         unsigned long addr,
                                         unsigned long len) {
        return 0;
}

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
                                         unsigned long addr, unsigned long len)
{
        struct hstate *h = hstate_file(file);

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (addr & ~huge_page_mask(h))
                return -EINVAL;
        return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                                          unsigned long addr, unsigned long end,
                                          unsigned long floor,
                                          unsigned long ceiling)
{
        free_pgd_range(tlb, addr, end, floor, ceiling);
}
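
/*
 * The huge-PTE helpers below are thin wrappers around the generic PTE
 * operations; tile needs no extra handling beyond what set_pte() and
 * the ptep_*() routines already do.
 */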
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                                            unsigned long addr, pte_t *ptep)
{
        return ptep_get_and_clear(mm, addr, ptep);
}

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
                                         unsigned long addr, pte_t *ptep)
{
        ptep_clear_flush(vma, addr, ptep);
}

static inline int huge_pte_none(pte_t pte)
{
        return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
        return pte_wrprotect(pte);
}

static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
                                           unsigned long addr, pte_t *ptep)
{
        ptep_set_wrprotect(mm, addr, ptep);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                                             unsigned long addr, pte_t *ptep,
                                             pte_t pte, int dirty)
{
        return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
        return *ptep;
}
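
/*
 * tile keeps no architecture-specific state in the hugepage flags, so
 * this hook is a no-op.
 */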
static inline void arch_clear_hugepage_flags(struct page *page)
{
}
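
/*
 * With CONFIG_HUGETLB_SUPER_PAGES, huge pages need not be exactly
 * PMD_SIZE or PUD_SIZE: other sizes are encoded by setting the
 * HV_PTE_SUPER bit in the PTE, and huge_shift[] below gives the
 * scale-up applied at each page-table level.
 */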
#ifdef CONFIG_HUGETLB_SUPER_PAGES
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
                                       struct page *page, int writable)
{
        size_t pagesize = huge_page_size(hstate_vma(vma));

        if (pagesize != PUD_SIZE && pagesize != PMD_SIZE)
                entry = pte_mksuper(entry);
        return entry;
}
#define arch_make_huge_pte arch_make_huge_pte

/* Sizes to scale up page size for PTEs with HV_PTE_SUPER bit. */
enum {
        HUGE_SHIFT_PGDIR = 0,
        HUGE_SHIFT_PMD = 1,
        HUGE_SHIFT_PAGE = 2,
        HUGE_SHIFT_ENTRIES
};
extern int huge_shift[HUGE_SHIFT_ENTRIES];
#endif /* CONFIG_HUGETLB_SUPER_PAGES */

#endif /* _ASM_TILE_HUGETLB_H */