  1. #ifndef _LINUX_SWAPOPS_H
  2. #define _LINUX_SWAPOPS_H
  3. #include <linux/radix-tree.h>
  4. #include <linux/bug.h>
/*
 * swapcache pages are stored in the swapper_space radix tree.  We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the seven
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits.  Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
/* Bit position of the type field: total bits minus (type + radix-tag bits). */
#define SWP_TYPE_SHIFT(e) ((sizeof(e.val) * 8) - \
			(MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT))
/* Mask covering the right-aligned offset field below the type bits. */
#define SWP_OFFSET_MASK(e) ((1UL << SWP_TYPE_SHIFT(e)) - 1)
  20. /*
  21. * Store a type+offset into a swp_entry_t in an arch-independent format
  22. */
  23. static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
  24. {
  25. swp_entry_t ret;
  26. ret.val = (type << SWP_TYPE_SHIFT(ret)) |
  27. (offset & SWP_OFFSET_MASK(ret));
  28. return ret;
  29. }
  30. /*
  31. * Extract the `type' field from a swp_entry_t. The swp_entry_t is in
  32. * arch-independent format
  33. */
  34. static inline unsigned swp_type(swp_entry_t entry)
  35. {
  36. return (entry.val >> SWP_TYPE_SHIFT(entry));
  37. }
  38. /*
  39. * Extract the `offset' field from a swp_entry_t. The swp_entry_t is in
  40. * arch-independent format
  41. */
  42. static inline pgoff_t swp_offset(swp_entry_t entry)
  43. {
  44. return entry.val & SWP_OFFSET_MASK(entry);
  45. }
  46. #ifdef CONFIG_MMU
  47. /* check whether a pte points to a swap entry */
  48. static inline int is_swap_pte(pte_t pte)
  49. {
  50. return !pte_none(pte) && !pte_present(pte);
  51. }
  52. #endif
/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
	swp_entry_t arch_entry;
	/*
	 * The soft-dirty bit is tracked outside the swap entry proper;
	 * strip it first so only type/offset bits reach the arch decoders.
	 */
	if (pte_swp_soft_dirty(pte))
		pte = pte_swp_clear_soft_dirty(pte);
	arch_entry = __pte_to_swp_entry(pte);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}
  65. /*
  66. * Convert the arch-independent representation of a swp_entry_t into the
  67. * arch-dependent pte representation.
  68. */
  69. static inline pte_t swp_entry_to_pte(swp_entry_t entry)
  70. {
  71. swp_entry_t arch_entry;
  72. arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
  73. return __swp_entry_to_pte(arch_entry);
  74. }
  75. static inline swp_entry_t radix_to_swp_entry(void *arg)
  76. {
  77. swp_entry_t entry;
  78. entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
  79. return entry;
  80. }
  81. static inline void *swp_to_radix_entry(swp_entry_t entry)
  82. {
  83. unsigned long value;
  84. value = entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT;
  85. return (void *)(value | RADIX_TREE_EXCEPTIONAL_ENTRY);
  86. }
  87. #ifdef CONFIG_MIGRATION
  88. static inline swp_entry_t make_migration_entry(struct page *page, int write)
  89. {
  90. BUG_ON(!PageLocked(page));
  91. return swp_entry(write ? SWP_MIGRATION_WRITE : SWP_MIGRATION_READ,
  92. page_to_pfn(page));
  93. }
  94. static inline int is_migration_entry(swp_entry_t entry)
  95. {
  96. return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
  97. swp_type(entry) == SWP_MIGRATION_WRITE);
  98. }
  99. static inline int is_write_migration_entry(swp_entry_t entry)
  100. {
  101. return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
  102. }
/* Recover the struct page a migration entry points at (pfn in its offset). */
static inline struct page *migration_entry_to_page(swp_entry_t entry)
{
	struct page *p = pfn_to_page(swp_offset(entry));
	/*
	 * Any use of migration entries may only occur while the
	 * corresponding page is locked
	 */
	BUG_ON(!PageLocked(p));
	return p;
}
  113. static inline void make_migration_entry_read(swp_entry_t *entry)
  114. {
  115. *entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
  116. }
/*
 * Waiters on migration entries: callers block until the migration entry
 * has been replaced (implementations live outside this header).
 */
extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
					struct mm_struct *mm, pte_t *pte);
  123. #else
/* !CONFIG_MIGRATION: no-op stubs so callers compile without the feature. */
#define make_migration_entry(page, write) swp_entry(0, 0)
static inline int is_migration_entry(swp_entry_t swp)
{
	return 0;
}
#define migration_entry_to_page(swp) NULL
static inline void make_migration_entry_read(swp_entry_t *entryp) { }
static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
					spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
					struct mm_struct *mm, pte_t *pte) { }
static inline int is_write_migration_entry(swp_entry_t entry)
{
	return 0;
}
  141. #endif
  142. #ifdef CONFIG_MEMORY_FAILURE
/* Global count of hardware-poisoned pages (defined elsewhere). */
extern atomic_long_t num_poisoned_pages __read_mostly;
/*
 * Support for hardware poisoned pages
 */
  147. static inline swp_entry_t make_hwpoison_entry(struct page *page)
  148. {
  149. BUG_ON(!PageLocked(page));
  150. return swp_entry(SWP_HWPOISON, page_to_pfn(page));
  151. }
  152. static inline int is_hwpoison_entry(swp_entry_t entry)
  153. {
  154. return swp_type(entry) == SWP_HWPOISON;
  155. }
/* Thin wrapper around TestSetPageHWPoison() for the page-flag test-and-set. */
static inline bool test_set_page_hwpoison(struct page *page)
{
	return TestSetPageHWPoison(page);
}
/* Adjusters for the global poisoned-page counter declared above. */
static inline void num_poisoned_pages_inc(void)
{
	atomic_long_inc(&num_poisoned_pages);
}
static inline void num_poisoned_pages_dec(void)
{
	atomic_long_dec(&num_poisoned_pages);
}
static inline void num_poisoned_pages_add(long num)
{
	atomic_long_add(num, &num_poisoned_pages);
}
static inline void num_poisoned_pages_sub(long num)
{
	atomic_long_sub(num, &num_poisoned_pages);
}
  176. #else
  177. static inline swp_entry_t make_hwpoison_entry(struct page *page)
  178. {
  179. return swp_entry(0, 0);
  180. }
  181. static inline int is_hwpoison_entry(swp_entry_t swp)
  182. {
  183. return 0;
  184. }
  185. static inline bool test_set_page_hwpoison(struct page *page)
  186. {
  187. return false;
  188. }
  189. static inline void num_poisoned_pages_inc(void)
  190. {
  191. }
  192. #endif
#if defined(CONFIG_MEMORY_FAILURE) || defined(CONFIG_MIGRATION)
/*
 * A "non swap" entry reuses the swp_entry_t encoding for something other
 * than a real swap location; such special entries (migration/hwpoison,
 * per the guard above) carry a type outside the real swapfile range.
 */
static inline int non_swap_entry(swp_entry_t entry)
{
	return swp_type(entry) >= MAX_SWAPFILES;
}
#else
/* No special entry types are configured, so every entry is a real swap one. */
static inline int non_swap_entry(swp_entry_t entry)
{
	return 0;
}
#endif
  204. #endif /* _LINUX_SWAPOPS_H */