balloon_compaction.c
  1. /*
  2. * mm/balloon_compaction.c
  3. *
  4. * Common interface for making balloon pages movable by compaction.
  5. *
  6. * Copyright (C) 2012, Red Hat, Inc. Rafael Aquini <aquini@redhat.com>
  7. */
  8. #include <linux/mm.h>
  9. #include <linux/slab.h>
  10. #include <linux/export.h>
  11. #include <linux/balloon_compaction.h>
  12. /*
  13. * balloon_page_enqueue - allocates a new page and inserts it into the balloon
  14. * page list.
  15. * @b_dev_info: balloon device decriptor where we will insert a new page to
  16. *
  17. * Driver must call it to properly allocate a new enlisted balloon page
  18. * before definetively removing it from the guest system.
  19. * This function returns the page address for the recently enqueued page or
  20. * NULL in the case we fail to allocate a new page this turn.
  21. */
  22. struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info)
  23. {
  24. unsigned long flags;
  25. struct page *page = alloc_page(balloon_mapping_gfp_mask() |
  26. __GFP_NOMEMALLOC | __GFP_NORETRY);
  27. if (!page)
  28. return NULL;
  29. /*
  30. * Block others from accessing the 'page' when we get around to
  31. * establishing additional references. We should be the only one
  32. * holding a reference to the 'page' at this point.
  33. */
  34. BUG_ON(!trylock_page(page));
  35. spin_lock_irqsave(&b_dev_info->pages_lock, flags);
  36. balloon_page_insert(b_dev_info, page);
  37. __count_vm_event(BALLOON_INFLATE);
  38. spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
  39. unlock_page(page);
  40. return page;
  41. }
  42. EXPORT_SYMBOL_GPL(balloon_page_enqueue);
  43. /*
  44. * balloon_page_dequeue - removes a page from balloon's page list and returns
  45. * the its address to allow the driver release the page.
  46. * @b_dev_info: balloon device decriptor where we will grab a page from.
  47. *
  48. * Driver must call it to properly de-allocate a previous enlisted balloon page
  49. * before definetively releasing it back to the guest system.
  50. * This function returns the page address for the recently dequeued page or
  51. * NULL in the case we find balloon's page list temporarily empty due to
  52. * compaction isolated pages.
  53. */
  54. struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
  55. {
  56. struct page *page, *tmp;
  57. unsigned long flags;
  58. bool dequeued_page;
  59. dequeued_page = false;
  60. spin_lock_irqsave(&b_dev_info->pages_lock, flags);
  61. list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
  62. /*
  63. * Block others from accessing the 'page' while we get around
  64. * establishing additional references and preparing the 'page'
  65. * to be released by the balloon driver.
  66. */
  67. if (trylock_page(page)) {
  68. #ifdef CONFIG_BALLOON_COMPACTION
  69. if (!PagePrivate(page)) {
  70. /* raced with isolation */
  71. unlock_page(page);
  72. continue;
  73. }
  74. #endif
  75. balloon_page_delete(page);
  76. __count_vm_event(BALLOON_DEFLATE);
  77. unlock_page(page);
  78. dequeued_page = true;
  79. break;
  80. }
  81. }
  82. spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
  83. if (!dequeued_page) {
  84. /*
  85. * If we are unable to dequeue a balloon page because the page
  86. * list is empty and there is no isolated pages, then something
  87. * went out of track and some balloon pages are lost.
  88. * BUG() here, otherwise the balloon driver may get stuck into
  89. * an infinite loop while attempting to release all its pages.
  90. */
  91. spin_lock_irqsave(&b_dev_info->pages_lock, flags);
  92. if (unlikely(list_empty(&b_dev_info->pages) &&
  93. !b_dev_info->isolated_pages))
  94. BUG();
  95. spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
  96. page = NULL;
  97. }
  98. return page;
  99. }
  100. EXPORT_SYMBOL_GPL(balloon_page_dequeue);
  101. #ifdef CONFIG_BALLOON_COMPACTION
  102. static inline void __isolate_balloon_page(struct page *page)
  103. {
  104. struct balloon_dev_info *b_dev_info = balloon_page_device(page);
  105. unsigned long flags;
  106. spin_lock_irqsave(&b_dev_info->pages_lock, flags);
  107. ClearPagePrivate(page);
  108. list_del(&page->lru);
  109. b_dev_info->isolated_pages++;
  110. spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
  111. }
  112. static inline void __putback_balloon_page(struct page *page)
  113. {
  114. struct balloon_dev_info *b_dev_info = balloon_page_device(page);
  115. unsigned long flags;
  116. spin_lock_irqsave(&b_dev_info->pages_lock, flags);
  117. SetPagePrivate(page);
  118. list_add(&page->lru, &b_dev_info->pages);
  119. b_dev_info->isolated_pages--;
  120. spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
  121. }
  122. /* __isolate_lru_page() counterpart for a ballooned page */
  123. bool balloon_page_isolate(struct page *page)
  124. {
  125. /*
  126. * Avoid burning cycles with pages that are yet under __free_pages(),
  127. * or just got freed under us.
  128. *
  129. * In case we 'win' a race for a balloon page being freed under us and
  130. * raise its refcount preventing __free_pages() from doing its job
  131. * the put_page() at the end of this block will take care of
  132. * release this page, thus avoiding a nasty leakage.
  133. */
  134. if (likely(get_page_unless_zero(page))) {
  135. /*
  136. * As balloon pages are not isolated from LRU lists, concurrent
  137. * compaction threads can race against page migration functions
  138. * as well as race against the balloon driver releasing a page.
  139. *
  140. * In order to avoid having an already isolated balloon page
  141. * being (wrongly) re-isolated while it is under migration,
  142. * or to avoid attempting to isolate pages being released by
  143. * the balloon driver, lets be sure we have the page lock
  144. * before proceeding with the balloon page isolation steps.
  145. */
  146. if (likely(trylock_page(page))) {
  147. /*
  148. * A ballooned page, by default, has PagePrivate set.
  149. * Prevent concurrent compaction threads from isolating
  150. * an already isolated balloon page by clearing it.
  151. */
  152. if (balloon_page_movable(page)) {
  153. __isolate_balloon_page(page);
  154. unlock_page(page);
  155. return true;
  156. }
  157. unlock_page(page);
  158. }
  159. put_page(page);
  160. }
  161. return false;
  162. }
  163. /* putback_lru_page() counterpart for a ballooned page */
  164. void balloon_page_putback(struct page *page)
  165. {
  166. /*
  167. * 'lock_page()' stabilizes the page and prevents races against
  168. * concurrent isolation threads attempting to re-isolate it.
  169. */
  170. lock_page(page);
  171. if (__is_movable_balloon_page(page)) {
  172. __putback_balloon_page(page);
  173. /* drop the extra ref count taken for page isolation */
  174. put_page(page);
  175. } else {
  176. WARN_ON(1);
  177. dump_page(page, "not movable balloon page");
  178. }
  179. unlock_page(page);
  180. }
  181. /* move_to_new_page() counterpart for a ballooned page */
  182. int balloon_page_migrate(struct page *newpage,
  183. struct page *page, enum migrate_mode mode)
  184. {
  185. struct balloon_dev_info *balloon = balloon_page_device(page);
  186. int rc = -EAGAIN;
  187. VM_BUG_ON_PAGE(!PageLocked(page), page);
  188. VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
  189. if (WARN_ON(!__is_movable_balloon_page(page))) {
  190. dump_page(page, "not movable balloon page");
  191. return rc;
  192. }
  193. if (balloon && balloon->migratepage)
  194. rc = balloon->migratepage(balloon, newpage, page, mode);
  195. return rc;
  196. }
  197. #endif /* CONFIG_BALLOON_COMPACTION */