hugetlb_cgroup.c

/*
 *
 * Copyright IBM Corporation, 2012
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */
#include <linux/cgroup.h>
#include <linux/page_counter.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
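
/*
 * Per-cgroup state for the hugetlb controller: one page counter per
 * supported huge page size (hstate).
 */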
struct hugetlb_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for hugepages from hugetlb.
	 */
	struct page_counter hugepage[HUGE_MAX_HSTATE];
};
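
/*
 * cftype->private for the per-hstate control files packs two values:
 * the hstate index in the high 16 bits and the RES_* attribute in the
 * low 16 bits.
 */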
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

static struct hugetlb_cgroup *root_h_cgroup __read_mostly;
static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}

static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
	return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
}

static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
{
	return (h_cg == root_h_cgroup);
}

static inline struct hugetlb_cgroup *
parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
	return hugetlb_cgroup_from_css(h_cg->css.parent);
}
static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
	int idx;

	for (idx = 0; idx < hugetlb_max_hstate; idx++) {
		if (page_counter_read(&h_cg->hugepage[idx]))
			return true;
	}
	return false;
}
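
/*
 * Allocate the state for a new hugetlb cgroup. Each per-hstate counter is
 * initialised against the parent's counter so that limits apply
 * hierarchically; the first css created (no parent) becomes the root.
 */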
static struct cgroup_subsys_state *
hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
	struct hugetlb_cgroup *h_cgroup;
	int idx;

	h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
	if (!h_cgroup)
		return ERR_PTR(-ENOMEM);

	if (parent_h_cgroup) {
		for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
			page_counter_init(&h_cgroup->hugepage[idx],
					  &parent_h_cgroup->hugepage[idx]);
	} else {
		root_h_cgroup = h_cgroup;
		for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
			page_counter_init(&h_cgroup->hugepage[idx], NULL);
	}
	return &h_cgroup->css;
}
static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cgroup;

	h_cgroup = hugetlb_cgroup_from_css(css);
	kfree(h_cgroup);
}
/*
 * Should be called with hugetlb_lock held.
 * Since we are holding hugetlb_lock, pages cannot get moved off the
 * active list or uncharged from the cgroup, so there is no need to take
 * a page reference and test for page active here. This function
 * cannot fail.
 */
static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
				       struct page *page)
{
	unsigned int nr_pages;
	struct page_counter *counter;
	struct hugetlb_cgroup *page_hcg;
	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);

	page_hcg = hugetlb_cgroup_from_page(page);
	/*
	 * We can have pages on the active list without any cgroup,
	 * i.e. hugepages with fewer than 3 pages. We can safely
	 * ignore those pages.
	 */
	if (!page_hcg || page_hcg != h_cg)
		goto out;

	nr_pages = 1 << compound_order(page);
	if (!parent) {
		parent = root_h_cgroup;
		/* root has no limit */
		page_counter_charge(&parent->hugepage[idx], nr_pages);
	}
	counter = &h_cg->hugepage[idx];
	/* Take the pages off the local counter */
	page_counter_cancel(counter, nr_pages);

	set_hugetlb_cgroup(page, parent);
out:
	return;
}
/*
 * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
 * the parent cgroup.
 */
static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
	struct hstate *h;
	struct page *page;
	int idx = 0;

	do {
		for_each_hstate(h) {
			spin_lock(&hugetlb_lock);
			list_for_each_entry(page, &h->hugepage_activelist, lru)
				hugetlb_cgroup_move_parent(idx, h_cg, page);

			spin_unlock(&hugetlb_lock);
			idx++;
		}
		cond_resched();
	} while (hugetlb_cgroup_have_usage(h_cg));
}
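
/*
 * Charge @nr_pages huge pages of hstate @idx against the current task's
 * hugetlb cgroup and return that cgroup in *@ptr so the caller can later
 * commit the charge to a page. Returns -ENOMEM if the charge would take
 * the counter over its limit.
 */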
int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
				 struct hugetlb_cgroup **ptr)
{
	int ret = 0;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = NULL;

	if (hugetlb_cgroup_disabled())
		goto done;
	/*
	 * We don't charge any cgroup if the compound page has less
	 * than 3 pages.
	 */
	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		goto done;
again:
	rcu_read_lock();
	h_cg = hugetlb_cgroup_from_task(current);
	if (!css_tryget_online(&h_cg->css)) {
		rcu_read_unlock();
		goto again;
	}
	rcu_read_unlock();

	if (!page_counter_try_charge(&h_cg->hugepage[idx], nr_pages, &counter))
		ret = -ENOMEM;
	css_put(&h_cg->css);
done:
	*ptr = h_cg;
	return ret;
}
/* Should be called with hugetlb_lock held */
void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
				  struct page *page)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	set_hugetlb_cgroup(page, h_cg);
	return;
}
/*
 * Should be called with hugetlb_lock held
 */
void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
				  struct page *page)
{
	struct hugetlb_cgroup *h_cg;

	if (hugetlb_cgroup_disabled())
		return;
	lockdep_assert_held(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_page(page);
	if (unlikely(!h_cg))
		return;
	set_hugetlb_cgroup(page, NULL);
	page_counter_uncharge(&h_cg->hugepage[idx], nr_pages);
	return;
}
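
/*
 * Uncharge @nr_pages of hstate @idx directly from @h_cg, for a charge that
 * was never committed to a page.
 */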
void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
				    struct hugetlb_cgroup *h_cg)
{
	if (hugetlb_cgroup_disabled() || !h_cg)
		return;

	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
		return;

	page_counter_uncharge(&h_cg->hugepage[idx], nr_pages);
	return;
}
enum {
	RES_USAGE,
	RES_LIMIT,
	RES_MAX_USAGE,
	RES_FAILCNT,
};
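
/*
 * Read handler shared by all per-hstate control files; cft->private tells
 * us which hstate and which RES_* attribute to report. Everything except
 * the failure count is reported in bytes.
 */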
static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
				   struct cftype *cft)
{
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);

	counter = &h_cg->hugepage[MEMFILE_IDX(cft->private)];

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_LIMIT:
		return (u64)counter->limit * PAGE_SIZE;
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	default:
		BUG();
	}
}
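
/*
 * hugetlb_limit_mutex serialises limit updates; hugetlb_cgroup_write() is
 * the write handler for the <size>.limit_in_bytes files ("-1" clears the
 * limit).
 */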
static DEFINE_MUTEX(hugetlb_limit_mutex);

static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	int ret, idx;
	unsigned long nr_pages;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

	if (hugetlb_cgroup_is_root(h_cg)) /* Can't set limit on root */
		return -EINVAL;

	buf = strstrip(buf);
	ret = page_counter_memparse(buf, "-1", &nr_pages);
	if (ret)
		return ret;

	idx = MEMFILE_IDX(of_cft(of)->private);

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_LIMIT:
		mutex_lock(&hugetlb_limit_mutex);
		ret = page_counter_limit(&h_cg->hugepage[idx], nr_pages);
		mutex_unlock(&hugetlb_limit_mutex);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}
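
/*
 * Write handler for the <size>.max_usage_in_bytes and <size>.failcnt
 * files: any write resets the high-water mark or the failure counter.
 */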
static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	int ret = 0;
	struct page_counter *counter;
	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of));

	counter = &h_cg->hugepage[MEMFILE_IDX(of_cft(of)->private)];

	switch (MEMFILE_ATTR(of_cft(of)->private)) {
	case RES_MAX_USAGE:
		page_counter_reset_watermark(counter);
		break;
	case RES_FAILCNT:
		counter->failcnt = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret ?: nbytes;
}
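
/* Format a huge page size in bytes as a human-readable GB/MB/KB string. */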
static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
	if (hsize >= (1UL << 30))
		snprintf(buf, size, "%luGB", hsize >> 30);
	else if (hsize >= (1UL << 20))
		snprintf(buf, size, "%luMB", hsize >> 20);
	else
		snprintf(buf, size, "%luKB", hsize >> 10);
	return buf;
}
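
/*
 * Create the legacy (cgroup v1) control files for one hstate:
 * <size>.limit_in_bytes, <size>.usage_in_bytes, <size>.max_usage_in_bytes
 * and <size>.failcnt.
 */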
static void __init __hugetlb_cgroup_file_init(int idx)
{
	char buf[32];
	struct cftype *cft;
	struct hstate *h = &hstates[idx];

	/* format the size */
	mem_fmt(buf, 32, huge_page_size(h));

	/* Add the limit file */
	cft = &h->cgroup_files[0];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.limit_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_LIMIT);
	cft->read_u64 = hugetlb_cgroup_read_u64;
	cft->write = hugetlb_cgroup_write;

	/* Add the usage file */
	cft = &h->cgroup_files[1];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_USAGE);
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the MAX usage file */
	cft = &h->cgroup_files[2];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.max_usage_in_bytes", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_MAX_USAGE);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* Add the failcnt file */
	cft = &h->cgroup_files[3];
	snprintf(cft->name, MAX_CFTYPE_NAME, "%s.failcnt", buf);
	cft->private = MEMFILE_PRIVATE(idx, RES_FAILCNT);
	cft->write = hugetlb_cgroup_reset;
	cft->read_u64 = hugetlb_cgroup_read_u64;

	/* NULL terminate the last cft */
	cft = &h->cgroup_files[4];
	memset(cft, 0, sizeof(*cft));

	WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
					  h->cgroup_files));
}
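
/* Register control files for every hstate large enough to be tracked. */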
void __init hugetlb_cgroup_file_init(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/*
		 * Add cgroup control files only if the huge page consists
		 * of more than two normal pages. This is because we use
		 * page[2].private for storing cgroup details.
		 */
		if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
			__hugetlb_cgroup_file_init(hstate_index(h));
	}
}
/*
 * hugetlb_lock will make sure a parallel cgroup rmdir won't happen
 * when we migrate hugepages
 */
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
{
	struct hugetlb_cgroup *h_cg;
	struct hstate *h = page_hstate(oldhpage);

	if (hugetlb_cgroup_disabled())
		return;

	VM_BUG_ON_PAGE(!PageHuge(oldhpage), oldhpage);
	spin_lock(&hugetlb_lock);
	h_cg = hugetlb_cgroup_from_page(oldhpage);
	set_hugetlb_cgroup(oldhpage, NULL);

	/* move the h_cg details to new cgroup */
	set_hugetlb_cgroup(newhpage, h_cg);
	list_move(&newhpage->lru, &h->hugepage_activelist);
	spin_unlock(&hugetlb_lock);
	return;
}
struct cgroup_subsys hugetlb_cgrp_subsys = {
	.css_alloc	= hugetlb_cgroup_css_alloc,
	.css_offline	= hugetlb_cgroup_css_offline,
	.css_free	= hugetlb_cgroup_css_free,
};