/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
	void *val;

	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	rcu_read_unlock();

	return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);

/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
	wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

	wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);

/*
 * wait for a page to finish being written to the cache. Put a timeout here
 * since we might be called recursively via parent fs.
 */
static
bool release_page_wait_timeout(struct fscache_cookie *cookie, struct page *page)
{
	wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

	return wait_event_timeout(*wq, !__fscache_check_page_write(cookie, page),
				  HZ);
}

/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_DIRECT_RECLAIM is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
				  struct page *page,
				  gfp_t gfp)
{
	struct page *xpage;
	void *val;

	_enter("%p,%p,%x", cookie, page, gfp);

try_again:
	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	if (!val) {
		rcu_read_unlock();
		fscache_stat(&fscache_n_store_vmscan_not_storing);
		__fscache_uncache_page(cookie, page);
		return true;
	}

	/* see if the page is actually undergoing storage - if so we can't get
	 * rid of it till the cache has finished with it */
	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		rcu_read_unlock();
		goto page_busy;
	}

	/* the page is pending storage, so we attempt to cancel the store and
	 * discard the store request so that the page can be reclaimed */
	spin_lock(&cookie->stores_lock);
	rcu_read_unlock();
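
	/* Note: the stores_lock is taken before the RCU read lock is
	 * dropped, so there is no window in which the tree goes unprotected
	 * between the lookup above and the re-check below. */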
	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		/* the page started to undergo storage whilst we were looking,
		 * so now we can only wait or return */
		spin_unlock(&cookie->stores_lock);
		goto page_busy;
	}

	xpage = radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);

	if (xpage) {
		fscache_stat(&fscache_n_store_vmscan_cancelled);
		fscache_stat(&fscache_n_store_radix_deletes);
		ASSERTCMP(xpage, ==, page);
	} else {
		fscache_stat(&fscache_n_store_vmscan_gone);
	}

	wake_up_bit(&cookie->flags, 0);
	if (xpage)
		page_cache_release(xpage);
	__fscache_uncache_page(cookie, page);
	return true;

page_busy:
	/* We will wait here if we're allowed to, but that could deadlock the
	 * allocator as the work threads writing to the cache may all end up
	 * sleeping on memory allocation, so we may need to impose a timeout
	 * too. */
	if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS)) {
		fscache_stat(&fscache_n_store_vmscan_busy);
		return false;
	}

	fscache_stat(&fscache_n_store_vmscan_wait);
	if (!release_page_wait_timeout(cookie, page))
		_debug("fscache writeout timeout page: %p{%lx}",
		       page, page->index);
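
	/* Clearing __GFP_DIRECT_RECLAIM means that if the page is still busy
	 * on the retry, the check above fails it immediately instead of
	 * waiting again - so we block for at most one timeout period. */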
	gfp &= ~__GFP_DIRECT_RECLAIM;
	goto try_again;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);

/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
				   struct page *page)
{
	struct fscache_cookie *cookie;
	struct page *xpage = NULL;

	spin_lock(&object->lock);
	cookie = object->cookie;
	if (cookie) {
		/* delete the page from the tree if it is now no longer
		 * pending */
		spin_lock(&cookie->stores_lock);
		radix_tree_tag_clear(&cookie->stores, page->index,
				     FSCACHE_COOKIE_STORING_TAG);
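
		/* if the page was marked PENDING again by a fresh write
		 * whilst it was being stored, leave it in the tree for the
		 * next writer to pick up; otherwise it can be deleted */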
		if (!radix_tree_tag_get(&cookie->stores, page->index,
					FSCACHE_COOKIE_PENDING_TAG)) {
			fscache_stat(&fscache_n_store_radix_deletes);
			xpage = radix_tree_delete(&cookie->stores, page->index);
		}
		spin_unlock(&cookie->stores_lock);
		wake_up_bit(&cookie->flags, 0);
	}
	spin_unlock(&object->lock);
	if (xpage)
		page_cache_release(xpage);
}

/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
	struct fscache_object *object = op->object;
	int ret;

	_enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

	fscache_stat(&fscache_n_attr_changed_calls);

	if (fscache_object_is_active(object)) {
		fscache_stat(&fscache_n_cop_attr_changed);
		ret = object->cache->ops->attr_changed(object);
		fscache_stat_d(&fscache_n_cop_attr_changed);
		if (ret < 0)
			fscache_abort_object(object);
	}

	fscache_op_complete(op, true);
	_leave("");
}

/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	bool wake_cookie = false;

	_enter("%p", cookie);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

	fscache_stat(&fscache_n_attr_changed);

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op) {
		fscache_stat(&fscache_n_attr_changed_nomem);
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	fscache_operation_init(op, fscache_attr_changed_op, NULL, NULL);
	op->flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_EXCLUSIVE) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	if (fscache_submit_exclusive_op(object, op) < 0)
		goto nobufs_dec;
	spin_unlock(&cookie->lock);
	fscache_stat(&fscache_n_attr_changed_ok);
	fscache_put_operation(op);
	_leave(" = 0");
	return 0;

nobufs_dec:
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs:
	spin_unlock(&cookie->lock);
	fscache_put_operation(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_stat(&fscache_n_attr_changed_nobufs);
	_leave(" = %d", -ENOBUFS);
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);

/*
 * Handle cancellation of a pending retrieval op
 */
static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);
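
	/* a cancelled retrieval processes no pages, so the count is zeroed
	 * here; fscache_release_retrieval_op() asserts that it reached zero */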
	atomic_set(&op->n_pages, 0);
}

/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	_enter("{OP%x}", op->op.debug_id);

	ASSERTIFCMP(op->op.state != FSCACHE_OP_ST_INITIALISED,
		    atomic_read(&op->n_pages), ==, 0);

	fscache_hist(fscache_retrieval_histogram, op->start_time);
	if (op->context)
		fscache_put_context(op->cookie, op->context);

	_leave("");
}

/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
	struct fscache_cookie *cookie,
	struct address_space *mapping,
	fscache_rw_complete_t end_io_func,
	void *context)
{
	struct fscache_retrieval *op;

	/* allocate a retrieval operation and attempt to submit it */
	op = kzalloc(sizeof(*op), GFP_NOIO);
	if (!op) {
		fscache_stat(&fscache_n_retrievals_nomem);
		return NULL;
	}

	fscache_operation_init(&op->op, NULL,
			       fscache_do_cancel_retrieval,
			       fscache_release_retrieval_op);
	op->op.flags = FSCACHE_OP_MYTHREAD |
		(1UL << FSCACHE_OP_WAITING) |
		(1UL << FSCACHE_OP_UNUSE_COOKIE);
	op->cookie = cookie;
	op->mapping = mapping;
	op->end_io_func = end_io_func;
	op->context = context;
	op->start_time = jiffies;
	INIT_LIST_HEAD(&op->to_do);

	/* Pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure.
	 */
	if (context)
		fscache_get_context(op->cookie, context);
	return op;
}

/*
 * wait for a deferred lookup to complete
 */
int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
	unsigned long jif;

	_enter("");

	if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
		_leave(" = 0 [imm]");
		return 0;
	}

	fscache_stat(&fscache_n_retrievals_wait);

	jif = jiffies;
	if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
			TASK_INTERRUPTIBLE) != 0) {
		fscache_stat(&fscache_n_retrievals_intr);
		_leave(" = -ERESTARTSYS");
		return -ERESTARTSYS;
	}

	ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

	smp_rmb();
	fscache_hist(fscache_retrieval_delay_histogram, jif);
	_leave(" = 0 [dly]");
	return 0;
}

/*
 * wait for an operation to become active (or for the object to become dead)
 */
int fscache_wait_for_operation_activation(struct fscache_object *object,
					  struct fscache_operation *op,
					  atomic_t *stat_op_waits,
					  atomic_t *stat_object_dead)
{
	int ret;

	if (!test_bit(FSCACHE_OP_WAITING, &op->flags))
		goto check_if_dead;

	_debug(">>> WT");
	if (stat_op_waits)
		fscache_stat(stat_op_waits);
	if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
			TASK_INTERRUPTIBLE) != 0) {
		ret = fscache_cancel_op(op, false);
		if (ret == 0)
			return -ERESTARTSYS;

		/* it's been removed from the pending queue by another party,
		 * so we should get to run shortly */
		wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
			    TASK_UNINTERRUPTIBLE);
	}
	_debug("<<< GO");

check_if_dead:
	if (op->state == FSCACHE_OP_ST_CANCELLED) {
		if (stat_object_dead)
			fscache_stat(stat_object_dead);
		_leave(" = -ENOBUFS [cancelled]");
		return -ENOBUFS;
	}
	if (unlikely(fscache_object_is_dying(object) ||
		     fscache_cache_is_broken(object))) {
		enum fscache_operation_state state = op->state;
		fscache_cancel_op(op, true);
		if (stat_object_dead)
			fscache_stat(stat_object_dead);
		_leave(" = -ENOBUFS [obj dead %d]", state);
		return -ENOBUFS;
	}
	return 0;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS - no backing object available in which to cache the block
 *   -ENODATA - no data available in the backing object for this block
 *   0 - dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
				 struct page *page,
				 fscache_rw_complete_t end_io_func,
				 void *context,
				 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, page->mapping,
				     end_io_func, context);
	if (!op) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}
	atomic_set(&op->n_pages, 1);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags));

	__fscache_use_cookie(cookie);
	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
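	/* if the cookie is flagged as having no data stored yet, skip the
	 * read entirely: just reserve space in the cache and report -ENODATA
	 * so the netfs fetches the page itself */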
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_page);
		ret = object->cache->ops->allocate_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_page);
		if (ret == 0)
			ret = -ENODATA;
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_page);
		ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_put_retrieval(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM - out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS - no backing object or space available in which to cache any
 *              pages not being read
 *   -ENODATA - no data available in the backing object for some or all of
 *              the pages
 *   0 - dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages,
				  fscache_rw_complete_t end_io_func,
				  void *context,
				  gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,,%d,,,", cookie, *nr_pages);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(*nr_pages, >, 0);
	ASSERT(!list_empty(pages));

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context);
	if (!op)
		return -ENOMEM;
	atomic_set(&op->n_pages, *nr_pages);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_pages);
		ret = object->cache->ops->allocate_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_pages);
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_pages);
		ret = object->cache->ops->read_or_alloc_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	fscache_put_retrieval(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);

/*
 * allocate a block in the cache on which to store a page
 * - we return:
 *   -ENOMEM - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS - no backing object available in which to cache the block
 *   0 - block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_allocs);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL);
	if (!op)
		return -ENOMEM;
	atomic_set(&op->n_pages, 1);

	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	__fscache_use_cookie(cookie);
	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_alloc_ops);

	ret = fscache_wait_for_operation_activation(
		object, &op->op,
		__fscache_stat(&fscache_n_alloc_op_waits),
		__fscache_stat(&fscache_n_allocs_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	fscache_stat(&fscache_n_cop_allocate_page);
	ret = object->cache->ops->allocate_page(op, page, gfp);
	fscache_stat_d(&fscache_n_cop_allocate_page);

error:
	if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_allocs_intr);
	else if (ret < 0)
		fscache_stat(&fscache_n_allocs_nobufs);
	else
		fscache_stat(&fscache_n_allocs_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	fscache_put_retrieval(op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
nobufs:
	fscache_stat(&fscache_n_allocs_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);

/*
 * Unmark pages allocated in the readahead code path (via:
 * fscache_readpages_or_alloc) after delegating to the base filesystem
 */
void __fscache_readpages_cancel(struct fscache_cookie *cookie,
				struct list_head *pages)
{
	struct page *page;

	list_for_each_entry(page, pages, lru) {
		if (PageFsCache(page))
			__fscache_uncache_page(cookie, page);
	}
}
EXPORT_SYMBOL(__fscache_readpages_cancel);

/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
	_enter("{OP%x}", _op->debug_id);
}

/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
	struct fscache_storage *op =
		container_of(_op, struct fscache_storage, op);
	struct fscache_object *object = op->op.object;
	struct fscache_cookie *cookie;
	struct page *page;
	unsigned n;
	void *results[1];
	int ret;

	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

again:
	spin_lock(&object->lock);
	cookie = object->cookie;

	if (!fscache_object_is_active(object)) {
		/* If we get here, then the on-disk cache object likely no
		 * longer exists, so we should just cancel this write
		 * operation.
		 */
		spin_unlock(&object->lock);
		fscache_op_complete(&op->op, false);
		_leave(" [inactive]");
		return;
	}

	if (!cookie) {
		/* If we get here, then the cookie belonging to the object was
		 * detached, probably by the cookie being withdrawn due to
		 * memory pressure, which means that the pages from which we
		 * might write to the cache no longer exist - therefore, we
		 * can just cancel this write operation.
		 */
		spin_unlock(&object->lock);
		fscache_op_complete(&op->op, false);
		_leave(" [cancel] op{f=%lx s=%u} obj{s=%s f=%lx}",
		       _op->flags, _op->state, object->state->short_name,
		       object->flags);
		return;
	}

	spin_lock(&cookie->stores_lock);

	fscache_stat(&fscache_n_store_calls);

	/* find a page to store */
	page = NULL;
	n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
				       FSCACHE_COOKIE_PENDING_TAG);
	if (n != 1)
		goto superseded;
	page = results[0];
	_debug("gang %d [%lx]", n, page->index);

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_STORING_TAG);
	radix_tree_tag_clear(&cookie->stores, page->index,
			     FSCACHE_COOKIE_PENDING_TAG);
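
	/* the page is now tagged STORING rather than PENDING, so a
	 * concurrent __fscache_maybe_release_page() must wait or back off
	 * rather than cancel the store */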
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	if (page->index >= op->store_limit)
		goto discard_page;

	fscache_stat(&fscache_n_store_pages);
	fscache_stat(&fscache_n_cop_write_page);
	ret = object->cache->ops->write_page(op, page);
	fscache_stat_d(&fscache_n_cop_write_page);
	fscache_end_page_write(object, page);
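
	/* on error, abort the whole object; on success, re-queue the op so
	 * that this function runs again for the next PENDING page (or takes
	 * the superseded path once none remain) */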
	if (ret < 0) {
		fscache_abort_object(object);
		fscache_op_complete(&op->op, true);
	} else {
		fscache_enqueue_operation(&op->op);
	}

	_leave("");
	return;

discard_page:
	fscache_stat(&fscache_n_store_pages_over_limit);
	fscache_end_page_write(object, page);
	goto again;

superseded:
	/* this writer is going away and there aren't any more things to
	 * write */
	_debug("cease");
	spin_unlock(&cookie->stores_lock);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	spin_unlock(&object->lock);
	fscache_op_complete(&op->op, true);
	_leave("");
}

/*
 * Clear the pages pending writing for invalidation
 */
void fscache_invalidate_writes(struct fscache_cookie *cookie)
{
	struct page *page;
	void *results[16];
	int n, i;

	_enter("");

	for (;;) {
		spin_lock(&cookie->stores_lock);
		n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
					       ARRAY_SIZE(results),
					       FSCACHE_COOKIE_PENDING_TAG);
		if (n == 0) {
			spin_unlock(&cookie->stores_lock);
			break;
		}

		for (i = n - 1; i >= 0; i--) {
			page = results[i];
			radix_tree_delete(&cookie->stores, page->index);
		}

		spin_unlock(&cookie->stores_lock);
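
		/* release, outside the lock, the references that the stores
		 * tree held on the pages just removed */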
		for (i = n - 1; i >= 0; i--)
			page_cache_release(results[i]);
	}

	_leave("");
}

/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM - out of memory, nothing done
 *   -ENOBUFS - no backing object available in which to cache the page
 *   0 - dispatched a write - it'll call end_io_func() when finished
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *      (a) no writes yet
 *
 *      (b) writes deferred till post-creation (mark page for writing and
 *          return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *
 *      (a) fill point not yet reached this page (mark page for writing and
 *          return)
 *
 *      (b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_storage *op;
	struct fscache_object *object;
	bool wake_cookie = false;
	int ret;

	_enter("%p,%x,", cookie, (u32) page->flags);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERT(PageFsCache(page));

	fscache_stat(&fscache_n_stores);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
	if (!op)
		goto nomem;

	fscache_operation_init(&op->op, fscache_write_op, NULL,
			       fscache_release_write_op);
	op->op.flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_WAITING) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);
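
	/* preload the radix tree outside the locks so that the insertion
	 * below needn't allocate; note that this only preallocates nodes if
	 * the gfp mask allows sleeping */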
	ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
	if (ret < 0)
		goto nomem_free;

	ret = -ENOBUFS;
	spin_lock(&cookie->lock);

	if (!fscache_cookie_enabled(cookie) ||
	    hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);
	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
		goto nobufs;

	/* add the page to the pending-storage radix tree on the backing
	 * object */
	spin_lock(&object->lock);
	spin_lock(&cookie->stores_lock);

	_debug("store limit %llx", (unsigned long long) object->store_limit);

	ret = radix_tree_insert(&cookie->stores, page->index, page);
	if (ret < 0) {
		if (ret == -EEXIST)
			goto already_queued;
		_debug("insert failed %d", ret);
		goto nobufs_unlock_obj;
	}

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_PENDING_TAG);
	page_cache_get(page);

	/* we only want one writer at a time, but we do need to queue new
	 * writers after exclusive ops */
	if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
		goto already_pending;

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
	op->store_limit = object->store_limit;

	__fscache_use_cookie(cookie);
	if (fscache_submit_op(object, &op->op) < 0)
		goto submit_failed;

	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_stat(&fscache_n_store_ops);
	fscache_stat(&fscache_n_stores_ok);

	/* the work queue now carries its own ref on the object */
	fscache_put_operation(&op->op);
	_leave(" = 0");
	return 0;

already_queued:
	fscache_stat(&fscache_n_stores_again);
already_pending:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_put_operation(&op->op);
	fscache_stat(&fscache_n_stores_ok);
	_leave(" = 0");
	return 0;

submit_failed:
	spin_lock(&cookie->stores_lock);
	radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);
	wake_cookie = __fscache_unuse_cookie(cookie);
	page_cache_release(page);
	ret = -ENOBUFS;
	goto nobufs;

nobufs_unlock_obj:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
nobufs:
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_put_operation(&op->op);
	if (wake_cookie)
		__fscache_wake_unused_cookie(cookie);
	fscache_stat(&fscache_n_stores_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;

nomem_free:
	fscache_put_operation(&op->op);
nomem:
	fscache_stat(&fscache_n_stores_oom);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);

/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
	struct fscache_object *object;

	_enter(",%p", page);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	fscache_stat(&fscache_n_uncaches);

	/* cache withdrawal may beat us to it */
	if (!PageFsCache(page))
		goto done;

	/* get the object */
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects)) {
		ClearPageFsCache(page);
		goto done_unlock;
	}

	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	/* there might now be stuff on disk we could read */
	clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* only invoke the cache backend if we managed to mark the page
	 * uncached here; this deals with synchronisation vs withdrawal */
	if (TestClearPageFsCache(page) &&
	    object->cache->ops->uncache_page) {
		/* the cache backend releases the cookie lock */
		fscache_stat(&fscache_n_cop_uncache_page);
		object->cache->ops->uncache_page(object, page);
		fscache_stat_d(&fscache_n_cop_uncache_page);
		goto done;
	}

done_unlock:
	spin_unlock(&cookie->lock);
done:
	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);

/**
 * fscache_mark_page_cached - Mark a page as being cached
 * @op: The retrieval op pages are being marked for
 * @page: The page to be marked
 *
 * Mark a netfs page as being cached. After this is called, the netfs
 * must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
{
	struct fscache_cookie *cookie = op->op.object->cookie;

#ifdef CONFIG_FSCACHE_STATS
	atomic_inc(&fscache_n_marks);
#endif

	_debug("- mark %p{%lx}", page, page->index);
	if (TestSetPageFsCache(page)) {
		static bool once_only;
		if (!once_only) {
			once_only = true;
			pr_warn("Cookie type %s marked page %lx multiple times\n",
				cookie->def->name, page->index);
		}
	}

	if (cookie->def->mark_page_cached)
		cookie->def->mark_page_cached(cookie->netfs_data,
					      op->mapping, page);
}
EXPORT_SYMBOL(fscache_mark_page_cached);

/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached. After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
			       struct pagevec *pagevec)
{
	unsigned long loop;

	for (loop = 0; loop < pagevec->nr; loop++)
		fscache_mark_page_cached(op, pagevec->pages[loop]);

	pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);

/*
 * Uncache all the pages in an inode that are marked PG_fscache, assuming them
 * to be associated with the given cookie.
 */
void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
				       struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t next;
	int i;

	_enter("%p,%p", cookie, inode);

	if (!mapping || mapping->nrpages == 0) {
		_leave(" [no pages]");
		return;
	}

	pagevec_init(&pvec, 0);
	next = 0;
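
	/* walk the mapping one pagevec at a time; next tracks the last index
	 * seen, and the loop stops when a lookup finds no more pages (or
	 * ++next wraps to zero at the end of the index space) */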
	do {
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			next = page->index;
			if (PageFsCache(page)) {
				__fscache_wait_on_page_write(cookie, page);
				__fscache_uncache_page(cookie, page);
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	} while (++next);

	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);