/* FS-Cache object state machine handler
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/object.txt for a description of the
 * object state machine and the in-kernel representations.
 */

#define FSCACHE_DEBUG_LEVEL COOKIE
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include "internal.h"

static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *, int);
static const struct fscache_state *fscache_kill_dependents(struct fscache_object *, int);
static const struct fscache_state *fscache_drop_object(struct fscache_object *, int);
static const struct fscache_state *fscache_initialise_object(struct fscache_object *, int);
static const struct fscache_state *fscache_invalidate_object(struct fscache_object *, int);
static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *, int);
static const struct fscache_state *fscache_kill_object(struct fscache_object *, int);
static const struct fscache_state *fscache_lookup_failure(struct fscache_object *, int);
static const struct fscache_state *fscache_look_up_object(struct fscache_object *, int);
static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
static const struct fscache_state *fscache_update_object(struct fscache_object *, int);
static const struct fscache_state *fscache_object_dead(struct fscache_object *, int);

#define __STATE_NAME(n) fscache_osm_##n
#define STATE(n) (&__STATE_NAME(n))

/*
 * Define a work state.  Work states are execution states.  No event processing
 * is performed by them.  The function attached to a work state returns a
 * pointer indicating the next state to which the state machine should
 * transition.  Returning NO_TRANSIT repeats the current state, but goes back
 * to the scheduler first.
 */
#define WORK_STATE(n, sn, f) \
        const struct fscache_state __STATE_NAME(n) = { \
                .name = #n, \
                .short_name = sn, \
                .work = f \
        }

/*
 * Returns from work states.
 */
#define transit_to(state) ({ prefetch(&STATE(state)->work); STATE(state); })

#define NO_TRANSIT ((struct fscache_state *)NULL)

/*
 * Define a wait state.  Wait states are event processing states.  No execution
 * is performed by them.  Wait states are just tables of "if event X occurs,
 * clear it and transition to state Y".  The dispatcher returns to the
 * scheduler if none of the events in which the wait state has an interest are
 * currently pending.
 */
#define WAIT_STATE(n, sn, ...) \
        const struct fscache_state __STATE_NAME(n) = { \
                .name = #n, \
                .short_name = sn, \
                .work = NULL, \
                .transitions = { __VA_ARGS__, { 0, NULL } } \
        }

#define TRANSIT_TO(state, emask) \
        { .events = (emask), .transit_to = STATE(state) }

/*
 * The object state machine.
 */
static WORK_STATE(INIT_OBJECT,       "INIT", fscache_initialise_object);
static WORK_STATE(PARENT_READY,      "PRDY", fscache_parent_ready);
static WORK_STATE(ABORT_INIT,        "ABRT", fscache_abort_initialisation);
static WORK_STATE(LOOK_UP_OBJECT,    "LOOK", fscache_look_up_object);
static WORK_STATE(CREATE_OBJECT,     "CRTO", fscache_look_up_object);
static WORK_STATE(OBJECT_AVAILABLE,  "AVBL", fscache_object_available);
static WORK_STATE(JUMPSTART_DEPS,    "JUMP", fscache_jumpstart_dependents);

static WORK_STATE(INVALIDATE_OBJECT, "INVL", fscache_invalidate_object);
static WORK_STATE(UPDATE_OBJECT,     "UPDT", fscache_update_object);

static WORK_STATE(LOOKUP_FAILURE,    "LCFL", fscache_lookup_failure);
static WORK_STATE(KILL_OBJECT,       "KILL", fscache_kill_object);
static WORK_STATE(KILL_DEPENDENTS,   "KDEP", fscache_kill_dependents);
static WORK_STATE(DROP_OBJECT,       "DROP", fscache_drop_object);
static WORK_STATE(OBJECT_DEAD,       "DEAD", fscache_object_dead);

static WAIT_STATE(WAIT_FOR_INIT,     "?INI",
                  TRANSIT_TO(INIT_OBJECT, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));

static WAIT_STATE(WAIT_FOR_PARENT,   "?PRN",
                  TRANSIT_TO(PARENT_READY, 1 << FSCACHE_OBJECT_EV_PARENT_READY));

static WAIT_STATE(WAIT_FOR_CMD,      "?CMD",
                  TRANSIT_TO(INVALIDATE_OBJECT, 1 << FSCACHE_OBJECT_EV_INVALIDATE),
                  TRANSIT_TO(UPDATE_OBJECT, 1 << FSCACHE_OBJECT_EV_UPDATE),
                  TRANSIT_TO(JUMPSTART_DEPS, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));

static WAIT_STATE(WAIT_FOR_CLEARANCE, "?CLR",
                  TRANSIT_TO(KILL_OBJECT, 1 << FSCACHE_OBJECT_EV_CLEARED));
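
/*
 * Illustrative only (not part of this file's logic): an object parked in a
 * wait state is moved on by raising an event listed in that state's
 * transition table.  For example, a netfs-triggered update of an available
 * object goes roughly like this:
 *
 *      fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE);
 *      // WAIT_FOR_CMD's table above routes the dispatcher to UPDATE_OBJECT
 */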

/*
 * Out-of-band event transition tables.  These are for handling unexpected
 * events, such as an I/O error.  If an OOB event occurs, the state machine
 * clears and disables the event and forces a transition to the nominated work
 * state (any currently executing work state will complete first).
 *
 * In such a situation, object->state remembers the state the machine should
 * have been in/gone to and returning NO_TRANSIT returns to that.
 */
static const struct fscache_transition fscache_osm_init_oob[] = {
        TRANSIT_TO(ABORT_INIT,
                   (1 << FSCACHE_OBJECT_EV_ERROR) |
                   (1 << FSCACHE_OBJECT_EV_KILL)),
        { 0, NULL }
};

static const struct fscache_transition fscache_osm_lookup_oob[] = {
        TRANSIT_TO(LOOKUP_FAILURE,
                   (1 << FSCACHE_OBJECT_EV_ERROR) |
                   (1 << FSCACHE_OBJECT_EV_KILL)),
        { 0, NULL }
};

static const struct fscache_transition fscache_osm_run_oob[] = {
        TRANSIT_TO(KILL_OBJECT,
                   (1 << FSCACHE_OBJECT_EV_ERROR) |
                   (1 << FSCACHE_OBJECT_EV_KILL)),
        { 0, NULL }
};

static int  fscache_get_object(struct fscache_object *);
static void fscache_put_object(struct fscache_object *);
static bool fscache_enqueue_dependents(struct fscache_object *, int);
static void fscache_dequeue_object(struct fscache_object *);

/*
 * we need to notify the parent when an op completes that we had outstanding
 * upon it
 */
static inline void fscache_done_parent_op(struct fscache_object *object)
{
        struct fscache_object *parent = object->parent;

        _enter("OBJ%x {OBJ%x,%x}",
               object->debug_id, parent->debug_id, parent->n_ops);

        spin_lock_nested(&parent->lock, 1);
        parent->n_obj_ops--;
        parent->n_ops--;
        if (parent->n_ops == 0)
                fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
        spin_unlock(&parent->lock);
}

/*
 * Object state machine dispatcher.
 */
static void fscache_object_sm_dispatcher(struct fscache_object *object)
{
        const struct fscache_transition *t;
        const struct fscache_state *state, *new_state;
        unsigned long events, event_mask;
        int event = -1;

        ASSERT(object != NULL);

        _enter("{OBJ%x,%s,%lx}",
               object->debug_id, object->state->name, object->events);

        event_mask = object->event_mask;

restart:
        object->event_mask = 0; /* Mask normal event handling */
        state = object->state;
restart_masked:
        events = object->events;

        /* Handle any out-of-band events (typically an error) */
        if (events & object->oob_event_mask) {
                _debug("{OBJ%x} oob %lx",
                       object->debug_id, events & object->oob_event_mask);
                for (t = object->oob_table; t->events; t++) {
                        if (events & t->events) {
                                state = t->transit_to;
                                ASSERT(state->work != NULL);
                                event = fls(events & t->events) - 1;
                                __clear_bit(event, &object->oob_event_mask);
                                clear_bit(event, &object->events);
                                goto execute_work_state;
                        }
                }
        }

        /* Wait states are just transition tables */
        if (!state->work) {
                if (events & event_mask) {
                        for (t = state->transitions; t->events; t++) {
                                if (events & t->events) {
                                        new_state = t->transit_to;
                                        event = fls(events & t->events) - 1;
                                        clear_bit(event, &object->events);
                                        _debug("{OBJ%x} ev %d: %s -> %s",
                                               object->debug_id, event,
                                               state->name, new_state->name);
                                        object->state = state = new_state;
                                        goto execute_work_state;
                                }
                        }

                        /* The event mask didn't include all the tabled bits */
                        BUG();
                }
                /* Randomly woke up */
                goto unmask_events;
        }

execute_work_state:
        _debug("{OBJ%x} exec %s", object->debug_id, state->name);

        new_state = state->work(object, event);
        event = -1;
        if (new_state == NO_TRANSIT) {
                _debug("{OBJ%x} %s notrans", object->debug_id, state->name);
                if (unlikely(state == STATE(OBJECT_DEAD))) {
                        _leave(" [dead]");
                        return;
                }
                fscache_enqueue_object(object);
                event_mask = object->oob_event_mask;
                goto unmask_events;
        }

        _debug("{OBJ%x} %s -> %s",
               object->debug_id, state->name, new_state->name);
        object->state = state = new_state;

        if (state->work) {
                if (unlikely(state == STATE(OBJECT_DEAD))) {
                        _leave(" [dead]");
                        return;
                }
                goto restart_masked;
        }

        /* Transited to wait state */
        event_mask = object->oob_event_mask;
        for (t = state->transitions; t->events; t++)
                event_mask |= t->events;

unmask_events:
        object->event_mask = event_mask;
        smp_mb();
        events = object->events;
        if (events & event_mask)
                goto restart;
        _leave(" [msk %lx]", event_mask);
}
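
/*
 * For reference (illustrative, defined elsewhere): events reach the
 * dispatcher above via fscache_raise_event(), which in this kernel's
 * fscache-cache.h does roughly:
 *
 *      if (!test_and_set_bit(event, &object->events) &&
 *          test_bit(event, &object->event_mask))
 *              fscache_enqueue_object(object);
 *
 * i.e. only events currently listed in object->event_mask cause the object
 * to be queued for fscache_object_work_func() below.
 */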

/*
 * execute an object
 */
static void fscache_object_work_func(struct work_struct *work)
{
        struct fscache_object *object =
                container_of(work, struct fscache_object, work);
        unsigned long start;

        _enter("{OBJ%x}", object->debug_id);

        start = jiffies;
        fscache_object_sm_dispatcher(object);
        fscache_hist(fscache_objs_histogram, start);
        fscache_put_object(object);
}

/**
 * fscache_object_init - Initialise a cache object description
 * @object: Object description
 * @cookie: Cookie object will be attached to
 * @cache: Cache in which backing object will be found
 *
 * Initialise a cache object description to its basic values.
 *
 * See Documentation/filesystems/caching/backend-api.txt for a complete
 * description.
 */
void fscache_object_init(struct fscache_object *object,
                         struct fscache_cookie *cookie,
                         struct fscache_cache *cache)
{
        const struct fscache_transition *t;

        atomic_inc(&cache->object_count);

        object->state = STATE(WAIT_FOR_INIT);
        object->oob_table = fscache_osm_init_oob;
        object->flags = 1 << FSCACHE_OBJECT_IS_LIVE;
        spin_lock_init(&object->lock);
        INIT_LIST_HEAD(&object->cache_link);
        INIT_HLIST_NODE(&object->cookie_link);
        INIT_WORK(&object->work, fscache_object_work_func);
        INIT_LIST_HEAD(&object->dependents);
        INIT_LIST_HEAD(&object->dep_link);
        INIT_LIST_HEAD(&object->pending_ops);
        object->n_children = 0;
        object->n_ops = object->n_in_progress = object->n_exclusive = 0;
        object->events = 0;
        object->store_limit = 0;
        object->store_limit_l = 0;
        object->cache = cache;
        object->cookie = cookie;
        object->parent = NULL;
#ifdef CONFIG_FSCACHE_OBJECT_LIST
        RB_CLEAR_NODE(&object->objlist_link);
#endif

        object->oob_event_mask = 0;
        for (t = object->oob_table; t->events; t++)
                object->oob_event_mask |= t->events;
        object->event_mask = object->oob_event_mask;
        for (t = object->state->transitions; t->events; t++)
                object->event_mask |= t->events;
}
EXPORT_SYMBOL(fscache_object_init);
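
/*
 * Illustrative sketch only: a cache backend would typically embed a struct
 * fscache_object in its own object type and call fscache_object_init() from
 * its ->alloc_object() op.  The myfs_* names below are hypothetical.
 *
 *      struct myfs_object {
 *              struct fscache_object fscache;
 *              // backend-private fields ...
 *      };
 *
 *      static struct fscache_object *myfs_alloc_object(
 *              struct fscache_cache *cache, struct fscache_cookie *cookie)
 *      {
 *              struct myfs_object *obj;
 *
 *              obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *              if (!obj)
 *                      return NULL;
 *              fscache_object_init(&obj->fscache, cookie, cache);
 *              return &obj->fscache;
 *      }
 */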

/*
 * Mark the object as no longer being live, making sure that we synchronise
 * against op submission.
 */
static inline void fscache_mark_object_dead(struct fscache_object *object)
{
        spin_lock(&object->lock);
        clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
        spin_unlock(&object->lock);
}

/*
 * Abort object initialisation before we start it.
 */
static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *object,
                                                                int event)
{
        _enter("{OBJ%x},%d", object->debug_id, event);

        object->oob_event_mask = 0;
        fscache_dequeue_object(object);
        return transit_to(KILL_OBJECT);
}

/*
 * initialise an object
 * - check the specified object's parent to see if we can make use of it
 *   immediately to do a creation
 * - we may need to start the process of creating a parent and we need to wait
 *   for the parent's lookup and creation to complete if it's not there yet
 */
static const struct fscache_state *fscache_initialise_object(struct fscache_object *object,
                                                             int event)
{
        struct fscache_object *parent;
        bool success;

        _enter("{OBJ%x},%d", object->debug_id, event);

        ASSERT(list_empty(&object->dep_link));

        parent = object->parent;
        if (!parent) {
                _leave(" [no parent]");
                return transit_to(DROP_OBJECT);
        }

        _debug("parent: %s of:%lx", parent->state->name, parent->flags);

        if (fscache_object_is_dying(parent)) {
                _leave(" [bad parent]");
                return transit_to(DROP_OBJECT);
        }

        if (fscache_object_is_available(parent)) {
                _leave(" [ready]");
                return transit_to(PARENT_READY);
        }

        _debug("wait");

        spin_lock(&parent->lock);
        fscache_stat(&fscache_n_cop_grab_object);
        success = false;
        if (fscache_object_is_live(parent) &&
            object->cache->ops->grab_object(object)) {
                list_add(&object->dep_link, &parent->dependents);
                success = true;
        }
        fscache_stat_d(&fscache_n_cop_grab_object);
        spin_unlock(&parent->lock);
        if (!success) {
                _leave(" [grab failed]");
                return transit_to(DROP_OBJECT);
        }

        /* fscache_acquire_non_index_cookie() uses this
         * to wake the chain up */
        fscache_raise_event(parent, FSCACHE_OBJECT_EV_NEW_CHILD);
        _leave(" [wait]");
        return transit_to(WAIT_FOR_PARENT);
}

/*
 * Once the parent object is ready, we should kick off our lookup op.
 */
static const struct fscache_state *fscache_parent_ready(struct fscache_object *object,
                                                        int event)
{
        struct fscache_object *parent = object->parent;

        _enter("{OBJ%x},%d", object->debug_id, event);

        ASSERT(parent != NULL);

        spin_lock(&parent->lock);
        parent->n_ops++;
        parent->n_obj_ops++;
        object->lookup_jif = jiffies;
        spin_unlock(&parent->lock);

        _leave("");
        return transit_to(LOOK_UP_OBJECT);
}

/*
 * look an object up in the cache from which it was allocated
 * - we hold an "access lock" on the parent object, so the parent object cannot
 *   be withdrawn by either party till we've finished
 */
static const struct fscache_state *fscache_look_up_object(struct fscache_object *object,
                                                          int event)
{
        struct fscache_cookie *cookie = object->cookie;
        struct fscache_object *parent = object->parent;
        int ret;

        _enter("{OBJ%x},%d", object->debug_id, event);

        object->oob_table = fscache_osm_lookup_oob;

        ASSERT(parent != NULL);
        ASSERTCMP(parent->n_ops, >, 0);
        ASSERTCMP(parent->n_obj_ops, >, 0);

        /* make sure the parent is still available */
        ASSERT(fscache_object_is_available(parent));

        if (fscache_object_is_dying(parent) ||
            test_bit(FSCACHE_IOERROR, &object->cache->flags) ||
            !fscache_use_cookie(object)) {
                _leave(" [unavailable]");
                return transit_to(LOOKUP_FAILURE);
        }

        _debug("LOOKUP \"%s\" in \"%s\"",
               cookie->def->name, object->cache->tag->name);

        fscache_stat(&fscache_n_object_lookups);
        fscache_stat(&fscache_n_cop_lookup_object);
        ret = object->cache->ops->lookup_object(object);
        fscache_stat_d(&fscache_n_cop_lookup_object);

        fscache_unuse_cookie(object);

        if (ret == -ETIMEDOUT) {
                /* probably stuck behind another object, so move this one to
                 * the back of the queue */
                fscache_stat(&fscache_n_object_lookups_timed_out);
                _leave(" [timeout]");
                return NO_TRANSIT;
        }

        if (ret < 0) {
                _leave(" [error]");
                return transit_to(LOOKUP_FAILURE);
        }

        _leave(" [ok]");
        return transit_to(OBJECT_AVAILABLE);
}

/**
 * fscache_object_lookup_negative - Note negative cookie lookup
 * @object: Object pointing to cookie to mark
 *
 * Note negative lookup, permitting those waiting to read data from an already
 * existing backing object to continue as there's no data for them to read.
 */
void fscache_object_lookup_negative(struct fscache_object *object)
{
        struct fscache_cookie *cookie = object->cookie;

        _enter("{OBJ%x,%s}", object->debug_id, object->state->name);

        if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
                fscache_stat(&fscache_n_object_lookups_negative);

                /* Allow write requests to begin stacking up and read requests
                 * to begin returning ENODATA.
                 */
                set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
                clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);

                _debug("wake up lookup %p", &cookie->flags);
                clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
                wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
        }
        _leave("");
}
EXPORT_SYMBOL(fscache_object_lookup_negative);

/**
 * fscache_obtained_object - Note successful object lookup or creation
 * @object: Object pointing to cookie to mark
 *
 * Note successful lookup and/or creation, permitting those waiting to write
 * data to a backing object to continue.
 *
 * Note that after calling this, an object's cookie may be relinquished by the
 * netfs, and so must be accessed with object lock held.
 */
void fscache_obtained_object(struct fscache_object *object)
{
        struct fscache_cookie *cookie = object->cookie;

        _enter("{OBJ%x,%s}", object->debug_id, object->state->name);

        /* if we were still looking up, then we must have a positive lookup
         * result, in which case there may be data available */
        if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
                fscache_stat(&fscache_n_object_lookups_positive);

                /* We do (presumably) have data */
                clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
                clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);

                /* Allow write requests to begin stacking up and read requests
                 * to begin shovelling data.
                 */
                clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
                wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
        } else {
                fscache_stat(&fscache_n_object_created);
        }

        set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
        _leave("");
}
EXPORT_SYMBOL(fscache_obtained_object);
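
/*
 * Illustrative sketch only: a backend's ->lookup_object() op is expected to
 * call the two helpers above as it goes.  Hypothetical myfs_* names; error
 * handling elided.
 *
 *      static int myfs_lookup_object(struct fscache_object *object)
 *      {
 *              bool found = myfs_find_backing_file(object);
 *
 *              if (!found)
 *                      // nothing on disk yet: let readers see ENODATA now
 *                      fscache_object_lookup_negative(object);
 *
 *              // ... open or create the backing file ...
 *
 *              fscache_obtained_object(object);   // lookup/creation done
 *              return 0;
 *      }
 */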

/*
 * handle an object that has just become available
 */
static const struct fscache_state *fscache_object_available(struct fscache_object *object,
                                                            int event)
{
        _enter("{OBJ%x},%d", object->debug_id, event);

        object->oob_table = fscache_osm_run_oob;

        spin_lock(&object->lock);

        fscache_done_parent_op(object);
        if (object->n_in_progress == 0) {
                if (object->n_ops > 0) {
                        ASSERTCMP(object->n_ops, >=, object->n_obj_ops);
                        fscache_start_operations(object);
                } else {
                        ASSERT(list_empty(&object->pending_ops));
                }
        }
        spin_unlock(&object->lock);

        fscache_stat(&fscache_n_cop_lookup_complete);
        object->cache->ops->lookup_complete(object);
        fscache_stat_d(&fscache_n_cop_lookup_complete);

        fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
        fscache_stat(&fscache_n_object_avail);

        _leave("");
        return transit_to(JUMPSTART_DEPS);
}

/*
 * Wake up this object's dependent objects now that we've become available.
 */
static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *object,
                                                                int event)
{
        _enter("{OBJ%x},%d", object->debug_id, event);

        if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_PARENT_READY))
                return NO_TRANSIT; /* Not finished; requeue */
        return transit_to(WAIT_FOR_CMD);
}

/*
 * Handle lookup or creation failure.
 */
static const struct fscache_state *fscache_lookup_failure(struct fscache_object *object,
                                                          int event)
{
        struct fscache_cookie *cookie;

        _enter("{OBJ%x},%d", object->debug_id, event);

        object->oob_event_mask = 0;

        fscache_stat(&fscache_n_cop_lookup_complete);
        object->cache->ops->lookup_complete(object);
        fscache_stat_d(&fscache_n_cop_lookup_complete);

        set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags);

        cookie = object->cookie;
        set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
        if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
                wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);

        fscache_done_parent_op(object);
        return transit_to(KILL_OBJECT);
}

/*
 * Wait for completion of all active operations on this object and the death of
 * all child objects of this object.
 */
static const struct fscache_state *fscache_kill_object(struct fscache_object *object,
                                                       int event)
{
        _enter("{OBJ%x,%d,%d},%d",
               object->debug_id, object->n_ops, object->n_children, event);

        fscache_mark_object_dead(object);
        object->oob_event_mask = 0;

        if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) {
                /* Reject any new read/write ops and abort any that are pending. */
                clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
                fscache_cancel_all_ops(object);
        }

        if (list_empty(&object->dependents) &&
            object->n_ops == 0 &&
            object->n_children == 0)
                return transit_to(DROP_OBJECT);

        if (object->n_in_progress == 0) {
                spin_lock(&object->lock);
                if (object->n_ops > 0 && object->n_in_progress == 0)
                        fscache_start_operations(object);
                spin_unlock(&object->lock);
        }

        if (!list_empty(&object->dependents))
                return transit_to(KILL_DEPENDENTS);

        return transit_to(WAIT_FOR_CLEARANCE);
}

/*
 * Kill dependent objects.
 */
static const struct fscache_state *fscache_kill_dependents(struct fscache_object *object,
                                                           int event)
{
        _enter("{OBJ%x},%d", object->debug_id, event);

        if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_KILL))
                return NO_TRANSIT; /* Not finished */
        return transit_to(WAIT_FOR_CLEARANCE);
}

/*
 * Drop an object's attachments
 */
static const struct fscache_state *fscache_drop_object(struct fscache_object *object,
                                                       int event)
{
        struct fscache_object *parent = object->parent;
        struct fscache_cookie *cookie = object->cookie;
        struct fscache_cache *cache = object->cache;
        bool awaken = false;

        _enter("{OBJ%x,%d},%d", object->debug_id, object->n_children, event);

        ASSERT(cookie != NULL);
        ASSERT(!hlist_unhashed(&object->cookie_link));

        /* Make sure the cookie no longer points here and that the netfs isn't
         * waiting for us.
         */
        spin_lock(&cookie->lock);
        hlist_del_init(&object->cookie_link);
        if (hlist_empty(&cookie->backing_objects) &&
            test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
                awaken = true;
        spin_unlock(&cookie->lock);

        if (awaken)
                wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
        if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
                wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);

        /* Prevent a race with our last child, which has to signal EV_CLEARED
         * before dropping our spinlock.
         */
        spin_lock(&object->lock);
        spin_unlock(&object->lock);

        /* Discard from the cache's collection of objects */
        spin_lock(&cache->object_list_lock);
        list_del_init(&object->cache_link);
        spin_unlock(&cache->object_list_lock);

        fscache_stat(&fscache_n_cop_drop_object);
        cache->ops->drop_object(object);
        fscache_stat_d(&fscache_n_cop_drop_object);

        /* The parent object wants to know when all its dependents have gone */
        if (parent) {
                _debug("release parent OBJ%x {%d}",
                       parent->debug_id, parent->n_children);

                spin_lock(&parent->lock);
                parent->n_children--;
                if (parent->n_children == 0)
                        fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
                spin_unlock(&parent->lock);
                object->parent = NULL;
        }

        /* this just shifts the object release to the work processor */
        fscache_put_object(object);
        fscache_stat(&fscache_n_object_dead);

        _leave("");
        return transit_to(OBJECT_DEAD);
}

/*
 * get a ref on an object
 */
static int fscache_get_object(struct fscache_object *object)
{
        int ret;

        fscache_stat(&fscache_n_cop_grab_object);
        ret = object->cache->ops->grab_object(object) ? 0 : -EAGAIN;
        fscache_stat_d(&fscache_n_cop_grab_object);
        return ret;
}

/*
 * Discard a ref on an object
 */
static void fscache_put_object(struct fscache_object *object)
{
        fscache_stat(&fscache_n_cop_put_object);
        object->cache->ops->put_object(object);
        fscache_stat_d(&fscache_n_cop_put_object);
}

/**
 * fscache_object_destroy - Note that a cache object is about to be destroyed
 * @object: The object to be destroyed
 *
 * Note the imminent destruction and deallocation of a cache object record.
 */
void fscache_object_destroy(struct fscache_object *object)
{
        fscache_objlist_remove(object);

        /* We can get rid of the cookie now */
        fscache_cookie_put(object->cookie);
        object->cookie = NULL;
}
EXPORT_SYMBOL(fscache_object_destroy);

/*
 * enqueue an object for metadata-type processing
 */
void fscache_enqueue_object(struct fscache_object *object)
{
        _enter("{OBJ%x}", object->debug_id);

        if (fscache_get_object(object) >= 0) {
                wait_queue_head_t *cong_wq =
                        &get_cpu_var(fscache_object_cong_wait);

                if (queue_work(fscache_object_wq, &object->work)) {
                        if (fscache_object_congested())
                                wake_up(cong_wq);
                } else
                        fscache_put_object(object);

                put_cpu_var(fscache_object_cong_wait);
        }
}

/**
 * fscache_object_sleep_till_congested - Sleep until object wq is congested
 * @timeoutp: Scheduler sleep timeout
 *
 * Allow an object handler to sleep until the object workqueue is congested.
 *
 * The caller must set up a wake up event before calling this and must have set
 * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own
 * condition before calling this function as no test is made here.
 *
 * %true is returned if the object wq is congested, %false otherwise.
 */
bool fscache_object_sleep_till_congested(signed long *timeoutp)
{
        wait_queue_head_t *cong_wq = this_cpu_ptr(&fscache_object_cong_wait);
        DEFINE_WAIT(wait);

        if (fscache_object_congested())
                return true;

        add_wait_queue_exclusive(cong_wq, &wait);
        if (!fscache_object_congested())
                *timeoutp = schedule_timeout(*timeoutp);
        finish_wait(cong_wq, &wait);

        return fscache_object_congested();
}
EXPORT_SYMBOL_GPL(fscache_object_sleep_till_congested);
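
/*
 * Illustrative sketch only: per the kerneldoc above, the caller prepares its
 * own wait and tests its own condition before sleeping here.  my_wq and
 * my_condition_has_cleared() are hypothetical.
 *
 *      signed long timeout = 60 * HZ;
 *      DEFINE_WAIT(wait);
 *      bool congested = false;
 *
 *      do {
 *              prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *              if (my_condition_has_cleared())
 *                      break;
 *              congested = fscache_object_sleep_till_congested(&timeout);
 *      } while (timeout > 0 && !congested);
 *      finish_wait(&my_wq, &wait);
 */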

/*
 * Enqueue the dependents of an object for metadata-type processing.
 *
 * If we don't manage to finish the list before the scheduler wants to run
 * again then return false immediately.  We return true if the list was
 * cleared.
 */
static bool fscache_enqueue_dependents(struct fscache_object *object, int event)
{
        struct fscache_object *dep;
        bool ret = true;

        _enter("{OBJ%x}", object->debug_id);

        if (list_empty(&object->dependents))
                return true;

        spin_lock(&object->lock);

        while (!list_empty(&object->dependents)) {
                dep = list_entry(object->dependents.next,
                                 struct fscache_object, dep_link);
                list_del_init(&dep->dep_link);

                fscache_raise_event(dep, event);
                fscache_put_object(dep);

                if (!list_empty(&object->dependents) && need_resched()) {
                        ret = false;
                        break;
                }
        }

        spin_unlock(&object->lock);
        return ret;
}

/*
 * remove an object from whatever queue it's waiting on
 */
static void fscache_dequeue_object(struct fscache_object *object)
{
        _enter("{OBJ%x}", object->debug_id);

        if (!list_empty(&object->dep_link)) {
                spin_lock(&object->parent->lock);
                list_del_init(&object->dep_link);
                spin_unlock(&object->parent->lock);
        }

        _leave("");
}

/**
 * fscache_check_aux - Ask the netfs whether an object on disk is still valid
 * @object: The object to ask about
 * @data: The auxiliary data for the object
 * @datalen: The size of the auxiliary data
 *
 * This function consults the netfs about the coherency state of an object.
 * The caller must be holding a ref on cookie->n_active (held by
 * fscache_look_up_object() on behalf of the cache backend during object lookup
 * and creation).
 */
enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
                                        const void *data, uint16_t datalen)
{
        enum fscache_checkaux result;

        if (!object->cookie->def->check_aux) {
                fscache_stat(&fscache_n_checkaux_none);
                return FSCACHE_CHECKAUX_OKAY;
        }

        result = object->cookie->def->check_aux(object->cookie->netfs_data,
                                                data, datalen);
        switch (result) {
                /* entry okay as is */
        case FSCACHE_CHECKAUX_OKAY:
                fscache_stat(&fscache_n_checkaux_okay);
                break;

                /* entry requires update */
        case FSCACHE_CHECKAUX_NEEDS_UPDATE:
                fscache_stat(&fscache_n_checkaux_update);
                break;

                /* entry requires deletion */
        case FSCACHE_CHECKAUX_OBSOLETE:
                fscache_stat(&fscache_n_checkaux_obsolete);
                break;

        default:
                BUG();
        }

        return result;
}
EXPORT_SYMBOL(fscache_check_aux);
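
/*
 * Illustrative sketch only: the check_aux hook consulted above is supplied by
 * the netfs in its cookie definition.  Hypothetical myfs_* names; the aux
 * layout is whatever the netfs stored when it acquired the cookie.
 *
 *      static enum fscache_checkaux myfs_check_aux(void *cookie_netfs_data,
 *                                                  const void *data,
 *                                                  uint16_t datalen)
 *      {
 *              struct myfs_inode *inode = cookie_netfs_data;
 *              const struct myfs_aux *aux = data;
 *
 *              if (datalen != sizeof(*aux))
 *                      return FSCACHE_CHECKAUX_OBSOLETE;
 *              if (aux->change_attr != inode->change_attr)
 *                      return FSCACHE_CHECKAUX_OBSOLETE;
 *              return FSCACHE_CHECKAUX_OKAY;
 *      }
 */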

/*
 * Asynchronously invalidate an object.
 */
static const struct fscache_state *_fscache_invalidate_object(struct fscache_object *object,
                                                              int event)
{
        struct fscache_operation *op;
        struct fscache_cookie *cookie = object->cookie;

        _enter("{OBJ%x},%d", object->debug_id, event);

        /* We're going to need the cookie.  If the cookie is not available then
         * retire the object instead.
         */
        if (!fscache_use_cookie(object)) {
                ASSERT(object->cookie->stores.rnode == NULL);
                set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
                _leave(" [no cookie]");
                return transit_to(KILL_OBJECT);
        }

        /* Reject any new read/write ops and abort any that are pending. */
        fscache_invalidate_writes(cookie);
        clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
        fscache_cancel_all_ops(object);

        /* Now we have to wait for in-progress reads and writes */
        op = kzalloc(sizeof(*op), GFP_KERNEL);
        if (!op)
                goto nomem;

        fscache_operation_init(op, object->cache->ops->invalidate_object,
                               NULL, NULL);
        op->flags = FSCACHE_OP_ASYNC |
                (1 << FSCACHE_OP_EXCLUSIVE) |
                (1 << FSCACHE_OP_UNUSE_COOKIE);

        spin_lock(&cookie->lock);
        if (fscache_submit_exclusive_op(object, op) < 0)
                goto submit_op_failed;
        spin_unlock(&cookie->lock);
        fscache_put_operation(op);

        /* Once we've completed the invalidation, we know there will be no data
         * stored in the cache and thus we can reinstate the data-check-skip
         * optimisation.
         */
        set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

        /* We can allow read and write requests to come in once again.  They'll
         * queue up behind our exclusive invalidation operation.
         */
        if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
                wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
        _leave(" [ok]");
        return transit_to(UPDATE_OBJECT);

nomem:
        fscache_mark_object_dead(object);
        fscache_unuse_cookie(object);
        _leave(" [ENOMEM]");
        return transit_to(KILL_OBJECT);

submit_op_failed:
        fscache_mark_object_dead(object);
        spin_unlock(&cookie->lock);
        fscache_unuse_cookie(object);
        kfree(op);
        _leave(" [EIO]");
        return transit_to(KILL_OBJECT);
}

static const struct fscache_state *fscache_invalidate_object(struct fscache_object *object,
                                                             int event)
{
        const struct fscache_state *s;

        fscache_stat(&fscache_n_invalidates_run);
        fscache_stat(&fscache_n_cop_invalidate_object);
        s = _fscache_invalidate_object(object, event);
        fscache_stat_d(&fscache_n_cop_invalidate_object);
        return s;
}

/*
 * Asynchronously update an object.
 */
static const struct fscache_state *fscache_update_object(struct fscache_object *object,
                                                         int event)
{
        _enter("{OBJ%x},%d", object->debug_id, event);

        fscache_stat(&fscache_n_updates_run);
        fscache_stat(&fscache_n_cop_update_object);
        object->cache->ops->update_object(object);
        fscache_stat_d(&fscache_n_cop_update_object);

        _leave("");
        return transit_to(WAIT_FOR_CMD);
}

/**
 * fscache_object_retrying_stale - Note retrying stale object
 * @object: The object that will be retried
 *
 * Note that an object lookup found an on-disk object that was adjudged to be
 * stale and has been deleted.  The lookup will be retried.
 */
void fscache_object_retrying_stale(struct fscache_object *object)
{
        fscache_stat(&fscache_n_cache_no_space_reject);
}
EXPORT_SYMBOL(fscache_object_retrying_stale);

/**
 * fscache_object_mark_killed - Note that an object was killed
 * @object: The object that was culled
 * @why: The reason the object was killed.
 *
 * Note that an object was killed and account the reason in the cache's
 * statistics.
 */
void fscache_object_mark_killed(struct fscache_object *object,
                                enum fscache_why_object_killed why)
{
        if (test_and_set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags)) {
                pr_err("Error: Object already killed by cache [%s]\n",
                       object->cache->identifier);
                return;
        }

        switch (why) {
        case FSCACHE_OBJECT_NO_SPACE:
                fscache_stat(&fscache_n_cache_no_space_reject);
                break;
        case FSCACHE_OBJECT_IS_STALE:
                fscache_stat(&fscache_n_cache_stale_objects);
                break;
        case FSCACHE_OBJECT_WAS_RETIRED:
                fscache_stat(&fscache_n_cache_retired_objects);
                break;
        case FSCACHE_OBJECT_WAS_CULLED:
                fscache_stat(&fscache_n_cache_culled_objects);
                break;
        }
}
EXPORT_SYMBOL(fscache_object_mark_killed);

/*
 * The object is dead.  We can get here if an object gets queued by an event
 * that would lead to its death (such as EV_KILL) when the dispatcher is
 * already running (and so can be requeued) but hasn't yet cleared the event
 * mask.
 */
static const struct fscache_state *fscache_object_dead(struct fscache_object *object,
                                                       int event)
{
        if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD,
                              &object->flags))
                return NO_TRANSIT;

        WARN(true, "FS-Cache object redispatched after death");
        return NO_TRANSIT;
}