  1. /*
  2. * Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved.
  3. *
  4. * This software may be freely redistributed under the terms of the
  5. * GNU General Public License.
  6. *
  7. * You should have received a copy of the GNU General Public License
  8. * along with this program; if not, write to the Free Software
  9. * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  10. *
  11. * Authors: David Woodhouse <dwmw2@infradead.org>
  12. * David Howells <dhowells@redhat.com>
  13. *
  14. */
  15. #include <linux/kernel.h>
  16. #include <linux/module.h>
  17. #include <linux/init.h>
  18. #include <linux/circ_buf.h>
  19. #include <linux/sched.h>
  20. #include "internal.h"
  21. #if 0
  22. unsigned afs_vnode_update_timeout = 10;
  23. #endif /* 0 */
  24. #define afs_breakring_space(server) \
  25. CIRC_SPACE((server)->cb_break_head, (server)->cb_break_tail, \
  26. ARRAY_SIZE((server)->cb_break))
  27. //static void afs_callback_updater(struct work_struct *);
  28. static struct workqueue_struct *afs_callback_update_worker;
  29. /*
  30. * allow the fileserver to request callback state (re-)initialisation
  31. */
  32. void afs_init_callback_state(struct afs_server *server)
  33. {
  34. struct afs_vnode *vnode;
  35. _enter("{%p}", server);
  36. spin_lock(&server->cb_lock);
  37. /* kill all the promises on record from this server */
  38. while (!RB_EMPTY_ROOT(&server->cb_promises)) {
  39. vnode = rb_entry(server->cb_promises.rb_node,
  40. struct afs_vnode, cb_promise);
  41. _debug("UNPROMISE { vid=%x:%u uq=%u}",
  42. vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
  43. rb_erase(&vnode->cb_promise, &server->cb_promises);
  44. vnode->cb_promised = false;
  45. }
  46. spin_unlock(&server->cb_lock);
  47. _leave("");
  48. }
/*
 * handle the data invalidation side of a callback being broken
 * - work item attached to the vnode; queued by afs_break_callback()
 */
void afs_broken_callback_work(struct work_struct *work)
{
	struct afs_vnode *vnode =
		container_of(work, struct afs_vnode, cb_broken_work);

	_enter("");

	/* nothing to revalidate if the server has deleted the vnode */
	if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
		return;

	/* we're only interested in dealing with a broken callback on *this*
	 * vnode and only if no-one else has dealt with it yet */
	if (!mutex_trylock(&vnode->validate_lock))
		return; /* someone else is dealing with it */

	if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) {
		/* a directory's cached access permits may no longer apply */
		if (S_ISDIR(vnode->vfs_inode.i_mode))
			afs_clear_permits(vnode);

		/* refetch the vnode status from the server
		 * (NOTE(review): presumably this clears CB_BROKEN on success
		 * - that happens inside afs_vnode_fetch_status(), which is
		 * not visible in this file; confirm before relying on it) */
		if (afs_vnode_fetch_status(vnode, NULL, NULL) < 0)
			goto out;

		if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
			goto out;

		/* if the vnode's data version number changed then its contents
		 * are different */
		if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags))
			afs_zap_data(vnode);
	}

out:
	mutex_unlock(&vnode->validate_lock);

	/* avoid the potential race whereby the mutex_trylock() in this
	 * function happens again between the clear_bit() and the
	 * mutex_unlock() */
	if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) {
		_debug("requeue");
		queue_work(afs_callback_update_worker, &vnode->cb_broken_work);
	}
	_leave("");
}
/*
 * actually break a callback
 * - marks the vnode's callback broken, drops any promise recorded against
 *   the server and queues the data-invalidation work item
 */
static void afs_break_callback(struct afs_server *server,
			       struct afs_vnode *vnode)
{
	_enter("");

	set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);

	if (vnode->cb_promised) {
		spin_lock(&vnode->lock);

		_debug("break callback");

		/* recheck cb_promised under cb_lock: the unlocked test above
		 * is only an optimisation and may have raced */
		spin_lock(&server->cb_lock);
		if (vnode->cb_promised) {
			rb_erase(&vnode->cb_promise, &server->cb_promises);
			vnode->cb_promised = false;
		}
		spin_unlock(&server->cb_lock);

		queue_work(afs_callback_update_worker, &vnode->cb_broken_work);

		/* a broken callback may mean a lock was released; wake any
		 * waiters if locks are pending and none are granted */
		if (list_empty(&vnode->granted_locks) &&
		    !list_empty(&vnode->pending_locks))
			afs_lock_may_be_available(vnode);
		spin_unlock(&vnode->lock);
	}
}
  110. /*
  111. * allow the fileserver to explicitly break one callback
  112. * - happens when
  113. * - the backing file is changed
  114. * - a lock is released
  115. */
  116. static void afs_break_one_callback(struct afs_server *server,
  117. struct afs_fid *fid)
  118. {
  119. struct afs_vnode *vnode;
  120. struct rb_node *p;
  121. _debug("find");
  122. spin_lock(&server->fs_lock);
  123. p = server->fs_vnodes.rb_node;
  124. while (p) {
  125. vnode = rb_entry(p, struct afs_vnode, server_rb);
  126. if (fid->vid < vnode->fid.vid)
  127. p = p->rb_left;
  128. else if (fid->vid > vnode->fid.vid)
  129. p = p->rb_right;
  130. else if (fid->vnode < vnode->fid.vnode)
  131. p = p->rb_left;
  132. else if (fid->vnode > vnode->fid.vnode)
  133. p = p->rb_right;
  134. else if (fid->unique < vnode->fid.unique)
  135. p = p->rb_left;
  136. else if (fid->unique > vnode->fid.unique)
  137. p = p->rb_right;
  138. else
  139. goto found;
  140. }
  141. /* not found so we just ignore it (it may have moved to another
  142. * server) */
  143. not_available:
  144. _debug("not avail");
  145. spin_unlock(&server->fs_lock);
  146. _leave("");
  147. return;
  148. found:
  149. _debug("found");
  150. ASSERTCMP(server, ==, vnode->server);
  151. if (!igrab(AFS_VNODE_TO_I(vnode)))
  152. goto not_available;
  153. spin_unlock(&server->fs_lock);
  154. afs_break_callback(server, vnode);
  155. iput(&vnode->vfs_inode);
  156. _leave("");
  157. }
  158. /*
  159. * allow the fileserver to break callback promises
  160. */
  161. void afs_break_callbacks(struct afs_server *server, size_t count,
  162. struct afs_callback callbacks[])
  163. {
  164. _enter("%p,%zu,", server, count);
  165. ASSERT(server != NULL);
  166. ASSERTCMP(count, <=, AFSCBMAX);
  167. for (; count > 0; callbacks++, count--) {
  168. _debug("- Fid { vl=%08x n=%u u=%u } CB { v=%u x=%u t=%u }",
  169. callbacks->fid.vid,
  170. callbacks->fid.vnode,
  171. callbacks->fid.unique,
  172. callbacks->version,
  173. callbacks->expiry,
  174. callbacks->type
  175. );
  176. afs_break_one_callback(server, &callbacks->fid);
  177. }
  178. _leave("");
  179. return;
  180. }
/*
 * record the callback for breaking
 * - the caller must hold server->cb_lock
 * - the caller must have checked there is ring space (or waited for some)
 */
static void afs_do_give_up_callback(struct afs_server *server,
				    struct afs_vnode *vnode)
{
	struct afs_callback *cb;

	_enter("%p,%p", server, vnode);

	/* copy the callback details into the next free break-ring slot */
	cb = &server->cb_break[server->cb_break_head];
	cb->fid		= vnode->fid;
	cb->version	= vnode->cb_version;
	cb->expiry	= vnode->cb_expiry;
	cb->type	= vnode->cb_type;
	/* publish the slot contents before advancing the head index so that
	 * the consumer never observes a partially-filled entry */
	smp_wmb();
	/* ring size is a power of two, so masking wraps the index */
	server->cb_break_head =
		(server->cb_break_head + 1) &
		(ARRAY_SIZE(server->cb_break) - 1);

	/* defer the breaking of callbacks to try and collect as many as
	 * possible to ship in one operation */
	switch (atomic_inc_return(&server->cb_break_n)) {
	case 1 ... AFSCBMAX - 1:
		/* batch still filling: (re)arm a 2 second deferral */
		queue_delayed_work(afs_callback_update_worker,
				   &server->cb_break_work, HZ * 2);
		break;
	case AFSCBMAX:
		/* a full batch is ready - dispatch it immediately */
		afs_flush_callback_breaks(server);
		break;
	default:
		break;
	}

	/* the promise must still be in the tree at this point */
	ASSERT(server->cb_promises.rb_node != NULL);
	rb_erase(&vnode->cb_promise, &server->cb_promises);
	vnode->cb_promised = false;
	_leave("");
}
  217. /*
  218. * discard the callback on a deleted item
  219. */
  220. void afs_discard_callback_on_delete(struct afs_vnode *vnode)
  221. {
  222. struct afs_server *server = vnode->server;
  223. _enter("%d", vnode->cb_promised);
  224. if (!vnode->cb_promised) {
  225. _leave(" [not promised]");
  226. return;
  227. }
  228. ASSERT(server != NULL);
  229. spin_lock(&server->cb_lock);
  230. if (vnode->cb_promised) {
  231. ASSERT(server->cb_promises.rb_node != NULL);
  232. rb_erase(&vnode->cb_promise, &server->cb_promises);
  233. vnode->cb_promised = false;
  234. }
  235. spin_unlock(&server->cb_lock);
  236. _leave("");
  237. }
/*
 * give up the callback registered for a vnode on the file server when the
 * inode is being cleared
 * - may sleep (uninterruptibly) waiting for break-ring space
 */
void afs_give_up_callback(struct afs_vnode *vnode)
{
	struct afs_server *server = vnode->server;

	DECLARE_WAITQUEUE(myself, current);

	_enter("%d", vnode->cb_promised);

	_debug("GIVE UP INODE %p", &vnode->vfs_inode);

	/* nothing to do if no promise is on record */
	if (!vnode->cb_promised) {
		_leave(" [not promised]");
		return;
	}

	ASSERT(server != NULL);

	spin_lock(&server->cb_lock);
	/* if the break ring is full, sleep until a slot frees up, dropping
	 * cb_lock around each schedule() so the consumer can make progress */
	if (vnode->cb_promised && afs_breakring_space(server) == 0) {
		add_wait_queue(&server->cb_break_waitq, &myself);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			/* recheck both exit conditions under the lock: the
			 * promise may have been broken for us whilst asleep */
			if (!vnode->cb_promised ||
			    afs_breakring_space(server) != 0)
				break;
			spin_unlock(&server->cb_lock);
			schedule();
			spin_lock(&server->cb_lock);
		}
		remove_wait_queue(&server->cb_break_waitq, &myself);
		__set_current_state(TASK_RUNNING);
	}

	/* of course, it's always possible for the server to break this vnode's
	 * callback first... */
	if (vnode->cb_promised)
		afs_do_give_up_callback(server, vnode);

	spin_unlock(&server->cb_lock);
	_leave("");
}
  275. /*
  276. * dispatch a deferred give up callbacks operation
  277. */
  278. void afs_dispatch_give_up_callbacks(struct work_struct *work)
  279. {
  280. struct afs_server *server =
  281. container_of(work, struct afs_server, cb_break_work.work);
  282. _enter("");
  283. /* tell the fileserver to discard the callback promises it has
  284. * - in the event of ENOMEM or some other error, we just forget that we
  285. * had callbacks entirely, and the server will call us later to break
  286. * them
  287. */
  288. afs_fs_give_up_callbacks(server, &afs_async_call);
  289. }
/*
 * flush the outstanding callback breaks on a server
 * - collapses the pending deferral to zero so the give-up work item runs
 *   immediately
 */
void afs_flush_callback_breaks(struct afs_server *server)
{
	mod_delayed_work(afs_callback_update_worker, &server->cb_break_work, 0);
}
#if 0
/*
 * update a bunch of callbacks
 *
 * NOTE(review): dead code - compiled out with #if 0 and bit-rotted: it
 * references identifiers (vl, vldb, afs_vnode_update_worker,
 * afs_vnode_update, vnode->update, vnode->state, ...) that are not in
 * this file's visible scope.  Kept verbatim for reference only; it would
 * not compile if re-enabled without substantial repair.
 */
static void afs_callback_updater(struct work_struct *work)
{
	struct afs_server *server;
	struct afs_vnode *vnode, *xvnode;
	time64_t now;
	long timeout;
	int ret;

	server = container_of(work, struct afs_server, updater);

	_enter("");

	now = ktime_get_real_seconds();

	/* find the first vnode to update */
	spin_lock(&server->cb_lock);
	for (;;) {
		if (RB_EMPTY_ROOT(&server->cb_promises)) {
			spin_unlock(&server->cb_lock);
			_leave(" [nothing]");
			return;
		}

		vnode = rb_entry(rb_first(&server->cb_promises),
				 struct afs_vnode, cb_promise);
		if (atomic_read(&vnode->usage) > 0)
			break;
		rb_erase(&vnode->cb_promise, &server->cb_promises);
		vnode->cb_promised = false;
	}

	timeout = vnode->update_at - now;
	if (timeout > 0) {
		queue_delayed_work(afs_vnode_update_worker,
				   &afs_vnode_update, timeout * HZ);
		spin_unlock(&server->cb_lock);
		_leave(" [nothing]");
		return;
	}

	list_del_init(&vnode->update);
	atomic_inc(&vnode->usage);
	spin_unlock(&server->cb_lock);

	/* we can now perform the update */
	_debug("update %s", vnode->vldb.name);
	vnode->state = AFS_VL_UPDATING;
	vnode->upd_rej_cnt = 0;
	vnode->upd_busy_cnt = 0;

	ret = afs_vnode_update_record(vl, &vldb);
	switch (ret) {
	case 0:
		afs_vnode_apply_update(vl, &vldb);
		vnode->state = AFS_VL_UPDATING;
		break;
	case -ENOMEDIUM:
		vnode->state = AFS_VL_VOLUME_DELETED;
		break;
	default:
		vnode->state = AFS_VL_UNCERTAIN;
		break;
	}

	/* and then reschedule */
	_debug("reschedule");
	vnode->update_at = ktime_get_real_seconds() +
			afs_vnode_update_timeout;

	spin_lock(&server->cb_lock);

	if (!list_empty(&server->cb_promises)) {
		/* next update in 10 minutes, but wait at least 1 second more
		 * than the newest record already queued so that we don't spam
		 * the VL server suddenly with lots of requests
		 */
		xvnode = list_entry(server->cb_promises.prev,
				    struct afs_vnode, update);
		if (vnode->update_at <= xvnode->update_at)
			vnode->update_at = xvnode->update_at + 1;
		xvnode = list_entry(server->cb_promises.next,
				    struct afs_vnode, update);
		timeout = xvnode->update_at - now;
		if (timeout < 0)
			timeout = 0;
	} else {
		timeout = afs_vnode_update_timeout;
	}

	list_add_tail(&vnode->update, &server->cb_promises);

	_debug("timeout %ld", timeout);
	queue_delayed_work(afs_vnode_update_worker,
			   &afs_vnode_update, timeout * HZ);
	spin_unlock(&server->cb_lock);
	afs_put_vnode(vl);
}
#endif
  385. /*
  386. * initialise the callback update process
  387. */
  388. int __init afs_callback_update_init(void)
  389. {
  390. afs_callback_update_worker =
  391. create_singlethread_workqueue("kafs_callbackd");
  392. return afs_callback_update_worker ? 0 : -ENOMEM;
  393. }
/*
 * shut down the callback update process
 * - destroy_workqueue() drains any outstanding work items before freeing
 *   the workqueue
 */
void afs_callback_update_kill(void)
{
	destroy_workqueue(afs_callback_update_worker);
}