recoverd.c

/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2011 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "member.h"
#include "dir.h"
#include "ast.h"
#include "recover.h"
#include "lowcomms.h"
#include "lock.h"
#include "requestqueue.h"
#include "recoverd.h"
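
/* dlm_recoverd is the per-lockspace kernel thread that drives recovery
   when lockspace membership changes.  It sleeps until LSFL_RECOVER_WORK
   (run a recovery, see do_ls_recovery) or LSFL_RECOVER_DOWN (block lock
   activity for a pending recovery) is set; ls_recover() below does the
   actual work. */
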
/* If the start for which we're re-enabling locking (seq) has been superseded
   by a newer stop (ls_recover_seq), we need to leave locking disabled.

   We suspend dlm_recv threads here to avoid the race where dlm_recv a) sees
   locking stopped and b) adds a message to the requestqueue, but dlm_recoverd
   enables locking and clears the requestqueue between a and b. */

static int enable_locking(struct dlm_ls *ls, uint64_t seq)
{
	int error = -EINTR;

	down_write(&ls->ls_recv_active);

	spin_lock(&ls->ls_recover_lock);
	if (ls->ls_recover_seq == seq) {
		set_bit(LSFL_RUNNING, &ls->ls_flags);
		/* unblocks processes waiting to enter the dlm */
		up_write(&ls->ls_in_recovery);
		clear_bit(LSFL_RECOVER_LOCK, &ls->ls_flags);
		error = 0;
	}
	spin_unlock(&ls->ls_recover_lock);

	up_write(&ls->ls_recv_active);
	return error;
}
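
/* ls_recover() runs one full recovery sequence for the start described by
   rv.  Each stage follows the same barrier pattern: do the local work,
   publish completion with dlm_set_recover_status(), then block in the
   matching *_wait() until the other member nodes reach the same status. */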
static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
{
	unsigned long start;
	int error, neg = 0;

	log_rinfo(ls, "dlm_recover %llu", (unsigned long long)rv->seq);

	mutex_lock(&ls->ls_recoverd_active);

	dlm_callback_suspend(ls);

	dlm_clear_toss(ls);

	/*
	 * This list of root rsb's will be the basis of most of the recovery
	 * routines.
	 */

	dlm_create_root_list(ls);

	/*
	 * Add or remove nodes from the lockspace's ls_nodes list.
	 */

	error = dlm_recover_members(ls, rv, &neg);
	if (error) {
		log_rinfo(ls, "dlm_recover_members error %d", error);
		goto fail;
	}

	dlm_recover_dir_nodeid(ls);

	ls->ls_recover_dir_sent_res = 0;
	ls->ls_recover_dir_sent_msg = 0;
	ls->ls_recover_locks_in = 0;
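
	/* Barrier: all nodes must finish the membership stage before the
	   directory can be rebuilt. */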
	dlm_set_recover_status(ls, DLM_RS_NODES);

	error = dlm_recover_members_wait(ls);
	if (error) {
		log_rinfo(ls, "dlm_recover_members_wait error %d", error);
		goto fail;
	}

	start = jiffies;

	/*
	 * Rebuild our own share of the directory by collecting from all other
	 * nodes their master rsb names that hash to us.
	 */

	error = dlm_recover_directory(ls);
	if (error) {
		log_rinfo(ls, "dlm_recover_directory error %d", error);
		goto fail;
	}

	dlm_set_recover_status(ls, DLM_RS_DIR);

	error = dlm_recover_directory_wait(ls);
	if (error) {
		log_rinfo(ls, "dlm_recover_directory_wait error %d", error);
		goto fail;
	}

	log_rinfo(ls, "dlm_recover_directory %u out %u messages",
		  ls->ls_recover_dir_sent_res, ls->ls_recover_dir_sent_msg);

	/*
	 * We may have outstanding operations that are waiting for a reply from
	 * a failed node.  Mark these to be resent after recovery.  Unlock and
	 * cancel ops can just be completed.
	 */

	dlm_recover_waiters_pre(ls);
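
	/* A newer stop may have arrived while we worked; if so, abort this
	   recovery and wait for the next start. */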
	error = dlm_recovery_stopped(ls);
	if (error)
		goto fail;

	if (neg || dlm_no_directory(ls)) {
		/*
		 * Clear lkb's for departed nodes.
		 */

		dlm_recover_purge(ls);

		/*
		 * Get new master nodeid's for rsb's that were mastered on
		 * departed nodes.
		 */

		error = dlm_recover_masters(ls);
		if (error) {
			log_rinfo(ls, "dlm_recover_masters error %d", error);
			goto fail;
		}

		/*
		 * Send our locks on remastered rsb's to the new masters.
		 */

		error = dlm_recover_locks(ls);
		if (error) {
			log_rinfo(ls, "dlm_recover_locks error %d", error);
			goto fail;
		}

		dlm_set_recover_status(ls, DLM_RS_LOCKS);

		error = dlm_recover_locks_wait(ls);
		if (error) {
			log_rinfo(ls, "dlm_recover_locks_wait error %d", error);
			goto fail;
		}

		log_rinfo(ls, "dlm_recover_locks %u in",
			  ls->ls_recover_locks_in);

		/*
		 * Finalize state in master rsb's now that all locks can be
		 * checked.  This includes conversion resolution and lvb
		 * settings.
		 */

		dlm_recover_rsbs(ls);
	} else {
		/*
		 * Other lockspace members may be going through the "neg" steps
		 * while also adding us to the lockspace, in which case they'll
		 * be doing the recover_locks (RS_LOCKS) barrier.
		 */
		dlm_set_recover_status(ls, DLM_RS_LOCKS);

		error = dlm_recover_locks_wait(ls);
		if (error) {
			log_rinfo(ls, "dlm_recover_locks_wait error %d", error);
			goto fail;
		}
	}

	dlm_release_root_list(ls);

	/*
	 * Purge directory-related requests that are saved in requestqueue.
	 * All dir requests from before recovery are invalid now due to the dir
	 * rebuild and will be resent by the requesting nodes.
	 */

	dlm_purge_requestqueue(ls);

	dlm_set_recover_status(ls, DLM_RS_DONE);

	error = dlm_recover_done_wait(ls);
	if (error) {
		log_rinfo(ls, "dlm_recover_done_wait error %d", error);
		goto fail;
	}
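
	/* Recovery is complete on all nodes; resume normal operation and
	   replay requests that arrived while locking was stopped. */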
	dlm_clear_members_gone(ls);

	dlm_adjust_timeouts(ls);

	dlm_callback_resume(ls);

	error = enable_locking(ls, rv->seq);
	if (error) {
		log_rinfo(ls, "enable_locking error %d", error);
		goto fail;
	}

	error = dlm_process_requestqueue(ls);
	if (error) {
		log_rinfo(ls, "dlm_process_requestqueue error %d", error);
		goto fail;
	}

	error = dlm_recover_waiters_post(ls);
	if (error) {
		log_rinfo(ls, "dlm_recover_waiters_post error %d", error);
		goto fail;
	}

	dlm_recover_grant(ls);

	log_rinfo(ls, "dlm_recover %llu generation %u done: %u ms",
		  (unsigned long long)rv->seq, ls->ls_generation,
		  jiffies_to_msecs(jiffies - start));
	mutex_unlock(&ls->ls_recoverd_active);

	dlm_lsop_recover_done(ls);
	return 0;

 fail:
	dlm_release_root_list(ls);
	log_rinfo(ls, "dlm_recover %llu error %d",
		  (unsigned long long)rv->seq, error);
	mutex_unlock(&ls->ls_recoverd_active);
	return error;
}
/* The dlm_ls_start() that created the rv we take here may already have been
   stopped via dlm_ls_stop(); in that case we need to leave the RECOVERY_STOP
   flag set. */

static void do_ls_recovery(struct dlm_ls *ls)
{
	struct dlm_recover *rv = NULL;

	spin_lock(&ls->ls_recover_lock);
	rv = ls->ls_recover_args;
	ls->ls_recover_args = NULL;
	if (rv && ls->ls_recover_seq == rv->seq)
		clear_bit(LSFL_RECOVER_STOP, &ls->ls_flags);
	spin_unlock(&ls->ls_recover_lock);

	if (rv) {
		ls_recover(ls, rv);
		kfree(rv->nodes);
		kfree(rv);
	}
}
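
/* Main loop of the recovery thread: sleep until woken with work.  A
   "down" wakeup (LSFL_RECOVER_DOWN) re-takes ls_in_recovery to block
   lock activity; a "work" wakeup (LSFL_RECOVER_WORK) runs a recovery. */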
static int dlm_recoverd(void *arg)
{
	struct dlm_ls *ls;

	ls = dlm_find_lockspace_local(arg);
	if (!ls) {
		log_print("dlm_recoverd: no lockspace %p", arg);
		return -1;
	}

	down_write(&ls->ls_in_recovery);
	set_bit(LSFL_RECOVER_LOCK, &ls->ls_flags);
	wake_up(&ls->ls_recover_lock_wait);

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!test_bit(LSFL_RECOVER_WORK, &ls->ls_flags) &&
		    !test_bit(LSFL_RECOVER_DOWN, &ls->ls_flags))
			schedule();
		set_current_state(TASK_RUNNING);

		if (test_and_clear_bit(LSFL_RECOVER_DOWN, &ls->ls_flags)) {
			down_write(&ls->ls_in_recovery);
			set_bit(LSFL_RECOVER_LOCK, &ls->ls_flags);
			wake_up(&ls->ls_recover_lock_wait);
		}

		if (test_and_clear_bit(LSFL_RECOVER_WORK, &ls->ls_flags))
			do_ls_recovery(ls);
	}

	if (test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags))
		up_write(&ls->ls_in_recovery);

	dlm_put_lockspace(ls);
	return 0;
}
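
/* One dlm_recoverd thread exists per lockspace; it is created when the
   lockspace is set up and stopped when the lockspace is released. */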
int dlm_recoverd_start(struct dlm_ls *ls)
{
	struct task_struct *p;
	int error = 0;

	p = kthread_run(dlm_recoverd, ls, "dlm_recoverd");
	if (IS_ERR(p))
		error = PTR_ERR(p);
	else
		ls->ls_recoverd_task = p;
	return error;
}

void dlm_recoverd_stop(struct dlm_ls *ls)
{
	kthread_stop(ls->ls_recoverd_task);
}
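
/* Suspend/resume bracket sections that must not run concurrently with
   ls_recover(): taking ls_recoverd_active excludes the recovery thread,
   and the wake_up() kicks any *_wait() it may be sleeping in. */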
void dlm_recoverd_suspend(struct dlm_ls *ls)
{
	wake_up(&ls->ls_wait_general);
	mutex_lock(&ls->ls_recoverd_active);
}

void dlm_recoverd_resume(struct dlm_ls *ls)
{
	mutex_unlock(&ls->ls_recoverd_active);
}