lock.c

#include "reiserfs.h"
#include <linux/mutex.h>
/*
 * The previous reiserfs locking scheme was heavily based on
 * the tricky properties of the Bkl:
 *
 * - it was acquired recursively by the same task
 * - performance relied on the release-while-schedule() property
 *
 * Now that we have replaced it with a mutex, we still want to keep the
 * same recursive property to avoid big changes in the code structure.
 * We use our own lock_owner here because the owner field of a mutex is
 * only available with SMP or mutex debugging, and we only need this
 * field for this one mutex, so there is no need for a system-wide
 * facility.
 *
 * Also, this lock is often released before a call that could block,
 * because reiserfs performance was partially based on the
 * release-while-schedule() property of the Bkl.
 */
void reiserfs_write_lock(struct super_block *s)
{
	struct reiserfs_sb_info *sb_i = REISERFS_SB(s);

	if (sb_i->lock_owner != current) {
		mutex_lock(&sb_i->lock);
		sb_i->lock_owner = current;
	}

	/* No need to protect it, only the current task touches it */
	sb_i->lock_depth++;
}

void reiserfs_write_unlock(struct super_block *s)
{
	struct reiserfs_sb_info *sb_i = REISERFS_SB(s);

	/*
	 * Are we unlocking without even holding the lock?
	 * Such a situation must raise a BUG() if we don't want
	 * to corrupt the data.
	 */
	BUG_ON(sb_i->lock_owner != current);

	if (--sb_i->lock_depth == -1) {
		sb_i->lock_owner = NULL;
		mutex_unlock(&sb_i->lock);
	}
}
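
/*
 * Illustrative sketch (not part of the reiserfs sources): how the
 * recursive property above is meant to be used.  A helper may take the
 * write lock even though its caller already holds it; only the
 * outermost reiserfs_write_unlock() actually releases the mutex,
 * because lock_depth counts the nesting.  The function names below are
 * made up for the example.
 */
static void example_helper(struct super_block *s)
{
	reiserfs_write_lock(s);		/* already owned: only lock_depth++ */
	/* ... touch metadata that requires the write lock ... */
	reiserfs_write_unlock(s);	/* lock_depth--, mutex stays held */
}

static void example_caller(struct super_block *s)
{
	reiserfs_write_lock(s);		/* first acquisition: takes the mutex */
	example_helper(s);
	reiserfs_write_unlock(s);	/* lock_depth reaches -1: mutex released */
}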

/*
 * Conditionally drop the write lock: if the current task owns it,
 * release it completely and return the nesting depth so it can be
 * restored later; otherwise do nothing and return -1.
 */
int __must_check reiserfs_write_unlock_nested(struct super_block *s)
{
	struct reiserfs_sb_info *sb_i = REISERFS_SB(s);
	int depth;

	/* this can happen when the lock isn't always held */
	if (sb_i->lock_owner != current)
		return -1;

	depth = sb_i->lock_depth;

	sb_i->lock_depth = -1;
	sb_i->lock_owner = NULL;
	mutex_unlock(&sb_i->lock);

	return depth;
}

/*
 * Restore the write lock at the nesting depth previously returned by
 * reiserfs_write_unlock_nested(); a depth of -1 means it was never held.
 */
void reiserfs_write_lock_nested(struct super_block *s, int depth)
{
	struct reiserfs_sb_info *sb_i = REISERFS_SB(s);

	/* this can happen when the lock isn't always held */
	if (depth == -1)
		return;

	mutex_lock(&sb_i->lock);
	sb_i->lock_owner = current;
	sb_i->lock_depth = depth;
}
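
/*
 * Illustrative sketch (not part of the reiserfs sources): the nested
 * helpers let a path that may or may not hold the write lock drop it
 * around a call that could sleep, then restore the exact nesting depth
 * afterwards.  This is the pattern the two functions above are designed
 * for; the function name below is made up for the example.
 */
static void example_drop_lock_around_blocking_call(struct super_block *s)
{
	int depth;

	/* Release the lock (if we hold it) so other tasks can progress. */
	depth = reiserfs_write_unlock_nested(s);

	/* ... call something that may schedule(), e.g. wait for I/O ... */

	/* Restore the previous nesting depth (no-op if depth == -1). */
	reiserfs_write_lock_nested(s, depth);
}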

/*
 * Utility function to warn if it is called without the superblock
 * write lock held.  'caller' identifies the call site and is currently
 * unused here.
 */
void reiserfs_check_lock_depth(struct super_block *sb, char *caller)
{
	struct reiserfs_sb_info *sb_i = REISERFS_SB(sb);

	WARN_ON(sb_i->lock_depth < 0);
}

#ifdef CONFIG_REISERFS_CHECK
void reiserfs_lock_check_recursive(struct super_block *sb)
{
	struct reiserfs_sb_info *sb_i = REISERFS_SB(sb);

	WARN_ONCE((sb_i->lock_depth > 0), "Unwanted recursive reiserfs lock!\n");
}
#endif
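
/*
 * Illustrative sketch (not part of the reiserfs sources): typical use of
 * the checking helpers.  A path that requires the write lock asserts it
 * on entry with reiserfs_check_lock_depth(); a path that assumes the
 * lock is held exactly once (not recursively) can additionally use
 * reiserfs_lock_check_recursive().  The function name below is made up
 * for the example.
 */
static void example_assert_locked_once(struct super_block *sb)
{
	/* Warn if the write lock is not held at all... */
	reiserfs_check_lock_depth(sb, "example_assert_locked_once");
	/* ...and warn (once) if it has been taken recursively. */
	reiserfs_lock_check_recursive(sb);
}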