/* blockgroup_lock.h */
#ifndef _LINUX_BLOCKGROUP_LOCK_H
#define _LINUX_BLOCKGROUP_LOCK_H
/*
 * Per-blockgroup locking for ext2 and ext3.
 *
 * Simple hashed spinlocking.
 */

#include <linux/spinlock.h>
#include <linux/cache.h>

#ifdef CONFIG_SMP

/*
 * We want a power-of-two.  Is there a better way than this?
 *
 * The lock count scales with NR_CPUS so that bigger machines get
 * more hash buckets and hence less lock contention.  Power-of-two
 * sizing lets bgl_lock_ptr() use a cheap mask instead of a modulo.
 */
#if NR_CPUS >= 32
#define NR_BG_LOCKS	128
#elif NR_CPUS >= 16
#define NR_BG_LOCKS	64
#elif NR_CPUS >= 8
#define NR_BG_LOCKS	32
#elif NR_CPUS >= 4
#define NR_BG_LOCKS	16
#elif NR_CPUS >= 2
#define NR_BG_LOCKS	8
#else
#define NR_BG_LOCKS	4
#endif

#else	/* CONFIG_SMP */
#define NR_BG_LOCKS	1
#endif	/* CONFIG_SMP */
/*
 * One hashed lock.  On SMP each lock is padded out to its own
 * cacheline (____cacheline_aligned_in_smp, from <linux/cache.h>)
 * so adjacent locks do not false-share a line.
 */
struct bgl_lock {
	spinlock_t lock;
} ____cacheline_aligned_in_smp;
/*
 * The full hashed-lock array.  Embed one of these in a superblock
 * and map block group numbers onto slots with bgl_lock_ptr().
 */
struct blockgroup_lock {
	struct bgl_lock locks[NR_BG_LOCKS];
};
  36. static inline void bgl_lock_init(struct blockgroup_lock *bgl)
  37. {
  38. int i;
  39. for (i = 0; i < NR_BG_LOCKS; i++)
  40. spin_lock_init(&bgl->locks[i].lock);
  41. }
  42. /*
  43. * The accessor is a macro so we can embed a blockgroup_lock into different
  44. * superblock types
  45. */
  46. static inline spinlock_t *
  47. bgl_lock_ptr(struct blockgroup_lock *bgl, unsigned int block_group)
  48. {
  49. return &bgl->locks[(block_group) & (NR_BG_LOCKS-1)].lock;
  50. }
#endif	/* _LINUX_BLOCKGROUP_LOCK_H */