/* kernel/lglock.c */
  1. /* See include/linux/lglock.h for description */
  2. #include <linux/module.h>
  3. #include <linux/lglock.h>
  4. #include <linux/cpu.h>
  5. #include <linux/string.h>
  6. /*
  7. * Note there is no uninit, so lglocks cannot be defined in
  8. * modules (but it's fine to use them from there)
  9. * Could be added though, just undo lg_lock_init
  10. */
/*
 * Initialize the lockdep map for an lglock.  Only the lockdep state is
 * set up here; the per-CPU arch_spinlock_t instances are presumably
 * statically initialized by the DEFINE_LGLOCK machinery -- NOTE(review):
 * confirm against include/linux/lglock.h.
 */
void lg_lock_init(struct lglock *lg, char *name)
{
	LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0);
}
EXPORT_SYMBOL(lg_lock_init);
  16. void lg_local_lock(struct lglock *lg)
  17. {
  18. arch_spinlock_t *lock;
  19. preempt_disable();
  20. lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
  21. lock = this_cpu_ptr(lg->lock);
  22. arch_spin_lock(lock);
  23. }
  24. EXPORT_SYMBOL(lg_local_lock);
  25. void lg_local_unlock(struct lglock *lg)
  26. {
  27. arch_spinlock_t *lock;
  28. lock_release(&lg->lock_dep_map, 1, _RET_IP_);
  29. lock = this_cpu_ptr(lg->lock);
  30. arch_spin_unlock(lock);
  31. preempt_enable();
  32. }
  33. EXPORT_SYMBOL(lg_local_unlock);
  34. void lg_local_lock_cpu(struct lglock *lg, int cpu)
  35. {
  36. arch_spinlock_t *lock;
  37. preempt_disable();
  38. lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
  39. lock = per_cpu_ptr(lg->lock, cpu);
  40. arch_spin_lock(lock);
  41. }
  42. EXPORT_SYMBOL(lg_local_lock_cpu);
  43. void lg_local_unlock_cpu(struct lglock *lg, int cpu)
  44. {
  45. arch_spinlock_t *lock;
  46. lock_release(&lg->lock_dep_map, 1, _RET_IP_);
  47. lock = per_cpu_ptr(lg->lock, cpu);
  48. arch_spin_unlock(lock);
  49. preempt_enable();
  50. }
  51. EXPORT_SYMBOL(lg_local_unlock_cpu);
  52. void lg_double_lock(struct lglock *lg, int cpu1, int cpu2)
  53. {
  54. BUG_ON(cpu1 == cpu2);
  55. /* lock in cpu order, just like lg_global_lock */
  56. if (cpu2 < cpu1)
  57. swap(cpu1, cpu2);
  58. preempt_disable();
  59. lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
  60. arch_spin_lock(per_cpu_ptr(lg->lock, cpu1));
  61. arch_spin_lock(per_cpu_ptr(lg->lock, cpu2));
  62. }
  63. void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2)
  64. {
  65. lock_release(&lg->lock_dep_map, 1, _RET_IP_);
  66. arch_spin_unlock(per_cpu_ptr(lg->lock, cpu1));
  67. arch_spin_unlock(per_cpu_ptr(lg->lock, cpu2));
  68. preempt_enable();
  69. }
  70. void lg_global_lock(struct lglock *lg)
  71. {
  72. int i;
  73. preempt_disable();
  74. lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
  75. for_each_possible_cpu(i) {
  76. arch_spinlock_t *lock;
  77. lock = per_cpu_ptr(lg->lock, i);
  78. arch_spin_lock(lock);
  79. }
  80. }
  81. EXPORT_SYMBOL(lg_global_lock);
  82. void lg_global_unlock(struct lglock *lg)
  83. {
  84. int i;
  85. lock_release(&lg->lock_dep_map, 1, _RET_IP_);
  86. for_each_possible_cpu(i) {
  87. arch_spinlock_t *lock;
  88. lock = per_cpu_ptr(lg->lock, i);
  89. arch_spin_unlock(lock);
  90. }
  91. preempt_enable();
  92. }
  93. EXPORT_SYMBOL(lg_global_unlock);