lockref.h

#ifndef __LINUX_LOCKREF_H
#define __LINUX_LOCKREF_H

/*
 * Locked reference counts.
 *
 * These are different from just plain atomic refcounts in that they
 * are atomic with respect to the spinlock that goes with them.  In
 * particular, there can be implementations that don't actually get
 * the spinlock for the common decrement/increment operations, but they
 * still have to check that the operation is done semantically as if
 * the spinlock had been taken (using a cmpxchg operation that covers
 * both the lock and the count word, or using memory transactions, for
 * example).
 */

#include <linux/spinlock.h>
#include <generated/bounds.h>

#define USE_CMPXCHG_LOCKREF \
        (IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
         IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)

struct lockref {
        union {
#if USE_CMPXCHG_LOCKREF
                aligned_u64 lock_count;
#endif
                struct {
                        spinlock_t lock;
                        int count;
                };
        };
};

extern void lockref_get(struct lockref *);
extern int lockref_put_return(struct lockref *);
extern int lockref_get_not_zero(struct lockref *);
extern int lockref_get_or_lock(struct lockref *);
extern int lockref_put_or_lock(struct lockref *);

extern void lockref_mark_dead(struct lockref *);
extern int lockref_get_not_dead(struct lockref *);

/* Must be called under spinlock for reliable results */
static inline int __lockref_is_dead(const struct lockref *l)
{
        return ((int)l->count < 0);
}

#endif /* __LINUX_LOCKREF_H */
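
For context, a caller might combine these primitives roughly as follows. This is an illustrative sketch, not part of the header: struct my_object, my_object_get()/my_object_put() and release_my_object() are hypothetical names. The put path mirrors the dput()-style pattern of dropping the count locklessly when possible and falling back to the spinlock for the final teardown.

/*
 * Illustrative sketch only: hypothetical object embedding a lockref.
 * The embedded ref.lock can also protect the object's other state.
 */
#include <linux/lockref.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct my_object {
        struct lockref ref;
        /* ... other fields protected by ref.lock ... */
};

static void my_object_get(struct my_object *obj)
{
        /* Increments the count, locklessly on the cmpxchg fast path. */
        lockref_get(&obj->ref);
}

static void my_object_put(struct my_object *obj)
{
        /*
         * Fast path: drop the count without taking the lock.  If this
         * was not the last reference, lockref_put_or_lock() returns
         * nonzero and we are done.  Otherwise it returns 0 with
         * obj->ref.lock held, so teardown happens under the lock.
         */
        if (lockref_put_or_lock(&obj->ref))
                return;

        /* Lock is held here; mark the lockref dead so later lockless
         * lookups via lockref_get_not_dead() fail, then free. */
        lockref_mark_dead(&obj->ref);
        spin_unlock(&obj->ref.lock);
        release_my_object(obj);         /* hypothetical teardown helper */
}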