/* spinlock.h - minimal test-and-set spinlock with optional debug-build ownership tracking. */
  1. #ifndef _SPINLOCK_H
  2. #define _SPINLOCK_H
  3. #include <api.h>
  4. #ifdef DEBUG
  5. #define DEBUG_SPINLOCKS
  6. #endif // DEBUG
  7. #if defined DEBUG_SPINLOCKS && defined IN_SHIM
  8. #define DEBUG_SPINLOCKS_SHIM
  9. #include <shim_types.h>
  10. pid_t shim_do_gettid(void);
  11. #endif // defined DEBUG_SPINLOCKS && defined IN_SHIM
  12. typedef struct {
  13. int lock;
  14. #ifdef DEBUG_SPINLOCKS_SHIM
  15. pid_t owner;
  16. #endif // DEBUG_SPINLOCKS_SHIM
  17. } spinlock_t;
  18. /* Use this to initialize spinlocks with *static* storage duration.
  19. * According to C standard, there only guarantee we have is that this initialization will happen
  20. * before main, which by itself is not enough (such store might not be visible before fist lock
  21. * acquire). Fortunately on gcc global zeroed variables will just end up in .bss - zeroed memory
  22. * mapped during process creation, hence we are fine.
  23. *
  24. * Rest of the struct is zeroed implicitly, hence no need for ifdef here. */
  25. #define INIT_SPINLOCK_UNLOCKED { .lock = 0 }
  26. #ifdef DEBUG_SPINLOCKS_SHIM
  27. static inline void debug_spinlock_take_ownership(spinlock_t* lock) {
  28. __atomic_store_n(&lock->owner, shim_do_gettid(), __ATOMIC_RELAXED);
  29. }
  30. static inline void debug_spinlock_giveup_ownership(spinlock_t* lock) {
  31. __atomic_store_n(&lock->owner, 0, __ATOMIC_RELAXED);
  32. }
  33. #else
  34. static inline void debug_spinlock_take_ownership(spinlock_t* lock) {
  35. __UNUSED(lock);
  36. }
  37. static inline void debug_spinlock_giveup_ownership(spinlock_t* lock) {
  38. __UNUSED(lock);
  39. }
  40. #endif // DEBUG_SPINLOCKS_SHIM
  41. /* Use this to initialize spinlocks with *dynamic* storage duration. */
  42. static inline void spinlock_init(spinlock_t *lock) {
  43. debug_spinlock_giveup_ownership(lock);
  44. __atomic_store_n(&lock->lock, 0, __ATOMIC_RELAXED);
  45. }
  46. /* Returns 0 if taking the lock succeded, 1 if it was already taken */
  47. static inline int spinlock_trylock(spinlock_t* lock) {
  48. if (__atomic_exchange_n(&lock->lock, 1, __ATOMIC_ACQUIRE) == 0) {
  49. debug_spinlock_take_ownership(lock);
  50. return 0;
  51. }
  52. return 1;
  53. }
  54. static inline void spinlock_lock(spinlock_t* lock) {
  55. int val;
  56. /* First check if lock is already free. */
  57. if (__atomic_exchange_n(&lock->lock, 1, __ATOMIC_ACQUIRE) == 0) {
  58. goto out;
  59. }
  60. do {
  61. /* This check imposes no inter-thread ordering, thus does not slow other threads. */
  62. while (__atomic_load_n(&lock->lock, __ATOMIC_RELAXED) != 0) {
  63. __asm__ volatile ("pause");
  64. }
  65. /* Seen lock as free, check if it still is, this time with acquire semantics (but only
  66. * if we really take it). */
  67. val = 0;
  68. } while (!__atomic_compare_exchange_n(&lock->lock, &val, 1, 1, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
  69. out:
  70. debug_spinlock_take_ownership(lock);
  71. }
  72. static inline void spinlock_unlock(spinlock_t* lock) {
  73. debug_spinlock_giveup_ownership(lock);
  74. __atomic_store_n(&lock->lock, 0, __ATOMIC_RELEASE);
  75. }
  76. #ifdef DEBUG_SPINLOCKS
  77. static inline bool _spinlock_is_locked(spinlock_t* lock) {
  78. return __atomic_load_n(&lock->lock, __ATOMIC_SEQ_CST) != 0;
  79. }
  80. #ifdef DEBUG_SPINLOCKS_SHIM
  81. static inline bool spinlock_is_locked(spinlock_t* lock) {
  82. if (!_spinlock_is_locked(lock)) {
  83. return false;
  84. }
  85. pid_t owner = __atomic_load_n(&lock->owner, __ATOMIC_RELAXED);
  86. if (owner != shim_do_gettid()) {
  87. debug("Unexpected lock ownership: owned by: %d, checked in: %d", owner, shim_do_gettid());
  88. return false;
  89. }
  90. return true;
  91. }
  92. #else
  93. static inline bool spinlock_is_locked(spinlock_t* lock) {
  94. return _spinlock_is_locked(lock);
  95. }
  96. #endif // DEBUG_SPINLOCKS_SHIM
  97. #endif // DEBUG_SPINLOCKS
  98. #endif // _SPINLOCK_H