spinlock.h

#ifndef _SPINLOCK_H
#define _SPINLOCK_H

typedef int spinlock_t;

/* Use this to initialize spinlocks with *static* storage duration.
 * According to the C standard, the only guarantee we have is that this initialization happens
 * before main, which by itself is not enough (such a store might not be visible before the
 * first lock acquire). Fortunately, on gcc, zeroed globals just end up in .bss - memory that is
 * zero-filled when mapped during process creation - hence we are fine. */
#define INIT_SPINLOCK_UNLOCKED 0

/* Use this to initialize spinlocks with *dynamic* storage duration. */
static inline void spinlock_init(spinlock_t *lock) {
    __atomic_store_n(lock, 0, __ATOMIC_RELAXED);
}
/* Returns 0 if taking the lock succeeded, 1 if it was already taken. */
static inline int spinlock_trylock(spinlock_t *lock) {
    if (__atomic_exchange_n(lock, 1, __ATOMIC_ACQUIRE) == 0) {
        return 0;
    }
    return 1;
}
static inline void spinlock_lock(spinlock_t *lock) {
    int val;
    /* First check whether the lock is already free. */
    if (__atomic_exchange_n(lock, 1, __ATOMIC_ACQUIRE) == 0) {
        return;
    }
    do {
        /* This check imposes no inter-thread ordering, thus does not slow other threads down. */
        while (__atomic_load_n(lock, __ATOMIC_RELAXED) != 0) {
            __asm__ volatile ("pause"); /* x86 spin-wait hint */
        }
        /* We have seen the lock as free; check whether it still is, this time with acquire
         * semantics (but only if we really take it). */
        val = 0;
    } while (!__atomic_compare_exchange_n(lock, &val, 1, 1, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
}
static inline void spinlock_unlock(spinlock_t *lock) {
    __atomic_store_n(lock, 0, __ATOMIC_RELEASE);
}

#endif // _SPINLOCK_H
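
A minimal usage sketch, not part of the header: it assumes a POSIX system with pthreads and gcc/clang on x86 (for the "pause" hint). The file name example.c, the thread count, and the counter are made up for illustration.

/* example.c - illustrative only, assumes pthreads and gcc/clang on x86.
 * Build (assumed): gcc example.c -o example -pthread */
#include <pthread.h>
#include <stdio.h>
#include "spinlock.h"

/* Static storage duration, so INIT_SPINLOCK_UNLOCKED is the right initializer. */
static spinlock_t lock = INIT_SPINLOCK_UNLOCKED;
static long counter = 0;

static void *worker(void *arg) {
    (void)arg;
    for (int i = 0; i < 1000000; i++) {
        spinlock_lock(&lock);
        counter++; /* critical section */
        spinlock_unlock(&lock);
    }
    return NULL;
}

int main(void) {
    pthread_t t1, t2;
    pthread_create(&t1, NULL, worker, NULL);
    pthread_create(&t2, NULL, worker, NULL);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    printf("counter = %ld\n", counter); /* expect 2000000 */
    return 0;
}

Note the test-and-test-and-set shape of spinlock_lock: after the initial exchange fails, waiters spin on a relaxed load (which can be satisfied from the local cache) and only retry the acquiring compare-exchange once the lock has been observed free.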