sgx_mutex.cpp

//===------------------------- mutex.cpp ----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#define _LIBCPP_BUILDING_MUTEX
#include "mutex"
#include "limits"
#include "system_error"
#include "cassert"
#include "include/atomic_support.h"

_LIBCPP_BEGIN_NAMESPACE_STD

const defer_lock_t  defer_lock = {};
const try_to_lock_t try_to_lock = {};
const adopt_lock_t  adopt_lock = {};
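
// std::mutex here is a thin wrapper around the SGX trusted-thread mutex stored
// in __m_: the destructor, lock(), try_lock(), and unlock() each forward to the
// corresponding sgx_thread_mutex_* primitive. unlock() is _NOEXCEPT, so a
// failure there is only asserted on rather than reported via an exception.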

mutex::~mutex()
{
    sgx_thread_mutex_destroy(&__m_);
}

void
mutex::lock()
{
    int ec = sgx_thread_mutex_lock(&__m_);
    if (ec)
        __throw_system_error(ec, "mutex lock failed");
}

bool
mutex::try_lock() _NOEXCEPT
{
    return sgx_thread_mutex_trylock(&__m_) == 0;
}

void
mutex::unlock() _NOEXCEPT
{
    int ec = sgx_thread_mutex_unlock(&__m_);
    (void)ec;
    assert(ec == 0);
}

// recursive_mutex
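// Unlike std::mutex, recursive_mutex explicitly initializes __m_ with
// SGX_THREAD_RECURSIVE_MUTEX_INITIALIZER in its constructor, which is what
// lets the owning thread re-acquire the lock; destroy and unlock failures are
// treated as programming errors and only asserted on.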

recursive_mutex::recursive_mutex()
{
    __m_ = SGX_THREAD_RECURSIVE_MUTEX_INITIALIZER;
}

recursive_mutex::~recursive_mutex()
{
    int e = sgx_thread_mutex_destroy(&__m_);
    (void)e;
    assert(e == 0);
}

void
recursive_mutex::lock()
{
    int ec = sgx_thread_mutex_lock(&__m_);
    if (ec)
        __throw_system_error(ec, "recursive_mutex lock failed");
}

void
recursive_mutex::unlock() _NOEXCEPT
{
    int e = sgx_thread_mutex_unlock(&__m_);
    (void)e;
    assert(e == 0);
}

bool
recursive_mutex::try_lock() _NOEXCEPT
{
    return sgx_thread_mutex_trylock(&__m_) == 0;
}

// If dispatch_once_f ever handles C++ exceptions, and if one can get to it
// without illegal macros (unexpected macros not beginning with _UpperCase or
// __lowercase), and if it stops spinning waiting threads, then call_once should
// call into dispatch_once_f instead of here. Relevant radar this code needs to
// keep in sync with: 7741191.

#if defined(_LIBCPP_SGX_HAS_CXX_ATOMIC)

static sgx_thread_mutex_t mut = SGX_THREAD_MUTEX_INITIALIZER;
static sgx_thread_cond_t  cv  = SGX_THREAD_COND_INITIALIZER;

/// NOTE: Changes to flag are done via relaxed atomic stores
///       even though the accesses are protected by a mutex because threads
///       just entering 'call_once' concurrently read from flag.
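///
/// flag encodes the once-state: 0 means func has not run, 1 means a thread is
/// currently running func (other callers block on cv), and ~0ul means func
/// completed successfully. If func throws, flag is reset to 0 so that a later
/// caller can retry.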
void
__call_once(volatile unsigned long& flag, void* arg, void (*func)(void*))
{
    sgx_thread_mutex_lock(&mut);
    while (flag == 1)
        sgx_thread_cond_wait(&cv, &mut);
    if (flag == 0)
    {
        try
        {
            __libcpp_relaxed_store(&flag, 1ul);
            sgx_thread_mutex_unlock(&mut);
            func(arg);
            sgx_thread_mutex_lock(&mut);
            __libcpp_relaxed_store(&flag, ~0ul);
            sgx_thread_mutex_unlock(&mut);
            sgx_thread_cond_broadcast(&cv);
        }
        catch (...)
        {
            sgx_thread_mutex_lock(&mut);
            __libcpp_relaxed_store(&flag, 0ul);
            sgx_thread_mutex_unlock(&mut);
            sgx_thread_cond_broadcast(&cv);
            throw;
        }
    }
    else
        sgx_thread_mutex_unlock(&mut);
}

#endif // defined(_LIBCPP_SGX_HAS_CXX_ATOMIC)

_LIBCPP_END_NAMESPACE_STD