// -*- C++ -*-
//===--------------------------- mutex ------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#ifndef _LIBCPP_SGX_MUTEX
#define _LIBCPP_SGX_MUTEX

/*
    mutex synopsis

namespace std
{

class mutex
{
public:
    constexpr mutex() noexcept;
    ~mutex();

    mutex(const mutex&) = delete;
    mutex& operator=(const mutex&) = delete;

    void lock();
    bool try_lock();
    void unlock();

    typedef sgx_thread_mutex_t* native_handle_type;
    native_handle_type native_handle();
};

class recursive_mutex
{
public:
    recursive_mutex();
    ~recursive_mutex();

    recursive_mutex(const recursive_mutex&) = delete;
    recursive_mutex& operator=(const recursive_mutex&) = delete;

    void lock();
    bool try_lock() noexcept;
    void unlock();

    typedef sgx_thread_mutex_t* native_handle_type;
    native_handle_type native_handle();
};

Not supported (time API):

class timed_mutex
{
public:
    timed_mutex();
    ~timed_mutex();

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    void lock();
    bool try_lock();
    template <class Rep, class Period>
        bool try_lock_for(const chrono::duration<Rep, Period>& rel_time);
    template <class Clock, class Duration>
        bool try_lock_until(const chrono::time_point<Clock, Duration>& abs_time);
    void unlock();
};

Not supported (time API):

class recursive_timed_mutex
{
public:
    recursive_timed_mutex();
    ~recursive_timed_mutex();

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    void lock();
    bool try_lock() noexcept;
    template <class Rep, class Period>
        bool try_lock_for(const chrono::duration<Rep, Period>& rel_time);
    template <class Clock, class Duration>
        bool try_lock_until(const chrono::time_point<Clock, Duration>& abs_time);
    void unlock();
};

struct defer_lock_t {};
struct try_to_lock_t {};
struct adopt_lock_t {};

constexpr defer_lock_t  defer_lock{};
constexpr try_to_lock_t try_to_lock{};
constexpr adopt_lock_t  adopt_lock{};

template <class Mutex>
class lock_guard
{
public:
    typedef Mutex mutex_type;

    explicit lock_guard(mutex_type& m);
    lock_guard(mutex_type& m, adopt_lock_t);
    ~lock_guard();

    lock_guard(lock_guard const&) = delete;
    lock_guard& operator=(lock_guard const&) = delete;
};

template <class Mutex>
class unique_lock
{
public:
    typedef Mutex mutex_type;
    unique_lock() noexcept;
    explicit unique_lock(mutex_type& m);
    unique_lock(mutex_type& m, defer_lock_t) noexcept;
    unique_lock(mutex_type& m, try_to_lock_t);
    unique_lock(mutex_type& m, adopt_lock_t);
    template <class Clock, class Duration>
        unique_lock(mutex_type& m, const chrono::time_point<Clock, Duration>& abs_time);
    template <class Rep, class Period>
        unique_lock(mutex_type& m, const chrono::duration<Rep, Period>& rel_time);
    ~unique_lock();

    unique_lock(unique_lock const&) = delete;
    unique_lock& operator=(unique_lock const&) = delete;

    unique_lock(unique_lock&& u) noexcept;
    unique_lock& operator=(unique_lock&& u) noexcept;

    void lock();
    bool try_lock();

    template <class Rep, class Period>
        bool try_lock_for(const chrono::duration<Rep, Period>& rel_time);
    template <class Clock, class Duration>
        bool try_lock_until(const chrono::time_point<Clock, Duration>& abs_time);

    void unlock();

    void swap(unique_lock& u) noexcept;
    mutex_type* release() noexcept;

    bool owns_lock() const noexcept;
    explicit operator bool () const noexcept;
    mutex_type* mutex() const noexcept;
};

template <class Mutex>
    void swap(unique_lock<Mutex>& x, unique_lock<Mutex>& y) noexcept;

template <class L1, class L2, class... L3>
    int try_lock(L1&, L2&, L3&...);
template <class L1, class L2, class... L3>
    void lock(L1&, L2&, L3&...);

struct once_flag
{
    constexpr once_flag() noexcept;

    once_flag(const once_flag&) = delete;
    once_flag& operator=(const once_flag&) = delete;
};

template<class Callable, class ...Args>
    void call_once(once_flag& flag, Callable&& func, Args&&... args);

}  // std

*/
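/*
    Illustrative usage sketch (not part of this header; the function and
    variable names below are hypothetical). It shows the scoped-locking types
    declared above as they would be used from enclave code:

        #include <mutex>

        std::mutex g_m;
        int g_counter = 0;

        void increment()
        {
            std::lock_guard<std::mutex> lk(g_m);      // locks in ctor, unlocks in dtor
            ++g_counter;
        }

        void increment_if_free()
        {
            std::unique_lock<std::mutex> lk(g_m, std::try_to_lock);
            if (lk.owns_lock())                       // lock acquired without blocking
                ++g_counter;
        }
*/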
#include <__config>
#include <__mutex_base>
#include <cstdint>
#include <functional>
#include <memory>
#include <tuple>
#include <__undef_min_max>

#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
#pragma GCC system_header
#endif

_LIBCPP_BEGIN_NAMESPACE_STD

class _LIBCPP_TYPE_VIS recursive_mutex
{
    sgx_thread_mutex_t __m_;

public:
    recursive_mutex();
    ~recursive_mutex();

    recursive_mutex(const recursive_mutex&) = delete;
    recursive_mutex& operator=(const recursive_mutex&) = delete;

public:
    void lock();
    bool try_lock() _NOEXCEPT;
    void unlock() _NOEXCEPT;

    typedef sgx_thread_mutex_t* native_handle_type;
    _LIBCPP_INLINE_VISIBILITY
    native_handle_type native_handle() { return &__m_; }
};

// We don't support timed_mutex and recursive_timed_mutex because they need time API

template <class _L0, class _L1>
int
try_lock(_L0& __l0, _L1& __l1)
{
    unique_lock<_L0> __u0(__l0, try_to_lock);
    if (__u0.owns_lock())
    {
        if (__l1.try_lock())
        {
            __u0.release();
            return -1;
        }
        else
            return 1;
    }
    return 0;
}

template <class _L0, class _L1, class _L2, class... _L3>
int
try_lock(_L0& __l0, _L1& __l1, _L2& __l2, _L3&... __l3)
{
    int __r = 0;
    unique_lock<_L0> __u0(__l0, try_to_lock);
    if (__u0.owns_lock())
    {
        __r = try_lock(__l1, __l2, __l3...);
        if (__r == -1)
            __u0.release();
        else
            ++__r;
    }
    return __r;
}

template <class _L0, class _L1>
void
lock(_L0& __l0, _L1& __l1)
{
    while (true)
    {
        {
            unique_lock<_L0> __u0(__l0);
            if (__l1.try_lock())
            {
                __u0.release();
                break;
            }
        }
#if 0 // We don't have sched_yield
        sched_yield();
#endif
        {
            unique_lock<_L1> __u1(__l1);
            if (__l0.try_lock())
            {
                __u1.release();
                break;
            }
        }
#if 0 // We don't have sched_yield
        sched_yield();
#endif
    }
}

template <class _L0, class _L1, class _L2, class ..._L3>
void
__lock_first(int __i, _L0& __l0, _L1& __l1, _L2& __l2, _L3& ...__l3)
{
    while (true)
    {
        switch (__i)
        {
        case 0:
            {
                unique_lock<_L0> __u0(__l0);
                __i = try_lock(__l1, __l2, __l3...);
                if (__i == -1)
                {
                    __u0.release();
                    return;
                }
            }
            ++__i;
#if 0 // We don't have sched_yield
            sched_yield();
#endif
            break;
        case 1:
            {
                unique_lock<_L1> __u1(__l1);
                __i = try_lock(__l2, __l3..., __l0);
                if (__i == -1)
                {
                    __u1.release();
                    return;
                }
            }
            if (__i == sizeof...(_L3) + 1)
                __i = 0;
            else
                __i += 2;
#if 0 // We don't have sched_yield
            sched_yield();
#endif
            break;
        default:
            __lock_first(__i - 2, __l2, __l3..., __l0, __l1);
            return;
        }
    }
}

template <class _L0, class _L1, class _L2, class ..._L3>
inline _LIBCPP_INLINE_VISIBILITY
void
lock(_L0& __l0, _L1& __l1, _L2& __l2, _L3& ...__l3)
{
    __lock_first(0, __l0, __l1, __l2, __l3...);
}

#if defined(_LIBCPP_SGX_HAS_CXX_ATOMIC)

struct _LIBCPP_TYPE_VIS_ONLY once_flag;

template<class _Callable, class... _Args>
_LIBCPP_INLINE_VISIBILITY
void call_once(once_flag&, _Callable&&, _Args&&...);

struct _LIBCPP_TYPE_VIS_ONLY once_flag
{
    _LIBCPP_INLINE_VISIBILITY
    _LIBCPP_CONSTEXPR
        once_flag() _NOEXCEPT : __state_(0) {}

private:
    once_flag(const once_flag&); // = delete;
    once_flag& operator=(const once_flag&); // = delete;

    unsigned long __state_;

    template<class _Callable, class... _Args>
    friend
    void call_once(once_flag&, _Callable&&, _Args&&...);
};

template <class _Fp>
class __call_once_param
{
    _Fp& __f_;
public:
    _LIBCPP_INLINE_VISIBILITY
    explicit __call_once_param(_Fp& __f) : __f_(__f) {}

    _LIBCPP_INLINE_VISIBILITY
    void operator()()
    {
        typedef typename __make_tuple_indices<tuple_size<_Fp>::value, 1>::type _Index;
        __execute(_Index());
    }

private:
    template <size_t ..._Indices>
    _LIBCPP_INLINE_VISIBILITY
    void __execute(__tuple_indices<_Indices...>)
    {
        __invoke(_VSTD::get<0>(_VSTD::move(__f_)), _VSTD::get<_Indices>(_VSTD::move(__f_))...);
    }
};

template <class _Fp>
void
__call_once_proxy(void* __vp)
{
    __call_once_param<_Fp>* __p = static_cast<__call_once_param<_Fp>*>(__vp);
    (*__p)();
}

void _LIBCPP_FUNC_VIS __call_once(volatile unsigned long&, void*, void(*)(void*));
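// call_once packs the callable and its arguments into a tuple, wraps that
// tuple in __call_once_param, and hands a type-erased pointer together with
// the matching __call_once_proxy<_Gp> instantiation to the out-of-line
// __call_once routine, which invokes it at most once per once_flag. The fast
// path below skips that call entirely once __state_ holds the "already
// executed" value (~0ul), as read with __libcpp_relaxed_load.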
template<class _Callable, class... _Args>
inline _LIBCPP_INLINE_VISIBILITY
void
call_once(once_flag& __flag, _Callable&& __func, _Args&&... __args)
{
    if (__libcpp_relaxed_load(&__flag.__state_) != ~0ul)
    {
        typedef tuple<_Callable&&, _Args&&...> _Gp;
        _Gp __f(_VSTD::forward<_Callable>(__func), _VSTD::forward<_Args>(__args)...);
        __call_once_param<_Gp> __p(__f);
        __call_once(__flag.__state_, &__p, &__call_once_proxy<_Gp>);
    }
}

#endif // defined(_LIBCPP_SGX_HAS_CXX_ATOMIC)

_LIBCPP_END_NAMESPACE_STD

#endif  // _LIBCPP_SGX_MUTEX
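/*
    Illustrative call_once sketch (not part of this header; names are
    hypothetical, and call_once is only provided when
    _LIBCPP_SGX_HAS_CXX_ATOMIC is defined):

        #include <mutex>

        std::once_flag g_init_flag;
        void init_tables();        // hypothetical initializer, runs at most once

        void use_tables()
        {
            std::call_once(g_init_flag, init_tables);
            // ... tables are initialized past this point ...
        }
*/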