/* stl/_threads.h -- STLport internal threads header (listing artifacts removed) */
  1. /*
  2. * Copyright (c) 1997-1999
  3. * Silicon Graphics Computer Systems, Inc.
  4. *
  5. * Copyright (c) 1999
  6. * Boris Fomitchev
  7. *
  8. * This material is provided "as is", with absolutely no warranty expressed
  9. * or implied. Any use is at your own risk.
  10. *
  11. * Permission to use or copy this software for any purpose is hereby granted
  12. * without fee, provided the above notices are retained on all copies.
  13. * Permission to modify the code and to distribute modified code is granted,
  14. * provided the above notices are retained, and a notice that the code was
  15. * modified is included with the above copyright notice.
  16. *
  17. */
  18. // WARNING: This is an internal header file, included by other C++
  19. // standard library headers. You should not attempt to use this header
  20. // file directly.
  21. #ifndef _STLP_INTERNAL_THREADS_H
  22. #define _STLP_INTERNAL_THREADS_H
  23. // Supported threading models are native SGI, pthreads, uithreads
  24. // (similar to pthreads, but based on an earlier draft of the Posix
  25. // threads standard), and Win32 threads. Uithread support by Jochen
  26. // Schlick, 1999, and Solaris threads generalized to them.
  27. #ifndef _STLP_INTERNAL_CSTDDEF
  28. # include <stl/_cstddef.h>
  29. #endif
  30. #ifndef _STLP_INTERNAL_CSTDLIB
  31. # include <stl/_cstdlib.h>
  32. #endif
  33. // On SUN and Mac OS X gcc, zero-initialization works just fine...
  34. #if defined (__sun) || (defined (__GNUC__) && defined(__APPLE__))
  35. # define _STLP_MUTEX_INITIALIZER
  36. #endif
  37. /* This header defines the following atomic operation that platform should
  38. * try to support as much as possible. Atomic operation are exposed as macro
  39. * in order to easily test for their existance. They are:
  40. * __stl_atomic_t _STLP_ATOMIC_INCREMENT(volatile __stl_atomic_t* __ptr) :
  41. * increment *__ptr by 1 and returns the new value
  42. * __stl_atomic_t _STLP_ATOMIC_DECREMENT(volatile __stl_atomic_t* __ptr) :
  43. * decrement *__ptr by 1 and returns the new value
  44. * __stl_atomic_t _STLP_ATOMIC_EXCHANGE(volatile __stl_atomic_t* __target, __stl_atomic_t __val) :
  45. * assign __val to *__target and returns former *__target value
  46. * void* _STLP_ATOMIC_EXCHANGE_PTR(void* volatile* __target, void* __ptr) :
  47. * assign __ptr to *__target and returns former *__target value
  48. */
  49. #if defined (_STLP_THREADS)
  50. # if defined (_STLP_SGI_THREADS)
  51. # include <mutex.h>
  52. // Hack for SGI o32 compilers.
  53. # if !defined(__add_and_fetch) && \
  54. (__mips < 3 || !(defined (_ABIN32) || defined(_ABI64)))
  55. # define __add_and_fetch(__l,__v) add_then_test((unsigned long*)__l,__v)
  56. # define __test_and_set(__l,__v) test_and_set(__l,__v)
  57. # endif /* o32 */
  58. # if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64))
  59. # define _STLP_ATOMIC_EXCHANGE(__p, __q) test_and_set(__p, __q)
  60. # else
  61. # define _STLP_ATOMIC_EXCHANGE(__p, __q) __test_and_set((unsigned long*)__p, (unsigned long)__q)
  62. # endif
  63. # define _STLP_ATOMIC_INCREMENT(__x) __add_and_fetch(__x, 1)
  64. # define _STLP_ATOMIC_DECREMENT(__x) __add_and_fetch(__x, (size_t) -1)
  65. typedef long __stl_atomic_t;
  66. # elif defined (_STLP_PTHREADS)
  67. # include <pthread.h>
  68. # if !defined (_STLP_USE_PTHREAD_SPINLOCK)
  69. # if defined (PTHREAD_MUTEX_INITIALIZER) && !defined (_STLP_MUTEX_INITIALIZER) && defined (_REENTRANT)
  70. # define _STLP_MUTEX_INITIALIZER = { PTHREAD_MUTEX_INITIALIZER }
  71. # endif
  72. //HPUX variants have (on some platforms optional) non-standard "DCE" pthreads impl
  73. # if defined (_DECTHREADS_) && (defined (_PTHREAD_USE_D4) || defined (__hpux)) && !defined (_CMA_SUPPRESS_EXTERNALS_)
  74. # define _STLP_PTHREAD_ATTR_DEFAULT pthread_mutexattr_default
  75. # else
  76. # define _STLP_PTHREAD_ATTR_DEFAULT 0
  77. # endif
  78. # else
  79. # if defined (__OpenBSD__)
  80. # include <sgx_spinlock.h>
  81. # endif
  82. # endif
  83. # if defined (__GNUC__) && defined (__i386__)
  84. # if !defined (_STLP_ATOMIC_INCREMENT)
// Atomic increment for GCC on x86, used when the pthreads configuration
// did not already supply _STLP_ATOMIC_INCREMENT (see enclosing guard).
// "lock; xadd" atomically adds 1 to *p and leaves the PRE-increment value
// in `result`, so the new value is result + 1.
inline long _STLP_atomic_increment_gcc_x86(long volatile* p) {
  long result;
  __asm__ __volatile__
    ("lock; xaddl %1, %0;"
     :"=m" (*p), "=r" (result)  // result receives the old *p from xadd
     :"m" (*p), "1" (1)
     :"cc");                    // xadd clobbers the condition codes
  return result + 1;            // return the incremented (new) value
}
  94. # define _STLP_ATOMIC_INCREMENT(__x) (_STLP_atomic_increment_gcc_x86((long volatile*)__x))
  95. # endif
  96. # if !defined (_STLP_ATOMIC_DECREMENT)
// Atomic decrement for GCC on x86, counterpart of the increment above.
// "lock; xadd" atomically adds -1 to *p and leaves the PRE-decrement value
// in `result`, so the new value is result - 1.
inline long _STLP_atomic_decrement_gcc_x86(long volatile* p) {
  long result;
  __asm__ __volatile__
    ("lock; xaddl %1, %0;"
     :"=m" (*p), "=r" (result)  // result receives the old *p from xadd
     :"m" (*p), "1" (-1)
     :"cc");                    // xadd clobbers the condition codes
  return result - 1;            // return the decremented (new) value
}
  106. # define _STLP_ATOMIC_DECREMENT(__x) (_STLP_atomic_decrement_gcc_x86((long volatile*)__x))
  107. # endif
  108. typedef long __stl_atomic_t;
  109. # else
  110. typedef size_t __stl_atomic_t;
  111. # endif /* if defined(__GNUC__) && defined(__i386__) */
  112. # elif defined (_STLP_WIN32THREADS)
  113. # if !defined (_STLP_ATOMIC_INCREMENT)
  114. # if !defined (_STLP_NEW_PLATFORM_SDK)
  115. # define _STLP_ATOMIC_INCREMENT(__x) InterlockedIncrement(__CONST_CAST(long*, __x))
  116. # define _STLP_ATOMIC_DECREMENT(__x) InterlockedDecrement(__CONST_CAST(long*, __x))
  117. # define _STLP_ATOMIC_EXCHANGE(__x, __y) InterlockedExchange(__CONST_CAST(long*, __x), __y)
  118. # else
  119. # define _STLP_ATOMIC_INCREMENT(__x) InterlockedIncrement(__x)
  120. # define _STLP_ATOMIC_DECREMENT(__x) InterlockedDecrement(__x)
  121. # define _STLP_ATOMIC_EXCHANGE(__x, __y) InterlockedExchange(__x, __y)
  122. # endif
  123. # define _STLP_ATOMIC_EXCHANGE_PTR(__x, __y) STLPInterlockedExchangePointer(__x, __y)
  124. # endif
  125. typedef long __stl_atomic_t;
  126. # elif defined (__DECC) || defined (__DECCXX)
  127. # include <machine/builtins.h>
  128. # define _STLP_ATOMIC_EXCHANGE __ATOMIC_EXCH_LONG
  129. # define _STLP_ATOMIC_INCREMENT(__x) __ATOMIC_ADD_LONG(__x, 1)
  130. # define _STLP_ATOMIC_DECREMENT(__x) __ATOMIC_ADD_LONG(__x, -1)
  131. typedef long __stl_atomic_t;
  132. # elif defined (_STLP_SPARC_SOLARIS_THREADS)
  133. typedef long __stl_atomic_t;
  134. # include <stl/_sparc_atomic.h>
  135. # elif defined (_STLP_UITHREADS)
  136. // this inclusion is potential hazard to bring up all sorts
  137. // of old-style headers. Let's assume vendor already know how
  138. // to deal with that.
  139. # ifndef _STLP_INTERNAL_CTIME
  140. # include <stl/_ctime.h>
  141. # endif
  142. # if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)
  143. using _STLP_VENDOR_CSTD::time_t;
  144. # endif
  145. # include <synch.h>
  146. # ifndef _STLP_INTERNAL_CSTDIO
  147. # include <stl/_cstdio.h>
  148. # endif
  149. # ifndef _STLP_INTERNAL_CWCHAR
  150. # include <stl/_cwchar.h>
  151. # endif
  152. typedef size_t __stl_atomic_t;
  153. # elif defined (_STLP_BETHREADS)
  154. # include <OS.h>
  155. # include <cassert>
  156. # include <stdio.h>
  157. # define _STLP_MUTEX_INITIALIZER = { 0 }
  158. typedef size_t __stl_atomic_t;
  159. # elif defined (_STLP_NWTHREADS)
  160. # include <nwthread.h>
  161. # include <nwsemaph.h>
  162. typedef size_t __stl_atomic_t;
  163. # elif defined(_STLP_OS2THREADS)
  164. # if defined (__GNUC__)
  165. # define INCL_DOSSEMAPHORES
  166. # include <os2.h>
  167. # else
  168. // This section serves to replace os2.h for VisualAge C++
  169. typedef unsigned long ULONG;
  170. # if !defined (__HEV__) /* INCL_SEMAPHORE may also define HEV */
  171. # define __HEV__
  172. typedef ULONG HEV;
  173. typedef HEV* PHEV;
  174. # endif
  175. typedef ULONG APIRET;
  176. typedef ULONG HMTX;
  177. typedef HMTX* PHMTX;
  178. typedef const char* PCSZ;
  179. typedef ULONG BOOL32;
  180. APIRET _System DosCreateMutexSem(PCSZ pszName, PHEV phev, ULONG flAttr, BOOL32 fState);
  181. APIRET _System DosRequestMutexSem(HMTX hmtx, ULONG ulTimeout);
  182. APIRET _System DosReleaseMutexSem(HMTX hmtx);
  183. APIRET _System DosCloseMutexSem(HMTX hmtx);
  184. # define _STLP_MUTEX_INITIALIZER = { 0 }
  185. # endif /* GNUC */
  186. typedef size_t __stl_atomic_t;
  187. # else
  188. typedef size_t __stl_atomic_t;
  189. # endif
  190. #else
  191. /* no threads */
  192. # define _STLP_ATOMIC_INCREMENT(__x) ++(*__x)
  193. # define _STLP_ATOMIC_DECREMENT(__x) --(*__x)
  194. /* We do not grant other atomic operations as they are useless if STLport do not have
  195. * to be thread safe
  196. */
  197. typedef size_t __stl_atomic_t;
  198. #endif
  199. #if !defined (_STLP_MUTEX_INITIALIZER)
  200. # if defined(_STLP_ATOMIC_EXCHANGE)
  201. # define _STLP_MUTEX_INITIALIZER = { 0 }
  202. # elif defined(_STLP_UITHREADS)
  203. # define _STLP_MUTEX_INITIALIZER = { DEFAULTMUTEX }
  204. # else
  205. # define _STLP_MUTEX_INITIALIZER
  206. # endif
  207. #endif
  208. _STLP_BEGIN_NAMESPACE
  209. #if defined (_STLP_THREADS) && !defined (_STLP_USE_PTHREAD_SPINLOCK)
  210. // Helper struct. This is a workaround for various compilers that don't
  211. // handle static variables in inline functions properly.
// Spin-lock helper with tunable backoff.  Declaration only: the member
// definitions live in stl/_threads.c (included at the bottom of this
// header unless _STLP_LINK_TIME_INSTANTIATION is set).
template <int __inst>
struct _STLP_mutex_spin {
  // Spin-count ceilings for the backoff loop.
  enum { __low_max = 30, __high_max = 1000 };
  // Low if we suspect uniprocessor, high for multiprocessor.
  static unsigned __max;
  static unsigned __last;
  // Acquire *__lock, spinning/sleeping until it becomes available.
  static void _STLP_CALL _M_do_lock(volatile __stl_atomic_t* __lock);
  // Sleep for roughly 2^__log_nsec nanoseconds as part of the backoff.
  // NOTE(review): exact semantics are in _threads.c — confirm there.
  static void _STLP_CALL _S_nsec_sleep(int __log_nsec, unsigned int& __iteration);
};
  221. #endif // !_STLP_USE_PTHREAD_SPINLOCK
  222. // Locking class. Note that this class *does not have a constructor*.
  223. // It must be initialized either statically, with _STLP_MUTEX_INITIALIZER,
  224. // or dynamically, by explicitly calling the _M_initialize member function.
  225. // (This is similar to the ways that a pthreads mutex can be initialized.)
  226. // There are explicit member functions for acquiring and releasing the lock.
  227. // There is no constructor because static initialization is essential for
  228. // some uses, and only a class aggregate (see section 8.5.1 of the C++
  229. // standard) can be initialized that way. That means we must have no
  230. // constructors, no base classes, no virtual functions, and no private or
  231. // protected members.
  232. // For non-static cases, clients should use _STLP_mutex.
// Plain-aggregate mutex.  Exactly one of the platform branches below is
// compiled in, selected by the threading-model macros; every branch
// provides the same four operations: _M_initialize, _M_destroy,
// _M_acquire_lock and _M_release_lock.
struct _STLP_CLASS_DECLSPEC _STLP_mutex_base {
#if defined (_STLP_ATOMIC_EXCHANGE) || defined (_STLP_SGI_THREADS)
  // It should be relatively easy to get this to work on any modern Unix.
  volatile __stl_atomic_t _M_lock;
#endif
#if defined (_STLP_THREADS)
#  if defined (_STLP_ATOMIC_EXCHANGE)
  // Spin-lock built on the platform's atomic exchange.
  inline void _M_initialize() { _M_lock = 0; }
  inline void _M_destroy() {}
  void _M_acquire_lock() {
    _STLP_mutex_spin<0>::_M_do_lock(&_M_lock);
  }
  inline void _M_release_lock() {
    volatile __stl_atomic_t* __lock = &_M_lock;
#    if defined(_STLP_SGI_THREADS) && defined(__GNUC__) && __mips >= 3
    // Drain pending writes before publishing the unlocked state.
    asm("sync");
    *__lock = 0;
#    elif defined(_STLP_SGI_THREADS) && __mips >= 3 && \
         (defined (_ABIN32) || defined(_ABI64))
    __lock_release(__lock);
#    elif defined (_STLP_SPARC_SOLARIS_THREADS)
#      if defined (__WORD64) || defined (__arch64__) || defined (__sparcv9) || defined (__sparcv8plus)
    asm("membar #StoreStore ; membar #LoadStore");
#      else
    asm(" stbar ");
#      endif
    *__lock = 0;
#    else
    *__lock = 0;
    // This is not sufficient on many multiprocessors, since
    // writes to protected variables and the lock may be reordered.
#    endif
  }
#  elif defined (_STLP_PTHREADS)
#    if defined (_STLP_USE_PTHREAD_SPINLOCK)
#      if !defined (__OpenBSD__)
  pthread_spinlock_t _M_lock;
  inline void _M_initialize() { pthread_spin_init( &_M_lock, 0 ); }
  inline void _M_destroy() { pthread_spin_destroy( &_M_lock ); }
  // sorry, but no static initializer for pthread_spinlock_t;
  // this will not work for compilers that has problems with call
  // constructor of static object...
  // _STLP_mutex_base()
  //   { pthread_spin_init( &_M_lock, 0 ); }
  // ~_STLP_mutex_base()
  //   { pthread_spin_destroy( &_M_lock ); }
  inline void _M_acquire_lock() { pthread_spin_lock( &_M_lock ); }
  inline void _M_release_lock() { pthread_spin_unlock( &_M_lock ); }
#      else // __OpenBSD__
  // NOTE(review): this variant uses sgx_spinlock_t from <sgx_spinlock.h>
  // (included above) rather than the BSD <spinlock.h>; looks like an
  // SGX-specific port — confirm against the build configuration.
  sgx_spinlock_t _M_lock;
  inline void _M_initialize() { _SPINLOCK_INIT( &_M_lock ); }
  inline void _M_destroy() { }
  inline void _M_acquire_lock() { _SPINLOCK( &_M_lock ); }
  inline void _M_release_lock() { _SPINUNLOCK( &_M_lock ); }
#      endif // __OpenBSD__
#    else // !_STLP_USE_PTHREAD_SPINLOCK
  pthread_mutex_t _M_lock;
  inline void _M_initialize()
  { pthread_mutex_init(&_M_lock,_STLP_PTHREAD_ATTR_DEFAULT); }
  inline void _M_destroy()
  { pthread_mutex_destroy(&_M_lock); }
  inline void _M_acquire_lock() {
#      if defined ( __hpux ) && ! defined (PTHREAD_MUTEX_INITIALIZER)
    // HP-UX DCE threads lack a static initializer; a zeroed mutex is
    // detected here and initialized lazily on first acquisition.
    if (!_M_lock.field1) _M_initialize();
#      endif
    pthread_mutex_lock(&_M_lock);
  }
  inline void _M_release_lock() { pthread_mutex_unlock(&_M_lock); }
#    endif // !_STLP_USE_PTHREAD_SPINLOCK
#  elif defined (_STLP_UITHREADS)
  mutex_t _M_lock;
  inline void _M_initialize()
  { mutex_init(&_M_lock, 0, NULL); }
  inline void _M_destroy()
  { mutex_destroy(&_M_lock); }
  inline void _M_acquire_lock() { mutex_lock(&_M_lock); }
  inline void _M_release_lock() { mutex_unlock(&_M_lock); }
#  elif defined (_STLP_OS2THREADS)
  HMTX _M_lock;
  inline void _M_initialize() { DosCreateMutexSem(NULL, &_M_lock, 0, false); }
  inline void _M_destroy() { DosCloseMutexSem(_M_lock); }
  inline void _M_acquire_lock() {
    // Statically initialized instances start with a 0 handle:
    // create the semaphore lazily on first acquisition.
    if (!_M_lock) _M_initialize();
    DosRequestMutexSem(_M_lock, SEM_INDEFINITE_WAIT);
  }
  inline void _M_release_lock() { DosReleaseMutexSem(_M_lock); }
#  elif defined (_STLP_BETHREADS)
  sem_id sem;
  inline void _M_initialize() {
    sem = create_sem(1, "STLPort");
    assert(sem > 0);
  }
  inline void _M_destroy() {
    int t = delete_sem(sem);
    assert(t == B_NO_ERROR);
  }
  // Defined out of line near the end of this header; it needs the
  // _STLP_beos_static_lock_data helper declared later.
  inline void _M_acquire_lock();
  inline void _M_release_lock() {
    status_t t = release_sem(sem);
    assert(t == B_NO_ERROR);
  }
#  elif defined (_STLP_NWTHREADS)
  LONG _M_lock;
  inline void _M_initialize()
  { _M_lock = OpenLocalSemaphore(1); }
  inline void _M_destroy()
  { CloseLocalSemaphore(_M_lock); }
  inline void _M_acquire_lock()
  { WaitOnLocalSemaphore(_M_lock); }
  inline void _M_release_lock() { SignalLocalSemaphore(_M_lock); }
#  else //*ty 11/24/2001 - added configuration check
#    error "Unknown thread facility configuration"
#  endif
#else /* No threads */
  inline void _M_initialize() {}
  inline void _M_destroy() {}
  inline void _M_acquire_lock() {}
  inline void _M_release_lock() {}
#endif // _STLP_THREADS
};
  353. // Locking class. The constructor initializes the lock, the destructor destroys it.
  354. // Well - behaving class, does not need static initializer
  355. class _STLP_CLASS_DECLSPEC _STLP_mutex : public _STLP_mutex_base {
  356. public:
  357. inline _STLP_mutex () { _M_initialize(); }
  358. inline ~_STLP_mutex () { _M_destroy(); }
  359. private:
  360. _STLP_mutex(const _STLP_mutex&);
  361. void operator=(const _STLP_mutex&);
  362. };
  363. // A locking class that uses _STLP_STATIC_MUTEX. The constructor takes
  364. // a reference to an _STLP_STATIC_MUTEX, and acquires a lock. The destructor
  365. // releases the lock.
  366. // It's not clear that this is exactly the right functionality.
  367. // It will probably change in the future.
  368. struct _STLP_CLASS_DECLSPEC _STLP_auto_lock {
  369. _STLP_auto_lock(_STLP_STATIC_MUTEX& __lock) : _M_lock(__lock)
  370. { _M_lock._M_acquire_lock(); }
  371. ~_STLP_auto_lock()
  372. { _M_lock._M_release_lock(); }
  373. private:
  374. _STLP_STATIC_MUTEX& _M_lock;
  375. void operator=(const _STLP_auto_lock&);
  376. _STLP_auto_lock(const _STLP_auto_lock&);
  377. };
  378. /*
  379. * Class _Refcount_Base provides a type, __stl_atomic_t, a data member,
  380. * _M_ref_count, and member functions _M_incr and _M_decr, which perform
  381. * atomic preincrement/predecrement. The constructor initializes
  382. * _M_ref_count.
  383. */
// Thread-safe reference counter.  _M_incr/_M_decr perform atomic
// pre-increment/pre-decrement of _M_ref_count and return the new value.
// When no atomic increment/decrement primitive exists (or on Win95-like
// targets), a per-object mutex guards the updates instead.
class _STLP_CLASS_DECLSPEC _Refcount_Base {
  // The data member _M_ref_count
#if defined (__DMC__)
public:
#endif
  _STLP_VOLATILE __stl_atomic_t _M_ref_count;
#if defined (_STLP_THREADS) && \
    (!defined (_STLP_ATOMIC_INCREMENT) || !defined (_STLP_ATOMIC_DECREMENT) || \
      defined (_STLP_WIN95_LIKE))
#  define _STLP_USE_MUTEX
  // Fallback lock used when native atomic inc/dec is unavailable.
  _STLP_mutex _M_mutex;
#endif
public:
  // Constructor: start the count at __n.
  _Refcount_Base(__stl_atomic_t __n) : _M_ref_count(__n) {}
#if defined (__BORLANDC__)
  ~_Refcount_Base(){};
#endif
  // _M_incr and _M_decr
#if defined (_STLP_THREADS)
#  if !defined (_STLP_USE_MUTEX)
  __stl_atomic_t _M_incr() { return _STLP_ATOMIC_INCREMENT(&_M_ref_count); }
  __stl_atomic_t _M_decr() { return _STLP_ATOMIC_DECREMENT(&_M_ref_count); }
#  else
  // _STLP_USE_MUTEX is a purely local marker; undefine it immediately.
#    undef _STLP_USE_MUTEX
  __stl_atomic_t _M_incr() {
    _STLP_auto_lock l(_M_mutex);
    return ++_M_ref_count;
  }
  __stl_atomic_t _M_decr() {
    _STLP_auto_lock l(_M_mutex);
    return --_M_ref_count;
  }
#  endif
#else /* No threads */
  __stl_atomic_t _M_incr() { return ++_M_ref_count; }
  __stl_atomic_t _M_decr() { return --_M_ref_count; }
#endif
};
  423. /* Atomic swap on __stl_atomic_t
  424. * This is guaranteed to behave as though it were atomic only if all
  425. * possibly concurrent updates use _Atomic_swap.
  426. * In some cases the operation is emulated with a lock.
  427. * Idem for _Atomic_swap_ptr
  428. */
  429. /* Helper struct to handle following cases:
  430. * - on platforms where sizeof(__stl_atomic_t) == sizeof(void*) atomic
  431. * exchange can be done on pointers
  432. * - on platform without atomic operation swap is done in a critical section,
  433. * portable but inefficient.
  434. */
// Primary template, instantiated with 1 when sizeof(__stl_atomic_t) ==
// sizeof(void*): the integer and pointer swaps can then share the
// platform exchange primitives.  Falls back to a global-mutex-guarded
// swap when no native exchange exists.
template <int __use_ptr_atomic_swap>
class _Atomic_swap_struct {
public:
#if defined (_STLP_THREADS) && \
    !defined (_STLP_ATOMIC_EXCHANGE) && \
    (defined (_STLP_PTHREADS) || defined (_STLP_UITHREADS) || defined (_STLP_OS2THREADS) || \
     defined (_STLP_USE_PTHREAD_SPINLOCK) || defined (_STLP_NWTHREADS))
#  define _STLP_USE_ATOMIC_SWAP_MUTEX
  // Global lock serializing all emulated swaps (defined in _threads.c).
  static _STLP_STATIC_MUTEX _S_swap_lock;
#endif

  // Atomically exchange *__p with __q; returns the former *__p.
  static __stl_atomic_t _S_swap(_STLP_VOLATILE __stl_atomic_t* __p, __stl_atomic_t __q) {
#if defined (_STLP_THREADS)
#  if defined (_STLP_ATOMIC_EXCHANGE)
    return _STLP_ATOMIC_EXCHANGE(__p, __q);
#  elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
    _S_swap_lock._M_acquire_lock();
    __stl_atomic_t __result = *__p;
    *__p = __q;
    _S_swap_lock._M_release_lock();
    return __result;
#  else
#    error Missing atomic swap implementation
#  endif
#else
    /* no threads */
    __stl_atomic_t __result = *__p;
    *__p = __q;
    return __result;
#endif // _STLP_THREADS
  }

  // Atomically exchange the pointer *__p with __q; returns the former *__p.
  static void* _S_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) {
#if defined (_STLP_THREADS)
#  if defined (_STLP_ATOMIC_EXCHANGE_PTR)
    return _STLP_ATOMIC_EXCHANGE_PTR(__p, __q);
#  elif defined (_STLP_ATOMIC_EXCHANGE)
    // Reuse the integer exchange; valid in this instantiation because
    // __stl_atomic_t is pointer-sized (checked below at compile time).
    _STLP_STATIC_ASSERT(sizeof(__stl_atomic_t) == sizeof(void*))
    return __REINTERPRET_CAST(void*, _STLP_ATOMIC_EXCHANGE(__REINTERPRET_CAST(volatile __stl_atomic_t*, __p),
                                                           __REINTERPRET_CAST(__stl_atomic_t, __q))
                              );
#  elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
    _S_swap_lock._M_acquire_lock();
    void *__result = *__p;
    *__p = __q;
    _S_swap_lock._M_release_lock();
    return __result;
#  else
#    error Missing pointer atomic swap implementation
#  endif
#else
    /* no thread */
    void *__result = *__p;
    *__p = __q;
    return __result;
#endif
  }
};
// Specialization used when sizeof(__stl_atomic_t) != sizeof(void*):
// the integer swap cannot be reused for pointers, so the pointer swap
// must come from a dedicated primitive or the mutex-guarded fallback.
_STLP_TEMPLATE_NULL
class _Atomic_swap_struct<0> {
public:
#if defined (_STLP_THREADS) && \
    (!defined (_STLP_ATOMIC_EXCHANGE) || !defined (_STLP_ATOMIC_EXCHANGE_PTR)) && \
    (defined (_STLP_PTHREADS) || defined (_STLP_UITHREADS) || defined (_STLP_OS2THREADS) || \
     defined (_STLP_USE_PTHREAD_SPINLOCK) || defined (_STLP_NWTHREADS))
#  define _STLP_USE_ATOMIC_SWAP_MUTEX
  // Global lock serializing all emulated swaps (defined in _threads.c).
  static _STLP_STATIC_MUTEX _S_swap_lock;
#endif

  // Atomically exchange *__p with __q; returns the former *__p.
  static __stl_atomic_t _S_swap(_STLP_VOLATILE __stl_atomic_t* __p, __stl_atomic_t __q) {
#if defined (_STLP_THREADS)
#  if defined (_STLP_ATOMIC_EXCHANGE)
    return _STLP_ATOMIC_EXCHANGE(__p, __q);
#  elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
    /* This should be portable, but performance is expected
     * to be quite awful. This really needs platform specific
     * code.
     */
    _S_swap_lock._M_acquire_lock();
    __stl_atomic_t __result = *__p;
    *__p = __q;
    _S_swap_lock._M_release_lock();
    return __result;
#  else
#    error Missing atomic swap implementation
#  endif
#else
    /* no threads */
    __stl_atomic_t __result = *__p;
    *__p = __q;
    return __result;
#endif // _STLP_THREADS
  }

  // Atomically exchange the pointer *__p with __q; returns the former *__p.
  static void* _S_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) {
#if defined (_STLP_THREADS)
#  if defined (_STLP_ATOMIC_EXCHANGE_PTR)
    return _STLP_ATOMIC_EXCHANGE_PTR(__p, __q);
#  elif defined (_STLP_ATOMIC_EXCHANGE)
    // NOTE(review): in this <0> specialization the sizes differ by
    // construction, so this branch can only static-assert if ever
    // instantiated — it acts as a compile-time trap for a configuration
    // with _STLP_ATOMIC_EXCHANGE but no pointer exchange.  Confirm intent.
    _STLP_STATIC_ASSERT(sizeof(__stl_atomic_t) == sizeof(void*))
    return __REINTERPRET_CAST(void*, _STLP_ATOMIC_EXCHANGE(__REINTERPRET_CAST(volatile __stl_atomic_t*, __p),
                                                           __REINTERPRET_CAST(__stl_atomic_t, __q))
                              );
#  elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
    _S_swap_lock._M_acquire_lock();
    void *__result = *__p;
    *__p = __q;
    _S_swap_lock._M_release_lock();
    return __result;
#  else
#    error Missing pointer atomic swap implementation
#  endif
#else
    /* no thread */
    void *__result = *__p;
    *__p = __q;
    return __result;
#endif
  }
};
  551. #if defined (_STLP_MSVC) && (_STLP_MSVC == 1300)
  552. # pragma warning (push)
  553. # pragma warning (disable : 4189) //__use_ptr_atomic_swap initialized but not used
  554. #endif
  555. inline __stl_atomic_t _STLP_CALL _Atomic_swap(_STLP_VOLATILE __stl_atomic_t * __p, __stl_atomic_t __q) {
  556. const int __use_ptr_atomic_swap = sizeof(__stl_atomic_t) == sizeof(void*);
  557. return _Atomic_swap_struct<__use_ptr_atomic_swap>::_S_swap(__p, __q);
  558. }
  559. inline void* _STLP_CALL _Atomic_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) {
  560. const int __use_ptr_atomic_swap = sizeof(__stl_atomic_t) == sizeof(void*);
  561. return _Atomic_swap_struct<__use_ptr_atomic_swap>::_S_swap_ptr(__p, __q);
  562. }
  563. #if defined (_STLP_MSVC) && (_STLP_MSVC == 1300)
  564. # pragma warning (pop)
  565. #endif
  566. #if defined (_STLP_BETHREADS)
// BeOS support: a global mutex (created as a static object) used to make
// the lazy semaphore creation in _M_acquire_lock race-free once static
// initialization has run.
template <int __inst>
struct _STLP_beos_static_lock_data {
  // True exactly while `mut` below is alive (set/cleared by its ctor/dtor).
  static bool is_init;
  struct mutex_t : public _STLP_mutex {
    mutex_t()
    { _STLP_beos_static_lock_data<0>::is_init = true; }
    ~mutex_t()
    { _STLP_beos_static_lock_data<0>::is_init = false; }
  };
  static mutex_t mut;
};

template <int __inst>
bool _STLP_beos_static_lock_data<__inst>::is_init = false;
template <int __inst>
typename _STLP_beos_static_lock_data<__inst>::mutex_t _STLP_beos_static_lock_data<__inst>::mut;

// Out-of-line definition declared inside _STLP_mutex_base above.
// Lazily creates the semaphore on first acquisition, then waits on it.
inline void _STLP_mutex_base::_M_acquire_lock() {
  if (sem == 0) {
    // we need to initialise on demand here
    // to prevent race conditions use our global
    // mutex if it's available:
    if (_STLP_beos_static_lock_data<0>::is_init) {
      // Double-checked: re-test sem under the global lock.
      _STLP_auto_lock al(_STLP_beos_static_lock_data<0>::mut);
      if (sem == 0) _M_initialize();
    }
    else {
      // no lock available, we must still be
      // in startup code, THERE MUST BE ONE THREAD
      // ONLY active at this point.
      _M_initialize();
    }
  }
  status_t t;
  t = acquire_sem(sem);
  assert(t == B_NO_ERROR);
}
  602. #endif
  603. _STLP_END_NAMESPACE
  604. #if !defined (_STLP_LINK_TIME_INSTANTIATION)
  605. # include <stl/_threads.c>
  606. #endif
  607. #endif /* _STLP_INTERNAL_THREADS_H */
  608. // Local Variables:
  609. // mode:C++
  610. // End: