allocators.cpp

/*
 *
 * Copyright (c) 1996,1997
 * Silicon Graphics Computer Systems, Inc.
 *
 * Copyright (c) 1997
 * Moscow Center for SPARC Technology
 *
 * Copyright (c) 1999
 * Boris Fomitchev
 *
 * This material is provided "as is", with absolutely no warranty expressed
 * or implied. Any use is at your own risk.
 *
 * Permission to use or copy this software for any purpose is hereby granted
 * without fee, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 */

#include "stlport_prefix.h"

#include <memory>

#if defined (__GNUC__) && (defined (__CYGWIN__) || defined (__MINGW32__))
#  include <malloc.h>
#endif

#if defined (_STLP_PTHREADS) && !defined (_STLP_NO_THREADS)
#  include <pthread_alloc>
#  include <cerrno>
#endif

#include <stl/_threads.h>

#include "lock_free_slist.h"

#if defined (__WATCOMC__)
#  pragma warning 13 9
#  pragma warning 367 9
#  pragma warning 368 9
#endif

#if defined (_STLP_SGI_THREADS)
// We test whether threads are in use before locking.
// Perhaps this should be moved into stl_threads.h, but that
// probably makes it harder to avoid the procedure call when
// it isn't needed.
extern "C" {
  extern int __us_rsthread_malloc;
}
#endif
// Specialised debug form of the new operator which does not report "false"
// memory leaks when run with debug CRT libraries.
#if defined (_STLP_MSVC) && (_STLP_MSVC >= 1020 && defined (_STLP_DEBUG_ALLOC)) && !defined (_STLP_WCE)
#  include <crtdbg.h>
inline char* __stlp_new_chunk(size_t __bytes) {
  void *__chunk = _STLP_CHECK_NULL_ALLOC(::operator new(__bytes, __FILE__, __LINE__));
  return __STATIC_CAST(char*, __chunk);
}
inline void __stlp_delete_chunck(void* __p) { ::operator delete(__p, __FILE__, __LINE__); }
#else
#  ifdef _STLP_NODE_ALLOC_USE_MALLOC
#    include <cstdlib>
inline char* __stlp_new_chunk(size_t __bytes) {
  // Do not use _STLP_CHECK_NULL_ALLOC; that macro is dedicated to the new operator.
  void *__chunk = _STLP_VENDOR_CSTD::malloc(__bytes);
  if (__chunk == 0) {
    _STLP_THROW_BAD_ALLOC;
  }
  return __STATIC_CAST(char*, __chunk);
}
inline void __stlp_delete_chunck(void* __p) { _STLP_VENDOR_CSTD::free(__p); }
#  else
inline char* __stlp_new_chunk(size_t __bytes)
{ return __STATIC_CAST(char*, _STLP_STD::__stl_new(__bytes)); }
inline void __stlp_delete_chunck(void* __p) { _STLP_STD::__stl_delete(__p); }
#  endif
#endif
/* This is an additional atomic operation to the ones already defined in
 * stl/_threads.h; platforms should try to support it to improve performance.
 * __add_atomic_t _STLP_ATOMIC_ADD(volatile __add_atomic_t* __target, __add_atomic_t __val) :
 * does *__target = *__target + __val and returns the old *__target value */
typedef long __add_atomic_t;
typedef unsigned long __uadd_atomic_t;

#if defined (__GNUC__) && defined (__i386__)
inline long _STLP_atomic_add_gcc_x86(long volatile* p, long addend) {
  long result;
  __asm__ __volatile__
    ("lock; xaddl %1, %0;"
     :"=m" (*p), "=r" (result)
     :"m"  (*p), "1"  (addend)
     :"cc");
  return result + addend;
}
#  define _STLP_ATOMIC_ADD(__dst, __val) _STLP_atomic_add_gcc_x86(__dst, __val)
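// Note: 'lock xadd' leaves the pre-add value of *p in the register, so
// 'result + addend' above is the value of *p after the addition. The only
// callers in this file either pass a 0 addend or ignore the return value,
// so the old-value/new-value distinction does not matter here.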
#elif defined (_STLP_WIN32THREADS)
// The Win32 API function InterlockedExchangeAdd is not available on Windows 95.
#  if !defined (_STLP_WIN95_LIKE)
#    if defined (_STLP_NEW_PLATFORM_SDK)
#      define _STLP_ATOMIC_ADD(__dst, __val) InterlockedExchangeAdd(__dst, __val)
#    else
#      define _STLP_ATOMIC_ADD(__dst, __val) InterlockedExchangeAdd(__CONST_CAST(__add_atomic_t*, __dst), __val)
#    endif
#  endif
#endif

#if defined (__OS400__)
// dums 02/05/2007: is it really necessary?
enum { _ALIGN = 16, _ALIGN_SHIFT = 4 };
#else
enum { _ALIGN = 2 * sizeof(void*), _ALIGN_SHIFT = 2 + sizeof(void*) / 4 };
#endif

#define _S_FREELIST_INDEX(__bytes) ((__bytes - size_t(1)) >> (int)_ALIGN_SHIFT)
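// Example of the mapping above: when sizeof(void*) == 4, _ALIGN is 8 and
// _ALIGN_SHIFT is 3, so requests of 1..8 bytes map to free list 0, 9..16
// bytes to free list 1, and so on; when sizeof(void*) == 8, the granularity
// is 16 bytes.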
_STLP_BEGIN_NAMESPACE

// malloc_alloc out-of-memory handling
static __oom_handler_type __oom_handler = __STATIC_CAST(__oom_handler_type, 0);

#ifdef _STLP_THREADS
_STLP_mutex __oom_handler_lock;
#endif

void* _STLP_CALL __malloc_alloc::allocate(size_t __n)
{
  void *__result = malloc(__n);
  if ( 0 == __result ) {
    __oom_handler_type __my_malloc_handler;

    for (;;) {
      {
#ifdef _STLP_THREADS
        _STLP_auto_lock _l( __oom_handler_lock );
#endif
        __my_malloc_handler = __oom_handler;
      }
      if ( 0 == __my_malloc_handler) {
        _STLP_THROW_BAD_ALLOC;
      }
      (*__my_malloc_handler)();
      __result = malloc(__n);
      if ( __result )
        return __result;
    }
  }
  return __result;
}

__oom_handler_type _STLP_CALL __malloc_alloc::set_malloc_handler(__oom_handler_type __f)
{
#ifdef _STLP_THREADS
  _STLP_auto_lock _l( __oom_handler_lock );
#endif
  __oom_handler_type __old = __oom_handler;
  __oom_handler = __f;
  return __old;
}
// *******************************************************
// Default node allocator.
// With a reasonable compiler, this should be roughly as fast as the
// original STL class-specific allocators, but with less fragmentation.
//
// Important implementation properties:
// 1. If the client requests an object of size > _MAX_BYTES, the resulting
//    object will be obtained directly from malloc.
// 2. In all other cases, we allocate an object of size exactly
//    _S_round_up(requested_size). Thus the client has enough size
//    information that we can return the object to the proper free list
//    without permanently losing part of the object.
//

#define _STLP_NFREELISTS 16

#if defined (_STLP_LEAKS_PEDANTIC) && defined (_STLP_USE_DYNAMIC_LIB)
/*
 * We can only do cleanup of the node allocator memory pool if we are
 * sure that the STLport library is used as a shared one, as this guarantees
 * that there is a single node allocator instance. Without that guarantee,
 * node allocator instances might exchange memory blocks, making the
 * implementation of a cleaning process much more complicated.
 */
#  define _STLP_DO_CLEAN_NODE_ALLOC
#endif

/* When STLport is used without multithreaded safety we use the node allocator
 * implementation with locks, as the locks become no-ops. The lock-free
 * implementation always uses system-specific atomic operations, which are
 * slower than 'normal' ones.
 */
#if defined (_STLP_THREADS) && \
    defined (_STLP_HAS_ATOMIC_FREELIST) && defined (_STLP_ATOMIC_ADD)
/*
 * We have an implementation of the atomic freelist (_STLP_atomic_freelist)
 * for this architecture and compiler. That means we can use the non-blocking
 * implementation of the node-allocation engine.
 */
#  define _STLP_USE_LOCK_FREE_IMPLEMENTATION
#endif
#if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
#  if defined (_STLP_THREADS)

class _Node_Alloc_Lock {
  static _STLP_STATIC_MUTEX& _S_Mutex() {
    static _STLP_STATIC_MUTEX mutex _STLP_MUTEX_INITIALIZER;
    return mutex;
  }
public:
  _Node_Alloc_Lock() {
#    if defined (_STLP_SGI_THREADS)
    if (__us_rsthread_malloc)
#    endif
      _S_Mutex()._M_acquire_lock();
  }

  ~_Node_Alloc_Lock() {
#    if defined (_STLP_SGI_THREADS)
    if (__us_rsthread_malloc)
#    endif
      _S_Mutex()._M_release_lock();
  }
};

#  else

class _Node_Alloc_Lock {
public:
  _Node_Alloc_Lock() { }
  ~_Node_Alloc_Lock() { }
};

#  endif

struct _Node_alloc_obj {
  _Node_alloc_obj * _M_next;
};
#endif
class __node_alloc_impl {
  static inline size_t _STLP_CALL _S_round_up(size_t __bytes)
  { return (((__bytes) + (size_t)_ALIGN - 1) & ~((size_t)_ALIGN - 1)); }

#if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
  typedef _STLP_atomic_freelist::item _Obj;
  typedef _STLP_atomic_freelist       _Freelist;
  typedef _STLP_atomic_freelist       _ChunkList;

  // Header of blocks of memory that have been allocated as part of
  // a larger chunk but have not yet been chopped up into nodes.
  struct _FreeBlockHeader : public _STLP_atomic_freelist::item {
    char* _M_end;     // pointer to end of free memory
  };
#else
  typedef _Node_alloc_obj      _Obj;
  typedef _Obj* _STLP_VOLATILE _Freelist;
  typedef _Obj*                _ChunkList;
#endif

private:
  // Returns an object of size __n, and optionally adds to the size-__n free list.
  static _Obj* _S_refill(size_t __n);
  // Allocates a chunk for nobjs of size __p_size. nobjs may be reduced
  // if it is inconvenient to allocate the requested number.
  static char* _S_chunk_alloc(size_t __p_size, int& __nobjs);
  // Chunk allocation state.
  static _Freelist _S_free_list[_STLP_NFREELISTS];
  // Amount of total allocated memory
#if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
  static _STLP_VOLATILE __add_atomic_t _S_heap_size;
#else
  static size_t _S_heap_size;
#endif

#if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
  // List of blocks of free memory
  static _STLP_atomic_freelist _S_free_mem_blocks;
#else
  // Start of the current free memory buffer
  static char* _S_start_free;
  // End of the current free memory buffer
  static char* _S_end_free;
#endif

#if defined (_STLP_DO_CLEAN_NODE_ALLOC)
public:
  // Methods to report alloc/dealloc calls to the counter system.
#  if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
  typedef _STLP_VOLATILE __stl_atomic_t _AllocCounter;
#  else
  typedef __stl_atomic_t _AllocCounter;
#  endif
  static _AllocCounter& _STLP_CALL _S_alloc_counter();
  static void _S_alloc_call();
  static void _S_dealloc_call();

private:
  // Free all the allocated chunks of memory
  static void _S_chunk_dealloc();
  // Beginning of the linked list of allocated chunks of memory
  static _ChunkList _S_chunks;
#endif /* _STLP_DO_CLEAN_NODE_ALLOC */

public:
  /* __n must be > 0 */
  static void* _M_allocate(size_t& __n);
  /* __p may not be 0 */
  static void _M_deallocate(void *__p, size_t __n);
};
#if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
void* __node_alloc_impl::_M_allocate(size_t& __n) {
  __n = _S_round_up(__n);
  _Obj * _STLP_VOLATILE * __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
  _Obj *__r;

  // Acquire the lock here with a constructor call.
  // This ensures that it is released on exit or during stack
  // unwinding.
  _Node_Alloc_Lock __lock_instance;

  if ( (__r = *__my_free_list) != 0 ) {
    *__my_free_list = __r->_M_next;
  } else {
    __r = _S_refill(__n);
  }
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  _S_alloc_call();
#  endif
  // lock is released here
  return __r;
}

void __node_alloc_impl::_M_deallocate(void *__p, size_t __n) {
  _Obj * _STLP_VOLATILE * __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
  _Obj * __pobj = __STATIC_CAST(_Obj*, __p);

  // acquire lock
  _Node_Alloc_Lock __lock_instance;
  __pobj->_M_next = *__my_free_list;
  *__my_free_list = __pobj;
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  _S_dealloc_call();
#  endif
  // lock is released here
}

#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
#    define _STLP_OFFSET sizeof(_Obj)
#  else
#    define _STLP_OFFSET 0
#  endif
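// When chunk tracking is enabled, _STLP_OFFSET bytes are reserved at the
// start of every chunk returned by __stlp_new_chunk: they hold the _M_next
// link that threads the chunk onto _S_chunks so that _S_chunk_dealloc can
// free everything on exit. Otherwise no space is reserved.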
/* We allocate memory in large chunks in order to avoid fragmenting */
/* the malloc heap too much. */
/* We assume that size is properly aligned. */
/* We hold the allocation lock. */
char* __node_alloc_impl::_S_chunk_alloc(size_t _p_size, int& __nobjs) {
  char* __result;
  size_t __total_bytes = _p_size * __nobjs;
  size_t __bytes_left = _S_end_free - _S_start_free;

  if (__bytes_left > 0) {
    if (__bytes_left >= __total_bytes) {
      __result = _S_start_free;
      _S_start_free += __total_bytes;
      return __result;
    }

    if (__bytes_left >= _p_size) {
      __nobjs = (int)(__bytes_left / _p_size);
      __total_bytes = _p_size * __nobjs;
      __result = _S_start_free;
      _S_start_free += __total_bytes;
      return __result;
    }

    // Try to make use of the left-over piece.
    _Obj* _STLP_VOLATILE* __my_free_list = _S_free_list + _S_FREELIST_INDEX(__bytes_left);
    __REINTERPRET_CAST(_Obj*, _S_start_free)->_M_next = *__my_free_list;
    *__my_free_list = __REINTERPRET_CAST(_Obj*, _S_start_free);
    _S_start_free = _S_end_free = 0;
  }
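  // Grab new memory: ask for twice what is immediately needed plus an amount
  // that grows with the total heap usage so far (_S_heap_size is maintained
  // in units of 16 bytes, see the '>> 4' below), so chunk sizes increase as
  // the program allocates more.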
  size_t __bytes_to_get = 2 * __total_bytes + _S_round_up(_S_heap_size) + _STLP_OFFSET;
  _STLP_TRY {
    _S_start_free = __stlp_new_chunk(__bytes_to_get);
  }
#if defined (_STLP_USE_EXCEPTIONS)
  catch (const _STLP_STD::bad_alloc&) {
    _Obj* _STLP_VOLATILE* __my_free_list;
    _Obj* __p;
    // Try to make do with what we have. That can't hurt.
    // We do not try smaller requests, since that tends
    // to result in disaster on multi-process machines.
    for (size_t __i = _p_size; __i <= (size_t)_MAX_BYTES; __i += (size_t)_ALIGN) {
      __my_free_list = _S_free_list + _S_FREELIST_INDEX(__i);
      __p = *__my_free_list;
      if (0 != __p) {
        *__my_free_list = __p -> _M_next;
        _S_start_free = __REINTERPRET_CAST(char*, __p);
        _S_end_free = _S_start_free + __i;
        return _S_chunk_alloc(_p_size, __nobjs);
        // Any leftover piece will eventually make it to the
        // right free list.
      }
    }
    __bytes_to_get = __total_bytes + _STLP_OFFSET;
    _S_start_free = __stlp_new_chunk(__bytes_to_get);
  }
#endif

  _S_heap_size += __bytes_to_get >> 4;
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  __REINTERPRET_CAST(_Obj*, _S_start_free)->_M_next = _S_chunks;
  _S_chunks = __REINTERPRET_CAST(_Obj*, _S_start_free);
#  endif
  _S_end_free = _S_start_free + __bytes_to_get;
  _S_start_free += _STLP_OFFSET;
  return _S_chunk_alloc(_p_size, __nobjs);
}
/* Returns an object of size __n, and optionally adds to the size-__n free list. */
/* We assume that __n is properly aligned. */
/* We hold the allocation lock. */
_Node_alloc_obj* __node_alloc_impl::_S_refill(size_t __n) {
  int __nobjs = 20;
  char* __chunk = _S_chunk_alloc(__n, __nobjs);

  if (1 == __nobjs) return __REINTERPRET_CAST(_Obj*, __chunk);

  _Obj* _STLP_VOLATILE* __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
  _Obj* __result;
  _Obj* __current_obj;
  _Obj* __next_obj;

  /* Build free list in chunk */
  __result = __REINTERPRET_CAST(_Obj*, __chunk);
  *__my_free_list = __next_obj = __REINTERPRET_CAST(_Obj*, __chunk + __n);
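  // The first decrement below accounts for the object handed back to the
  // caller; the loop then links the remaining nodes together, and the last
  // node is terminated after the loop.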
  for (--__nobjs; --__nobjs; ) {
    __current_obj = __next_obj;
    __next_obj = __REINTERPRET_CAST(_Obj*, __REINTERPRET_CAST(char*, __next_obj) + __n);
    __current_obj->_M_next = __next_obj;
  }
  __next_obj->_M_next = 0;
  return __result;
}

#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
void __node_alloc_impl::_S_alloc_call()
{ ++_S_alloc_counter(); }

void __node_alloc_impl::_S_dealloc_call() {
  __stl_atomic_t &counter = _S_alloc_counter();
  if (--counter == 0)
  { _S_chunk_dealloc(); }
}

/* We deallocate all the memory chunks */
void __node_alloc_impl::_S_chunk_dealloc() {
  _Obj *__pcur = _S_chunks, *__pnext;
  while (__pcur != 0) {
    __pnext = __pcur->_M_next;
    __stlp_delete_chunck(__pcur);
    __pcur = __pnext;
  }
  _S_chunks = 0;
  _S_start_free = _S_end_free = 0;
  _S_heap_size = 0;
  memset(__REINTERPRET_CAST(char*, __CONST_CAST(_Obj**, &_S_free_list[0])), 0,
         _STLP_NFREELISTS * sizeof(_Obj*));
}
#  endif
#else

void* __node_alloc_impl::_M_allocate(size_t& __n) {
  __n = _S_round_up(__n);
  _Obj* __r = _S_free_list[_S_FREELIST_INDEX(__n)].pop();
  if (__r == 0)
  { __r = _S_refill(__n); }

#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  _S_alloc_call();
#  endif
  return __r;
}

void __node_alloc_impl::_M_deallocate(void *__p, size_t __n) {
  _S_free_list[_S_FREELIST_INDEX(__n)].push(__STATIC_CAST(_Obj*, __p));

#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  _S_dealloc_call();
#  endif
}

/* Returns an object of size __n, and optionally adds more objects of size __n to the freelist. */
/* We assume that __n is properly aligned. */
__node_alloc_impl::_Obj* __node_alloc_impl::_S_refill(size_t __n) {
  int __nobjs = 20;
  char* __chunk = _S_chunk_alloc(__n, __nobjs);

  if (__nobjs <= 1)
    return __REINTERPRET_CAST(_Obj*, __chunk);

  // Push all new nodes (minus first one) onto freelist
  _Obj* __result   = __REINTERPRET_CAST(_Obj*, __chunk);
  _Obj* __cur_item = __result;
  _Freelist* __my_freelist = _S_free_list + _S_FREELIST_INDEX(__n);
  for (--__nobjs; __nobjs != 0; --__nobjs) {
    __cur_item = __REINTERPRET_CAST(_Obj*, __REINTERPRET_CAST(char*, __cur_item) + __n);
    __my_freelist->push(__cur_item);
  }
  return __result;
}

#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
#    define _STLP_OFFSET _ALIGN
#  else
#    define _STLP_OFFSET 0
#  endif
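// In the lock-free build, _STLP_OFFSET reserves the first _ALIGN bytes of
// every chunk for the atomic link that puts the chunk on _S_chunks (see
// _S_chunk_alloc below); keeping the reservation a multiple of _ALIGN
// preserves the alignment of the memory handed out after it.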
/* We allocate memory in large chunks in order to avoid fragmenting */
/* the malloc heap too much. */
/* We assume that size is properly aligned. */
char* __node_alloc_impl::_S_chunk_alloc(size_t _p_size, int& __nobjs) {
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  // We are going to add a small memory block to keep all the allocated blocks'
  // addresses; we need to do so while respecting the memory alignment. The
  // following static assert checks that the reserved block is big enough to
  // store a pointer.
  _STLP_STATIC_ASSERT(sizeof(_Obj) <= _ALIGN)
#  endif
  char* __result = 0;
  __add_atomic_t __total_bytes = __STATIC_CAST(__add_atomic_t, _p_size) * __nobjs;

  _FreeBlockHeader* __block = __STATIC_CAST(_FreeBlockHeader*, _S_free_mem_blocks.pop());
  if (__block != 0) {
    // We checked a block out and can now mess with it with impunity.
    // We'll put the remainder back into the list if we're done with it below.
    char* __buf_start = __REINTERPRET_CAST(char*, __block);
    __add_atomic_t __bytes_left = __block->_M_end - __buf_start;

    if ((__bytes_left < __total_bytes) && (__bytes_left >= __STATIC_CAST(__add_atomic_t, _p_size))) {
      // There's enough left for at least one object, but not as much as we wanted
      __result      = __buf_start;
      __nobjs       = (int)(__bytes_left / _p_size);
      __total_bytes = __STATIC_CAST(__add_atomic_t, _p_size) * __nobjs;
      __bytes_left -= __total_bytes;
      __buf_start  += __total_bytes;
    }
    else if (__bytes_left >= __total_bytes) {
      // The block has enough left to satisfy all that was asked for
      __result      = __buf_start;
      __bytes_left -= __total_bytes;
      __buf_start  += __total_bytes;
    }

    if (__bytes_left != 0) {
      // There is still some memory left over in block after we satisfied our request.
      if ((__result != 0) && (__bytes_left >= (__add_atomic_t)sizeof(_FreeBlockHeader))) {
        // We were able to allocate at least one object and there is still enough
        // left to put remainder back into list.
        _FreeBlockHeader* __newblock = __REINTERPRET_CAST(_FreeBlockHeader*, __buf_start);
        __newblock->_M_end = __block->_M_end;
        _S_free_mem_blocks.push(__newblock);
      }
      else {
        // We were not able to allocate enough for at least one object.
        // Shove into freelist of nearest (rounded-down!) size.
        size_t __rounded_down = _S_round_up(__bytes_left + 1) - (size_t)_ALIGN;
        if (__rounded_down > 0)
          _S_free_list[_S_FREELIST_INDEX(__rounded_down)].push((_Obj*)__buf_start);
      }
    }
    if (__result != 0)
      return __result;
  }
  // We couldn't satisfy it from the list of free blocks, get new memory.
  __add_atomic_t __bytes_to_get = 2 * __total_bytes +
                                  __STATIC_CAST(__add_atomic_t,
                                                _S_round_up(__STATIC_CAST(__uadd_atomic_t, _STLP_ATOMIC_ADD(&_S_heap_size, 0)))) +
                                  _STLP_OFFSET;
  _STLP_TRY {
    __result = __stlp_new_chunk(__bytes_to_get);
  }
#if defined (_STLP_USE_EXCEPTIONS)
  catch (const bad_alloc&) {
    // Allocation failed; try to cannibalize from the freelist of a larger object size.
    for (size_t __i = _p_size; __i <= (size_t)_MAX_BYTES; __i += (size_t)_ALIGN) {
      _Obj* __p = _S_free_list[_S_FREELIST_INDEX(__i)].pop();
      if (0 != __p) {
        if (__i < sizeof(_FreeBlockHeader)) {
          // Not enough to put into list of free blocks, divvy it up here.
          // Use as much as possible for this request and shove remainder into freelist.
          __nobjs = (int)(__i / _p_size);
          __total_bytes = __nobjs * __STATIC_CAST(__add_atomic_t, _p_size);
          size_t __bytes_left = __i - __total_bytes;
          size_t __rounded_down = _S_round_up(__bytes_left + 1) - (size_t)_ALIGN;
          if (__rounded_down > 0) {
            _S_free_list[_S_FREELIST_INDEX(__rounded_down)].push(__REINTERPRET_CAST(_Obj*, __REINTERPRET_CAST(char*, __p) + __total_bytes));
          }
          return __REINTERPRET_CAST(char*, __p);
        }
        else {
          // Add node to list of available blocks and recursively allocate from it.
          _FreeBlockHeader* __newblock = (_FreeBlockHeader*)__p;
          __newblock->_M_end = __REINTERPRET_CAST(char*, __p) + __i;
          _S_free_mem_blocks.push(__newblock);
          return _S_chunk_alloc(_p_size, __nobjs);
        }
      }
    }

    // We were not able to find something in a freelist, try to allocate a smaller amount.
    __bytes_to_get = __total_bytes + _STLP_OFFSET;
    __result = __stlp_new_chunk(__bytes_to_get);

    // This should either throw an exception or remedy the situation.
    // Thus we assume it succeeded.
  }
#endif

  // Alignment check
  _STLP_VERBOSE_ASSERT(((__REINTERPRET_CAST(size_t, __result) & __STATIC_CAST(size_t, _ALIGN - 1)) == 0),
                       _StlMsg_DBA_DELETED_TWICE)
  _STLP_ATOMIC_ADD(&_S_heap_size, __bytes_to_get >> 4);

#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  // We have to track the allocated memory chunks for release on exit.
  _S_chunks.push(__REINTERPRET_CAST(_Obj*, __result));
  __result       += _ALIGN;
  __bytes_to_get -= _ALIGN;
#  endif

  if (__bytes_to_get > __total_bytes) {
    // Push excess memory allocated in this chunk into list of free memory blocks
    _FreeBlockHeader* __freeblock = __REINTERPRET_CAST(_FreeBlockHeader*, __result + __total_bytes);
    __freeblock->_M_end = __result + __bytes_to_get;
    _S_free_mem_blocks.push(__freeblock);
  }
  return __result;
}
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
void __node_alloc_impl::_S_alloc_call()
{ _STLP_ATOMIC_INCREMENT(&_S_alloc_counter()); }

void __node_alloc_impl::_S_dealloc_call() {
  _STLP_VOLATILE __stl_atomic_t *pcounter = &_S_alloc_counter();
  if (_STLP_ATOMIC_DECREMENT(pcounter) == 0)
    _S_chunk_dealloc();
}

/* We deallocate all the memory chunks */
void __node_alloc_impl::_S_chunk_dealloc() {
  // Note: The _Node_alloc_helper class ensures that this function
  // will only be called when the (shared) library is unloaded or the
  // process is shut down. It's thus not possible that another thread
  // is currently trying to allocate a node (we're not thread-safe here).
  //
  // Clear the free blocks and all freelists. This makes sure that we do not
  // keep references to deallocated memory if, for some reason, more memory
  // is allocated again during shutdown.
  _S_free_mem_blocks.clear();
  _S_heap_size = 0;

  for (size_t __i = 0; __i < _STLP_NFREELISTS; ++__i) {
    _S_free_list[__i].clear();
  }

  // Detach list of chunks and free them all
  _Obj* __chunk = _S_chunks.clear();
  while (__chunk != 0) {
    _Obj* __next = __chunk->_M_next;
    __stlp_delete_chunck(__chunk);
    __chunk = __next;
  }
}
#  endif

#endif
#if defined (_STLP_DO_CLEAN_NODE_ALLOC)
struct __node_alloc_cleaner {
  ~__node_alloc_cleaner()
  { __node_alloc_impl::_S_dealloc_call(); }
};

#  if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
_STLP_VOLATILE __stl_atomic_t& _STLP_CALL
#  else
__stl_atomic_t& _STLP_CALL
#  endif
__node_alloc_impl::_S_alloc_counter() {
  static _AllocCounter _S_counter = 1;
  static __node_alloc_cleaner _S_node_alloc_cleaner;
  return _S_counter;
}
#endif

#if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
_Node_alloc_obj * _STLP_VOLATILE
__node_alloc_impl::_S_free_list[_STLP_NFREELISTS]
= {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
// The 16 zeros are necessary to make version 4.1 of the SunPro
// compiler happy. Otherwise it appears to allocate too little
// space for the array.
#else
_STLP_atomic_freelist __node_alloc_impl::_S_free_list[_STLP_NFREELISTS];
_STLP_atomic_freelist __node_alloc_impl::_S_free_mem_blocks;
#endif

#if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
char *__node_alloc_impl::_S_start_free = 0;
char *__node_alloc_impl::_S_end_free = 0;
#endif

#if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
_STLP_VOLATILE __add_atomic_t
#else
size_t
#endif
__node_alloc_impl::_S_heap_size = 0;

#if defined (_STLP_DO_CLEAN_NODE_ALLOC)
#  if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
_STLP_atomic_freelist __node_alloc_impl::_S_chunks;
#  else
_Node_alloc_obj* __node_alloc_impl::_S_chunks = 0;
#  endif
#endif

void * _STLP_CALL __node_alloc::_M_allocate(size_t& __n)
{ return __node_alloc_impl::_M_allocate(__n); }

void _STLP_CALL __node_alloc::_M_deallocate(void *__p, size_t __n)
{ __node_alloc_impl::_M_deallocate(__p, __n); }
#if defined (_STLP_PTHREADS) && !defined (_STLP_NO_THREADS)

#  define _STLP_DATA_ALIGNMENT 8

_STLP_MOVE_TO_PRIV_NAMESPACE

// *******************************************************
// __perthread_alloc implementation
union _Pthread_alloc_obj {
  union _Pthread_alloc_obj * __free_list_link;
  char __client_data[_STLP_DATA_ALIGNMENT];    /* The client sees this. */
};
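// The union overlays the free-list link with the client data, so a free node
// needs no extra storage; it also guarantees that every node is at least
// _STLP_DATA_ALIGNMENT bytes wide.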
// Pthread allocators don't appear to the client to have meaningful
// instances. We do in fact need to associate some state with each
// thread. That state is represented by _Pthread_alloc_per_thread_state.

struct _Pthread_alloc_per_thread_state {
  typedef _Pthread_alloc_obj __obj;
  enum { _S_NFREELISTS = _MAX_BYTES / _STLP_DATA_ALIGNMENT };

  // Free list link for list of available per thread structures.
  // When one of these becomes available for reuse due to thread
  // termination, any objects in its free list remain associated
  // with it. The whole structure may then be used by a newly
  // created thread.
  _Pthread_alloc_per_thread_state() : __next(0)
  { memset((void *)__CONST_CAST(_Pthread_alloc_obj**, __free_list), 0, (size_t)_S_NFREELISTS * sizeof(__obj *)); }
  // Returns an object of size __n, and possibly adds to the size-__n free list.
  void *_M_refill(size_t __n);

  _Pthread_alloc_obj* volatile __free_list[_S_NFREELISTS];
  _Pthread_alloc_per_thread_state *__next;
  // this data member is only to be used by per_thread_allocator, which returns memory to the originating thread.
  _STLP_mutex _M_lock;
};

// Pthread-specific allocator.
class _Pthread_alloc_impl {
public: // but only for internal use:
  typedef _Pthread_alloc_per_thread_state __state_type;
  typedef char value_type;

  // Allocates a chunk for nobjs of size __size. nobjs may be reduced
  // if it is inconvenient to allocate the requested number.
  static char *_S_chunk_alloc(size_t __size, size_t &__nobjs, __state_type*);

  enum {_S_ALIGN = _STLP_DATA_ALIGNMENT};

  static size_t _S_round_up(size_t __bytes)
  { return (((__bytes) + (int)_S_ALIGN - 1) & ~((int)_S_ALIGN - 1)); }
  static size_t _S_freelist_index(size_t __bytes)
  { return (((__bytes) + (int)_S_ALIGN - 1) / (int)_S_ALIGN - 1); }
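  // With _S_ALIGN == 8, _S_round_up(1..8) == 8 and _S_freelist_index(1..8) == 0,
  // _S_round_up(9..16) == 16 and _S_freelist_index(9..16) == 1, and so on.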
private:
  // Chunk allocation state. And other shared state.
  // Protected by _S_chunk_allocator_lock.
  static _STLP_STATIC_MUTEX _S_chunk_allocator_lock;
  static char *_S_start_free;
  static char *_S_end_free;
  static size_t _S_heap_size;
  // Allocator instances that are currently unclaimed by any thread.
  static __state_type *_S_free_per_thread_states;
  // Pthread key under which per thread state is stored.
  static pthread_key_t _S_key;
  static bool _S_key_initialized;
  // Function to be called on thread exit to reclaim per thread
  // state.
  static void _S_destructor(void *instance);
  static __state_type *_S_new_per_thread_state();

public:
  // Return a recycled or new per thread state; ensures that the current
  // thread has an associated per thread state.
  static __state_type *_S_get_per_thread_state();

private:
  class _M_lock;
  friend class _M_lock;
  class _M_lock {
  public:
    _M_lock () { _S_chunk_allocator_lock._M_acquire_lock(); }
    ~_M_lock () { _S_chunk_allocator_lock._M_release_lock(); }
  };

public:
  /* n must be > 0 */
  static void * allocate(size_t& __n);

  /* p may not be 0 */
  static void deallocate(void *__p, size_t __n);

  // boris : versions for per_thread_allocator
  /* n must be > 0 */
  static void * allocate(size_t& __n, __state_type* __a);

  /* p may not be 0 */
  static void deallocate(void *__p, size_t __n, __state_type* __a);

  static void * reallocate(void *__p, size_t __old_sz, size_t& __new_sz);
};
/* Returns an object of size n, and optionally adds to the size-n free list. */
/* We assume that n is properly aligned. */
/* We hold the allocation lock. */
void *_Pthread_alloc_per_thread_state::_M_refill(size_t __n) {
  typedef _Pthread_alloc_obj __obj;
  size_t __nobjs = 128;
  char * __chunk = _Pthread_alloc_impl::_S_chunk_alloc(__n, __nobjs, this);
  __obj * volatile * __my_free_list;
  __obj * __result;
  __obj * __current_obj, * __next_obj;
  size_t __i;

  if (1 == __nobjs) {
    return __chunk;
  }

  __my_free_list = __free_list + _Pthread_alloc_impl::_S_freelist_index(__n);

  /* Build free list in chunk */
  __result = (__obj *)__chunk;
  *__my_free_list = __next_obj = (__obj *)(__chunk + __n);
  for (__i = 1; ; ++__i) {
    __current_obj = __next_obj;
    __next_obj = (__obj *)((char *)__next_obj + __n);
    if (__nobjs - 1 == __i) {
      __current_obj -> __free_list_link = 0;
      break;
    } else {
      __current_obj -> __free_list_link = __next_obj;
    }
  }
  return __result;
}
void _Pthread_alloc_impl::_S_destructor(void *__instance) {
  _M_lock __lock_instance;  // Need to acquire lock here.
  _Pthread_alloc_per_thread_state* __s = (_Pthread_alloc_per_thread_state*)__instance;
  __s -> __next = _S_free_per_thread_states;
  _S_free_per_thread_states = __s;
}

_Pthread_alloc_per_thread_state* _Pthread_alloc_impl::_S_new_per_thread_state() {
  /* lock already held here. */
  if (0 != _S_free_per_thread_states) {
    _Pthread_alloc_per_thread_state *__result = _S_free_per_thread_states;
    _S_free_per_thread_states = _S_free_per_thread_states -> __next;
    return __result;
  }
  else {
    return new _Pthread_alloc_per_thread_state;
  }
}

_Pthread_alloc_per_thread_state* _Pthread_alloc_impl::_S_get_per_thread_state() {
  int __ret_code;
  __state_type* __result;

  if (_S_key_initialized && (__result = (__state_type*) pthread_getspecific(_S_key)))
    return __result;

  /*REFERENCED*/
  _M_lock __lock_instance;  // Need to acquire lock here.
  if (!_S_key_initialized) {
    if (pthread_key_create(&_S_key, _S_destructor)) {
      _STLP_THROW_BAD_ALLOC;  // failed
    }
    _S_key_initialized = true;
  }

  __result = _S_new_per_thread_state();
  __ret_code = pthread_setspecific(_S_key, __result);
  if (__ret_code) {
    if (__ret_code == ENOMEM) {
      _STLP_THROW_BAD_ALLOC;
    } else {
      // EINVAL
      _STLP_ABORT();
    }
  }
  return __result;
}
/* We allocate memory in large chunks in order to avoid fragmenting */
/* the malloc heap too much. */
/* We assume that size is properly aligned. */
char *_Pthread_alloc_impl::_S_chunk_alloc(size_t __p_size, size_t &__nobjs, _Pthread_alloc_per_thread_state *__a) {
  typedef _Pthread_alloc_obj __obj;
  {
    char * __result;
    size_t __total_bytes;
    size_t __bytes_left;
    /*REFERENCED*/
    _M_lock __lock_instance;  // Acquire lock for this routine

    __total_bytes = __p_size * __nobjs;
    __bytes_left = _S_end_free - _S_start_free;
    if (__bytes_left >= __total_bytes) {
      __result = _S_start_free;
      _S_start_free += __total_bytes;
      return __result;
    } else if (__bytes_left >= __p_size) {
      __nobjs = __bytes_left / __p_size;
      __total_bytes = __p_size * __nobjs;
      __result = _S_start_free;
      _S_start_free += __total_bytes;
      return __result;
    } else {
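      // Same growth heuristic as the node allocator above: request twice the
      // immediate need plus an amount that grows with total heap usage.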
      size_t __bytes_to_get = 2 * __total_bytes + _S_round_up(_S_heap_size);
      // Try to make use of the left-over piece.
      if (__bytes_left > 0) {
        __obj * volatile * __my_free_list = __a->__free_list + _S_freelist_index(__bytes_left);
        ((__obj *)_S_start_free) -> __free_list_link = *__my_free_list;
        *__my_free_list = (__obj *)_S_start_free;
      }
#  ifdef _SGI_SOURCE
      // Try to get memory that's aligned on something like a
      // cache line boundary, so as to avoid parceling out
      // parts of the same line to different threads and thus
      // possibly different processors.
      {
        const int __cache_line_size = 128;  // probable upper bound
        __bytes_to_get &= ~(__cache_line_size - 1);
        _S_start_free = (char *)memalign(__cache_line_size, __bytes_to_get);
        if (0 == _S_start_free) {
          _S_start_free = (char *)__malloc_alloc::allocate(__bytes_to_get);
        }
      }
#  else  /* !SGI_SOURCE */
      _S_start_free = (char *)__malloc_alloc::allocate(__bytes_to_get);
#  endif
      _S_heap_size += __bytes_to_get >> 4;
      _S_end_free = _S_start_free + __bytes_to_get;
    }
  }
  // lock is released here
  return _S_chunk_alloc(__p_size, __nobjs, __a);
}
/* n must be > 0 */
void *_Pthread_alloc_impl::allocate(size_t& __n) {
  typedef _Pthread_alloc_obj __obj;
  __obj * volatile * __my_free_list;
  __obj * __result;
  __state_type* __a;

  if (__n > _MAX_BYTES) {
    return __malloc_alloc::allocate(__n);
  }

  __n = _S_round_up(__n);
  __a = _S_get_per_thread_state();

  __my_free_list = __a->__free_list + _S_freelist_index(__n);
  __result = *__my_free_list;
  if (__result == 0) {
    void *__r = __a->_M_refill(__n);
    return __r;
  }
  *__my_free_list = __result->__free_list_link;
  return __result;
}

/* p may not be 0 */
void _Pthread_alloc_impl::deallocate(void *__p, size_t __n) {
  typedef _Pthread_alloc_obj __obj;
  __obj *__q = (__obj *)__p;
  __obj * volatile * __my_free_list;
  __state_type* __a;

  if (__n > _MAX_BYTES) {
    __malloc_alloc::deallocate(__p, __n);
    return;
  }

  __a = _S_get_per_thread_state();

  __my_free_list = __a->__free_list + _S_freelist_index(__n);
  __q -> __free_list_link = *__my_free_list;
  *__my_free_list = __q;
}
// boris : versions for per_thread_allocator
/* n must be > 0 */
void *_Pthread_alloc_impl::allocate(size_t& __n, __state_type* __a) {
  typedef _Pthread_alloc_obj __obj;
  __obj * volatile * __my_free_list;
  __obj * __result;

  if (__n > _MAX_BYTES) {
    return __malloc_alloc::allocate(__n);
  }
  __n = _S_round_up(__n);

  // boris : here we have to lock the per thread state, as we may be getting
  // memory from a different thread's pool.
  _STLP_auto_lock __lock(__a->_M_lock);

  __my_free_list = __a->__free_list + _S_freelist_index(__n);
  __result = *__my_free_list;
  if (__result == 0) {
    void *__r = __a->_M_refill(__n);
    return __r;
  }
  *__my_free_list = __result->__free_list_link;
  return __result;
}

/* p may not be 0 */
void _Pthread_alloc_impl::deallocate(void *__p, size_t __n, __state_type* __a) {
  typedef _Pthread_alloc_obj __obj;
  __obj *__q = (__obj *)__p;
  __obj * volatile * __my_free_list;

  if (__n > _MAX_BYTES) {
    __malloc_alloc::deallocate(__p, __n);
    return;
  }

  // boris : here we have to lock the per thread state, as we may be returning
  // memory to a different thread's pool.
  _STLP_auto_lock __lock(__a->_M_lock);

  __my_free_list = __a->__free_list + _S_freelist_index(__n);
  __q -> __free_list_link = *__my_free_list;
  *__my_free_list = __q;
}
void *_Pthread_alloc_impl::reallocate(void *__p, size_t __old_sz, size_t& __new_sz) {
  void * __result;
  size_t __copy_sz;

  if (__old_sz > _MAX_BYTES && __new_sz > _MAX_BYTES) {
    return realloc(__p, __new_sz);
  }

  if (_S_round_up(__old_sz) == _S_round_up(__new_sz)) return __p;
  __result = allocate(__new_sz);
  __copy_sz = __new_sz > __old_sz ? __old_sz : __new_sz;
  memcpy(__result, __p, __copy_sz);
  deallocate(__p, __old_sz);
  return __result;
}

_Pthread_alloc_per_thread_state* _Pthread_alloc_impl::_S_free_per_thread_states = 0;
pthread_key_t _Pthread_alloc_impl::_S_key = 0;
_STLP_STATIC_MUTEX _Pthread_alloc_impl::_S_chunk_allocator_lock _STLP_MUTEX_INITIALIZER;
bool _Pthread_alloc_impl::_S_key_initialized = false;
char *_Pthread_alloc_impl::_S_start_free = 0;
char *_Pthread_alloc_impl::_S_end_free = 0;
size_t _Pthread_alloc_impl::_S_heap_size = 0;

void * _STLP_CALL _Pthread_alloc::allocate(size_t& __n)
{ return _Pthread_alloc_impl::allocate(__n); }

void _STLP_CALL _Pthread_alloc::deallocate(void *__p, size_t __n)
{ _Pthread_alloc_impl::deallocate(__p, __n); }

void * _STLP_CALL _Pthread_alloc::allocate(size_t& __n, __state_type* __a)
{ return _Pthread_alloc_impl::allocate(__n, __a); }

void _STLP_CALL _Pthread_alloc::deallocate(void *__p, size_t __n, __state_type* __a)
{ _Pthread_alloc_impl::deallocate(__p, __n, __a); }

void * _STLP_CALL _Pthread_alloc::reallocate(void *__p, size_t __old_sz, size_t& __new_sz)
{ return _Pthread_alloc_impl::reallocate(__p, __old_sz, __new_sz); }

_Pthread_alloc_per_thread_state* _STLP_CALL _Pthread_alloc::_S_get_per_thread_state()
{ return _Pthread_alloc_impl::_S_get_per_thread_state(); }

_STLP_MOVE_TO_STD_NAMESPACE

#endif

_STLP_END_NAMESPACE

#undef _S_FREELIST_INDEX