atomicops-internals-macosx.h
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
// Implementation of atomic operations for Mac OS X.  This file should not
// be included directly.  Clients should instead include
// "base/atomicops.h".

#ifndef BASE_ATOMICOPS_INTERNALS_MACOSX_H_
#define BASE_ATOMICOPS_INTERNALS_MACOSX_H_

typedef int32_t Atomic32;

// MacOS uses long for intptr_t, so AtomicWord and Atomic32 are always
// different types on the Mac, even when they are the same size.  Similarly,
// on __ppc64__, AtomicWord and Atomic64 are always different.  Thus, we
// need explicit casting.
#ifdef __LP64__
#define AtomicWordCastType base::subtle::Atomic64
#else
#define AtomicWordCastType Atomic32
#endif
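
// A minimal sketch of how "base/atomicops.h" can use AtomicWordCastType to
// forward AtomicWord operations to the typed implementations below.  The
// wrapper shown here is an illustration of the intended casting pattern,
// not code from this header:
//
//   inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
//                                              AtomicWord old_value,
//                                              AtomicWord new_value) {
//     return NoBarrier_CompareAndSwap(
//         reinterpret_cast<volatile AtomicWordCastType*>(ptr),
//         static_cast<AtomicWordCastType>(old_value),
//         static_cast<AtomicWordCastType>(new_value));
//   }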
#if defined(__LP64__) || defined(__i386__)
#define BASE_HAS_ATOMIC64 1  // Use only in tests and base/atomic*
#endif

#include <libkern/OSAtomic.h>

namespace base {
namespace subtle {
#if !defined(__LP64__) && defined(__ppc__)

// The Mac 64-bit OSAtomic implementations are not available for 32-bit
// PowerPC, and the underlying assembly instructions exist on only some
// implementations of PowerPC.

// The following inline functions will fail at compile (assembly) time,
// with the bogus instruction string below as the error message, ONLY IF
// they are called: an inline function that is never called is never
// emitted.  So it is safe to use this header as long as user code only
// calls AtomicWord and Atomic32 operations.
//
// NOTE(vchen): Implementation notes to implement the atomic ops below may
// be found in "PowerPC Virtual Environment Architecture, Book II,
// Version 2.02", January 28, 2005, Appendix B, page 46.  Unfortunately,
// extra care must be taken to ensure data are properly 8-byte aligned, and
// that data are returned correctly according to Mac OS X ABI specs.

inline int64_t OSAtomicCompareAndSwap64(
    int64_t oldValue, int64_t newValue, int64_t *theValue) {
  __asm__ __volatile__(
      "_OSAtomicCompareAndSwap64_not_supported_for_32_bit_ppc\n\t");
  return 0;
}

inline int64_t OSAtomicAdd64(int64_t theAmount, int64_t *theValue) {
  __asm__ __volatile__(
      "_OSAtomicAdd64_not_supported_for_32_bit_ppc\n\t");
  return 0;
}

inline int64_t OSAtomicCompareAndSwap64Barrier(
    int64_t oldValue, int64_t newValue, int64_t *theValue) {
  int64_t prev = OSAtomicCompareAndSwap64(oldValue, newValue, theValue);
  OSMemoryBarrier();
  return prev;
}

inline int64_t OSAtomicAdd64Barrier(
    int64_t theAmount, int64_t *theValue) {
  int64_t new_val = OSAtomicAdd64(theAmount, theValue);
  OSMemoryBarrier();
  return new_val;
}
#endif
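
// Illustration (hypothetical caller, not part of this header): on 32-bit
// PowerPC the call below would cause the stub to be emitted, and the
// assembler would then reject the bogus "instruction" string; without the
// call, the unused inline stub is never emitted and the build succeeds.
//
//   int64_t counter64 = 0;
//   OSAtomicAdd64(1, &counter64);  // assembler error on 32-bit ppc:
//       // _OSAtomicAdd64_not_supported_for_32_bit_ppc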
typedef int64_t Atomic64;

inline void MemoryBarrier() {
  OSMemoryBarrier();
}

// 32-bit Versions.

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32(old_value, new_value,
                                 const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}
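
// Usage sketch (hypothetical caller, not part of this header): the classic
// compare-and-swap retry loop, here implementing an atomic increment of a
// hypothetical `counter`.  NoBarrier_CompareAndSwap returns the value the
// word held before the attempt, so the loop retries until no other thread
// raced between the load and the swap.
//
//   Atomic32 counter = 0;
//   Atomic32 old;
//   do {
//     old = NoBarrier_Load(&counter);
//   } while (NoBarrier_CompareAndSwap(&counter, old, old + 1) != old);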
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
                                         Atomic32 new_value) {
  Atomic32 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
                                     const_cast<Atomic32*>(ptr)));
  return old_value;
}

inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
                                       Atomic32 new_value) {
  Atomic32 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap32Barrier(old_value, new_value,
                                            const_cast<Atomic32*>(ptr)));
  return old_value;
}

inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
                                       Atomic32 new_value) {
  return Acquire_AtomicExchange(ptr, new_value);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
                                        const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
  MemoryBarrier();
  return *ptr;
}
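
// Usage sketch (hypothetical shared variables, not part of this header):
// the intended pairing of Release_Store and Acquire_Load for publishing
// data from one thread to another.  The full OSMemoryBarrier in each
// function keeps the data write visible before the flag write, and the
// flag read complete before the data read.
//
//   // Producer:                       // Consumer:
//   data = ComputeValue();             while (Acquire_Load(&flag) == 0) { }
//   Release_Store(&flag, 1);           UseValue(data);  // sees the store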
// 64-bit version

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64(old_value, new_value,
                                 const_cast<Atomic64*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
                                         Atomic64 new_value) {
  Atomic64 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
                                     const_cast<Atomic64*>(ptr)));
  return old_value;
}

inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
                                       Atomic64 new_value) {
  Atomic64 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap64Barrier(old_value, new_value,
                                            const_cast<Atomic64*>(ptr)));
  return old_value;
}

inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
                                       Atomic64 new_value) {
  return Acquire_AtomicExchange(ptr, new_value);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
                                        const_cast<Atomic64*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  // The libkern interface does not distinguish between
  // Acquire and Release memory barriers; they are equivalent.
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}
#ifdef __LP64__

// 64-bit implementation on 64-bit platform

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
  MemoryBarrier();
  return *ptr;
}
#else

// 64-bit implementation on 32-bit platform

#if defined(__ppc__)

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  __asm__ __volatile__(
      "_NoBarrier_Store_not_supported_for_32_bit_ppc\n\t");
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  __asm__ __volatile__(
      "_NoBarrier_Load_not_supported_for_32_bit_ppc\n\t");
  return 0;
}

#elif defined(__i386__)

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  __asm__ __volatile__("movq %1, %%mm0\n\t"  // Use mmx reg for 64-bit atomic
                       "movq %%mm0, %0\n\t"  // moves (ptr could be read-only)
                       "emms\n\t"            // Reset FP registers
                       : "=m" (*ptr)
                       : "m" (value)
                       : // mark the FP stack and mmx registers as clobbered
                         "st", "st(1)", "st(2)", "st(3)", "st(4)",
                         "st(5)", "st(6)", "st(7)", "mm0", "mm1",
                         "mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  Atomic64 value;
  __asm__ __volatile__("movq %1, %%mm0\n\t"  // Use mmx reg for 64-bit atomic
                       "movq %%mm0, %0\n\t"  // moves (ptr could be read-only)
                       "emms\n\t"            // Reset FP registers
                       : "=m" (value)
                       : "m" (*ptr)
                       : // mark the FP stack and mmx registers as clobbered
                         "st", "st(1)", "st(2)", "st(3)", "st(4)",
                         "st(5)", "st(6)", "st(7)", "mm0", "mm1",
                         "mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
  return value;
}
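
// Why the movq-through-%mm0 trick above is atomic: movq performs a single
// 64-bit memory access, and x86 processors perform naturally (8-byte)
// aligned 64-bit accesses atomically, whereas a pair of 32-bit moves could
// be torn by a concurrent writer.  The trailing emms restores the x87 FP
// state that touching an mmx register invalidates.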
#endif

inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
  NoBarrier_Store(ptr, value);
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
  MemoryBarrier();
  NoBarrier_Store(ptr, value);
}

inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
  Atomic64 value = NoBarrier_Load(ptr);
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
  MemoryBarrier();
  return NoBarrier_Load(ptr);
}

#endif  // __LP64__

}  // namespace base::subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_MACOSX_H_