// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

// Implementation of atomic operations for Mac OS X.  This file should not
// be included directly.  Clients should instead include
// "base/atomicops.h".

#ifndef BASE_ATOMICOPS_INTERNALS_MACOSX_H_
#define BASE_ATOMICOPS_INTERNALS_MACOSX_H_

typedef int32_t Atomic32;

// MacOS uses long for intptr_t, so AtomicWord and Atomic32 are always
// distinct types on the Mac, even when they have the same size.  Similarly,
// on __ppc64__, AtomicWord and Atomic64 are always distinct.  Thus we need
// explicit casting.
#ifdef __LP64__
#define AtomicWordCastType base::subtle::Atomic64
#else
#define AtomicWordCastType Atomic32
#endif

#if defined(__LP64__) || defined(__i386__)
#define BASE_HAS_ATOMIC64 1  // Use only in tests and base/atomic*
#endif
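
// Illustrative expansion (a sketch of how "base/atomicops.h" is expected to
// use AtomicWordCastType; the wrapper shown is hypothetical):
//
//   inline AtomicWord NoBarrier_Load(volatile const AtomicWord* ptr) {
//     return NoBarrier_Load(
//         reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
//   }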

#include <libkern/OSAtomic.h>

namespace base {
namespace subtle {

#if !defined(__LP64__) && defined(__ppc__)

// The Mac 64-bit OSAtomic implementations are not available for 32-bit
// PowerPC, and the underlying assembly instructions exist on only some
// PowerPC implementations.  The following inline functions will fail with
// the error message at compile time ONLY IF they are called.  So it is safe
// to use this header if user code only calls AtomicWord and Atomic32
// operations.
//
// NOTE(vchen): Implementation notes to implement the atomic ops below may
// be found in "PowerPC Virtual Environment Architecture, Book II,
// Version 2.02", January 28, 2005, Appendix B, page 46.  Unfortunately,
// extra care must be taken to ensure data are properly 8-byte aligned, and
// that data are returned correctly according to Mac OS X ABI specs.

inline int64_t OSAtomicCompareAndSwap64(
    int64_t oldValue, int64_t newValue, int64_t *theValue) {
  __asm__ __volatile__(
      "_OSAtomicCompareAndSwap64_not_supported_for_32_bit_ppc\n\t");
  return 0;
}

inline int64_t OSAtomicAdd64(int64_t theAmount, int64_t *theValue) {
  __asm__ __volatile__(
      "_OSAtomicAdd64_not_supported_for_32_bit_ppc\n\t");
  return 0;
}

inline int64_t OSAtomicCompareAndSwap64Barrier(
    int64_t oldValue, int64_t newValue, int64_t *theValue) {
  int64_t prev = OSAtomicCompareAndSwap64(oldValue, newValue, theValue);
  OSMemoryBarrier();
  return prev;
}

inline int64_t OSAtomicAdd64Barrier(
    int64_t theAmount, int64_t *theValue) {
  int64_t new_val = OSAtomicAdd64(theAmount, theValue);
  OSMemoryBarrier();
  return new_val;
}
#endif

typedef int64_t Atomic64;

inline void MemoryBarrier() {
  OSMemoryBarrier();
}

// 32-bit Versions.
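
// OSAtomicCompareAndSwap32 only reports success or failure, while the
// compare-and-swap functions below must return the value the location held
// before the operation.  Each one therefore retries in a loop: on CAS
// failure it re-reads *ptr, and it loops again only if that re-read still
// equals old_value (i.e. the value changed back in between), so the value
// returned is always one actually observed at *ptr.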

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32(old_value, new_value,
                                 const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
                                         Atomic32 new_value) {
  Atomic32 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
                                     const_cast<Atomic32*>(ptr)));
  return old_value;
}

inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
                                       Atomic32 new_value) {
  Atomic32 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap32Barrier(old_value, new_value,
                                            const_cast<Atomic32*>(ptr)));
  return old_value;
}

inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
                                       Atomic32 new_value) {
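  // On MacOS the OSAtomic*Barrier primitives issue a full barrier, so the
  // Acquire flavor provides release semantics as well (see the note in
  // Release_CompareAndSwap below).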
  return Acquire_AtomicExchange(ptr, new_value);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
                                        const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}
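
// Illustrative use of Acquire_CompareAndSwap (a hypothetical spinlock
// sketch, not part of this header; lock_word is an assumed name):
//
//   static Atomic32 lock_word = 0;  // 0 = free, 1 = held
//
//   void LockSketch() {
//     // Loop until the previous value observed at lock_word is 0.
//     while (Acquire_CompareAndSwap(&lock_word, 0, 1) != 0) { }
//   }
//
//   void UnlockSketch() {
//     Release_Store(&lock_word, 0);  // barrier, then store
//   }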

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
  MemoryBarrier();
  return *ptr;
}
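
// Illustrative release/acquire pairing (a sketch; payload and ready are
// hypothetical variables):
//
//   // Writer:
//   NoBarrier_Store(&payload, 42);
//   Release_Store(&ready, 1);       // barrier, then store
//
//   // Reader:
//   if (Acquire_Load(&ready)) {     // load, then barrier
//     Atomic32 v = NoBarrier_Load(&payload);  // guaranteed to see 42
//   }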

// 64-bit Versions.
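
// These mirror the 32-bit versions above, using the OSAtomic*64 family.
// On 32-bit PowerPC they compile against the failing stubs defined earlier
// in this file, so they must not be called there.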

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64(old_value, new_value,
                                 const_cast<Atomic64*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
                                         Atomic64 new_value) {
  Atomic64 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
                                     const_cast<Atomic64*>(ptr)));
  return old_value;
}

inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
                                       Atomic64 new_value) {
  Atomic64 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap64Barrier(old_value, new_value,
                                            const_cast<Atomic64*>(ptr)));
  return old_value;
}

inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
                                       Atomic64 new_value) {
  return Acquire_AtomicExchange(ptr, new_value);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
                                        const_cast<Atomic64*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  // The lib kern interface does not distinguish between
  // Acquire and Release memory barriers; they are equivalent.
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

#ifdef __LP64__

// 64-bit implementation on 64-bit platform.

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
  MemoryBarrier();
  return *ptr;
}

#else

// 64-bit implementation on 32-bit platform.

#if defined(__ppc__)

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  __asm__ __volatile__(
      "_NoBarrier_Store_not_supported_for_32_bit_ppc\n\t");
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  __asm__ __volatile__(
      "_NoBarrier_Load_not_supported_for_32_bit_ppc\n\t");
  return 0;
}

#elif defined(__i386__)
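
// On IA-32, an aligned 8-byte memory access performed with a single MMX
// movq instruction is atomic on Pentium-class and newer processors, so
// routing the move through %mm0 yields a lock-free 64-bit atomic load or
// store even though the platform itself is 32-bit.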

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  __asm__ __volatile__("movq %1, %%mm0\n\t"  // Use mmx reg for 64-bit atomic
                       "movq %%mm0, %0\n\t"  // moves (ptr could be read-only)
                       "emms\n\t"            // Reset FP registers
                       : "=m" (*ptr)
                       : "m" (value)
                       : // mark the FP stack and mmx registers as clobbered
                         "st", "st(1)", "st(2)", "st(3)", "st(4)",
                         "st(5)", "st(6)", "st(7)", "mm0", "mm1",
                         "mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  Atomic64 value;
  __asm__ __volatile__("movq %1, %%mm0\n\t"  // Use mmx reg for 64-bit atomic
                       "movq %%mm0, %0\n\t"  // moves (ptr could be read-only)
                       "emms\n\t"            // Reset FP registers
                       : "=m" (value)
                       : "m" (*ptr)
                       : // mark the FP stack and mmx registers as clobbered
                         "st", "st(1)", "st(2)", "st(3)", "st(4)",
                         "st(5)", "st(6)", "st(7)", "mm0", "mm1",
                         "mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
  return value;
}

#endif

inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
  NoBarrier_Store(ptr, value);
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
  MemoryBarrier();
  NoBarrier_Store(ptr, value);
}

inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
  Atomic64 value = NoBarrier_Load(ptr);
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
  MemoryBarrier();
  return NoBarrier_Load(ptr);
}

#endif  // __LP64__

}  // namespace base::subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_MACOSX_H_