// spinlock.cc
  1. // -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
  2. /* Copyright (c) 2006, Google Inc.
  3. * All rights reserved.
  4. *
  5. * Redistribution and use in source and binary forms, with or without
  6. * modification, are permitted provided that the following conditions are
  7. * met:
  8. *
  9. * * Redistributions of source code must retain the above copyright
  10. * notice, this list of conditions and the following disclaimer.
  11. * * Redistributions in binary form must reproduce the above
  12. * copyright notice, this list of conditions and the following disclaimer
  13. * in the documentation and/or other materials provided with the
  14. * distribution.
  15. * * Neither the name of Google Inc. nor the names of its
  16. * contributors may be used to endorse or promote products derived from
  17. * this software without specific prior written permission.
  18. *
  19. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  20. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  21. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  22. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  23. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  24. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  25. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  26. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  27. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  28. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  29. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  30. *
  31. * ---
  32. * Author: Sanjay Ghemawat
  33. */
  34. #include <config.h>
  35. #include "base/spinlock.h"
  36. #ifndef TCMALLOC_SGX
  37. #include "base/spinlock_internal.h"
  38. #include "base/sysinfo.h" /* for GetSystemCPUsCount() */
  39. #endif
// NOTE on the Lock-state values:
//
// kSpinLockFree represents the unlocked state
// kSpinLockHeld represents the locked state with no waiters
// kSpinLockSleeper represents the locked state with waiters

// Number of loop iterations SpinLoop() busy-waits before the caller falls
// back to yielding/sleeping.  Starts at 0 (no adaptive spinning) and is
// raised to 1000 on multi-CPU machines by SpinLock_InitHelper below, which
// runs during global constructor execution.
static int adaptive_spin_count = 0;

// Definition of the class-static tag value used to select the
// linker-initialized SpinLock constructor (declared in base/spinlock.h).
const base::LinkerInitialized SpinLock::LINKER_INITIALIZED =
    base::LINKER_INITIALIZED;
  48. namespace {
  49. struct SpinLock_InitHelper {
  50. SpinLock_InitHelper() {
  51. // On multi-cpu machines, spin for longer before yielding
  52. // the processor or sleeping. Reduces idle time significantly.
  53. if (GetSystemCPUsCount() > 1) {
  54. adaptive_spin_count = 1000;
  55. }
  56. }
  57. };
  58. // Hook into global constructor execution:
  59. // We do not do adaptive spinning before that,
  60. // but nothing lock-intensive should be going on at that time.
  61. static SpinLock_InitHelper init_helper;
  62. inline void SpinlockPause(void) {
  63. #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
  64. __asm__ __volatile__("rep; nop" : : );
  65. #endif
  66. }
  67. } // unnamed namespace
  68. // Monitor the lock to see if its value changes within some time
  69. // period (adaptive_spin_count loop iterations). The last value read
  70. // from the lock is returned from the method.
  71. Atomic32 SpinLock::SpinLoop() {
  72. int c = adaptive_spin_count;
  73. while (base::subtle::NoBarrier_Load(&lockword_) != kSpinLockFree && --c > 0) {
  74. SpinlockPause();
  75. }
  76. return base::subtle::Acquire_CompareAndSwap(&lockword_, kSpinLockFree,
  77. kSpinLockSleeper);
  78. }
// Slow path of Lock(): the caller's fast-path CAS failed, so spin,
// escalate the lock word to the "has waiters" (kSpinLockSleeper) state,
// and — outside SGX, where sleeping is unavailable — block between spin
// rounds via SpinLockDelay().  Returns only once this thread owns the lock.
void SpinLock::SlowLock() {
  Atomic32 lock_value = SpinLoop();

#ifndef TCMALLOC_SGX
  // Counts SpinLockDelay() calls so the delay routine can back off.
  int lock_wait_call_count = 0;
#endif
  while (lock_value != kSpinLockFree) {
    // If the lock is currently held, but not marked as having a sleeper, mark
    // it as having a sleeper.
    if (lock_value == kSpinLockHeld) {
      // Here, just "mark" that the thread is going to sleep.  Don't store the
      // lock wait time in the lock as that will cause the current lock
      // owner to think it experienced contention.
      lock_value = base::subtle::Acquire_CompareAndSwap(&lockword_,
                                                        kSpinLockHeld,
                                                        kSpinLockSleeper);
      if (lock_value == kSpinLockHeld) {
        // Successfully transitioned to kSpinLockSleeper.  Pass
        // kSpinLockSleeper to the SpinLockDelay routine to properly indicate
        // the last lock_value observed.
        lock_value = kSpinLockSleeper;
      } else if (lock_value == kSpinLockFree) {
        // Lock is free again, so try and acquire it before sleeping.  The
        // new lock state will be the number of cycles this thread waited if
        // this thread obtains the lock.
        lock_value = base::subtle::Acquire_CompareAndSwap(&lockword_,
                                                          kSpinLockFree,
                                                          kSpinLockSleeper);
        continue;  // skip the delay at the end of the loop
      }
      // Otherwise lock_value is already kSpinLockSleeper: some other waiter
      // marked it first, so fall through to the delay below.
    }

#ifndef TCMALLOC_SGX  /* disabled because code inside SGX can't sleep */
    // Wait for an OS specific delay.
    base::internal::SpinLockDelay(&lockword_, lock_value,
                                  ++lock_wait_call_count);
#endif
    // Spin again after returning from the wait routine to give this thread
    // some chance of obtaining the lock.
    lock_value = SpinLoop();
  }
}
// Slow path of Unlock(): taken when the lock word was kSpinLockSleeper,
// i.e. some thread marked itself as waiting in SlowLock().
void SpinLock::SlowUnlock() {
#ifndef TCMALLOC_SGX
  // wake waiter if necessary.  In SGX builds this is compiled out: waiters
  // never call SpinLockDelay() (see SlowLock), so there is nothing to wake.
  base::internal::SpinLockWake(&lockword_, false);
#endif
}