/* atomic.h */
  1. #ifndef _SHIM_ATOMIC_H_
  2. #define _SHIM_ATOMIC_H_
  3. /* Copyright (C) 2014 Stony Brook University
  4. * Copyright (C) 2017 Fortanix Inc, and University of North Carolina
  5. * at Chapel Hill.
  6. *
  7. * This file defines atomic operations (And barriers) for use in
  8. * Graphene.
  9. *
  10. * The atomic operation assembly code is taken from musl libc, which
  11. * is subject to the MIT license.
  12. *
  13. * At this point, we primarily focus on x86_64; there are some vestigial
  14. * 32-bit definitions here, but a more portable version would need to
  15. * move and reimplement portions of this for 32-bit x86 (or other architectures).
  16. */
  17. /*
  18. /----------------------------------------------------------------------
  19. Copyright (C) 2005-2014 Rich Felker, et al.
  20. Permission is hereby granted, free of charge, to any person obtaining
  21. a copy of this software and associated documentation files (the
  22. "Software"), to deal in the Software without restriction, including
  23. without limitation the rights to use, copy, modify, merge, publish,
  24. distribute, sublicense, and/or sell copies of the Software, and to
  25. permit persons to whom the Software is furnished to do so, subject to
  26. the following conditions:
  27. The above copyright notice and this permission notice shall be
  28. included in all copies or substantial portions of the Software.
  29. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  30. EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  31. MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
  32. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
  33. CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  34. TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  35. SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  36. ----------------------------------------------------------------------
  37. */
  38. #include <stdint.h>
/* Optimization barrier: prevents the compiler from reordering or caching
 * memory accesses across this point; emits no machine code. */
#define COMPILER_BARRIER() __asm__ __volatile__("": : :"memory")
/* Spin-wait hint: "rep; nop" encodes the PAUSE instruction, which tells the
 * CPU we are in a busy-wait loop (saves power, helps SMT siblings). */
#define CPU_RELAX() __asm__ __volatile__("rep; nop" ::: "memory")
#ifdef __i386__
/* Read memory barrier: i386 predates LFENCE, so a locked no-op add to the
 * stack is used to serialize loads. */
# define RMB() __asm__ __volatile__("lock; addl $0,0(%%esp)" ::: "memory")
/* 32-bit atomic counter; volatile forces every access to hit memory. */
struct atomic_int {
    volatile int32_t counter;
};
#endif
/* The return types below effectively assume we are dealing with a 64-bit
 * signed value.
 */
#ifdef __x86_64__
/*
 * Some non-Intel clones support out of order store. WMB() ceases to be a
 * nop for these.
 */
/* MB(): full barrier; RMB(): orders loads; WMB(): orders stores. */
# define MB() __asm__ __volatile__ ("mfence" ::: "memory")
# define RMB() __asm__ __volatile__ ("lfence" ::: "memory")
# define WMB() __asm__ __volatile__ ("sfence" ::: "memory")
/* 64-bit atomic counter; volatile forces every access to hit memory. */
struct atomic_int {
    volatile int64_t counter;
};
#endif
/* Instruction prefix that makes a read-modify-write op atomic on SMP. */
#define LOCK_PREFIX "\n\tlock; "
/* Static initializer: struct atomic_int x = ATOMIC_INIT(0); */
#define ATOMIC_INIT(i) { (i) }
/* Read the value currently stored in the atomic_int.
 * A single aligned 64-bit mov is atomic on x86_64, so no lock is needed. */
static inline int64_t atomic_read (const struct atomic_int * v)
{
    // Effectively:
    //     return v->counter;
    int64_t i;
    /* Use inline assembly to ensure this is one instruction */
    __asm__ __volatile__("mov %1, %0"
                         : "=r"(i) :
                         "m"(v->counter));
    return i;
}
/* Does a blind write to the atomic variable (no return value; not a
 * read-modify-write, so no lock prefix is required). */
static inline void atomic_set (struct atomic_int * v, int64_t i)
{
    // Effectively:
    //     v->counter = i;
    /* Use inline assembly to ensure this is one instruction.
     * %0 is the memory output, %1 a dummy memory input, %2 the new value. */
    __asm__ __volatile__("mov %2, %0"
                         : "=m"(v->counter) :
                         "m"(v->counter), "r"(i));
}
  87. /* Helper function that atomically adds a value to an atomic_int,
  88. * and returns the _new_ value. */
  89. static inline int64_t _atomic_add (int64_t i, struct atomic_int * v)
  90. {
  91. int64_t increment = i;
  92. __asm__ __volatile__(
  93. "lock ; xadd %0, %1"
  94. : "=r"(i), "=m"(v->counter) : "0"(i) : "cc");
  95. return i + increment;
  96. }
  97. /* Atomically adds i to v. Does not return a value. */
  98. static inline void atomic_add (int64_t i, struct atomic_int * v)
  99. {
  100. _atomic_add(i, v);
  101. }
  102. /* Atomically substracts i from v. Does not return a value. */
  103. static inline void atomic_sub (int64_t i, struct atomic_int * v)
  104. {
  105. _atomic_add(-i, v);
  106. }
  107. /* Atomically adds 1 to v. Does not return a value. */
  108. static inline void atomic_inc (struct atomic_int * v)
  109. {
  110. __asm__ __volatile__(
  111. "lock ; incl %0"
  112. : "=m"(v->counter) : "m"(v->counter) : "cc");
  113. }
  114. /* Atomically substracts 1 from v. Does not return a value. */
  115. static inline void atomic_dec (struct atomic_int * v)
  116. {
  117. __asm__ __volatile__(
  118. "lock ; decl %0"
  119. : "=m"(v->counter) : "m"(v->counter) : "cc");
  120. }
  121. /* Atomically substracts 1 from v. Returns 1 if this causes the
  122. value to reach 0; returns 0 otherwise. */
  123. static inline int64_t atomic_dec_and_test (struct atomic_int * v)
  124. {
  125. int64_t i = _atomic_add(-1, v);
  126. return i == 0;
  127. }
  128. /* Helper function to atomically compare-and-swap the value pointed to by p.
  129. * t is the old value, s is the new value. Returns
  130. * the value originally in p. */
  131. static inline int64_t cmpxchg(volatile int64_t *p, int64_t t, int64_t s)
  132. {
  133. __asm__ __volatile__ (
  134. "lock ; cmpxchg %3, %1"
  135. : "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "cc");
  136. return t;
  137. }
/* Atomically adds i to v and returns the resulting (new) value. */
#define atomic_add_return(i, v) _atomic_add(i, v)
/* Atomically increments v and returns the resulting (new) value. */
#define atomic_inc_return(v) _atomic_add(1, v)
  140. /* Helper function to atomically compare-and-swap the value in v.
  141. * If v == old, it sets v = new.
  142. * Returns the value originally in v. */
  143. static inline int64_t atomic_cmpxchg (struct atomic_int * v, int64_t old, int64_t new)
  144. {
  145. return cmpxchg(&v->counter, old, new);
  146. }
#endif /* _SHIM_ATOMIC_H_ */