/* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */
/* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */
#ifndef _SHIM_ATOMIC_H_
#define _SHIM_ATOMIC_H_
/* Copyright (C) 2014 Stony Brook University
 * Copyright (C) 2017 Fortanix Inc, and University of North Carolina
 * at Chapel Hill.
 *
 * This file defines atomic operations (and barriers) for use in
 * Graphene.
 *
 * The atomic operation assembly code is taken from musl libc, which
 * is subject to the MIT license.
 *
 * At this point, we primarily focus on x86_64; there are some vestigial
 * 32-bit definitions here, but a more portable version would need to
 * move and reimplement portions of this for 32-bit x86 (or other
 * architectures).
 */
/*
/----------------------------------------------------------------------
Copyright (C) 2005-2014 Rich Felker, et al.

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
----------------------------------------------------------------------
*/
#include <stdint.h>

/* Optimization barrier */
#define barrier() __asm__ __volatile__("": : :"memory")
#define cpu_relax() __asm__ __volatile__("rep; nop" ::: "memory")
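
/* Illustrative sketch (not part of the original header): cpu_relax() is
 * intended for busy-wait loops, where the "rep; nop" (PAUSE) hint tells the
 * CPU that the core is spinning and the "memory" clobber keeps the compiler
 * from hoisting the flag read out of the loop. The helper name and flag
 * parameter below are hypothetical and exist only to show the intended use. */
static inline void example_spin_until_set (volatile int * flag)
{
    while (!*flag)
        cpu_relax();
}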

#ifdef __i386__
# define rmb() __asm__ __volatile__("lock; addl $0,0(%%esp)" ::: "memory")

struct atomic_int {
    volatile int32_t counter;
}
#ifdef __GNUC__
__attribute__((aligned(sizeof(uint32_t))))
#endif
;
#endif

/* The return types below effectively assume we are dealing with a 64-bit
 * signed value.
 */
#ifdef __x86_64__
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
# define mb()  __asm__ __volatile__ ("mfence" ::: "memory")
# define rmb() __asm__ __volatile__ ("lfence" ::: "memory")
# define wmb() __asm__ __volatile__ ("sfence" ::: "memory")

struct atomic_int {
    volatile int64_t counter;
}
#ifdef __GNUC__
__attribute__((aligned(sizeof(uint64_t))))
#endif
;
#endif

#define LOCK_PREFIX "\n\tlock; "

#define ATOMIC_INIT(i) { (i) }

/* Read the value currently stored in the atomic_int */
static inline int64_t atomic_read (const struct atomic_int * v)
{
    //  Effectively:
    //      return v->counter;
    int64_t i;
    /* Use inline assembly to ensure this is one instruction */
    __asm__ __volatile__("mov %1, %0"
                         : "=r"(i) :
                         "m"(v->counter) : "memory");
    return i;
}

/* Does a blind write to the atomic variable */
static inline void atomic_set (struct atomic_int * v, int64_t i)
{
    //  Effectively:
    //      v->counter = i;
    /* Use inline assembly to ensure this is one instruction */
    __asm__ __volatile__("mov %2, %0"
                         : "=m"(v->counter) :
                         "m"(v->counter), "r"(i) : "memory");
}

/* Helper function that atomically adds a value to an atomic_int,
 * and returns the _new_ value. */
static inline int64_t _atomic_add (int64_t i, struct atomic_int * v)
{
    int64_t increment = i;
    __asm__ __volatile__(
        "lock ; xadd %0, %1"
        : "=r"(i), "=m"(v->counter) : "0"(i) : "memory", "cc");
    return i + increment;
}
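
/* Illustrative sketch (not part of the original header): because
 * _atomic_add() returns the post-add value, a fetch-and-add wrapper that
 * reports the new count can be built directly on top of it. The name
 * example_atomic_add_return is hypothetical and exists only for this
 * example. */
static inline int64_t example_atomic_add_return (int64_t i, struct atomic_int * v)
{
    /* e.g., example_atomic_add_return(1, &counter) yields the value
     * immediately after the increment, as observed atomically. */
    return _atomic_add(i, v);
}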

/* Atomically adds i to v. Does not return a value. */
static inline void atomic_add (int64_t i, struct atomic_int * v)
{
    _atomic_add(i, v);
}

/* Atomically subtracts i from v. Does not return a value. */
static inline void atomic_sub (int64_t i, struct atomic_int * v)
{
    _atomic_add(-i, v);
}

/* Atomically adds 1 to v. Does not return a value. */
static inline void atomic_inc (struct atomic_int * v)
{
    /* counter is a 64-bit value on x86_64, so use incq; incl would only
     * touch the low 32 bits of the counter. */
    __asm__ __volatile__(
        "lock ; incq %0"
        : "=m"(v->counter) : "m"(v->counter) : "memory", "cc");
}

/* Atomically subtracts 1 from v. Does not return a value. */
static inline void atomic_dec (struct atomic_int * v)
{
    /* As with atomic_inc(), use the 64-bit decq on the int64_t counter. */
    __asm__ __volatile__(
        "lock ; decq %0"
        : "=m"(v->counter) : "m"(v->counter) : "memory", "cc");
}

/* Atomically subtracts 1 from v. Returns 1 if this causes the
   value to reach 0; returns 0 otherwise. */
static inline int64_t atomic_dec_and_test (struct atomic_int * v)
{
    int64_t i = _atomic_add(-1, v);
    return i == 0;
}
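
/* Illustrative sketch (not part of the original header): a common use of
 * atomic_dec_and_test() is reference counting, where whichever caller drops
 * the last reference performs the cleanup. The struct and callback below are
 * hypothetical and exist only for this example. */
struct example_refcounted {
    struct atomic_int refs;
};

static inline void example_put_ref (struct example_refcounted * obj,
                                    void (*destroy) (struct example_refcounted *))
{
    /* Exactly one caller observes the transition to zero, so exactly one
     * caller runs the destructor. */
    if (atomic_dec_and_test(&obj->refs))
        destroy(obj);
}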

/* Helper function to atomically compare-and-swap the value pointed to by p.
 * t is the old value, s is the new value. Returns
 * the value originally in p. */
static inline int64_t cmpxchg (volatile int64_t * p, int64_t t, int64_t s)
{
    __asm__ __volatile__ (
        "lock ; cmpxchg %3, %1"
        : "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "memory", "cc");
    return t;
}

/* Helper function to atomically compare-and-swap the value in v.
 * If v == old, it sets v = new.
 * Returns the value originally in v. */
static inline int64_t atomic_cmpxchg (struct atomic_int * v, int64_t old, int64_t new)
{
    return cmpxchg(&v->counter, old, new);
}
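
/* Illustrative sketch (not part of the original header): atomic_cmpxchg()
 * is typically used in a read-modify-retry loop; if another thread changes
 * the counter between the read and the cmpxchg, the swap fails and the loop
 * retries with the value that was actually observed. The helper name below
 * is hypothetical and exists only for this example. */
static inline int example_atomic_inc_not_zero (struct atomic_int * v)
{
    int64_t old = atomic_read(v);
    while (old != 0) {
        int64_t seen = atomic_cmpxchg(v, old, old + 1);
        if (seen == old)
            return 1;   /* our old + 1 was installed */
        old = seen;     /* lost a race; retry with the value we saw */
        cpu_relax();
    }
    return 0;           /* counter was zero; left unchanged */
}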

#endif /* _SHIM_ATOMIC_H_ */