/* Copyright (C) 2014 OSCAR lab, Stony Brook University

   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */

/*
 * cmpxchg_64.h
 */
#ifndef _ASM_X86_CMPXCHG_64_H
#define _ASM_X86_CMPXCHG_64_H

//#include <asm/alternative.h> /* Provides LOCK_PREFIX */

/* Including the definition of LOCK_PREFIX directly here */
#define LOCK_PREFIX "\n\tlock; "

#define __xg(x) ((volatile long *)(x))

/*static inline void set_64bit(volatile u64 *ptr, u64 val)
{
    *ptr = val;
}*/
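/*
 * The two declarations below are intentionally never defined anywhere:
 * if __xchg()/__cmpxchg() is expanded with an unsupported operand size,
 * the default case references one of them and the build fails at link
 * time instead of silently miscompiling.
 */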
extern void __xchg_wrong_size(void);
extern void __cmpxchg_wrong_size(void);
/*
 * Note: xchg with a memory operand asserts the bus lock on its own, even
 * on SMP, so the explicit "lock;" prefix below is redundant but harmless.
 * Note 2: xchg has a side effect, so the volatile qualifier is necessary;
 * *ptr is an output argument. --ANK
 */
#define __xchg(x, ptr, size)                                    \
({                                                              \
    __typeof(*(ptr)) __x = (x);                                 \
    switch (size) {                                             \
    case 1:                                                     \
        asm volatile("lock; xchgb %b0,%1"                       \
                     : "=q" (__x), "+m" (*__xg(ptr))            \
                     : "0" (__x)                                \
                     : "memory");                               \
        break;                                                  \
    case 2:                                                     \
        asm volatile("lock; xchgw %w0,%1"                       \
                     : "=r" (__x), "+m" (*__xg(ptr))            \
                     : "0" (__x)                                \
                     : "memory");                               \
        break;                                                  \
    case 4:                                                     \
        asm volatile("lock; xchgl %k0,%1"                       \
                     : "=r" (__x), "+m" (*__xg(ptr))            \
                     : "0" (__x)                                \
                     : "memory");                               \
        break;                                                  \
    case 8:                                                     \
        asm volatile("lock; xchgq %0,%1"                        \
                     : "=r" (__x), "+m" (*__xg(ptr))            \
                     : "0" (__x)                                \
                     : "memory");                               \
        break;                                                  \
    default:                                                    \
        __xchg_wrong_size();                                    \
    }                                                           \
    __x;                                                        \
})
#define xchg(ptr, v) \
    __xchg((v), (ptr), sizeof(*(ptr)))
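/*
 * Usage sketch (illustrative only, not part of the original header):
 * atomically publish a new value and observe the old one in a single
 * instruction. The variable names are hypothetical.
 *
 *     long flag = 0;
 *     long prev = xchg(&flag, 1);   // flag is now 1; prev holds the old value
 *     if (prev == 0) {
 *         // we were the first to set the flag
 *     }
 */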
#define __HAVE_ARCH_CMPXCHG 1

/*
 * Atomic compare and exchange. Compare OLD with MEM; if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)                \
({                                                              \
    __typeof__(*(ptr)) __ret;                                   \
    __typeof__(*(ptr)) __old = (old);                           \
    __typeof__(*(ptr)) __new = (new);                           \
    switch (size) {                                             \
    case 1:                                                     \
        asm volatile(lock "cmpxchgb %b2,%1"                     \
                     : "=a" (__ret), "+m" (*__xg(ptr))          \
                     : "q" (__new), "0" (__old)                 \
                     : "memory");                               \
        break;                                                  \
    case 2:                                                     \
        asm volatile(lock "cmpxchgw %w2,%1"                     \
                     : "=a" (__ret), "+m" (*__xg(ptr))          \
                     : "r" (__new), "0" (__old)                 \
                     : "memory");                               \
        break;                                                  \
    case 4:                                                     \
        asm volatile(lock "cmpxchgl %k2,%1"                     \
                     : "=a" (__ret), "+m" (*__xg(ptr))          \
                     : "r" (__new), "0" (__old)                 \
                     : "memory");                               \
        break;                                                  \
    case 8:                                                     \
        asm volatile(lock "cmpxchgq %2,%1"                      \
                     : "=a" (__ret), "+m" (*__xg(ptr))          \
                     : "r" (__new), "0" (__old)                 \
                     : "memory");                               \
        break;                                                  \
    default:                                                    \
        __cmpxchg_wrong_size();                                 \
    }                                                           \
    __ret;                                                      \
})
#define __cmpxchg(ptr, old, new, size) \
    __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size) \
    __raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size) \
    __raw_cmpxchg((ptr), (old), (new), (size), "")

#define cmpxchg(ptr, old, new) \
    __cmpxchg((ptr), (old), (new), sizeof(*(ptr)))

#define sync_cmpxchg(ptr, old, new) \
    __sync_cmpxchg((ptr), (old), (new), sizeof(*(ptr)))

#define cmpxchg_local(ptr, old, new) \
    __cmpxchg_local((ptr), (old), (new), sizeof(*(ptr)))
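/*
 * Usage sketch (illustrative only, not part of the original header): the
 * classic compare-and-swap retry loop built on cmpxchg(). Success is
 * detected by comparing the returned value with the expected old value,
 * exactly as the comment above describes. atomic_inc64 and counter are
 * hypothetical names.
 *
 *     static inline void atomic_inc64(volatile long * counter)
 *     {
 *         long old, new;
 *         do {
 *             old = *counter;            // snapshot the current value
 *             new = old + 1;             // compute the desired value
 *         } while (cmpxchg(counter, old, new) != old);  // retry on a race
 *     }
 */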
#define cmpxchg64(ptr, o, n)                    \
({                                              \
    BUILD_BUG_ON(sizeof(*(ptr)) != 8);          \
    cmpxchg((ptr), (o), (n));                   \
})

#define cmpxchg64_local(ptr, o, n)              \
({                                              \
    BUILD_BUG_ON(sizeof(*(ptr)) != 8);          \
    cmpxchg_local((ptr), (o), (n));             \
})
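/*
 * Note that BUILD_BUG_ON() is used above but not defined in this header;
 * the including code must provide it. A minimal sketch of a caller,
 * assuming a kernel-style BUILD_BUG_ON() is in scope (the definition and
 * the variable name seq below are hypothetical):
 *
 *     // fails to compile when cond is true: char[-1] is invalid
 *     #define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))
 *
 *     unsigned long seq = 0;
 *     if (cmpxchg64(&seq, 0UL, 1UL) == 0UL) {
 *         // we advanced seq from 0 to 1
 *     }
 */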
#endif /* _ASM_X86_CMPXCHG_64_H */