cmpxchg_32.h

#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

#define LOCK_PREFIX "\n\tlock; "
/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants,
 * you need to test for the feature in boot_cpu_data.
 */
extern void __xchg_wrong_size(void);
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary,
 * but generally the primitive is invalid; *ptr is the output argument. --ANK
 */
struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))

#define __xchg(x, ptr, size) \
({ \
	__typeof(*(ptr)) __x = (x); \
	switch (size) { \
	case 1: \
		asm volatile("lock; xchgb %b0,%1" \
			: "=q" (__x), "+m" (*__xg(ptr)) \
			: "0" (__x) \
			: "memory"); \
		break; \
	case 2: \
		asm volatile("lock; xchgw %w0,%1" \
			: "=r" (__x), "+m" (*__xg(ptr)) \
			: "0" (__x) \
			: "memory"); \
		break; \
	case 4: \
		asm volatile("lock; xchgl %0,%1" \
			: "=r" (__x), "+m" (*__xg(ptr)) \
			: "0" (__x) \
			: "memory"); \
		break; \
	default: \
		__xchg_wrong_size(); \
	} \
	__x; \
})

#define xchg(ptr, v) \
	__xchg((v), (ptr), sizeof(*ptr))
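
/*
 * Illustrative example (hypothetical names): the typical use of xchg()
 * is to store a new value and pick up whatever was there before in a
 * single atomic step.
 */
static inline unsigned long example_xchg_handoff(unsigned long *slot,
						 unsigned long new_val)
{
	/* Store new_val into *slot and return the previous contents;
	 * the read and the write happen as one locked operation. */
	return xchg(slot, new_val);
}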
/*
 * CMPXCHG8B only writes to the target if we had the previous
 * value in registers, otherwise it acts as a read and gives us the
 * "new previous" value.  That is why there is a loop.  Preloading
 * EDX:EAX is a performance optimization: in the common case it means
 * we need only one locked operation.
 *
 * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
 * least an FPU save and/or %cr0.ts manipulation.
 *
 * cmpxchg8b must be used with the lock prefix here to allow the
 * instruction to be executed atomically.  We need the reader side to
 * see a coherent 64-bit value.
 */
static inline void set_64bit(volatile u64 *ptr, u64 value)
{
	u32 low  = value;
	u32 high = value >> 32;
	u64 prev = *ptr;

	asm volatile("\n1:\t"
		     LOCK_PREFIX "cmpxchg8b %0\n\t"
		     "jnz 1b"
		     : "=m" (*ptr), "+A" (prev)
		     : "b" (low), "c" (high)
		     : "memory");
}
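
/*
 * Illustrative example (hypothetical names): a 32-bit caller uses
 * set_64bit() to publish a 64-bit value that readers must never see
 * half-written.
 */
static inline void example_publish_stamp(volatile u64 *shared_stamp, u64 now)
{
	/* The locked cmpxchg8b loop above guarantees that readers see
	 * either the old or the new 64-bit value, never a mix. */
	set_64bit(shared_stamp, now);
}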
extern void __cmpxchg_wrong_size(void);

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock) \
({ \
	__typeof__(*(ptr)) __ret; \
	__typeof__(*(ptr)) __old = (old); \
	__typeof__(*(ptr)) __new = (new); \
	switch (size) { \
	case 1: \
		asm volatile(lock "cmpxchgb %b2,%1" \
			: "=a" (__ret), "+m" (*__xg(ptr)) \
			: "q" (__new), "0" (__old) \
			: "memory"); \
		break; \
	case 2: \
		asm volatile(lock "cmpxchgw %w2,%1" \
			: "=a" (__ret), "+m" (*__xg(ptr)) \
			: "r" (__new), "0" (__old) \
			: "memory"); \
		break; \
	case 4: \
		asm volatile(lock "cmpxchgl %2,%1" \
			: "=a" (__ret), "+m" (*__xg(ptr)) \
			: "r" (__new), "0" (__old) \
			: "memory"); \
		break; \
	default: \
		__cmpxchg_wrong_size(); \
	} \
	__ret; \
})

#define __cmpxchg(ptr, old, new, size) \
	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
#define __sync_cmpxchg(ptr, old, new, size) \
	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
#define __cmpxchg_local(ptr, old, new, size) \
	__raw_cmpxchg((ptr), (old), (new), (size), "")

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#define cmpxchg(ptr, old, new) \
	__cmpxchg((ptr), (old), (new), sizeof(*ptr))
#define sync_cmpxchg(ptr, old, new) \
	__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))
#define cmpxchg_local(ptr, old, new) \
	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
#endif

#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n) \
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
					 (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n) \
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
					       (unsigned long long)(n)))
#endif
static inline unsigned long long __cmpxchg64(volatile void *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	unsigned long long prev;

	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*__xg(ptr))
		     : "b" ((unsigned long)new),
		       "c" ((unsigned long)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}

static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
						   unsigned long long old,
						   unsigned long long new)
{
	unsigned long long prev;

	asm volatile("cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*__xg(ptr))
		     : "b" ((unsigned long)new),
		       "c" ((unsigned long)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}
#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386.  It may be necessary
 * to simulate the cmpxchg on the 80386 CPU.  For that purpose we define
 * a function for each of the sizes we support.
 */
extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
					unsigned long new, int size)
{
	switch (size) {
	case 1:
		return cmpxchg_386_u8(ptr, old, new);
	case 2:
		return cmpxchg_386_u16(ptr, old, new);
	case 4:
		return cmpxchg_386_u32(ptr, old, new);
	}
	return old;
}
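
/*
 * A minimal sketch of what the extern helpers above are expected to do
 * (an assumption for illustration, not the out-of-line implementation
 * itself): a plain 80386 has no cmpxchg instruction and is UP-only, so
 * disabling interrupts around the compare-and-store is sufficient.
 * Assumes local_irq_save()/local_irq_restore() are visible here; the
 * function name is hypothetical.
 */
static inline unsigned long example_cmpxchg_u32_emulated(volatile u32 *ptr,
							 u32 old, u32 new)
{
	unsigned long flags;
	u32 prev;

	local_irq_save(flags);		/* no SMP on a 386 */
	prev = *ptr;
	if (prev == old)
		*ptr = new;
	local_irq_restore(flags);
	return prev;
}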
#define cmpxchg(ptr, o, n) \
({ \
	__typeof__(*(ptr)) __ret; \
	if (likely(boot_cpu_data.x86 > 3)) \
		__ret = (__typeof__(*(ptr)))__cmpxchg((ptr), \
				(unsigned long)(o), (unsigned long)(n), \
				sizeof(*(ptr))); \
	else \
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr), \
				(unsigned long)(o), (unsigned long)(n), \
				sizeof(*(ptr))); \
	__ret; \
})
#define cmpxchg_local(ptr, o, n) \
({ \
	__typeof__(*(ptr)) __ret; \
	if (likely(boot_cpu_data.x86 > 3)) \
		__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr), \
				(unsigned long)(o), (unsigned long)(n), \
				sizeof(*(ptr))); \
	else \
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr), \
				(unsigned long)(o), (unsigned long)(n), \
				sizeof(*(ptr))); \
	__ret; \
})
#endif
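
/*
 * Illustrative example (hypothetical names): the usual compare-and-swap
 * retry loop built on cmpxchg(), here incrementing a shared counter
 * without taking a lock.
 */
static inline unsigned int example_lockfree_inc(unsigned int *counter)
{
	unsigned int old, new;

	do {
		old = *counter;
		new = old + 1;
		/* cmpxchg() returns the value it found in *counter;
		 * anything other than 'old' means another CPU got in
		 * first and we must retry. */
	} while (cmpxchg(counter, old, new) != old);

	return new;
}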
#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on an 80386 and an 80486.  It may
 * be necessary to simulate the cmpxchg8b on the 80386 and 80486 CPU.
 */
extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);

#define cmpxchg64(ptr, o, n) \
({ \
	__typeof__(*(ptr)) __ret; \
	__typeof__(*(ptr)) __old = (o); \
	__typeof__(*(ptr)) __new = (n); \
	alternative_io(LOCK_PREFIX_HERE \
			"call cmpxchg8b_emu", \
			"lock; cmpxchg8b (%%esi)", \
			X86_FEATURE_CX8, \
			"=A" (__ret), \
			"S" ((ptr)), "0" (__old), \
			"b" ((unsigned int)__new), \
			"c" ((unsigned int)(__new >> 32)) \
			: "memory"); \
	__ret; })

#define cmpxchg64_local(ptr, o, n) \
({ \
	__typeof__(*(ptr)) __ret; \
	if (likely(boot_cpu_data.x86 > 4)) \
		__ret = (__typeof__(*(ptr)))__cmpxchg64_local((ptr), \
				(unsigned long long)(o), \
				(unsigned long long)(n)); \
	else \
		__ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr), \
				(unsigned long long)(o), \
				(unsigned long long)(n)); \
	__ret; \
})
#endif
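
/*
 * Illustrative example (hypothetical names): advancing a 64-bit sequence
 * number on a 32-bit CPU with cmpxchg64().  The initial plain load may
 * tear, but cmpxchg64() returns the real current value, so the loop
 * still converges on a correct update.
 */
static inline u64 example_seq_advance(u64 *seq)
{
	u64 old = *seq;			/* possibly torn; fixed up below */
	u64 prev;

	for (;;) {
		prev = cmpxchg64(seq, old, old + 1);
		if (prev == old)	/* our increment was stored */
			return old + 1;
		old = prev;		/* raced (or the read tore): retry
					 * with the value cmpxchg64() gave */
	}
}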

#endif /* _ASM_X86_CMPXCHG_32_H */