#ifndef __ATOMIC_OPS_UNIX_H__
#define __ATOMIC_OPS_UNIX_H__
#if defined(__arm__) || defined(__riscv)
#  define JE_FORCE_SYNC_COMPARE_AND_SWAP_1
#  define JE_FORCE_SYNC_COMPARE_AND_SWAP_2
#  define JE_FORCE_SYNC_COMPARE_AND_SWAP_4
#  define JE_FORCE_SYNC_COMPARE_AND_SWAP_8
#undef ATOMIC_FORCE_USE_FALLBACK
  /* Pad to a full cache line to avoid false sharing with other variables. */
  int pad[32 - sizeof(int)];
  while (__sync_lock_test_and_set(&lock->lock, 1)) {
    while (lock->lock) {
    }
  }
  __sync_lock_release(&lock->lock);
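/* Note: __sync_lock_test_and_set() atomically stores 1 and returns the previous
 * value (acquire barrier), so the loop above spins until this thread is the one
 * that flipped the lock from 0 to 1; __sync_lock_release() stores 0 back with
 * release semantics.
 *
 * Minimal usage sketch (illustrative only; `guard` and `shared_counter` are
 * hypothetical names, not part of this header):
 *
 *   static AtomicSpinLock guard = {0};
 *   static int shared_counter = 0;
 *
 *   atomic_spin_lock(&guard);
 *   shared_counter++;  // critical section
 *   atomic_spin_unlock(&guard);
 */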
#define __atomic_impl_load_generic(v) (__sync_synchronize(), *(v))
#define __atomic_impl_store_generic(p, v) \
  do { \
    *(p) = (v); \
    __sync_synchronize(); \
  } while (0)
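/* Generic fallback accessors: the load issues a full barrier
 * (__sync_synchronize()) and then reads, the store writes and then issues a
 * full barrier. Presumably this is meant to approximate sequentially
 * consistent plain loads/stores on the fallback paths that lack a native
 * primitive. */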
#define ATOMIC_LOCKING_OP_AND_FETCH_DEFINE(_type, _op_name, _op) \
  ATOMIC_INLINE _type##_t atomic_##_op_name##_and_fetch_##_type(_type##_t *p, _type##_t x) \
  { \
    atomic_spin_lock(&_atomic_global_lock); \
    const _type##_t original_value = *(p); \
    const _type##_t new_value = original_value _op(x); \
    *(p) = new_value; \
    atomic_spin_unlock(&_atomic_global_lock); \
    return new_value; \
  }
#define ATOMIC_LOCKING_FETCH_AND_OP_DEFINE(_type, _op_name, _op) \
  ATOMIC_INLINE _type##_t atomic_fetch_and_##_op_name##_##_type(_type##_t *p, _type##_t x) \
  { \
    atomic_spin_lock(&_atomic_global_lock); \
    const _type##_t original_value = *(p); \
    *(p) = original_value _op(x); \
    atomic_spin_unlock(&_atomic_global_lock); \
    return original_value; \
  }
#define ATOMIC_LOCKING_ADD_AND_FETCH_DEFINE(_type) \
  ATOMIC_LOCKING_OP_AND_FETCH_DEFINE(_type, add, +)
#define ATOMIC_LOCKING_SUB_AND_FETCH_DEFINE(_type) \
  ATOMIC_LOCKING_OP_AND_FETCH_DEFINE(_type, sub, -)
#define ATOMIC_LOCKING_FETCH_AND_ADD_DEFINE(_type) \
  ATOMIC_LOCKING_FETCH_AND_OP_DEFINE(_type, add, +)
#define ATOMIC_LOCKING_FETCH_AND_SUB_DEFINE(_type) \
  ATOMIC_LOCKING_FETCH_AND_OP_DEFINE(_type, sub, -)
#define ATOMIC_LOCKING_FETCH_AND_OR_DEFINE(_type) ATOMIC_LOCKING_FETCH_AND_OP_DEFINE(_type, or, |)
#define ATOMIC_LOCKING_FETCH_AND_AND_DEFINE(_type) \
  ATOMIC_LOCKING_FETCH_AND_OP_DEFINE(_type, and, &)
#define ATOMIC_LOCKING_CAS_DEFINE(_type) \
  ATOMIC_INLINE _type##_t atomic_cas_##_type(_type##_t *v, _type##_t old, _type##_t _new) \
  { \
    atomic_spin_lock(&_atomic_global_lock); \
    const _type##_t original_value = *v; \
    if (original_value == old) { \
      *v = _new; \
    } \
    atomic_spin_unlock(&_atomic_global_lock); \
    return original_value; \
  }
#define ATOMIC_LOCKING_LOAD_DEFINE(_type) \
  ATOMIC_INLINE _type##_t atomic_load_##_type(const _type##_t *v) \
  { \
    atomic_spin_lock(&_atomic_global_lock); \
    const _type##_t value = *v; \
    atomic_spin_unlock(&_atomic_global_lock); \
    return value; \
  }
#define ATOMIC_LOCKING_STORE_DEFINE(_type) \
  ATOMIC_INLINE void atomic_store_##_type(_type##_t *p, const _type##_t v) \
  { \
    atomic_spin_lock(&_atomic_global_lock); \
    *p = v; \
    atomic_spin_unlock(&_atomic_global_lock); \
  }
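/* For reference, a rough sketch of what one instantiation expands to, e.g.
 * ATOMIC_LOCKING_CAS_DEFINE(uint32):
 *
 *   ATOMIC_INLINE uint32_t atomic_cas_uint32(uint32_t *v, uint32_t old, uint32_t _new)
 *   {
 *     atomic_spin_lock(&_atomic_global_lock);
 *     const uint32_t original_value = *v;
 *     if (original_value == old) {
 *       *v = _new;
 *     }
 *     atomic_spin_unlock(&_atomic_global_lock);
 *     return original_value;
 *   }
 *
 * i.e. every locking fallback serializes through the single global spin lock,
 * which is correct but much slower than the native paths below. */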
#if !defined(ATOMIC_FORCE_USE_FALLBACK) && \
    (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
  return __sync_add_and_fetch(p, x);
  return __sync_sub_and_fetch(p, x);
  return __sync_fetch_and_add(p, x);
  return __sync_fetch_and_sub(p, x);
  return __sync_val_compare_and_swap(v, old, _new);
  return __atomic_load_n(v, __ATOMIC_SEQ_CST);
  __atomic_store(p, &v, __ATOMIC_SEQ_CST);
  return __sync_add_and_fetch(p, x);
  return __sync_sub_and_fetch(p, x);
  return __sync_fetch_and_add(p, x);
  return __sync_fetch_and_sub(p, x);
  return __sync_val_compare_and_swap(v, old, _new);
  return __atomic_load_n(v, __ATOMIC_SEQ_CST);
  __atomic_store(p, &v, __ATOMIC_SEQ_CST);
#elif !defined(ATOMIC_FORCE_USE_FALLBACK) && (defined(__amd64__) || defined(__x86_64__))
  asm volatile("lock; xaddq %0, %1;" : "+r"(ret), "=m"(*p) : "m"(*p));
  asm volatile("lock; xaddq %0, %1;" : "+r"(ret), "=m"(*p) : "m"(*p));
  asm volatile("lock; cmpxchgq %2,%1" : "=a"(ret), "+m"(*v) : "r"(_new), "0"(old) : "memory");
  asm volatile("lock; xaddq %0, %1;" : "+r"(ret), "=m"(*p) : "m"(*p));
  asm volatile("lock; xaddq %0, %1;" : "+r"(ret), "=m"(*p) : "m"(*p));
  asm volatile("lock; cmpxchgq %2,%1" : "=a"(ret), "+m"(*v) : "r"(_new), "0"(old) : "memory");
#if !defined(ATOMIC_FORCE_USE_FALLBACK) && \
    (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
  return __sync_add_and_fetch(p, x);
  return __sync_sub_and_fetch(p, x);
  return __sync_val_compare_and_swap(v, old, _new);
  return __atomic_load_n(v, __ATOMIC_SEQ_CST);
  __atomic_store(p, &v, __ATOMIC_SEQ_CST);
  return __sync_add_and_fetch(p, x);
  return __sync_sub_and_fetch(p, x);
  return __sync_val_compare_and_swap(v, old, _new);
  return __atomic_load_n(v, __ATOMIC_SEQ_CST);
  __atomic_store(p, &v, __ATOMIC_SEQ_CST);
#elif !defined(ATOMIC_FORCE_USE_FALLBACK) && \
    (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
  asm volatile("lock; xaddl %0, %1;" : "+r"(ret), "=m"(*p) : "m"(*p));
  asm volatile("lock; xaddl %0, %1;" : "+r"(ret), "=m"(*p) : "m"(*p));
  asm volatile("lock; cmpxchgl %2,%1" : "=a"(ret), "+m"(*v) : "r"(_new), "0"(old) : "memory");
  return __atomic_load_n(v, __ATOMIC_SEQ_CST);
  __atomic_store(p, &v, __ATOMIC_SEQ_CST);
  asm volatile("lock; xaddl %0, %1;" : "+r"(ret), "=m"(*p) : "m"(*p));
  asm volatile("lock; xaddl %0, %1;" : "+r"(ret), "=m"(*p) : "m"(*p));
  asm volatile("lock; cmpxchgl %2,%1" : "=a"(ret), "+m"(*v) : "r"(_new), "0"(old) : "memory");
  return __atomic_load_n(v, __ATOMIC_SEQ_CST);
  __atomic_store(p, &v, __ATOMIC_SEQ_CST);
#if !defined(ATOMIC_FORCE_USE_FALLBACK) && \
    (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
  return __sync_fetch_and_add(p, x);
  return __sync_fetch_and_or(p, x);
  return __sync_fetch_and_and(p, x);
  return __sync_fetch_and_add(p, x);
  return __sync_fetch_and_or(p, x);
  return __sync_fetch_and_and(p, x);
#if !defined(ATOMIC_FORCE_USE_FALLBACK) && \
    (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2) || defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_2))
  return __sync_fetch_and_and(p, b);
  return __sync_fetch_and_or(p, b);
#if !defined(ATOMIC_FORCE_USE_FALLBACK) && \
    (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1) || defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_1))
  return __sync_fetch_and_and(p, b);
  return __sync_fetch_and_or(p, b);
  return __sync_fetch_and_and(p, b);
  return __sync_fetch_and_or(p, b);
#undef __atomic_impl_load_generic
#undef __atomic_impl_store_generic

#undef ATOMIC_LOCKING_OP_AND_FETCH_DEFINE
#undef ATOMIC_LOCKING_FETCH_AND_OP_DEFINE
#undef ATOMIC_LOCKING_ADD_AND_FETCH_DEFINE
#undef ATOMIC_LOCKING_SUB_AND_FETCH_DEFINE
#undef ATOMIC_LOCKING_FETCH_AND_ADD_DEFINE
#undef ATOMIC_LOCKING_FETCH_AND_SUB_DEFINE
#undef ATOMIC_LOCKING_FETCH_AND_OR_DEFINE
#undef ATOMIC_LOCKING_FETCH_AND_AND_DEFINE
#undef ATOMIC_LOCKING_CAS_DEFINE
#undef ATOMIC_LOCKING_LOAD_DEFINE
#undef ATOMIC_LOCKING_STORE_DEFINE
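/* Example of using the public helpers defined in this file as a reference
 * count (illustrative sketch only; `RefCounted`, `object_retain` and
 * `object_release` are hypothetical names, not part of this header):
 *
 *   typedef struct RefCounted {
 *     uint32_t refcount;
 *   } RefCounted;
 *
 *   void object_retain(RefCounted *data)
 *   {
 *     atomic_add_and_fetch_uint32(&data->refcount, 1);
 *   }
 *
 *   void object_release(RefCounted *data)
 *   {
 *     if (atomic_sub_and_fetch_uint32(&data->refcount, 1) == 0) {
 *       free(data);  // last reference dropped
 *     }
 *   }
 *
 * Whether these calls map to native instructions or to the global-lock
 * fallback above is decided entirely by the preprocessor checks in this file. */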