#ifndef _MACHINE_ATOMIC_H
#error "Do not include <arm/atomic.h> directly, use <machine/atomic.h>"
#endif
#ifndef _ARM_ATOMIC_H_
#define _ARM_ATOMIC_H_
#include <mach/boolean.h>
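/*
 * Encodings for the 4-bit barrier option field of the ARM DMB and DSB
 * instructions (and SY for ISB).  OSH/NSH/ISH select the outer-shareable,
 * non-shareable and inner-shareable domains, the LD/ST suffixes restrict
 * the barrier to loads or stores respectively, and SY is the full-system
 * barrier.  These values can be passed to the ACLE barrier builtins,
 * for example:
 *
 *     __builtin_arm_dmb(DMB_ISH);    // dmb ish
 *     __builtin_arm_dsb(DSB_SY);     // dsb sy
 *     __builtin_arm_isb(ISB_SY);     // isb sy
 */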
#define DMB_OSHLD   0x1
#define DMB_OSHST   0x2
#define DMB_OSH     0x3
#define DMB_NSHLD   0x5
#define DMB_NSHST   0x6
#define DMB_NSH     0x7
#define DMB_ISHLD   0x9
#define DMB_ISHST   0xa
#define DMB_ISH     0xb
#define DMB_LD      0xd
#define DMB_ST      0xe
#define DMB_SY      0xf

#define DSB_OSHLD   0x1
#define DSB_OSHST   0x2
#define DSB_OSH     0x3
#define DSB_NSHLD   0x5
#define DSB_NSHST   0x6
#define DSB_NSH     0x7
#define DSB_ISHLD   0x9
#define DSB_ISHST   0xa
#define DSB_ISH     0xb
#define DSB_LD      0xd
#define DSB_ST      0xe
#define DSB_SY      0xf

#define ISB_SY      0xf
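
/*
 * Every ARM target this header supports has load/store-exclusive
 * (LDREX/STREX on arm, LDXR/STXR on arm64), so LL/SC primitives are
 * always available.  Targets built for ARMv8.2 (which implies the
 * ARMv8.1 LSE atomic instructions) opt out of the LL/SC based RMW
 * loops below so the compiler can emit the dedicated atomics instead.
 */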
#undef OS_ATOMIC_HAS_LLSC
#define OS_ATOMIC_HAS_LLSC 1
#if defined(__ARM_ARCH_8_2__) && defined(__arm64__)
#undef OS_ATOMIC_USE_LLSC
#define OS_ATOMIC_USE_LLSC 0
#endif
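/*
 * arm and arm64 provide fine grained dependency injection (see
 * os_atomic_make_dependency() below), so a "dependency" ordering
 * needs no fence at all and maps to relaxed.
 */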
#undef memory_order_dependency_smp
#define memory_order_dependency_smp memory_order_relaxed
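/* abandon any outstanding exclusive-monitor reservation (CLREX) */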
#define os_atomic_clear_exclusive() __builtin_arm_clrex()
#if __arm__
#define os_atomic_load_exclusive(p, m) ({ \
    _os_atomic_basetypeof(p) _r; \
    _r = __builtin_arm_ldrex(p); \
    _os_memory_fence_after_atomic(m); \
    _os_compiler_barrier_after_atomic(m); \
    _r; \
})
#define os_atomic_store_exclusive(p, v, m) ({ \
    _os_compiler_barrier_before_atomic(m); \
    _os_memory_fence_before_atomic(m); \
    !__builtin_arm_strex(v, p); \
})
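
/*
 * Illustrative sketch (not part of this header): a single-shot
 * compare-and-swap built from the exclusive pair above.  The name
 * _cas32 and its arguments are hypothetical, and a real implementation
 * would retry, since STREX can fail spuriously.
 *
 *     static inline bool
 *     _cas32(uint32_t *p, uint32_t e, uint32_t n)
 *     {
 *             if (os_atomic_load_exclusive(p, relaxed) != e) {
 *                     os_atomic_clear_exclusive();
 *                     return false;
 *             }
 *             return os_atomic_store_exclusive(p, n, relaxed);
 *     }
 */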
#undef os_atomic_make_dependency
#define os_atomic_make_dependency(v) ({ \
    os_atomic_dependency_t _dep; \
    __asm__ __volatile__("and %[_dep], %[_v], #0" \
        : [_dep] "=r" (_dep.__opaque_zero) : [_v] "r" (v)); \
    os_compiler_barrier(acquire); \
    _dep; \
})
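/*
 * The AND with zero always yields 0, but leaves the result register
 * data-dependent on v; the architecture orders subsequent dependent
 * loads behind that dependency, so callers can fold the zero into a
 * pointer to get acquire-like ordering without emitting a barrier.
 */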
#undef os_atomic_rmw_loop
#define os_atomic_rmw_loop(p, ov, nv, m, ...) ({ \
    int _result = 0; uint32_t _err = 0; \
    _os_atomic_basetypeof(p) *_p; \
    _p = (_os_atomic_basetypeof(p) *)(p); \
    _os_compiler_barrier_before_atomic(m); \
    for (;;) { \
        ov = __builtin_arm_ldrex(_p); \
        __VA_ARGS__; \
        if (!_err) { \
            /* release barrier only done for the first loop iteration */ \
            _os_memory_fence_before_atomic(m); \
        } \
        _err = __builtin_arm_strex(nv, _p); \
        if (__builtin_expect(!_err, 1)) { \
            _os_memory_fence_after_atomic(m); \
            _result = 1; \
            break; \
        } \
    } \
    _os_compiler_barrier_after_atomic(m); \
    _result; \
})
#undef os_atomic_rmw_loop_give_up
#define os_atomic_rmw_loop_give_up(...) \
    ({ os_atomic_clear_exclusive(); __VA_ARGS__; break; })
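
/*
 * Illustrative usage (hypothetical "counter" variable, inside a
 * function returning bool): increment unless saturated, leaving the
 * loop early through os_atomic_rmw_loop_give_up():
 *
 *     uint32_t ov, nv;
 *     os_atomic_rmw_loop(&counter, ov, nv, acq_rel, {
 *             if (ov == UINT32_MAX) {
 *                     os_atomic_rmw_loop_give_up(return false);
 *             }
 *             nv = ov + 1;
 *     });
 *     return true;
 */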
#else // __arm64__
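/*
 * On arm64 the exclusives themselves can carry acquire/release
 * semantics (LDAXR/STLXR), so ordering is folded into the exclusive
 * accesses rather than emitted as standalone fences as on 32-bit ARM.
 */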
#define os_atomic_load_exclusive(p, m) ({ \
    _os_atomic_basetypeof(p) _r; \
    if (memory_order_has_acquire(memory_order_##m##_smp)) { \
        _r = __builtin_arm_ldaex(p); \
    } else { \
        _r = __builtin_arm_ldrex(p); \
    } \
    _os_compiler_barrier_after_atomic(m); \
    _r; \
})
#define os_atomic_store_exclusive(p, v, m) ({ \
    _os_compiler_barrier_before_atomic(m); \
    (memory_order_has_release(memory_order_##m##_smp) ? \
        !__builtin_arm_stlex(v, p) : !__builtin_arm_strex(v, p)); \
})
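/* no explicit fence here: STLXR itself orders the store when m has release */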
#undef os_atomic_make_dependency
#define os_atomic_make_dependency(v) ({ \
    os_atomic_dependency_t _dep; \
    __asm__ __volatile__("and %[_dep], %[_v], xzr" \
        : [_dep] "=r" (_dep.__opaque_zero) : [_v] "r" (v)); \
    os_compiler_barrier(acquire); \
    _dep; \
})
#if OS_ATOMIC_USE_LLSC
#undef os_atomic_rmw_loop
#define os_atomic_rmw_loop(p, ov, nv, m, ...) ({ \
    int _result = 0; \
    _os_atomic_basetypeof(p) *_p; \
    _p = (_os_atomic_basetypeof(p) *)(p); \
    _os_compiler_barrier_before_atomic(m); \
    do { \
        if (memory_order_has_acquire(memory_order_##m##_smp)) { \
            ov = __builtin_arm_ldaex(_p); \
        } else { \
            ov = __builtin_arm_ldrex(_p); \
        } \
        __VA_ARGS__; \
        if (memory_order_has_release(memory_order_##m##_smp)) { \
            _result = !__builtin_arm_stlex(nv, _p); \
        } else { \
            _result = !__builtin_arm_strex(nv, _p); \
        } \
    } while (__builtin_expect(!_result, 0)); \
    _os_compiler_barrier_after_atomic(m); \
    _result; \
})
#undef os_atomic_rmw_loop_give_up
#define os_atomic_rmw_loop_give_up(...) \
    ({ os_atomic_clear_exclusive(); __VA_ARGS__; break; })
#endif // OS_ATOMIC_USE_LLSC
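
/*
 * With OS_ATOMIC_USE_LLSC disabled (ARMv8.2 and later), the generic
 * compare-exchange based os_atomic_rmw_loop() definition that this
 * header would otherwise replace stays in effect, letting the compiler
 * emit the ARMv8.1 LSE atomics directly.
 */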
#endif // __arm64__
#endif // _ARM_ATOMIC_H_