#ifndef _ARM_ATOMIC_H_
#define _ARM_ATOMIC_H_
#include <arm/smp.h>
// Parameters for the __builtin_arm_dmb() data memory barrier builtin;
// the values are the architectural CRm encodings for each barrier domain.
#define DMB_NSH		0x7
#define DMB_ISHLD	0x9
#define DMB_ISHST	0xa
#define DMB_ISH		0xb
#define DMB_SY		0xf

// Parameters for the __builtin_arm_dsb() data synchronization barrier builtin
#define DSB_NSH		0x7
#define DSB_ISHLD	0x9
#define DSB_ISHST	0xa
#define DSB_ISH		0xb
#define DSB_SY		0xf

// Parameter for the __builtin_arm_isb() instruction synchronization barrier builtin
#define ISB_SY		0xf
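/*
 * Illustrative sketch (not part of this interface): the encodings above
 * are passed straight to the clang barrier builtins, e.g. an
 * inner-shareable store/store barrier before publishing a flag:
 *
 *	__builtin_arm_dmb(DMB_ISHST);	// order prior stores before the flag
 *	*flag = 1;			// `flag` is a hypothetical pointer
 */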
/*
 * On uniprocessor (!__SMP__) configurations there is no other CPU to
 * order against, so the _smp variants of the C11 memory orders relax
 * to memory_order_relaxed and the associated barriers are elided.
 */
#if __SMP__
#define memory_order_consume_smp memory_order_consume
#define memory_order_acquire_smp memory_order_acquire
#define memory_order_release_smp memory_order_release
#define memory_order_acq_rel_smp memory_order_acq_rel
#define memory_order_seq_cst_smp memory_order_seq_cst
#else
#define memory_order_consume_smp memory_order_relaxed
#define memory_order_acquire_smp memory_order_relaxed
#define memory_order_release_smp memory_order_relaxed
#define memory_order_acq_rel_smp memory_order_relaxed
#define memory_order_seq_cst_smp memory_order_relaxed
#endif
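/*
 * Usage sketch (hypothetical variable, not defined here): callers that
 * only need ordering against other CPUs use the _smp variants, so UP
 * kernels compile the acquire down to a plain relaxed load:
 *
 *	uint32_t v = atomic_load_explicit(&shared_counter,
 *	    memory_order_acquire_smp);
 */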
// TRUE if `ord` carries acquire (or consume) semantics
static inline boolean_t
memory_order_has_acquire(enum memory_order ord)
{
	switch (ord) {
	case memory_order_consume:
	case memory_order_acquire:
	case memory_order_acq_rel:
	case memory_order_seq_cst:
		return TRUE;
	default:
		return FALSE;
	}
}

// TRUE if `ord` carries release semantics
static inline boolean_t
memory_order_has_release(enum memory_order ord)
{
	switch (ord) {
	case memory_order_release:
	case memory_order_acq_rel:
	case memory_order_seq_cst:
		return TRUE;
	default:
		return FALSE;
	}
}
#ifdef ATOMIC_PRIVATE

// Drop any outstanding exclusive-monitor reservation
#define clear_exclusive()	__builtin_arm_clrex()

__unused static uint32_t
load_exclusive32(uint32_t *target, enum memory_order ord)
{
	uint32_t	value;

#if __arm__
	if (memory_order_has_release(ord)) {
		// 32-bit arm has no load-acquire/store-release exclusives,
		// so ordering is provided by explicit fences instead.
		atomic_thread_fence(memory_order_release);
	}
	value = __builtin_arm_ldrex(target);
#else
	if (memory_order_has_acquire(ord)) {
		value = __builtin_arm_ldaex(target);	// ldaxr
	} else {
		value = __builtin_arm_ldrex(target);	// ldxr
	}
#endif	// __arm__
	return value;
}
__unused static boolean_t
store_exclusive32(uint32_t *target, uint32_t value, enum memory_order ord)
{
	boolean_t	err;

#if __arm__
	err = __builtin_arm_strex(value, target);
	if (memory_order_has_acquire(ord)) {
		atomic_thread_fence(memory_order_acquire);
	}
#else
	if (memory_order_has_release(ord)) {
		err = __builtin_arm_stlex(value, target);	// stlxr
	} else {
		err = __builtin_arm_strex(value, target);	// stxr
	}
#endif	// __arm__
	return !err;	// the builtins return 0 on success
}
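/*
 * Illustrative sketch (not part of this interface): an atomic fetch-or
 * composed from the exclusive pair above. The store-exclusive fails,
 * and the loop retries, whenever another CPU has touched the location
 * between the paired load and store.
 *
 *	static uint32_t
 *	example_fetch_or32(uint32_t *target, uint32_t bits)
 *	{
 *		uint32_t ov;
 *		do {
 *			ov = load_exclusive32(target, memory_order_relaxed);
 *		} while (!store_exclusive32(target, ov | bits,
 *		    memory_order_relaxed));
 *		return ov;
 *	}
 */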
__unused static uintptr_t
load_exclusive(uintptr_t *target, enum memory_order ord)
{
#if !__LP64__
	return load_exclusive32((uint32_t *)target, ord);
#else
	uintptr_t	value;

	if (memory_order_has_acquire(ord)) {
		value = __builtin_arm_ldaex(target);	// ldaxr
	} else {
		value = __builtin_arm_ldrex(target);	// ldxr
	}
	return value;
#endif	// __LP64__
}
__unused static boolean_t
store_exclusive(uintptr_t *target, uintptr_t value, enum memory_order ord)
{
#if !__LP64__
	return store_exclusive32((uint32_t *)target, value, ord);
#else
	boolean_t	err;

	if (memory_order_has_release(ord)) {
		err = __builtin_arm_stlex(value, target);	// stlxr
	} else {
		err = __builtin_arm_strex(value, target);	// stxr
	}
	return !err;
#endif	// __LP64__
}
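/*
 * Illustrative sketch (not part of this interface): a pointer-width
 * unconditional exchange built on the wrappers above; on 32-bit
 * kernels these forward to the 32-bit exclusives, on LP64 they use
 * the 64-bit ldxr/stxr pair directly.
 *
 *	static uintptr_t
 *	example_exchange(uintptr_t *target, uintptr_t nv)
 *	{
 *		uintptr_t ov;
 *		do {
 *			ov = load_exclusive(target, memory_order_relaxed);
 *		} while (!store_exclusive(target, nv, memory_order_relaxed));
 *		return ov;
 *	}
 */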
__unused static boolean_t
atomic_compare_exchange(uintptr_t *target, uintptr_t oldval, uintptr_t newval,
    enum memory_order orig_ord, boolean_t wait)
{
	enum memory_order	ord = orig_ord;
	uintptr_t		value;

#if __arm__
	// On 32-bit arm the ordering comes from explicit fences around a
	// relaxed LL/SC sequence rather than from the exclusives themselves.
	ord = memory_order_relaxed;
	if (memory_order_has_release(orig_ord)) {
		atomic_thread_fence(memory_order_release);
	}
#endif
	do {
		value = load_exclusive(target, ord);
		if (value != oldval) {
			if (wait) {
				wait_for_event();	// Wait with monitor held
			} else {
				clear_exclusive();	// Clear exclusive monitor
			}
			return FALSE;
		}
	} while (!store_exclusive(target, newval, ord));
#if __arm__
	if (memory_order_has_acquire(orig_ord)) {
		atomic_thread_fence(memory_order_acquire);
	}
#endif
	return TRUE;
}
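/*
 * Usage sketch (hypothetical lock word, not defined here): a simple
 * spin-acquire built on atomic_compare_exchange. With wait == TRUE a
 * failed compare waits for an event with the exclusive monitor armed,
 * so the owner's store wakes this CPU rather than it spinning hot.
 *
 *	static void
 *	example_lock(uintptr_t *lock)
 *	{
 *		while (!atomic_compare_exchange(lock, 0, 1,
 *		    memory_order_acquire_smp, TRUE))
 *			;
 *	}
 */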
#endif // ATOMIC_PRIVATE
#if __arm__

#define os_atomic_rmw_loop(p, ov, nv, m, ...)  ({ \
	boolean_t _result = FALSE; uint32_t _err = 0; \
	typeof(atomic_load(p)) *_p = (typeof(atomic_load(p)) *)(p); \
	for (;;) { \
		ov = __builtin_arm_ldrex(_p); \
		__VA_ARGS__; \
		if (!_err && memory_order_has_release(memory_order_##m)) { \
			/* only done for the first loop iteration */ \
			atomic_thread_fence(memory_order_release); \
		} \
		_err = __builtin_arm_strex(nv, _p); \
		if (__builtin_expect(!_err, 1)) { \
			if (memory_order_has_acquire(memory_order_##m)) { \
				atomic_thread_fence(memory_order_acquire); \
			} \
			_result = TRUE; \
			break; \
		} \
	} \
	_result; \
})
#else

#define os_atomic_rmw_loop(p, ov, nv, m, ...)  ({ \
	boolean_t _result = FALSE; \
	typeof(atomic_load(p)) *_p = (typeof(atomic_load(p)) *)(p); \
	do { \
		if (memory_order_has_acquire(memory_order_##m)) { \
			ov = __builtin_arm_ldaex(_p); \
		} else { \
			ov = __builtin_arm_ldrex(_p); \
		} \
		__VA_ARGS__; \
		if (memory_order_has_release(memory_order_##m)) { \
			_result = !__builtin_arm_stlex(nv, _p); \
		} else { \
			_result = !__builtin_arm_strex(nv, _p); \
		} \
	} while (__builtin_expect(!_result, 0)); \
	_result; \
})

#endif
/*
 * Abandon an os_atomic_rmw_loop from inside its body: clear the
 * exclusive reservation first so the monitor is not left armed, then
 * evaluate `expr`, which is expected to transfer control out (return,
 * goto, break). The trailing trap catches an expr that falls through.
 */
#define os_atomic_rmw_loop_give_up(expr) \
	({ __builtin_arm_clrex(); expr; __builtin_trap(); })
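/*
 * Usage sketch (hypothetical counter, not part of this interface): a
 * saturating increment expressed as an rmw loop; give_up drops the
 * reservation and returns early when no update should be made.
 *
 *	static boolean_t
 *	example_inc_saturating(uint32_t _Atomic *counter)
 *	{
 *		uint32_t ov, nv;
 *		os_atomic_rmw_loop(counter, ov, nv, acq_rel, {
 *			if (ov == UINT32_MAX) {
 *				os_atomic_rmw_loop_give_up(return FALSE);
 *			}
 *			nv = ov + 1;
 *		});
 *		return TRUE;
 *	}
 */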
#endif // _ARM_ATOMIC_H_