/*
 * locks_i386_inlines.h
 */
#ifndef _I386_LOCKS_I386_INLINES_H_
#define _I386_LOCKS_I386_INLINES_H_
#include <kern/locks.h>
#include <kern/lock_stat.h>
#include <kern/turnstile.h>
/*
 * Atomic helpers for the 32-bit lck_mtx state word and owner field.
 *
 * ordered_load: plain atomic load; `compiler_acq_rel` constrains compiler
 * reordering only (no hardware fence is emitted here).
 */
#define ordered_load(target) os_atomic_load(target, compiler_acq_rel)
/*
 * ordered_store_release: store with release semantics so all prior writes
 * are visible before the new state is published, followed by a compiler
 * barrier to keep subsequent compiler-reordered accesses behind the store.
 */
#define ordered_store_release(target, value) ({ \
os_atomic_store(target, value, release); \
os_compiler_barrier(); \
})
/* Load the mutex state word. */
#define ordered_load_mtx_state(lock) ordered_load(&(lock)->lck_mtx_state)
/* Publish a new mutex state word with release semantics. */
#define ordered_store_mtx_state_release(lock, value) ordered_store_release(&(lock)->lck_mtx_state, (value))
/* Record the owning thread; compiler-ordering only, no hardware fence. */
#define ordered_store_mtx_owner(lock, value) os_atomic_store(&(lock)->lck_mtx_owner, (value), compiler_acq_rel)
#if DEVELOPMENT | DEBUG
/* Panics on an owner-consistency failure; never returns (__abortlike). */
void lck_mtx_owner_check_panic(lck_mtx_t *mutex) __abortlike;
#endif
/*
 * Release the mutex interlock: clear the interlock bit, publish the new
 * state with release semantics, then re-enable preemption (which was
 * disabled while the interlock was held).
 */
__attribute__((always_inline))
static inline void
lck_mtx_ilk_unlock_inline(
	lck_mtx_t *mutex,
	uint32_t state)
{
	uint32_t new_state = state & ~LCK_MTX_ILOCKED_MSK;

	ordered_store_mtx_state_release(mutex, new_state);
	enable_preemption();
}
/*
 * Common tail of the mutex lock slow path: drops the interlock
 * (re-enabling preemption) and, when DTrace is configured, fires the
 * lockstat acquire probe.  `indirect` selects the probe used for
 * indirect/extended mutexes.
 */
__attribute__((always_inline))
static inline void
lck_mtx_lock_finish_inline(
lck_mtx_t *mutex,
uint32_t state,
boolean_t indirect)
{
/* Caller must still be holding the interlock here. */
assert(state & LCK_MTX_ILOCKED_MSK);
lck_mtx_ilk_unlock_inline(mutex, state);
#if CONFIG_DTRACE
if (indirect) {
LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE, mutex, 0);
} else {
LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE, mutex, 0);
}
#endif
}
/*
 * Like lck_mtx_lock_finish_inline, but additionally runs
 * turnstile_cleanup() after dropping the interlock (with preemption
 * re-enabled) and before firing the DTrace acquire probe.  Used on
 * paths that parked on a turnstile while contending for the lock.
 */
__attribute__((always_inline))
static inline void
lck_mtx_lock_finish_inline_with_cleanup(
lck_mtx_t *mutex,
uint32_t state,
boolean_t indirect)
{
/* Caller must still be holding the interlock here. */
assert(state & LCK_MTX_ILOCKED_MSK);
lck_mtx_ilk_unlock_inline(mutex, state);
turnstile_cleanup();
#if CONFIG_DTRACE
if (indirect) {
LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE, mutex, 0);
} else {
LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE, mutex, 0);
}
#endif
}
/*
 * Tail of a successful try-lock: drops the interlock (re-enabling
 * preemption) and fires the try-lock acquire probe under DTrace.
 * NOTE(review): unlike the lock-finish variants this does not assert
 * LCK_MTX_ILOCKED_MSK in `state` — presumably the caller guarantees it;
 * confirm against call sites.
 */
__attribute__((always_inline))
static inline void
lck_mtx_try_lock_finish_inline(
lck_mtx_t *mutex,
uint32_t state)
{
lck_mtx_ilk_unlock_inline(mutex, state);
#if CONFIG_DTRACE
LOCKSTAT_RECORD(LS_LCK_MTX_TRY_LOCK_ACQUIRE, mutex, 0);
#endif
}
/*
 * Finish converting a spin-mode mutex to a full (blocking) mutex:
 * drop the interlock and spin bits, set the mutex-locked bit, publish
 * the new state with release semantics, and re-enable preemption.
 */
__attribute__((always_inline))
static inline void
lck_mtx_convert_spin_finish_inline(
	lck_mtx_t *mutex,
	uint32_t state)
{
	uint32_t new_state =
	    (state & ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_SPIN_MSK)) | LCK_MTX_MLOCKED_MSK;

	ordered_store_mtx_state_release(mutex, new_state);
	enable_preemption();
}
/*
 * Tail of the mutex unlock path.  The caller has already published the
 * released state word; this only re-enables preemption and, under
 * DTrace, fires the matching release probe (`indirect` selects the
 * probe for indirect/extended mutexes).
 */
__attribute__((always_inline))
static inline void
lck_mtx_unlock_finish_inline(
lck_mtx_t *mutex,
boolean_t indirect)
{
enable_preemption();
#if CONFIG_DTRACE
if (indirect) {
LOCKSTAT_RECORD(LS_LCK_MTX_EXT_UNLOCK_RELEASE, mutex, 0);
} else {
LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, mutex, 0);
}
#endif // CONFIG_DTRACE
}
#endif