#define LOCK_PRIVATE 1
#include <mach_ldebug.h>
#include <debug.h>
#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach_debug/lockgroup_info.h>
#include <kern/lock_stat.h>
#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>
#include <kern/debug.h>
#include <libkern/section_keywords.h>
#include <machine/atomic.h>
#include <machine/machine_cpu.h>
#include <string.h>
#include <sys/kdebug.h>
#define LCK_MTX_SLEEP_CODE 0
#define LCK_MTX_SLEEP_DEADLINE_CODE 1
#define LCK_MTX_LCK_WAIT_CODE 2
#define LCK_MTX_UNLCK_WAKEUP_CODE 3
#if MACH_LDEBUG
#define ALIGN_TEST(p, t) do { if ((uintptr_t)(p) & (sizeof(t) - 1)) __builtin_trap(); } while (0)
#else
#define ALIGN_TEST(p, t) do{}while(0)
#endif
#define NOINLINE __attribute__((noinline))
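/*
 * Note: compiler_acq_rel constrains only compiler reordering; these
 * helpers emit no hardware fences. Cross-CPU ordering comes from the
 * acquire/release atomics used at the lock/unlock points below.
 */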
#define ordered_load_hw(lock) os_atomic_load(&(lock)->lock_data, compiler_acq_rel)
#define ordered_store_hw(lock, value) os_atomic_store(&(lock)->lock_data, (value), compiler_acq_rel)
queue_head_t lck_grp_queue;
unsigned int lck_grp_cnt;
decl_lck_mtx_data(, lck_grp_lock);
static lck_mtx_ext_t lck_grp_lock_ext;
SECURITY_READ_ONLY_LATE(boolean_t) spinlock_timeout_panic = TRUE;
TUNABLE(uint32_t, LcksOpts, "lcks", 0);
ZONE_VIEW_DEFINE(ZV_LCK_GRP_ATTR, "lck_grp_attr",
KHEAP_ID_DEFAULT, sizeof(lck_grp_attr_t));
ZONE_VIEW_DEFINE(ZV_LCK_GRP, "lck_grp",
KHEAP_ID_DEFAULT, sizeof(lck_grp_t));
ZONE_VIEW_DEFINE(ZV_LCK_ATTR, "lck_attr",
KHEAP_ID_DEFAULT, sizeof(lck_attr_t));
lck_grp_attr_t LockDefaultGroupAttr;
lck_grp_t LockCompatGroup;
lck_attr_t LockDefaultLckAttr;
#if CONFIG_DTRACE
#if defined (__x86_64__)
uint64_t dtrace_spin_threshold = 500;
#elif defined(__arm__) || defined(__arm64__)
uint64_t dtrace_spin_threshold = LOCK_PANIC_TIMEOUT / 1000000;
#endif
#endif
uintptr_t
unslide_for_kdebug(void* object)
{
if (__improbable(kdebug_enable)) {
return VM_KERNEL_UNSLIDE_OR_PERM(object);
} else {
return 0;
}
}
__startup_func
static void
lck_mod_init(void)
{
queue_init(&lck_grp_queue);
bzero(&LockCompatGroup, sizeof(lck_grp_t));
(void) strncpy(LockCompatGroup.lck_grp_name, "Compatibility APIs", LCK_GRP_MAX_NAME);
LockCompatGroup.lck_grp_attr = LCK_ATTR_NONE;
if (LcksOpts & enaLkStat) {
LockCompatGroup.lck_grp_attr |= LCK_GRP_ATTR_STAT;
}
if (LcksOpts & enaLkTimeStat) {
LockCompatGroup.lck_grp_attr |= LCK_GRP_ATTR_TIME_STAT;
}
os_ref_init(&LockCompatGroup.lck_grp_refcnt, NULL);
enqueue_tail(&lck_grp_queue, (queue_entry_t)&LockCompatGroup);
lck_grp_cnt = 1;
lck_grp_attr_setdefault(&LockDefaultGroupAttr);
lck_attr_setdefault(&LockDefaultLckAttr);
lck_mtx_init_ext(&lck_grp_lock, &lck_grp_lock_ext, &LockCompatGroup, &LockDefaultLckAttr);
}
STARTUP(LOCKS_EARLY, STARTUP_RANK_FIRST, lck_mod_init);
lck_grp_attr_t *
lck_grp_attr_alloc_init(
void)
{
lck_grp_attr_t *attr;
attr = zalloc(ZV_LCK_GRP_ATTR);
lck_grp_attr_setdefault(attr);
return attr;
}
void
lck_grp_attr_setdefault(
lck_grp_attr_t *attr)
{
if (LcksOpts & enaLkStat) {
attr->grp_attr_val = LCK_GRP_ATTR_STAT;
} else {
attr->grp_attr_val = 0;
}
}
void
lck_grp_attr_setstat(
lck_grp_attr_t *attr)
{
os_atomic_or(&attr->grp_attr_val, LCK_GRP_ATTR_STAT, relaxed);
}
void
lck_grp_attr_free(
lck_grp_attr_t *attr)
{
zfree(ZV_LCK_GRP_ATTR, attr);
}
lck_grp_t *
lck_grp_alloc_init(
const char* grp_name,
lck_grp_attr_t *attr)
{
lck_grp_t *grp;
grp = zalloc(ZV_LCK_GRP);
lck_grp_init(grp, grp_name, attr);
return grp;
}
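/*
 * Example (illustrative sketch, not part of this file): clients typically
 * allocate one group per subsystem so that lock statistics aggregate per
 * subsystem. All names below are hypothetical.
 *
 *	lck_grp_t *my_grp = lck_grp_alloc_init("com.example.driver",
 *	    LCK_GRP_ATTR_NULL);
 *	lck_mtx_t *my_mtx = lck_mtx_alloc_init(my_grp, LCK_ATTR_NULL);
 *
 *	lck_mtx_lock(my_mtx);
 *	// ... critical section ...
 *	lck_mtx_unlock(my_mtx);
 *
 *	lck_mtx_free(my_mtx, my_grp);
 *	lck_grp_free(my_grp);
 */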
void
lck_grp_init(lck_grp_t * grp, const char * grp_name, lck_grp_attr_t * attr)
{
assert(lck_grp_cnt > 0);
bzero((void *)grp, sizeof(lck_grp_t));
(void)strlcpy(grp->lck_grp_name, grp_name, LCK_GRP_MAX_NAME);
if (attr != LCK_GRP_ATTR_NULL) {
grp->lck_grp_attr = attr->grp_attr_val;
} else {
grp->lck_grp_attr = 0;
if (LcksOpts & enaLkStat) {
grp->lck_grp_attr |= LCK_GRP_ATTR_STAT;
}
if (LcksOpts & enaLkTimeStat) {
grp->lck_grp_attr |= LCK_GRP_ATTR_TIME_STAT;
}
}
if (grp->lck_grp_attr & LCK_GRP_ATTR_STAT) {
lck_grp_stats_t *stats = &grp->lck_grp_stats;
#if LOCK_STATS
lck_grp_stat_enable(&stats->lgss_spin_held);
lck_grp_stat_enable(&stats->lgss_spin_miss);
#endif
lck_grp_stat_enable(&stats->lgss_mtx_held);
lck_grp_stat_enable(&stats->lgss_mtx_miss);
lck_grp_stat_enable(&stats->lgss_mtx_direct_wait);
lck_grp_stat_enable(&stats->lgss_mtx_wait);
}
if (grp->lck_grp_attr & LCK_GRP_ATTR_TIME_STAT) {
#if LOCK_STATS
lck_grp_stats_t *stats = &grp->lck_grp_stats;
lck_grp_stat_enable(&stats->lgss_spin_spin);
#endif
}
os_ref_init(&grp->lck_grp_refcnt, NULL);
lck_mtx_lock(&lck_grp_lock);
enqueue_tail(&lck_grp_queue, (queue_entry_t)grp);
lck_grp_cnt++;
lck_mtx_unlock(&lck_grp_lock);
}
void
lck_grp_free(
lck_grp_t *grp)
{
lck_mtx_lock(&lck_grp_lock);
lck_grp_cnt--;
(void)remque((queue_entry_t)grp);
lck_mtx_unlock(&lck_grp_lock);
lck_grp_deallocate(grp);
}
void
lck_grp_reference(
lck_grp_t *grp)
{
os_ref_retain(&grp->lck_grp_refcnt);
}
void
lck_grp_deallocate(
lck_grp_t *grp)
{
if (os_ref_release(&grp->lck_grp_refcnt) != 0) {
return;
}
zfree(ZV_LCK_GRP, grp);
}
void
lck_grp_lckcnt_incr(
lck_grp_t *grp,
lck_type_t lck_type)
{
unsigned int *lckcnt;
switch (lck_type) {
case LCK_TYPE_SPIN:
lckcnt = &grp->lck_grp_spincnt;
break;
case LCK_TYPE_MTX:
lckcnt = &grp->lck_grp_mtxcnt;
break;
case LCK_TYPE_RW:
lckcnt = &grp->lck_grp_rwcnt;
break;
case LCK_TYPE_TICKET:
lckcnt = &grp->lck_grp_ticketcnt;
break;
default:
panic("lck_grp_lckcnt_incr(): invalid lock type: %d\n", lck_type);
return;
}
os_atomic_inc(lckcnt, relaxed);
}
void
lck_grp_lckcnt_decr(
lck_grp_t *grp,
lck_type_t lck_type)
{
unsigned int *lckcnt;
int updated;
switch (lck_type) {
case LCK_TYPE_SPIN:
lckcnt = &grp->lck_grp_spincnt;
break;
case LCK_TYPE_MTX:
lckcnt = &grp->lck_grp_mtxcnt;
break;
case LCK_TYPE_RW:
lckcnt = &grp->lck_grp_rwcnt;
break;
case LCK_TYPE_TICKET:
lckcnt = &grp->lck_grp_ticketcnt;
break;
default:
panic("lck_grp_lckcnt_decr(): invalid lock type: %d\n", lck_type);
return;
}
updated = os_atomic_dec(lckcnt, relaxed);
assert(updated >= 0);
}
lck_attr_t *
lck_attr_alloc_init(
void)
{
lck_attr_t *attr;
attr = zalloc(ZV_LCK_ATTR);
lck_attr_setdefault(attr);
return attr;
}
void
lck_attr_setdefault(
lck_attr_t *attr)
{
#if __arm__ || __arm64__
attr->lck_attr_val = LCK_ATTR_NONE;
#elif __i386__ || __x86_64__
#if !DEBUG
if (LcksOpts & enaLkDeb) {
attr->lck_attr_val = LCK_ATTR_DEBUG;
} else {
attr->lck_attr_val = LCK_ATTR_NONE;
}
#else
attr->lck_attr_val = LCK_ATTR_DEBUG;
#endif
#else
#error Unknown architecture.
#endif
}
void
lck_attr_setdebug(
lck_attr_t *attr)
{
os_atomic_or(&attr->lck_attr_val, LCK_ATTR_DEBUG, relaxed);
}
void
lck_attr_cleardebug(
lck_attr_t *attr)
{
os_atomic_andnot(&attr->lck_attr_val, LCK_ATTR_DEBUG, relaxed);
}
void
lck_attr_rw_shared_priority(
lck_attr_t *attr)
{
os_atomic_or(&attr->lck_attr_val, LCK_ATTR_RW_SHARED_PRIORITY, relaxed);
}
void
lck_attr_free(
lck_attr_t *attr)
{
zfree(ZV_LCK_ATTR, attr);
}
MARK_AS_HIBERNATE_TEXT void
hw_lock_init(hw_lock_t lock)
{
ordered_store_hw(lock, 0);
}
static inline bool
hw_lock_trylock_contended(hw_lock_t lock, uintptr_t newval)
{
#if OS_ATOMIC_USE_LLSC
uintptr_t oldval;
os_atomic_rmw_loop(&lock->lock_data, oldval, newval, acquire, {
if (oldval != 0) {
wait_for_event(); /* park (WFE) until the holder signals on unlock */
return false;
}
});
return true;
#else // !OS_ATOMIC_USE_LLSC
#if OS_ATOMIC_HAS_LLSC
uintptr_t oldval = os_atomic_load_exclusive(&lock->lock_data, relaxed);
if (oldval != 0) {
wait_for_event(); /* park (WFE) until the holder signals on unlock */
return false;
}
#endif // OS_ATOMIC_HAS_LLSC
return os_atomic_cmpxchg(&lock->lock_data, 0, newval, acquire);
#endif // !OS_ATOMIC_USE_LLSC
}
static unsigned int NOINLINE
hw_lock_lock_contended(hw_lock_t lock, uintptr_t data, uint64_t timeout, boolean_t do_panic LCK_GRP_ARG(lck_grp_t *grp))
{
uint64_t end = 0;
uintptr_t holder = lock->lock_data;
int i;
if (timeout == 0) {
timeout = LOCK_PANIC_TIMEOUT;
}
#if CONFIG_DTRACE || LOCK_STATS
uint64_t begin = 0;
boolean_t stat_enabled = lck_grp_spin_spin_enabled(lock LCK_GRP_ARG(grp));
#endif
#if LOCK_STATS || CONFIG_DTRACE
if (__improbable(stat_enabled)) {
begin = mach_absolute_time();
}
#endif
for (;;) {
for (i = 0; i < LOCK_SNOOP_SPINS; i++) {
cpu_pause();
#if (!__ARM_ENABLE_WFE_) || (LOCK_PRETEST)
holder = ordered_load_hw(lock);
if (holder != 0) {
continue;
}
#endif
if (hw_lock_trylock_contended(lock, data)) {
#if CONFIG_DTRACE || LOCK_STATS
if (__improbable(stat_enabled)) {
lck_grp_spin_update_spin(lock LCK_GRP_ARG(grp), mach_absolute_time() - begin);
}
lck_grp_spin_update_miss(lock LCK_GRP_ARG(grp));
#endif
return 1;
}
}
if (end == 0) {
end = ml_get_timebase() + timeout;
} else if (ml_get_timebase() >= end) {
break;
}
}
if (do_panic) {
panic("Spinlock timeout after %llu ticks, %p = %lx",
(ml_get_timebase() - end + timeout), lock, holder);
}
return 0;
}
void *
hw_wait_while_equals(void **address, void *current)
{
void *v;
uint64_t end = 0;
for (;;) {
for (int i = 0; i < LOCK_SNOOP_SPINS; i++) {
cpu_pause();
#if OS_ATOMIC_HAS_LLSC
v = os_atomic_load_exclusive(address, relaxed);
if (__probable(v != current)) {
os_atomic_clear_exclusive();
return v;
}
wait_for_event();
#else
v = os_atomic_load(address, relaxed);
if (__probable(v != current)) {
return v;
}
#endif // OS_ATOMIC_HAS_LLSC
}
if (end == 0) {
end = ml_get_timebase() + LOCK_PANIC_TIMEOUT;
} else if (ml_get_timebase() >= end) {
panic("Wait while equals timeout @ *%p == %p", address, v);
}
}
}
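/*
 * Usage sketch (illustrative; 'shared_ptr' and 'BUSY' are hypothetical):
 * spin, WFE-parked where available, until *address moves off the value we
 * last observed.
 *
 *	void *cur = os_atomic_load(&shared_ptr, relaxed);
 *	while (cur == BUSY) {
 *		cur = hw_wait_while_equals(&shared_ptr, cur);
 *	}
 */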
static inline void
hw_lock_lock_internal(hw_lock_t lock, thread_t thread LCK_GRP_ARG(lck_grp_t *grp))
{
uintptr_t state;
state = LCK_MTX_THREAD_TO_STATE(thread) | PLATFORM_LCK_ILOCK;
#if LOCK_PRETEST
if (ordered_load_hw(lock)) {
goto contended;
}
#endif // LOCK_PRETEST
if (hw_lock_trylock_contended(lock, state)) {
goto end;
}
#if LOCK_PRETEST
contended:
#endif // LOCK_PRETEST
hw_lock_lock_contended(lock, state, 0, spinlock_timeout_panic LCK_GRP_ARG(grp));
end:
lck_grp_spin_update_held(lock LCK_GRP_ARG(grp));
return;
}
void
(hw_lock_lock)(hw_lock_t lock LCK_GRP_ARG(lck_grp_t *grp))
{
thread_t thread = current_thread();
disable_preemption_for_thread(thread);
hw_lock_lock_internal(lock, thread LCK_GRP_ARG(grp));
}
void
(hw_lock_lock_nopreempt)(hw_lock_t lock LCK_GRP_ARG(lck_grp_t *grp))
{
thread_t thread = current_thread();
if (__improbable(!preemption_disabled_for_thread(thread))) {
panic("Attempt to take no-preempt spinlock %p in preemptible context", lock);
}
hw_lock_lock_internal(lock, thread LCK_GRP_ARG(grp));
}
static inline unsigned int
hw_lock_to_internal(hw_lock_t lock, uint64_t timeout, thread_t thread
LCK_GRP_ARG(lck_grp_t *grp))
{
uintptr_t state;
unsigned int success = 0;
state = LCK_MTX_THREAD_TO_STATE(thread) | PLATFORM_LCK_ILOCK;
#if LOCK_PRETEST
if (ordered_load_hw(lock)) {
goto contended;
}
#endif // LOCK_PRETEST
if (hw_lock_trylock_contended(lock, state)) {
success = 1;
goto end;
}
#if LOCK_PRETEST
contended:
#endif // LOCK_PRETEST
success = hw_lock_lock_contended(lock, state, timeout, FALSE LCK_GRP_ARG(grp));
end:
if (success) {
lck_grp_spin_update_held(lock LCK_GRP_ARG(grp));
}
return success;
}
unsigned
int
(hw_lock_to)(hw_lock_t lock, uint64_t timeout LCK_GRP_ARG(lck_grp_t *grp))
{
thread_t thread = current_thread();
disable_preemption_for_thread(thread);
return hw_lock_to_internal(lock, timeout, thread LCK_GRP_ARG(grp));
}
unsigned
int
(hw_lock_to_nopreempt)(hw_lock_t lock, uint64_t timeout LCK_GRP_ARG(lck_grp_t *grp))
{
thread_t thread = current_thread();
if (__improbable(!preemption_disabled_for_thread(thread))) {
panic("Attempt to test no-preempt spinlock %p in preemptible context", lock);
}
return hw_lock_to_internal(lock, timeout, thread LCK_GRP_ARG(grp));
}
static inline unsigned int
hw_lock_try_internal(hw_lock_t lock, thread_t thread LCK_GRP_ARG(lck_grp_t *grp))
{
int success = 0;
#if LOCK_PRETEST
if (ordered_load_hw(lock)) {
goto failed;
}
#endif // LOCK_PRETEST
success = os_atomic_cmpxchg(&lock->lock_data, 0,
LCK_MTX_THREAD_TO_STATE(thread) | PLATFORM_LCK_ILOCK, acquire);
#if LOCK_PRETEST
failed:
#endif // LOCK_PRETEST
if (success) {
lck_grp_spin_update_held(lock LCK_GRP_ARG(grp));
}
return success;
}
unsigned
int
(hw_lock_try)(hw_lock_t lock LCK_GRP_ARG(lck_grp_t *grp))
{
thread_t thread = current_thread();
disable_preemption_for_thread(thread);
unsigned int success = hw_lock_try_internal(lock, thread LCK_GRP_ARG(grp));
if (!success) {
enable_preemption();
}
return success;
}
unsigned
int
(hw_lock_try_nopreempt)(hw_lock_t lock LCK_GRP_ARG(lck_grp_t *grp))
{
thread_t thread = current_thread();
if (__improbable(!preemption_disabled_for_thread(thread))) {
panic("Attempt to test no-preempt spinlock %p in preemptible context", lock);
}
return hw_lock_try_internal(lock, thread LCK_GRP_ARG(grp));
}
static inline void
hw_lock_unlock_internal(hw_lock_t lock)
{
os_atomic_store(&lock->lock_data, 0, release);
#if __arm__ || __arm64__
set_event();
#endif // __arm__ || __arm64__
#if CONFIG_DTRACE
LOCKSTAT_RECORD(LS_LCK_SPIN_UNLOCK_RELEASE, lock, 0);
#endif
}
void
(hw_lock_unlock)(hw_lock_t lock)
{
hw_lock_unlock_internal(lock);
enable_preemption();
}
void
(hw_lock_unlock_nopreempt)(hw_lock_t lock)
{
if (__improbable(!preemption_disabled_for_thread(current_thread()))) {
panic("Attempt to release no-preempt spinlock %p in preemptible context", lock);
}
hw_lock_unlock_internal(lock);
}
unsigned int
hw_lock_held(hw_lock_t lock)
{
return ordered_load_hw(lock) != 0;
}
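/*
 * Example (illustrative sketch): pairing of the hw_lock entry points
 * above. The plain variants disable and re-enable preemption themselves;
 * the _nopreempt variants panic unless the caller already disabled
 * preemption. 'lk' and 'my_grp' are hypothetical, and the group argument
 * is shown as in a lock-stats build.
 *
 *	hw_lock_data_t lk;
 *	hw_lock_init(&lk);
 *
 *	hw_lock_lock(&lk, &my_grp);   // spins; preemption disabled on return
 *	// ... short, non-blocking critical section ...
 *	hw_lock_unlock(&lk);          // releases; preemption re-enabled
 *
 *	if (hw_lock_try(&lk, &my_grp)) {
 *		// lock acquired without spinning
 *		hw_lock_unlock(&lk);
 *	}
 */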
static unsigned int
hw_lock_bit_to_contended(hw_lock_bit_t *lock, uint32_t mask, uint32_t timeout LCK_GRP_ARG(lck_grp_t *grp));
static inline unsigned int
hw_lock_bit_to_internal(hw_lock_bit_t *lock, unsigned int bit, uint32_t timeout LCK_GRP_ARG(lck_grp_t *grp))
{
unsigned int success = 0;
uint32_t mask = (1 << bit);
if (__improbable(!hw_atomic_test_and_set32(lock, mask, mask, memory_order_acquire, FALSE))) {
success = hw_lock_bit_to_contended(lock, mask, timeout LCK_GRP_ARG(grp));
} else {
success = 1;
}
if (success) {
lck_grp_spin_update_held(lock LCK_GRP_ARG(grp));
}
return success;
}
unsigned
int
(hw_lock_bit_to)(hw_lock_bit_t * lock, unsigned int bit, uint32_t timeout LCK_GRP_ARG(lck_grp_t *grp))
{
_disable_preemption();
return hw_lock_bit_to_internal(lock, bit, timeout LCK_GRP_ARG(grp));
}
static unsigned int NOINLINE
hw_lock_bit_to_contended(hw_lock_bit_t *lock, uint32_t mask, uint32_t timeout LCK_GRP_ARG(lck_grp_t *grp))
{
uint64_t end = 0;
int i;
#if CONFIG_DTRACE || LOCK_STATS
uint64_t begin = 0;
boolean_t stat_enabled = lck_grp_spin_spin_enabled(lock LCK_GRP_ARG(grp));
#endif
#if LOCK_STATS || CONFIG_DTRACE
if (__improbable(stat_enabled)) {
begin = mach_absolute_time();
}
#endif
for (;;) {
for (i = 0; i < LOCK_SNOOP_SPINS; i++) {
if (hw_atomic_test_and_set32(lock, mask, mask, memory_order_acquire, TRUE)) {
goto end;
}
}
if (end == 0) {
end = ml_get_timebase() + timeout;
} else if (ml_get_timebase() >= end) {
break;
}
}
return 0;
end:
#if CONFIG_DTRACE || LOCK_STATS
if (__improbable(stat_enabled)) {
lck_grp_spin_update_spin(lock LCK_GRP_ARG(grp), mach_absolute_time() - begin);
}
lck_grp_spin_update_miss(lock LCK_GRP_ARG(grp));
#endif
return 1;
}
void
(hw_lock_bit)(hw_lock_bit_t * lock, unsigned int bit LCK_GRP_ARG(lck_grp_t *grp))
{
if (hw_lock_bit_to(lock, bit, LOCK_PANIC_TIMEOUT, LCK_GRP_PROBEARG(grp))) {
return;
}
panic("hw_lock_bit(): timed out (%p)", lock);
}
void
(hw_lock_bit_nopreempt)(hw_lock_bit_t * lock, unsigned int bit LCK_GRP_ARG(lck_grp_t *grp))
{
if (__improbable(get_preemption_level() == 0)) {
panic("Attempt to take no-preempt bitlock %p in preemptible context", lock);
}
if (hw_lock_bit_to_internal(lock, bit, LOCK_PANIC_TIMEOUT LCK_GRP_ARG(grp))) {
return;
}
panic("hw_lock_bit_nopreempt(): timed out (%p)", lock);
}
unsigned
int
(hw_lock_bit_try)(hw_lock_bit_t * lock, unsigned int bit LCK_GRP_ARG(lck_grp_t *grp))
{
uint32_t mask = (1 << bit);
boolean_t success = FALSE;
_disable_preemption();
success = hw_atomic_test_and_set32(lock, mask, mask, memory_order_acquire, FALSE);
if (!success) {
_enable_preemption();
}
if (success) {
lck_grp_spin_update_held(lock LCK_GRP_ARG(grp));
}
return success;
}
static inline void
hw_unlock_bit_internal(hw_lock_bit_t *lock, unsigned int bit)
{
uint32_t mask = (1 << bit);
os_atomic_andnot(lock, mask, release);
#if __arm__
set_event();
#endif
#if CONFIG_DTRACE
LOCKSTAT_RECORD(LS_LCK_SPIN_UNLOCK_RELEASE, lock, bit);
#endif
}
void
hw_unlock_bit(hw_lock_bit_t * lock, unsigned int bit)
{
hw_unlock_bit_internal(lock, bit);
_enable_preemption();
}
void
hw_unlock_bit_nopreempt(hw_lock_bit_t * lock, unsigned int bit)
{
if (__improbable(get_preemption_level() == 0)) {
panic("Attempt to release no-preempt bitlock %p in preemptible context", lock);
}
hw_unlock_bit_internal(lock, bit);
}
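/*
 * Example (sketch): a bit lock packs a spinlock into one bit of a 32-bit
 * word, leaving the other 31 bits free for client state. 'flags',
 * MY_LOCK_BIT and 'my_grp' are hypothetical; the group argument is shown
 * as in a lock-stats build.
 *
 *	hw_lock_bit_t flags = 0;
 *	#define MY_LOCK_BIT 0
 *
 *	hw_lock_bit(&flags, MY_LOCK_BIT, &my_grp);
 *	// ... update state guarded by the bit ...
 *	hw_unlock_bit(&flags, MY_LOCK_BIT);
 */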
wait_result_t
lck_spin_sleep_grp(
lck_spin_t *lck,
lck_sleep_action_t lck_sleep_action,
event_t event,
wait_interrupt_t interruptible,
lck_grp_t *grp)
{
wait_result_t res;
if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0) {
panic("Invalid lock sleep action %x\n", lck_sleep_action);
}
res = assert_wait(event, interruptible);
if (res == THREAD_WAITING) {
lck_spin_unlock(lck);
res = thread_block(THREAD_CONTINUE_NULL);
if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
lck_spin_lock_grp(lck, grp);
}
} else if (lck_sleep_action & LCK_SLEEP_UNLOCK) {
lck_spin_unlock(lck);
}
return res;
}
wait_result_t
lck_spin_sleep(
lck_spin_t *lck,
lck_sleep_action_t lck_sleep_action,
event_t event,
wait_interrupt_t interruptible)
{
return lck_spin_sleep_grp(lck, lck_sleep_action, event, interruptible, LCK_GRP_NULL);
}
wait_result_t
lck_spin_sleep_deadline(
lck_spin_t *lck,
lck_sleep_action_t lck_sleep_action,
event_t event,
wait_interrupt_t interruptible,
uint64_t deadline)
{
wait_result_t res;
if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0) {
panic("Invalid lock sleep action %x\n", lck_sleep_action);
}
res = assert_wait_deadline(event, interruptible, deadline);
if (res == THREAD_WAITING) {
lck_spin_unlock(lck);
res = thread_block(THREAD_CONTINUE_NULL);
if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
lck_spin_lock(lck);
}
} else if (lck_sleep_action & LCK_SLEEP_UNLOCK) {
lck_spin_unlock(lck);
}
return res;
}
wait_result_t
lck_mtx_sleep(
lck_mtx_t *lck,
lck_sleep_action_t lck_sleep_action,
event_t event,
wait_interrupt_t interruptible)
{
wait_result_t res;
thread_t thread = current_thread();
KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_START,
VM_KERNEL_UNSLIDE_OR_PERM(lck), (int)lck_sleep_action, VM_KERNEL_UNSLIDE_OR_PERM(event), (int)interruptible, 0);
if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0) {
panic("Invalid lock sleep action %x\n", lck_sleep_action);
}
if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) {
thread->rwlock_count++;
}
res = assert_wait(event, interruptible);
if (res == THREAD_WAITING) {
lck_mtx_unlock(lck);
res = thread_block(THREAD_CONTINUE_NULL);
if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
if ((lck_sleep_action & LCK_SLEEP_SPIN)) {
lck_mtx_lock_spin(lck);
} else if ((lck_sleep_action & LCK_SLEEP_SPIN_ALWAYS)) {
lck_mtx_lock_spin_always(lck);
} else {
lck_mtx_lock(lck);
}
}
} else if (lck_sleep_action & LCK_SLEEP_UNLOCK) {
lck_mtx_unlock(lck);
}
if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) {
if ((thread->rwlock_count-- == 1 /* field now 0 */) && (thread->sched_flags & TH_SFLAG_RW_PROMOTED)) {
lck_rw_clear_promotion(thread, unslide_for_kdebug(event));
}
}
KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_END, (int)res, 0, 0, 0, 0);
return res;
}
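/*
 * Example (sketch): lck_mtx_sleep() is the condition-variable idiom:
 * atomically drop the mutex, block on the event, and retake the mutex
 * before returning (unless LCK_SLEEP_UNLOCK was passed). 'my_mtx' and
 * 'my_condition' are hypothetical.
 *
 *	lck_mtx_lock(my_mtx);
 *	while (!my_condition) {
 *		(void) lck_mtx_sleep(my_mtx, LCK_SLEEP_DEFAULT,
 *		    (event_t)&my_condition, THREAD_UNINT);
 *	}
 *	// my_condition now holds, and the mutex is held
 *	lck_mtx_unlock(my_mtx);
 *
 * The waking side sets my_condition under the mutex and calls
 * thread_wakeup((event_t)&my_condition).
 */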
wait_result_t
lck_mtx_sleep_deadline(
lck_mtx_t *lck,
lck_sleep_action_t lck_sleep_action,
event_t event,
wait_interrupt_t interruptible,
uint64_t deadline)
{
wait_result_t res;
thread_t thread = current_thread();
KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_START,
VM_KERNEL_UNSLIDE_OR_PERM(lck), (int)lck_sleep_action, VM_KERNEL_UNSLIDE_OR_PERM(event), (int)interruptible, 0);
if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0) {
panic("Invalid lock sleep action %x\n", lck_sleep_action);
}
if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) {
thread->rwlock_count++;
}
res = assert_wait_deadline(event, interruptible, deadline);
if (res == THREAD_WAITING) {
lck_mtx_unlock(lck);
res = thread_block(THREAD_CONTINUE_NULL);
if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
if ((lck_sleep_action & LCK_SLEEP_SPIN)) {
lck_mtx_lock_spin(lck);
} else {
lck_mtx_lock(lck);
}
}
} else if (lck_sleep_action & LCK_SLEEP_UNLOCK) {
lck_mtx_unlock(lck);
}
if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) {
if ((thread->rwlock_count-- == 1 /* field now 0 */) && (thread->sched_flags & TH_SFLAG_RW_PROMOTED)) {
lck_rw_clear_promotion(thread, unslide_for_kdebug(event));
}
}
KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_END, (int)res, 0, 0, 0, 0);
return res;
}
void
lck_mtx_lock_wait(
lck_mtx_t *lck,
thread_t holder,
struct turnstile **ts)
{
thread_t thread = current_thread();
lck_mtx_t *mutex;
__kdebug_only uintptr_t trace_lck = unslide_for_kdebug(lck);
#if CONFIG_DTRACE
uint64_t sleep_start = 0;
if (lockstat_probemap[LS_LCK_MTX_LOCK_BLOCK] || lockstat_probemap[LS_LCK_MTX_EXT_LOCK_BLOCK]) {
sleep_start = mach_absolute_time();
}
#endif
if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT) {
mutex = lck;
} else {
mutex = &lck->lck_mtx_ptr->lck_mtx;
}
KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_START,
trace_lck, (uintptr_t)thread_tid(thread), 0, 0, 0);
assert(thread->waiting_for_mutex == NULL);
thread->waiting_for_mutex = mutex;
mutex->lck_mtx_waiters++;
if (*ts == NULL) {
*ts = turnstile_prepare((uintptr_t)mutex, NULL, TURNSTILE_NULL, TURNSTILE_KERNEL_MUTEX);
}
struct turnstile *turnstile = *ts;
thread_set_pending_block_hint(thread, kThreadWaitKernelMutex);
turnstile_update_inheritor(turnstile, holder, (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD));
waitq_assert_wait64(&turnstile->ts_waitq, CAST_EVENT64_T(LCK_MTX_EVENT(mutex)), THREAD_UNINT | THREAD_WAIT_NOREPORT_USER, TIMEOUT_WAIT_FOREVER);
lck_mtx_ilk_unlock(mutex);
turnstile_update_inheritor_complete(turnstile, TURNSTILE_INTERLOCK_NOT_HELD);
thread_block(THREAD_CONTINUE_NULL);
thread->waiting_for_mutex = NULL;
KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
#if CONFIG_DTRACE
if (sleep_start) {
if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT) {
LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_BLOCK, lck,
mach_absolute_time() - sleep_start);
} else {
LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_BLOCK, lck,
mach_absolute_time() - sleep_start);
}
}
#endif
}
int
lck_mtx_lock_acquire(
lck_mtx_t *lck,
struct turnstile *ts)
{
thread_t thread = current_thread();
lck_mtx_t *mutex;
if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT) {
mutex = lck;
} else {
mutex = &lck->lck_mtx_ptr->lck_mtx;
}
assert(thread->waiting_for_mutex == NULL);
if (mutex->lck_mtx_waiters > 0) {
if (ts == NULL) {
ts = turnstile_prepare((uintptr_t)mutex, NULL, TURNSTILE_NULL, TURNSTILE_KERNEL_MUTEX);
}
turnstile_update_inheritor(ts, thread, (TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD));
turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
}
if (ts != NULL) {
turnstile_complete((uintptr_t)mutex, NULL, NULL, TURNSTILE_KERNEL_MUTEX);
}
return mutex->lck_mtx_waiters;
}
boolean_t
lck_mtx_unlock_wakeup(
lck_mtx_t *lck,
thread_t holder)
{
thread_t thread = current_thread();
lck_mtx_t *mutex;
__kdebug_only uintptr_t trace_lck = unslide_for_kdebug(lck);
struct turnstile *ts;
kern_return_t did_wake;
if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT) {
mutex = lck;
} else {
mutex = &lck->lck_mtx_ptr->lck_mtx;
}
if (thread != holder) {
panic("lck_mtx_unlock_wakeup: mutex %p holder %p\n", mutex, holder);
}
KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_START,
trace_lck, (uintptr_t)thread_tid(thread), 0, 0, 0);
assert(mutex->lck_mtx_waiters > 0);
assert(thread->waiting_for_mutex == NULL);
ts = turnstile_prepare((uintptr_t)mutex, NULL, TURNSTILE_NULL, TURNSTILE_KERNEL_MUTEX);
if (mutex->lck_mtx_waiters > 1) {
did_wake = waitq_wakeup64_one(&ts->ts_waitq, CAST_EVENT64_T(LCK_MTX_EVENT(mutex)), THREAD_AWAKENED, WAITQ_PROMOTE_ON_WAKE);
} else {
did_wake = waitq_wakeup64_one(&ts->ts_waitq, CAST_EVENT64_T(LCK_MTX_EVENT(mutex)), THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
turnstile_update_inheritor(ts, NULL, TURNSTILE_IMMEDIATE_UPDATE);
}
assert(did_wake == KERN_SUCCESS);
turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
turnstile_complete((uintptr_t)mutex, NULL, NULL, TURNSTILE_KERNEL_MUTEX);
mutex->lck_mtx_waiters--;
KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0);
return mutex->lck_mtx_waiters > 0;
}
#define MAX_COLLISION_COUNTS 32
#define MAX_COLLISION 8
unsigned int max_collision_count[MAX_COLLISION_COUNTS];
uint32_t collision_backoffs[MAX_COLLISION] = {
10, 50, 100, 200, 400, 600, 800, 1000
};
void
mutex_pause(uint32_t collisions)
{
wait_result_t wait_result;
uint32_t back_off;
if (collisions >= MAX_COLLISION_COUNTS) {
collisions = MAX_COLLISION_COUNTS - 1;
}
max_collision_count[collisions]++;
if (collisions >= MAX_COLLISION) {
collisions = MAX_COLLISION - 1;
}
back_off = collision_backoffs[collisions];
wait_result = assert_wait_timeout((event_t)mutex_pause, THREAD_UNINT, back_off, NSEC_PER_USEC);
assert(wait_result == THREAD_WAITING);
wait_result = thread_block(THREAD_CONTINUE_NULL);
assert(wait_result == THREAD_TIMED_OUT);
}
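/*
 * Worked example: 'collisions' indexes collision_backoffs[], so passes
 * 0..7 sleep for 10, 50, 100, 200, 400, 600, 800 and 1000 microseconds;
 * callers past MAX_COLLISION stay capped at 1000us. max_collision_count[]
 * independently histograms up to MAX_COLLISION_COUNTS collision levels.
 */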
unsigned int mutex_yield_wait = 0;
unsigned int mutex_yield_no_wait = 0;
void
lck_mtx_yield(
lck_mtx_t *lck)
{
int waiters;
#if DEBUG
lck_mtx_assert(lck, LCK_MTX_ASSERT_OWNED);
#endif
if (lck->lck_mtx_tag == LCK_MTX_TAG_INDIRECT) {
waiters = lck->lck_mtx_ptr->lck_mtx.lck_mtx_waiters;
} else {
waiters = lck->lck_mtx_waiters;
}
if (!waiters) {
mutex_yield_no_wait++;
} else {
mutex_yield_wait++;
lck_mtx_unlock(lck);
mutex_pause(0);
lck_mtx_lock(lck);
}
}
wait_result_t
lck_rw_sleep(
lck_rw_t *lck,
lck_sleep_action_t lck_sleep_action,
event_t event,
wait_interrupt_t interruptible)
{
wait_result_t res;
lck_rw_type_t lck_rw_type;
thread_t thread = current_thread();
if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0) {
panic("Invalid lock sleep action %x\n", lck_sleep_action);
}
if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) {
thread->rwlock_count++;
}
res = assert_wait(event, interruptible);
if (res == THREAD_WAITING) {
lck_rw_type = lck_rw_done(lck);
res = thread_block(THREAD_CONTINUE_NULL);
if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
if (!(lck_sleep_action & (LCK_SLEEP_SHARED | LCK_SLEEP_EXCLUSIVE))) {
lck_rw_lock(lck, lck_rw_type);
} else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE) {
lck_rw_lock_exclusive(lck);
} else {
lck_rw_lock_shared(lck);
}
}
} else if (lck_sleep_action & LCK_SLEEP_UNLOCK) {
(void)lck_rw_done(lck);
}
if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) {
if ((thread->rwlock_count-- == 1 /* field now 0 */) && (thread->sched_flags & TH_SFLAG_RW_PROMOTED)) {
assert(lck_sleep_action & LCK_SLEEP_UNLOCK);
lck_rw_clear_promotion(thread, unslide_for_kdebug(event));
}
}
return res;
}
wait_result_t
lck_rw_sleep_deadline(
lck_rw_t *lck,
lck_sleep_action_t lck_sleep_action,
event_t event,
wait_interrupt_t interruptible,
uint64_t deadline)
{
wait_result_t res;
lck_rw_type_t lck_rw_type;
thread_t thread = current_thread();
if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0) {
panic("Invalid lock sleep action %x\n", lck_sleep_action);
}
if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) {
thread->rwlock_count++;
}
res = assert_wait_deadline(event, interruptible, deadline);
if (res == THREAD_WAITING) {
lck_rw_type = lck_rw_done(lck);
res = thread_block(THREAD_CONTINUE_NULL);
if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) {
if (!(lck_sleep_action & (LCK_SLEEP_SHARED | LCK_SLEEP_EXCLUSIVE))) {
lck_rw_lock(lck, lck_rw_type);
} else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE) {
lck_rw_lock_exclusive(lck);
} else {
lck_rw_lock_shared(lck);
}
}
} else if (lck_sleep_action & LCK_SLEEP_UNLOCK) {
(void)lck_rw_done(lck);
}
if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) {
if ((thread->rwlock_count-- == 1 /* field now 0 */) && (thread->sched_flags & TH_SFLAG_RW_PROMOTED)) {
assert(lck_sleep_action & LCK_SLEEP_UNLOCK);
lck_rw_clear_promotion(thread, unslide_for_kdebug(event));
}
}
return res;
}
void
lck_rw_clear_promotion(thread_t thread, uintptr_t trace_obj)
{
assert(thread->rwlock_count == 0);
spl_t s = splsched();
thread_lock(thread);
if (thread->sched_flags & TH_SFLAG_RW_PROMOTED) {
sched_thread_unpromote_reason(thread, TH_SFLAG_RW_PROMOTED, trace_obj);
}
thread_unlock(thread);
splx(s);
}
void
lck_rw_set_promotion_locked(thread_t thread)
{
if (LcksOpts & disLkRWPrio) {
return;
}
assert(thread->rwlock_count > 0);
if (!(thread->sched_flags & TH_SFLAG_RW_PROMOTED)) {
sched_thread_promote_reason(thread, TH_SFLAG_RW_PROMOTED, 0);
}
}
kern_return_t
host_lockgroup_info(
host_t host,
lockgroup_info_array_t *lockgroup_infop,
mach_msg_type_number_t *lockgroup_infoCntp)
{
lockgroup_info_t *lockgroup_info_base;
lockgroup_info_t *lockgroup_info;
vm_offset_t lockgroup_info_addr;
vm_size_t lockgroup_info_size;
vm_size_t lockgroup_info_vmsize;
lck_grp_t *lck_grp;
unsigned int i;
vm_map_copy_t copy;
kern_return_t kr;
if (host == HOST_NULL) {
return KERN_INVALID_HOST;
}
lck_mtx_lock(&lck_grp_lock);
lockgroup_info_size = lck_grp_cnt * sizeof(*lockgroup_info);
lockgroup_info_vmsize = round_page(lockgroup_info_size);
kr = kmem_alloc_pageable(ipc_kernel_map,
&lockgroup_info_addr, lockgroup_info_vmsize, VM_KERN_MEMORY_IPC);
if (kr != KERN_SUCCESS) {
lck_mtx_unlock(&lck_grp_lock);
return kr;
}
lockgroup_info_base = (lockgroup_info_t *) lockgroup_info_addr;
lck_grp = (lck_grp_t *)queue_first(&lck_grp_queue);
lockgroup_info = lockgroup_info_base;
for (i = 0; i < lck_grp_cnt; i++) {
lockgroup_info->lock_spin_cnt = lck_grp->lck_grp_spincnt;
lockgroup_info->lock_rw_cnt = lck_grp->lck_grp_rwcnt;
lockgroup_info->lock_mtx_cnt = lck_grp->lck_grp_mtxcnt;
#if LOCK_STATS
lockgroup_info->lock_spin_held_cnt = lck_grp->lck_grp_stats.lgss_spin_held.lgs_count;
lockgroup_info->lock_spin_miss_cnt = lck_grp->lck_grp_stats.lgss_spin_miss.lgs_count;
#endif
lockgroup_info->lock_mtx_util_cnt = lck_grp->lck_grp_stats.lgss_mtx_held.lgs_count;
lockgroup_info->lock_mtx_held_cnt = lck_grp->lck_grp_stats.lgss_mtx_direct_wait.lgs_count;
lockgroup_info->lock_mtx_miss_cnt = lck_grp->lck_grp_stats.lgss_mtx_miss.lgs_count;
lockgroup_info->lock_mtx_wait_cnt = lck_grp->lck_grp_stats.lgss_mtx_wait.lgs_count;
(void) strncpy(lockgroup_info->lockgroup_name, lck_grp->lck_grp_name, LOCKGROUP_MAX_NAME);
lck_grp = (lck_grp_t *)(queue_next((queue_entry_t)(lck_grp)));
lockgroup_info++;
}
*lockgroup_infoCntp = lck_grp_cnt;
lck_mtx_unlock(&lck_grp_lock);
if (lockgroup_info_size != lockgroup_info_vmsize) {
bzero((char *)lockgroup_info, lockgroup_info_vmsize - lockgroup_info_size);
}
kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)lockgroup_info_addr,
(vm_map_size_t)lockgroup_info_size, TRUE, &copy);
assert(kr == KERN_SUCCESS);
*lockgroup_infop = (lockgroup_info_t *) copy;
return KERN_SUCCESS;
}
kern_return_t
wakeup_with_inheritor_and_turnstile_type(event_t event, turnstile_type_t type, wait_result_t result, bool wake_one, lck_wake_action_t action, thread_t *thread_wokenup)
{
uint32_t index;
struct turnstile *ts = NULL;
kern_return_t ret = KERN_NOT_WAITING;
int priority;
thread_t wokeup;
turnstile_hash_bucket_lock((uintptr_t)event, &index, type);
ts = turnstile_prepare((uintptr_t)event, NULL, TURNSTILE_NULL, type);
if (wake_one) {
if (action == LCK_WAKE_DEFAULT) {
priority = WAITQ_PROMOTE_ON_WAKE;
} else {
assert(action == LCK_WAKE_DO_NOT_TRANSFER_PUSH);
priority = WAITQ_ALL_PRIORITIES;
}
wokeup = waitq_wakeup64_identify(&ts->ts_waitq, CAST_EVENT64_T(event), result, priority);
if (wokeup != NULL) {
if (thread_wokenup != NULL) {
*thread_wokenup = wokeup;
} else {
thread_deallocate_safe(wokeup);
}
ret = KERN_SUCCESS;
if (action == LCK_WAKE_DO_NOT_TRANSFER_PUSH) {
goto complete;
}
} else {
if (thread_wokenup != NULL) {
*thread_wokenup = NULL;
}
turnstile_update_inheritor(ts, TURNSTILE_INHERITOR_NULL, TURNSTILE_IMMEDIATE_UPDATE);
ret = KERN_NOT_WAITING;
}
} else {
ret = waitq_wakeup64_all(&ts->ts_waitq, CAST_EVENT64_T(event), result, WAITQ_ALL_PRIORITIES);
turnstile_update_inheritor(ts, TURNSTILE_INHERITOR_NULL, TURNSTILE_IMMEDIATE_UPDATE);
}
turnstile_hash_bucket_unlock((uintptr_t)NULL, &index, type, 0);
turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
turnstile_hash_bucket_lock((uintptr_t)NULL, &index, type);
complete:
turnstile_complete((uintptr_t)event, NULL, NULL, type);
turnstile_hash_bucket_unlock((uintptr_t)NULL, &index, type, 0);
turnstile_cleanup();
return ret;
}
static wait_result_t
sleep_with_inheritor_and_turnstile_type(event_t event,
thread_t inheritor,
wait_interrupt_t interruptible,
uint64_t deadline,
turnstile_type_t type,
void (^primitive_lock)(void),
void (^primitive_unlock)(void))
{
wait_result_t ret;
uint32_t index;
struct turnstile *ts = NULL;
turnstile_hash_bucket_lock((uintptr_t)event, &index, type);
primitive_unlock();
ts = turnstile_prepare((uintptr_t)event, NULL, TURNSTILE_NULL, type);
thread_set_pending_block_hint(current_thread(), kThreadWaitSleepWithInheritor);
turnstile_update_inheritor(ts, inheritor, (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD));
ret = waitq_assert_wait64(&ts->ts_waitq, CAST_EVENT64_T(event), interruptible, deadline);
turnstile_hash_bucket_unlock((uintptr_t)NULL, &index, type, 0);
turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
if (ret == THREAD_WAITING) {
ret = thread_block(THREAD_CONTINUE_NULL);
}
turnstile_hash_bucket_lock((uintptr_t)NULL, &index, type);
turnstile_complete((uintptr_t)event, NULL, NULL, type);
turnstile_hash_bucket_unlock((uintptr_t)NULL, &index, type, 0);
turnstile_cleanup();
primitive_lock();
return ret;
}
kern_return_t
change_sleep_inheritor_and_turnstile_type(event_t event,
thread_t inheritor,
turnstile_type_t type)
{
uint32_t index;
struct turnstile *ts = NULL;
kern_return_t ret = KERN_SUCCESS;
turnstile_hash_bucket_lock((uintptr_t)event, &index, type);
ts = turnstile_prepare((uintptr_t)event, NULL, TURNSTILE_NULL, type);
if (!turnstile_has_waiters(ts)) {
ret = KERN_NOT_WAITING;
}
turnstile_update_inheritor(ts, inheritor, (TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD));
turnstile_hash_bucket_unlock((uintptr_t)NULL, &index, type, 0);
turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
turnstile_hash_bucket_lock((uintptr_t)NULL, &index, type);
turnstile_complete((uintptr_t)event, NULL, NULL, type);
turnstile_hash_bucket_unlock((uintptr_t)NULL, &index, type, 0);
turnstile_cleanup();
return ret;
}
typedef void (^void_block_void)(void);
wait_result_t
lck_mtx_sleep_with_inheritor_and_turnstile_type(lck_mtx_t *lock, lck_sleep_action_t lck_sleep_action, event_t event, thread_t inheritor, wait_interrupt_t interruptible, uint64_t deadline, turnstile_type_t type)
{
LCK_MTX_ASSERT(lock, LCK_MTX_ASSERT_OWNED);
if (lck_sleep_action & LCK_SLEEP_UNLOCK) {
return sleep_with_inheritor_and_turnstile_type(event,
inheritor,
interruptible,
deadline,
type,
^{;},
^{lck_mtx_unlock(lock);});
} else if (lck_sleep_action & LCK_SLEEP_SPIN) {
return sleep_with_inheritor_and_turnstile_type(event,
inheritor,
interruptible,
deadline,
type,
^{lck_mtx_lock_spin(lock);},
^{lck_mtx_unlock(lock);});
} else if (lck_sleep_action & LCK_SLEEP_SPIN_ALWAYS) {
return sleep_with_inheritor_and_turnstile_type(event,
inheritor,
interruptible,
deadline,
type,
^{lck_mtx_lock_spin_always(lock);},
^{lck_mtx_unlock(lock);});
} else {
return sleep_with_inheritor_and_turnstile_type(event,
inheritor,
interruptible,
deadline,
type,
^{lck_mtx_lock(lock);},
^{lck_mtx_unlock(lock);});
}
}
wait_result_t
lck_spin_sleep_with_inheritor(
lck_spin_t *lock,
lck_sleep_action_t lck_sleep_action,
event_t event,
thread_t inheritor,
wait_interrupt_t interruptible,
uint64_t deadline)
{
if (lck_sleep_action & LCK_SLEEP_UNLOCK) {
return sleep_with_inheritor_and_turnstile_type(event, inheritor,
interruptible, deadline, TURNSTILE_SLEEP_INHERITOR,
^{}, ^{ lck_spin_unlock(lock); });
} else {
return sleep_with_inheritor_and_turnstile_type(event, inheritor,
interruptible, deadline, TURNSTILE_SLEEP_INHERITOR,
^{ lck_spin_lock(lock); }, ^{ lck_spin_unlock(lock); });
}
}
wait_result_t
lck_mtx_sleep_with_inheritor(lck_mtx_t *lock, lck_sleep_action_t lck_sleep_action, event_t event, thread_t inheritor, wait_interrupt_t interruptible, uint64_t deadline)
{
return lck_mtx_sleep_with_inheritor_and_turnstile_type(lock, lck_sleep_action, event, inheritor, interruptible, deadline, TURNSTILE_SLEEP_INHERITOR);
}
wait_result_t
lck_rw_sleep_with_inheritor_and_turnstile_type(lck_rw_t *lock, lck_sleep_action_t lck_sleep_action, event_t event, thread_t inheritor, wait_interrupt_t interruptible, uint64_t deadline, turnstile_type_t type)
{
__block lck_rw_type_t lck_rw_type = LCK_RW_TYPE_EXCLUSIVE;
LCK_RW_ASSERT(lock, LCK_RW_ASSERT_HELD);
if (lck_sleep_action & LCK_SLEEP_UNLOCK) {
return sleep_with_inheritor_and_turnstile_type(event,
inheritor,
interruptible,
deadline,
type,
^{;},
^{lck_rw_type = lck_rw_done(lock);});
} else if (!(lck_sleep_action & (LCK_SLEEP_SHARED | LCK_SLEEP_EXCLUSIVE))) {
return sleep_with_inheritor_and_turnstile_type(event,
inheritor,
interruptible,
deadline,
type,
^{lck_rw_lock(lock, lck_rw_type);},
^{lck_rw_type = lck_rw_done(lock);});
} else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE) {
return sleep_with_inheritor_and_turnstile_type(event,
inheritor,
interruptible,
deadline,
type,
^{lck_rw_lock_exclusive(lock);},
^{lck_rw_type = lck_rw_done(lock);});
} else {
return sleep_with_inheritor_and_turnstile_type(event,
inheritor,
interruptible,
deadline,
type,
^{lck_rw_lock_shared(lock);},
^{lck_rw_type = lck_rw_done(lock);});
}
}
wait_result_t
lck_rw_sleep_with_inheritor(lck_rw_t *lock, lck_sleep_action_t lck_sleep_action, event_t event, thread_t inheritor, wait_interrupt_t interruptible, uint64_t deadline)
{
return lck_rw_sleep_with_inheritor_and_turnstile_type(lock, lck_sleep_action, event, inheritor, interruptible, deadline, TURNSTILE_SLEEP_INHERITOR);
}
kern_return_t
wakeup_one_with_inheritor(event_t event, wait_result_t result, lck_wake_action_t action, thread_t *thread_wokenup)
{
return wakeup_with_inheritor_and_turnstile_type(event,
TURNSTILE_SLEEP_INHERITOR,
result,
TRUE,
action,
thread_wokenup);
}
kern_return_t
wakeup_all_with_inheritor(event_t event, wait_result_t result)
{
return wakeup_with_inheritor_and_turnstile_type(event,
TURNSTILE_SLEEP_INHERITOR,
result,
FALSE,
LCK_WAKE_DEFAULT,
NULL);
}
kern_return_t
change_sleep_inheritor(event_t event, thread_t inheritor)
{
return change_sleep_inheritor_and_turnstile_type(event,
inheritor,
TURNSTILE_SLEEP_INHERITOR);
}
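/*
 * Example (sketch): the *_with_inheritor primitives replace the bare
 * assert_wait()/thread_wakeup() pair when the waiter knows which thread
 * will clear the condition, letting the turnstile push the waiter's
 * priority onto that thread. 'my_mtx', 'my_event', 'owner' and 'done'
 * are hypothetical.
 *
 *	// Waiter: sleeps pushing on 'owner'.
 *	lck_mtx_lock(my_mtx);
 *	while (!done) {
 *		(void) lck_mtx_sleep_with_inheritor(my_mtx, LCK_SLEEP_DEFAULT,
 *		    (event_t)&my_event, owner, THREAD_UNINT,
 *		    TIMEOUT_WAIT_FOREVER);
 *	}
 *	lck_mtx_unlock(my_mtx);
 *
 *	// Owner: clears the condition and drops any inherited push.
 *	lck_mtx_lock(my_mtx);
 *	done = true;
 *	wakeup_all_with_inheritor((event_t)&my_event, THREAD_AWAKENED);
 *	lck_mtx_unlock(my_mtx);
 */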
void
kdp_sleep_with_inheritor_find_owner(struct waitq * waitq, __unused event64_t event, thread_waitinfo_t * waitinfo)
{
assert(waitinfo->wait_type == kThreadWaitSleepWithInheritor);
assert(waitq_is_turnstile_queue(waitq));
waitinfo->owner = 0;
waitinfo->context = 0;
if (waitq_held(waitq)) {
return;
}
struct turnstile *turnstile = waitq_to_turnstile(waitq);
assert(turnstile->ts_inheritor_flags & TURNSTILE_INHERITOR_THREAD);
waitinfo->owner = thread_tid(turnstile->ts_inheritor);
}
typedef void (*void_func_void)(void);
static kern_return_t
gate_try_close(gate_t *gate)
{
uintptr_t state;
thread_t holder;
kern_return_t ret;
__assert_only bool waiters;
thread_t thread = current_thread();
if (os_atomic_cmpxchg(&gate->gate_data, 0, GATE_THREAD_TO_STATE(thread), acquire)) {
return KERN_SUCCESS;
}
gate_ilock(gate);
state = ordered_load_gate(gate);
holder = GATE_STATE_TO_THREAD(state);
if (holder == NULL) {
waiters = gate_has_waiters(state);
assert(waiters == FALSE);
state = GATE_THREAD_TO_STATE(current_thread());
state |= GATE_ILOCK;
ordered_store_gate(gate, state);
ret = KERN_SUCCESS;
} else {
if (holder == current_thread()) {
panic("Trying to close a gate already owned by current thread %p", current_thread());
}
ret = KERN_FAILURE;
}
gate_iunlock(gate);
return ret;
}
static void
gate_close(gate_t* gate)
{
uintptr_t state;
thread_t holder;
__assert_only bool waiters;
thread_t thread = current_thread();
if (os_atomic_cmpxchg(&gate->gate_data, 0, GATE_THREAD_TO_STATE(thread), acquire)) {
return;
}
gate_ilock(gate);
state = ordered_load_gate(gate);
holder = GATE_STATE_TO_THREAD(state);
if (holder != NULL) {
panic("Closing a gate already owned by %p from current thread %p", holder, current_thread());
}
waiters = gate_has_waiters(state);
assert(waiters == FALSE);
state = GATE_THREAD_TO_STATE(thread);
state |= GATE_ILOCK;
ordered_store_gate(gate, state);
gate_iunlock(gate);
}
static void
gate_open_turnstile(gate_t *gate)
{
struct turnstile *ts = NULL;
ts = turnstile_prepare((uintptr_t)gate, &gate->turnstile, TURNSTILE_NULL, TURNSTILE_KERNEL_MUTEX);
waitq_wakeup64_all(&ts->ts_waitq, CAST_EVENT64_T(GATE_EVENT(gate)), THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
turnstile_update_inheritor(ts, TURNSTILE_INHERITOR_NULL, TURNSTILE_IMMEDIATE_UPDATE);
turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
turnstile_complete((uintptr_t)gate, &gate->turnstile, NULL, TURNSTILE_KERNEL_MUTEX);
turnstile_cleanup();
}
static void
gate_open(gate_t *gate)
{
uintptr_t state;
thread_t holder;
bool waiters;
thread_t thread = current_thread();
if (os_atomic_cmpxchg(&gate->gate_data, GATE_THREAD_TO_STATE(thread), 0, release)) {
return;
}
gate_ilock(gate);
state = ordered_load_gate(gate);
holder = GATE_STATE_TO_THREAD(state);
waiters = gate_has_waiters(state);
if (holder != thread) {
panic("Opening gate owned by %p from current thread %p", holder, thread);
}
if (waiters) {
gate_open_turnstile(gate);
}
state = GATE_ILOCK;
ordered_store_gate(gate, state);
gate_iunlock(gate);
}
static kern_return_t
gate_handoff_turnstile(gate_t *gate,
int flags,
thread_t *thread_woken_up,
bool *waiters)
{
struct turnstile *ts = NULL;
kern_return_t ret = KERN_FAILURE;
thread_t hp_thread;
ts = turnstile_prepare((uintptr_t)gate, &gate->turnstile, TURNSTILE_NULL, TURNSTILE_KERNEL_MUTEX);
hp_thread = waitq_wakeup64_identify(&ts->ts_waitq, CAST_EVENT64_T(GATE_EVENT(gate)), THREAD_AWAKENED, WAITQ_PROMOTE_ON_WAKE);
if (hp_thread != NULL) {
turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
*thread_woken_up = hp_thread;
*waiters = turnstile_has_waiters(ts);
ret = KERN_SUCCESS;
} else {
if (flags == GATE_HANDOFF_OPEN_IF_NO_WAITERS) {
turnstile_update_inheritor(ts, TURNSTILE_INHERITOR_NULL, TURNSTILE_IMMEDIATE_UPDATE);
turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
}
ret = KERN_NOT_WAITING;
}
turnstile_complete((uintptr_t)gate, &gate->turnstile, NULL, TURNSTILE_KERNEL_MUTEX);
turnstile_cleanup();
return ret;
}
static kern_return_t
gate_handoff(gate_t *gate,
int flags)
{
kern_return_t ret;
thread_t new_holder = NULL;
uintptr_t state;
thread_t holder;
bool waiters;
thread_t thread = current_thread();
assert(flags == GATE_HANDOFF_OPEN_IF_NO_WAITERS || flags == GATE_HANDOFF_DEFAULT);
if (flags == GATE_HANDOFF_OPEN_IF_NO_WAITERS) {
if (os_atomic_cmpxchg(&gate->gate_data, GATE_THREAD_TO_STATE(thread), 0, release)) {
return KERN_NOT_WAITING;
}
}
gate_ilock(gate);
state = ordered_load_gate(gate);
holder = GATE_STATE_TO_THREAD(state);
waiters = gate_has_waiters(state);
if (holder != current_thread()) {
panic("Handing off gate owned by %p from current thread %p", holder, current_thread());
}
if (waiters) {
ret = gate_handoff_turnstile(gate, flags, &new_holder, &waiters);
if (ret == KERN_SUCCESS) {
state = GATE_THREAD_TO_STATE(new_holder);
if (waiters) {
state |= GATE_WAITERS;
}
} else {
if (flags == GATE_HANDOFF_OPEN_IF_NO_WAITERS) {
state = 0;
}
}
} else {
if (flags == GATE_HANDOFF_OPEN_IF_NO_WAITERS) {
state = 0;
}
ret = KERN_NOT_WAITING;
}
state |= GATE_ILOCK;
ordered_store_gate(gate, state);
gate_iunlock(gate);
if (new_holder) {
thread_deallocate(new_holder);
}
return ret;
}
static void_func_void
gate_steal_turnstile(gate_t *gate,
thread_t new_inheritor)
{
struct turnstile *ts = NULL;
ts = turnstile_prepare((uintptr_t)gate, &gate->turnstile, TURNSTILE_NULL, TURNSTILE_KERNEL_MUTEX);
turnstile_update_inheritor(ts, new_inheritor, (TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD));
turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);
turnstile_complete((uintptr_t)gate, &gate->turnstile, NULL, TURNSTILE_KERNEL_MUTEX);
return turnstile_cleanup;
}
static void
gate_steal(gate_t *gate)
{
uintptr_t state;
thread_t holder;
thread_t thread = current_thread();
bool waiters;
void_func_void func_after_interlock_unlock;
gate_ilock(gate);
state = ordered_load_gate(gate);
holder = GATE_STATE_TO_THREAD(state);
waiters = gate_has_waiters(state);
assert(holder != NULL);
state = GATE_THREAD_TO_STATE(thread) | GATE_ILOCK;
if (waiters) {
state |= GATE_WAITERS;
ordered_store_gate(gate, state);
func_after_interlock_unlock = gate_steal_turnstile(gate, thread);
gate_iunlock(gate);
func_after_interlock_unlock();
} else {
ordered_store_gate(gate, state);
gate_iunlock(gate);
}
}
static void_func_void
gate_wait_turnstile(gate_t *gate,
wait_interrupt_t interruptible,
uint64_t deadline,
thread_t holder,
wait_result_t* wait,
bool* waiters)
{
struct turnstile *ts;
uintptr_t state;
ts = turnstile_prepare((uintptr_t)gate, &gate->turnstile, TURNSTILE_NULL, TURNSTILE_KERNEL_MUTEX);
turnstile_update_inheritor(ts, holder, (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD));
waitq_assert_wait64(&ts->ts_waitq, CAST_EVENT64_T(GATE_EVENT(gate)), interruptible, deadline);
gate_iunlock(gate);
turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
*wait = thread_block(THREAD_CONTINUE_NULL);
gate_ilock(gate);
*waiters = turnstile_has_waiters(ts);
if (!*waiters) {
state = ordered_load_gate(gate);
holder = GATE_STATE_TO_THREAD(state);
if (holder &&
((*wait != THREAD_AWAKENED) || holder == current_thread())) {
turnstile_update_inheritor(ts, TURNSTILE_INHERITOR_NULL, TURNSTILE_IMMEDIATE_UPDATE);
turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
}
}
turnstile_complete((uintptr_t)gate, &gate->turnstile, NULL, TURNSTILE_KERNEL_MUTEX);
return turnstile_cleanup;
}
static gate_wait_result_t
gate_wait(gate_t* gate,
wait_interrupt_t interruptible,
uint64_t deadline,
void (^primitive_unlock)(void),
void (^primitive_lock)(void))
{
gate_wait_result_t ret;
void_func_void func_after_interlock_unlock;
wait_result_t wait_result;
uintptr_t state;
thread_t holder;
bool waiters;
gate_ilock(gate);
state = ordered_load_gate(gate);
holder = GATE_STATE_TO_THREAD(state);
if (holder == NULL) {
panic("Trying to wait on open gate thread %p gate %p", current_thread(), gate);
}
state |= GATE_WAITERS;
ordered_store_gate(gate, state);
primitive_unlock();
func_after_interlock_unlock = gate_wait_turnstile( gate,
interruptible,
deadline,
holder,
&wait_result,
&waiters);
state = ordered_load_gate(gate);
holder = GATE_STATE_TO_THREAD(state);
switch (wait_result) {
case THREAD_INTERRUPTED:
case THREAD_TIMED_OUT:
assert(holder != current_thread());
if (waiters) {
state |= GATE_WAITERS;
} else {
state &= ~GATE_WAITERS;
}
ordered_store_gate(gate, state);
if (wait_result == THREAD_INTERRUPTED) {
ret = GATE_INTERRUPTED;
} else {
ret = GATE_TIMED_OUT;
}
break;
default:
if (holder == current_thread()) {
ret = GATE_HANDOFF;
} else {
ret = GATE_OPENED;
}
break;
}
gate_iunlock(gate);
func_after_interlock_unlock();
primitive_lock();
return ret;
}
static void
gate_assert(gate_t *gate, int flags)
{
uintptr_t state;
thread_t holder;
gate_ilock(gate);
state = ordered_load_gate(gate);
holder = GATE_STATE_TO_THREAD(state);
switch (flags) {
case GATE_ASSERT_CLOSED:
assert(holder != NULL);
break;
case GATE_ASSERT_OPEN:
assert(holder == NULL);
break;
case GATE_ASSERT_HELD:
assert(holder == current_thread());
break;
default:
panic("invalid %s flag %d", __func__, flags);
}
gate_iunlock(gate);
}
static void
gate_init(gate_t *gate)
{
gate->gate_data = 0;
gate->turnstile = NULL;
}
static void
gate_destroy(__assert_only gate_t *gate)
{
assert(gate->gate_data == 0);
assert(gate->turnstile == NULL);
}
void
lck_rw_gate_init(lck_rw_t *lock, gate_t *gate)
{
(void) lock;
gate_init(gate);
}
void
lck_rw_gate_destroy(lck_rw_t *lock, gate_t *gate)
{
(void) lock;
gate_destroy(gate);
}
int
lck_rw_gate_try_close(__assert_only lck_rw_t *lock, gate_t *gate)
{
LCK_RW_ASSERT(lock, LCK_RW_ASSERT_HELD);
return gate_try_close(gate);
}
void
lck_rw_gate_close(__assert_only lck_rw_t *lock, gate_t *gate)
{
LCK_RW_ASSERT(lock, LCK_RW_ASSERT_HELD);
return gate_close(gate);
}
void
lck_rw_gate_open(__assert_only lck_rw_t *lock, gate_t *gate)
{
LCK_RW_ASSERT(lock, LCK_RW_ASSERT_HELD);
gate_open(gate);
}
kern_return_t
lck_rw_gate_handoff(__assert_only lck_rw_t *lock, gate_t *gate, int flags)
{
LCK_RW_ASSERT(lock, LCK_RW_ASSERT_HELD);
return gate_handoff(gate, flags);
}
void
lck_rw_gate_steal(__assert_only lck_rw_t *lock, gate_t *gate)
{
LCK_RW_ASSERT(lock, LCK_RW_ASSERT_HELD);
gate_steal(gate);
}
gate_wait_result_t
lck_rw_gate_wait(lck_rw_t *lock, gate_t *gate, lck_sleep_action_t lck_sleep_action, wait_interrupt_t interruptible, uint64_t deadline)
{
__block lck_rw_type_t lck_rw_type = LCK_RW_TYPE_EXCLUSIVE;
LCK_RW_ASSERT(lock, LCK_RW_ASSERT_HELD);
if (lck_sleep_action & LCK_SLEEP_UNLOCK) {
return gate_wait(gate,
interruptible,
deadline,
^{lck_rw_type = lck_rw_done(lock);},
^{;});
} else if (!(lck_sleep_action & (LCK_SLEEP_SHARED | LCK_SLEEP_EXCLUSIVE))) {
return gate_wait(gate,
interruptible,
deadline,
^{lck_rw_type = lck_rw_done(lock);},
^{lck_rw_lock(lock, lck_rw_type);});
} else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE) {
return gate_wait(gate,
interruptible,
deadline,
^{lck_rw_type = lck_rw_done(lock);},
^{lck_rw_lock_exclusive(lock);});
} else {
return gate_wait(gate,
interruptible,
deadline,
^{lck_rw_type = lck_rw_done(lock);},
^{lck_rw_lock_shared(lock);});
}
}
void
lck_rw_gate_assert(__assert_only lck_rw_t *lock, gate_t *gate, int flags)
{
LCK_RW_ASSERT(lock, LCK_RW_ASSERT_HELD);
gate_assert(gate, flags);
return;
}
void
lck_mtx_gate_init(lck_mtx_t *lock, gate_t *gate)
{
(void) lock;
gate_init(gate);
}
void
lck_mtx_gate_destroy(lck_mtx_t *lock, gate_t *gate)
{
(void) lock;
gate_destroy(gate);
}
int
lck_mtx_gate_try_close(__assert_only lck_mtx_t *lock, gate_t *gate)
{
LCK_MTX_ASSERT(lock, LCK_MTX_ASSERT_OWNED);
return gate_try_close(gate);
}
void
lck_mtx_gate_close(__assert_only lck_mtx_t *lock, gate_t *gate)
{
LCK_MTX_ASSERT(lock, LCK_MTX_ASSERT_OWNED);
return gate_close(gate);
}
void
lck_mtx_gate_open(__assert_only lck_mtx_t *lock, gate_t *gate)
{
LCK_MTX_ASSERT(lock, LCK_MTX_ASSERT_OWNED);
gate_open(gate);
}
kern_return_t
lck_mtx_gate_handoff(__assert_only lck_mtx_t *lock, gate_t *gate, int flags)
{
LCK_MTX_ASSERT(lock, LCK_MTX_ASSERT_OWNED);
return gate_handoff(gate, flags);
}
void
lck_mtx_gate_steal(__assert_only lck_mtx_t *lock, gate_t *gate)
{
LCK_MTX_ASSERT(lock, LCK_MTX_ASSERT_OWNED);
gate_steal(gate);
}
gate_wait_result_t
lck_mtx_gate_wait(lck_mtx_t *lock, gate_t *gate, lck_sleep_action_t lck_sleep_action, wait_interrupt_t interruptible, uint64_t deadline)
{
LCK_MTX_ASSERT(lock, LCK_MTX_ASSERT_OWNED);
if (lck_sleep_action & LCK_SLEEP_UNLOCK) {
return gate_wait(gate,
interruptible,
deadline,
^{lck_mtx_unlock(lock);},
^{;});
} else if (lck_sleep_action & LCK_SLEEP_SPIN) {
return gate_wait(gate,
interruptible,
deadline,
^{lck_mtx_unlock(lock);},
^{lck_mtx_lock_spin(lock);});
} else if (lck_sleep_action & LCK_SLEEP_SPIN_ALWAYS) {
return gate_wait(gate,
interruptible,
deadline,
^{lck_mtx_unlock(lock);},
^{lck_mtx_lock_spin_always(lock);});
} else {
return gate_wait(gate,
interruptible,
deadline,
^{lck_mtx_unlock(lock);},
^{lck_mtx_lock(lock);});
}
}
void
lck_mtx_gate_assert(__assert_only lck_mtx_t *lock, gate_t *gate, int flags)
{
LCK_MTX_ASSERT(lock, LCK_MTX_ASSERT_OWNED);
gate_assert(gate, flags);
}
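/*
 * Example (sketch): a gate serializes a long operation that cannot keep
 * the protecting lock held while it runs. All names are hypothetical.
 *
 *	lck_mtx_lock(my_mtx);
 *	if (lck_mtx_gate_try_close(my_mtx, &my_gate) == KERN_SUCCESS) {
 *		lck_mtx_unlock(my_mtx);
 *		// ... long operation: gate closed, mutex dropped ...
 *		lck_mtx_lock(my_mtx);
 *		lck_mtx_gate_open(my_mtx, &my_gate);
 *	} else {
 *		// another thread owns the operation; wait for it
 *		// (GATE_HANDOFF would mean ownership was passed to us)
 *		(void) lck_mtx_gate_wait(my_mtx, &my_gate,
 *		    LCK_SLEEP_DEFAULT, THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
 *	}
 *	lck_mtx_unlock(my_mtx);
 */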
#pragma mark - LCK_*_DECLARE support
__startup_func
void
lck_grp_attr_startup_init(struct lck_grp_attr_startup_spec *sp)
{
lck_grp_attr_t *attr = sp->grp_attr;
lck_grp_attr_setdefault(attr);
attr->grp_attr_val |= sp->grp_attr_set_flags;
attr->grp_attr_val &= ~sp->grp_attr_clear_flags;
}
__startup_func
void
lck_grp_startup_init(struct lck_grp_startup_spec *sp)
{
lck_grp_init(sp->grp, sp->grp_name, sp->grp_attr);
}
__startup_func
void
lck_attr_startup_init(struct lck_attr_startup_spec *sp)
{
lck_attr_t *attr = sp->lck_attr;
lck_attr_setdefault(attr);
attr->lck_attr_val |= sp->lck_attr_set_flags;
attr->lck_attr_val &= ~sp->lck_attr_clear_flags;
}
__startup_func
void
lck_spin_startup_init(struct lck_spin_startup_spec *sp)
{
lck_spin_init(sp->lck, sp->lck_grp, sp->lck_attr);
}
__startup_func
void
lck_mtx_startup_init(struct lck_mtx_startup_spec *sp)
{
if (sp->lck_ext) {
lck_mtx_init_ext(sp->lck, sp->lck_ext, sp->lck_grp, sp->lck_attr);
} else {
lck_mtx_init(sp->lck, sp->lck_grp, sp->lck_attr);
}
}
__startup_func
void
lck_rw_startup_init(struct lck_rw_startup_spec *sp)
{
lck_rw_init(sp->lck, sp->lck_grp, sp->lck_attr);
}
__startup_func
void
usimple_lock_startup_init(struct usimple_lock_startup_spec *sp)
{
simple_lock_init(sp->lck, sp->lck_init_arg);
}
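/*
 * Example (sketch): these startup specs back the LCK_*_DECLARE
 * convenience macros, which statically define locks and register them
 * for initialization during lock startup, with no explicit init call:
 *
 *	LCK_GRP_DECLARE(my_grp, "com.example.driver");
 *	LCK_MTX_DECLARE(my_mtx, &my_grp);
 *	LCK_RW_DECLARE(my_rw, &my_grp);
 *
 * Each macro emits the matching *_startup_spec plus a STARTUP_ARG()
 * entry that invokes the corresponding *_startup_init() routine above.
 */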