#include <kern/etap_macros.h>
#include <kern/misc_protos.h>
#include <kern/sync_lock.h>
#include <kern/sched_prim.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_sync.h>
#include <kern/etap_macros.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
/*
 *	ulock_ownership_set(ul, th)
 *
 *	Record thread (th)'s top activation as the holder of ulock (ul):
 *	the ulock is enqueued on the activation's held_ulocks list (under
 *	the act lock) and the activation is stored in (ul)->holder.
 *	NOTE(review): call sites in this file invoke this with the ulock's
 *	own lock held — presumably required; confirm before reuse.
 */
#define ulock_ownership_set(ul, th) \
MACRO_BEGIN \
thread_act_t _th_act; \
_th_act = (th)->top_act; \
act_lock(_th_act); \
enqueue (&_th_act->held_ulocks, (queue_entry_t) (ul)); \
act_unlock(_th_act); \
(ul)->holder = _th_act; \
MACRO_END
/*
 *	ulock_ownership_clear(ul)
 *
 *	Dissolve (ul)'s current ownership: remove the ulock from the
 *	holder activation's held_ulocks list and reset (ul)->holder.
 *	The act lock is taken only while the activation is still active;
 *	for an inactive (terminating) activation the queue is edited
 *	without the lock — presumably safe because nothing else walks a
 *	dead activation's held_ulocks list (TODO confirm).
 */
#define ulock_ownership_clear(ul) \
MACRO_BEGIN \
thread_act_t _th_act; \
_th_act = (ul)->holder; \
if (_th_act->active) { \
act_lock(_th_act); \
remqueue(&_th_act->held_ulocks, \
(queue_entry_t) (ul)); \
act_unlock(_th_act); \
} else { \
remqueue(&_th_act->held_ulocks, \
(queue_entry_t) (ul)); \
} \
(ul)->holder = THR_ACT_NULL; \
MACRO_END
/*
 *	lock_set_ownership_set(ls, t)
 *
 *	Attach lock set (ls) to task (t): link it at the head of the
 *	task's lock_set_list and bump lock_sets_owned, all under the
 *	task lock, then record the task as the set's owner.
 */
#define lock_set_ownership_set(ls, t) \
MACRO_BEGIN \
task_lock((t)); \
enqueue_head(&(t)->lock_set_list, (queue_entry_t) (ls));\
(t)->lock_sets_owned++; \
task_unlock((t)); \
(ls)->owner = (t); \
MACRO_END
/*
 *	lock_set_ownership_clear(ls, t)
 *
 *	Detach lock set (ls) from task (t): unlink it from the task's
 *	lock_set_list and decrement lock_sets_owned under the task lock.
 *	NOTE(review): unlike the set macro, (ls)->owner is deliberately
 *	left pointing at the old task — the sole caller
 *	(lock_set_destroy) dereferences the set right afterwards.
 */
#define lock_set_ownership_clear(ls, t) \
MACRO_BEGIN \
task_lock((t)); \
remqueue(&(t)->lock_set_list, (queue_entry_t) (ls)); \
(t)->lock_sets_owned--; \
task_unlock((t)); \
MACRO_END
/*
 *	Dummy kernel objects whose addresses serve as distinct 64-bit
 *	wait events: LOCK_SET_EVENT for ordinary acquire waits,
 *	LOCK_SET_HANDOFF for the handoff/accept rendezvous.
 */
unsigned int lock_set_event;
#define LOCK_SET_EVENT ((event64_t)&lock_set_event)
unsigned int lock_set_handoff;
#define LOCK_SET_HANDOFF ((event64_t)&lock_set_handoff)
/*
 *	ROUTINE:	lock_set_init		[private]
 *
 *	Initialize the lock_set subsystem.  Nothing is required at
 *	present; this exists as a hook for future setup work.
 */
void
lock_set_init(void)
{
}
/*
 *	ROUTINE:	lock_set_create		[exported]
 *
 *	Create a lock set with n_ulocks ulocks, using the given
 *	wait-queue policy, owned by `task'.  On success the set is
 *	returned through `new_lock_set' with one reference and an
 *	associated kernel port of type IKOT_LOCK_SET.
 *
 *	Returns:
 *	  KERN_INVALID_ARGUMENT	  null task, non-positive count, or
 *				  policy above SYNC_POLICY_MAX
 *	  KERN_RESOURCE_SHORTAGE  memory or port allocation failed
 */
kern_return_t
lock_set_create (
	task_t		task,
	lock_set_t	*new_lock_set,
	int		n_ulocks,
	int		policy)
{
	lock_set_t	lock_set = LOCK_SET_NULL;
	ulock_t		ulock;
	int		size;
	int		x;

	*new_lock_set = LOCK_SET_NULL;

	if (task == TASK_NULL || n_ulocks <= 0 || policy > SYNC_POLICY_MAX)
		return KERN_INVALID_ARGUMENT;

	/* struct lock_set embeds one ulock; allocate room for the rest */
	size = sizeof(struct lock_set) + (sizeof(struct ulock) * (n_ulocks-1));
	lock_set = (lock_set_t) kalloc (size);

	if (lock_set == LOCK_SET_NULL)
		return KERN_RESOURCE_SHORTAGE;

	lock_set_lock_init(lock_set);
	lock_set->n_ulocks = n_ulocks;
	lock_set->ref_count = 1;

	/*
	 *  Create and initialize the lock set port
	 */
	lock_set->port = ipc_port_alloc_kernel();
	if (lock_set->port == IP_NULL) {
		/* drops the sole reference, freeing the allocation */
		lock_set_dereference(lock_set);
		return KERN_RESOURCE_SHORTAGE;
	}

	ipc_kobject_set (lock_set->port,
			(ipc_kobject_t) lock_set,
			IKOT_LOCK_SET);

	/*
	 *  Initialize each ulock in the lock set
	 */
	for (x=0; x < n_ulocks; x++) {
		ulock = (ulock_t) &lock_set->ulock_list[x];
		ulock_lock_init(ulock);
		ulock->lock_set = lock_set;
		ulock->holder = THR_ACT_NULL;
		ulock->blocked = FALSE;
		ulock->unstable = FALSE;
		ulock->ho_wait = FALSE;
		/*
		 * FIX: accept_wait was previously left uninitialized.
		 * kalloc() does not zero memory, and lock_set_destroy(),
		 * lock_handoff() and lock_handoff_accept() all test this
		 * flag, so it must start out FALSE.
		 */
		ulock->accept_wait = FALSE;
		wait_queue_init(&ulock->wait_queue, policy);
	}

	lock_set_ownership_set(lock_set, task);

	lock_set->active = TRUE;
	*new_lock_set = lock_set;

	return KERN_SUCCESS;
}
/*
 *	ROUTINE:	lock_set_destroy	[exported]
 *
 *	Destroy a lock set: only the owning task may do so.  The set is
 *	marked inactive, every waiter (acquire, handoff, and accept
 *	waiters) is woken with THREAD_RESTART so blocked threads notice
 *	the destruction, held ulocks are released, and the set's port
 *	and creation reference are dropped.
 *
 *	Returns:
 *	  KERN_INVALID_ARGUMENT	    null task or lock set
 *	  KERN_INVALID_RIGHT	    caller's task is not the owner
 *	  KERN_LOCK_SET_DESTROYED   set was already destroyed
 */
kern_return_t
lock_set_destroy (task_t task, lock_set_t lock_set)
{
/* NOTE(review): `thread' is never used in this function. */
thread_t thread;
ulock_t ulock;
int i;
if (task == TASK_NULL || lock_set == LOCK_SET_NULL)
return KERN_INVALID_ARGUMENT;
/* owner is checked before taking the set lock; owner only changes
 * under destruction, which is serialized by the active flag below */
if (lock_set->owner != task)
return KERN_INVALID_RIGHT;
lock_set_lock(lock_set);
if (!lock_set->active) {
lock_set_unlock(lock_set);
return KERN_LOCK_SET_DESTROYED;
}
/* deactivate first so new operations fail with DESTROYED */
lock_set->active = FALSE;
/*
 *	Wake up every waiter on every ulock with THREAD_RESTART and
 *	dissolve any remaining ownership.
 */
for (i = 0; i < lock_set->n_ulocks; i++) {
ulock = &lock_set->ulock_list[i];
ulock_lock(ulock);
/* a thread parked in lock_handoff_accept */
if (ulock->accept_wait) {
ulock->accept_wait = FALSE;
wait_queue_wakeup64_one(&ulock->wait_queue,
LOCK_SET_HANDOFF,
THREAD_RESTART);
}
if (ulock->holder) {
/* threads blocked in lock_acquire */
if (ulock->blocked) {
ulock->blocked = FALSE;
wait_queue_wakeup64_all(&ulock->wait_queue,
LOCK_SET_EVENT,
THREAD_RESTART);
}
/* the holder parked in lock_handoff */
if (ulock->ho_wait) {
ulock->ho_wait = FALSE;
wait_queue_wakeup64_one(&ulock->wait_queue,
LOCK_SET_HANDOFF,
THREAD_RESTART);
}
ulock_ownership_clear(ulock);
}
ulock_unlock(ulock);
}
lock_set_unlock(lock_set);
lock_set_ownership_clear(lock_set, task);
/*
 *	Drop the kernel port and the creation reference.  Any
 *	outstanding references keep the memory alive until released.
 */
ipc_port_dealloc_kernel(lock_set->port);
lock_set_dereference(lock_set);
return KERN_SUCCESS;
}
/*
 *	ROUTINE:	lock_acquire	[exported]
 *
 *	Acquire ulock `lock_id' in `lock_set', blocking (ABORTSAFE) if
 *	it is held by another activation.  A woken waiter is granted
 *	ownership directly by the releaser (see lock_release_internal).
 *
 *	Returns:
 *	  KERN_SUCCESS / KERN_LOCK_UNSTABLE   acquired (unstable if a
 *					      previous holder died)
 *	  KERN_LOCK_OWNED_SELF		      caller already holds it
 *	  KERN_LOCK_SET_DESTROYED	      set destroyed
 *	  KERN_ABORTED			      wait was interrupted
 *	  KERN_INVALID_ARGUMENT		      bad set or lock id
 */
kern_return_t
lock_acquire (lock_set_t lock_set, int lock_id)
{
ulock_t ulock;
if (lock_set == LOCK_SET_NULL)
return KERN_INVALID_ARGUMENT;
if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
return KERN_INVALID_ARGUMENT;
retry:
lock_set_lock(lock_set);
if (!lock_set->active) {
lock_set_unlock(lock_set);
return KERN_LOCK_SET_DESTROYED;
}
/* hand-over-hand: take the ulock lock, then drop the set lock */
ulock = (ulock_t) &lock_set->ulock_list[lock_id];
ulock_lock(ulock);
lock_set_unlock(lock_set);
/*
 *	Block the current thread if the lock is already held.
 */
if (ulock->holder != THR_ACT_NULL) {
int wait_result;
if (ulock->holder == current_act()) {
ulock_unlock(ulock);
return KERN_LOCK_OWNED_SELF;
}
ulock->blocked = TRUE;
wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
LOCK_SET_EVENT,
THREAD_ABORTSAFE);
ulock_unlock(ulock);
/*
 *	Block - Wait for the lock to become available.
 */
if (wait_result == THREAD_WAITING)
wait_result = thread_block(THREAD_CONTINUE_NULL);
/*
 *	Check the result status:
 *	AWAKENED - the releaser transferred ownership to us.
 *	  NOTE(review): `unstable' is read here without the ulock
 *	  lock — presumably safe since we now hold the ulock; confirm.
 *	INTERRUPTED - the wait was aborted.
 *	RESTART - the lock set may have been destroyed; retry.
 */
switch (wait_result) {
case THREAD_AWAKENED:
return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
KERN_SUCCESS;
case THREAD_INTERRUPTED:
return KERN_ABORTED;
case THREAD_RESTART:
goto retry;
default:
panic("lock_acquire\n");
}
}
/*
 *	Assign lock ownership
 */
ulock_ownership_set(ulock, current_thread());
ulock_unlock(ulock);
return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}
/*
 *	ROUTINE:	lock_release	[exported]
 *
 *	Release ulock `lock_id' of `lock_set' on behalf of the current
 *	activation.  Validates the arguments and delegates the real
 *	work (ownership transfer / waiter wakeup) to
 *	lock_release_internal().
 */
kern_return_t
lock_release (lock_set_t lock_set, int lock_id)
{
	ulock_t		target;

	/* short-circuit keeps the n_ulocks read safe on a null set */
	if (lock_set == LOCK_SET_NULL ||
	    lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	target = (ulock_t) &lock_set->ulock_list[lock_id];

	return (lock_release_internal(target, current_act()));
}
/*
 *	ROUTINE:	lock_try	[exported]
 *
 *	Attempt to acquire ulock `lock_id' without blocking.  When the
 *	lock is already held an ownership status is returned instead:
 *	KERN_LOCK_OWNED_SELF if the caller holds it, KERN_LOCK_OWNED
 *	if another activation does.
 *
 *	Returns:
 *	  KERN_SUCCESS / KERN_LOCK_UNSTABLE   acquired
 *	  KERN_LOCK_OWNED_SELF / KERN_LOCK_OWNED  already held
 *	  KERN_LOCK_SET_DESTROYED	      set destroyed
 *	  KERN_INVALID_ARGUMENT		      bad set or lock id
 */
kern_return_t
lock_try (lock_set_t lock_set, int lock_id)
{
	ulock_t	 ulock;

	if (lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
		return KERN_INVALID_ARGUMENT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	/* hand-over-hand: take the ulock lock, then drop the set lock */
	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
	ulock_lock(ulock);
	lock_set_unlock(lock_set);

	/*
	 *  If the lock is already owned, we return without blocking.
	 *
	 *  FIX: a second lock_set_unlock() was issued here although the
	 *  set lock was already dropped above — unlocking an unheld
	 *  lock.  The redundant unlock has been removed.
	 */
	if (ulock->holder != THR_ACT_NULL) {
		if (ulock->holder == current_act()) {
			ulock_unlock(ulock);
			return KERN_LOCK_OWNED_SELF;
		}
		ulock_unlock(ulock);
		return KERN_LOCK_OWNED;
	}

	/*
	 *  Assign lock ownership to the caller.
	 */
	ulock_ownership_set(ulock, current_thread());
	ulock_unlock(ulock);

	return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}
/*
 *	ROUTINE:	lock_make_stable	[exported]
 *
 *	Clear the `unstable' flag on a held ulock, asserting that the
 *	caller has repaired any state left inconsistent by a previous
 *	holder's death.  Only the current holder may do this.
 *
 *	Returns:
 *	  KERN_SUCCESS			flag cleared
 *	  KERN_INVALID_RIGHT		caller does not hold the lock
 *	  KERN_LOCK_SET_DESTROYED	set destroyed
 *	  KERN_INVALID_ARGUMENT		bad set or lock id
 */
kern_return_t
lock_make_stable (lock_set_t lock_set, int lock_id)
{
ulock_t ulock;
if (lock_set == LOCK_SET_NULL)
return KERN_INVALID_ARGUMENT;
if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
return KERN_INVALID_ARGUMENT;
lock_set_lock(lock_set);
if (!lock_set->active) {
lock_set_unlock(lock_set);
return KERN_LOCK_SET_DESTROYED;
}
/* hand-over-hand: take the ulock lock, then drop the set lock */
ulock = (ulock_t) &lock_set->ulock_list[lock_id];
ulock_lock(ulock);
lock_set_unlock(lock_set);
if (ulock->holder != current_act()) {
ulock_unlock(ulock);
return KERN_INVALID_RIGHT;
}
ulock->unstable = FALSE;
ulock_unlock(ulock);
return KERN_SUCCESS;
}
/*
 *	ROUTINE:	lock_make_unstable	[internal]
 *
 *	Mark `ulock' unstable if it is held by `thr_act'.  Used when a
 *	holder terminates without releasing, so that the next acquirer
 *	receives KERN_LOCK_UNSTABLE and knows the protected state may
 *	be inconsistent.
 */
kern_return_t
lock_make_unstable (ulock_t ulock, thread_act_t thr_act)
{
lock_set_t lock_set;
lock_set = ulock->lock_set;
lock_set_lock(lock_set);
if (!lock_set->active) {
lock_set_unlock(lock_set);
return KERN_LOCK_SET_DESTROYED;
}
/* hand-over-hand: take the ulock lock, then drop the set lock */
ulock_lock(ulock);
lock_set_unlock(lock_set);
if (ulock->holder != thr_act) {
ulock_unlock(ulock);
return KERN_INVALID_RIGHT;
}
ulock->unstable = TRUE;
ulock_unlock(ulock);
return KERN_SUCCESS;
}
/*
 *	ROUTINE:	lock_release_internal	[internal]
 *
 *	Release `ulock' on behalf of `thr_act'.  If other threads are
 *	blocked in lock_acquire, ownership is transferred directly to
 *	one woken waiter (it never observes the lock as free);
 *	otherwise ownership is simply dissolved.
 *
 *	Returns:
 *	  KERN_SUCCESS			released (or handed off)
 *	  KERN_INVALID_RIGHT		thr_act is not the holder
 *	  KERN_LOCK_SET_DESTROYED	set destroyed
 *	  KERN_INVALID_ARGUMENT		ulock has no lock set
 */
kern_return_t
lock_release_internal (ulock_t ulock, thread_act_t thr_act)
{
lock_set_t lock_set;
/* NOTE(review): `result' is never used in this function. */
int result;
if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
return KERN_INVALID_ARGUMENT;
lock_set_lock(lock_set);
if (!lock_set->active) {
lock_set_unlock(lock_set);
return KERN_LOCK_SET_DESTROYED;
}
/* hand-over-hand: take the ulock lock, then drop the set lock */
ulock_lock(ulock);
lock_set_unlock(lock_set);
if (ulock->holder != thr_act) {
ulock_unlock(ulock);
return KERN_INVALID_RIGHT;
}
/*
 *	If we have a hint that threads might be waiting, pull one
 *	waiter off the wait queue (identity wakeup, at splsched with
 *	the queue locked) and transfer ownership to it directly.
 */
if (ulock->blocked) {
wait_queue_t wq = &ulock->wait_queue;
thread_t thread;
spl_t s;
s = splsched();
wait_queue_lock(wq);
/* TRUE: the identity wakeup unlocks the wait queue for us */
thread = wait_queue_wakeup64_identity_locked(wq,
LOCK_SET_EVENT,
THREAD_AWAKENED,
TRUE);
if (thread != THREAD_NULL) {
/* thread comes back locked; release it before handoff */
thread_unlock(thread);
splx(s);
ulock_ownership_clear(ulock);
ulock_ownership_set(ulock, thread);
ulock_unlock(ulock);
return KERN_SUCCESS;
} else {
/* hint was stale: no one actually waiting */
ulock->blocked = FALSE;
splx(s);
}
}
/*
 *	Disown the ulock: no waiters to hand it to.
 */
ulock_ownership_clear(ulock);
ulock_unlock(ulock);
return KERN_SUCCESS;
}
/*
 *	ROUTINE:	lock_handoff	[exported]
 *
 *	Hand ownership of a held ulock directly to a thread that has
 *	called lock_handoff_accept.  If an acceptor is already waiting,
 *	ownership is transferred immediately; otherwise the caller
 *	blocks (ABORTSAFE) until an acceptor arrives.
 *
 *	Returns:
 *	  KERN_SUCCESS			handoff completed
 *	  KERN_INVALID_RIGHT		caller does not hold the lock
 *	  KERN_LOCK_SET_DESTROYED	set destroyed
 *	  KERN_ABORTED			wait was interrupted
 *	  KERN_INVALID_ARGUMENT		bad set or lock id
 */
kern_return_t
lock_handoff (lock_set_t lock_set, int lock_id)
{
ulock_t ulock;
int wait_result;
if (lock_set == LOCK_SET_NULL)
return KERN_INVALID_ARGUMENT;
if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
return KERN_INVALID_ARGUMENT;
retry:
lock_set_lock(lock_set);
if (!lock_set->active) {
lock_set_unlock(lock_set);
return KERN_LOCK_SET_DESTROYED;
}
/* hand-over-hand: take the ulock lock, then drop the set lock */
ulock = (ulock_t) &lock_set->ulock_list[lock_id];
ulock_lock(ulock);
lock_set_unlock(lock_set);
if (ulock->holder != current_act()) {
ulock_unlock(ulock);
return KERN_INVALID_RIGHT;
}
/*
 *	If the accepting thread (the receiver) is already waiting,
 *	wake it up and transfer the lock to it directly.
 */
if (ulock->accept_wait) {
wait_queue_t wq = &ulock->wait_queue;
thread_t thread;
spl_t s;
/*
 *	See who the lucky devil is, if he is still there waiting.
 */
s = splsched();
wait_queue_lock(wq);
/* TRUE: the identity wakeup unlocks the wait queue for us */
thread = wait_queue_wakeup64_identity_locked(
wq,
LOCK_SET_HANDOFF,
THREAD_AWAKENED,
TRUE);
/*
 *	Transfer lock ownership
 */
if (thread != THREAD_NULL) {
/* thread comes back locked; release it before handoff */
thread_unlock(thread);
splx(s);
ulock_ownership_clear(ulock);
ulock_ownership_set(ulock, thread);
ulock->accept_wait = FALSE;
ulock_unlock(ulock);
return KERN_SUCCESS;
} else {
/*
 *	NOTE(review): accept_wait is left TRUE here even though
 *	no acceptor was found — presumably the acceptor is mid-
 *	abort and will clear its own flag; confirm.
 */
splx(s);
}
}
/*
 *	Block the current thread if an accepting thread isn't waiting.
 */
ulock->ho_wait = TRUE;
wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
LOCK_SET_HANDOFF,
THREAD_ABORTSAFE);
ulock_unlock(ulock);
if (wait_result == THREAD_WAITING)
wait_result = thread_block(THREAD_CONTINUE_NULL);
/*
 *	If the thread was woken-up via some action other than
 *	lock_handoff_accept or lock_set_destroy (i.e. thread_terminate),
 *	then we need to clear the ulock's handoff state.
 */
switch (wait_result) {
case THREAD_AWAKENED:
return KERN_SUCCESS;
case THREAD_INTERRUPTED:
ulock_lock(ulock);
assert(ulock->holder == current_act());
ulock->ho_wait = FALSE;
ulock_unlock(ulock);
return KERN_ABORTED;
case THREAD_RESTART:
goto retry;
default:
/* panic() does not return; control never falls out here */
panic("lock_handoff");
}
}
/*
 *	ROUTINE:	lock_handoff_accept	[exported]
 *
 *	Accept a ulock handoff.  If the holder is already parked in
 *	lock_handoff, ownership is taken immediately and the holder is
 *	woken; otherwise the caller blocks (ABORTSAFE) until a handoff
 *	arrives.  Only one acceptor may wait per ulock at a time.
 *
 *	Returns:
 *	  KERN_SUCCESS / KERN_LOCK_UNSTABLE   lock received
 *	  KERN_ALREADY_WAITING		      another acceptor is waiting
 *	  KERN_LOCK_OWNED_SELF		      caller already holds it
 *	  KERN_LOCK_SET_DESTROYED	      set destroyed
 *	  KERN_ABORTED			      wait was interrupted
 *	  KERN_INVALID_ARGUMENT		      bad set or lock id
 */
kern_return_t
lock_handoff_accept (lock_set_t lock_set, int lock_id)
{
ulock_t ulock;
int wait_result;
if (lock_set == LOCK_SET_NULL)
return KERN_INVALID_ARGUMENT;
if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
return KERN_INVALID_ARGUMENT;
retry:
lock_set_lock(lock_set);
if (!lock_set->active) {
lock_set_unlock(lock_set);
return KERN_LOCK_SET_DESTROYED;
}
/* hand-over-hand: take the ulock lock, then drop the set lock */
ulock = (ulock_t) &lock_set->ulock_list[lock_id];
ulock_lock(ulock);
lock_set_unlock(lock_set);
/* only one accepting thread is allowed per ulock */
if (ulock->accept_wait) {
ulock_unlock(ulock);
return KERN_ALREADY_WAITING;
}
if (ulock->holder == current_act()) {
ulock_unlock(ulock);
return KERN_LOCK_OWNED_SELF;
}
/*
 *	If the holder is already parked in lock_handoff, take the
 *	lock from it and wake it up.
 */
if (ulock->ho_wait) {
wait_queue_t wq = &ulock->wait_queue;
thread_t thread;
/*
 *	See who the lucky devil is, if he is still there waiting.
 */
assert(ulock->holder != THR_ACT_NULL);
thread = ulock->holder->thread;
if (wait_queue_wakeup64_thread(wq,
LOCK_SET_HANDOFF,
thread,
THREAD_AWAKENED) == KERN_SUCCESS) {
ulock_ownership_clear(ulock);
ulock_ownership_set(ulock, current_thread());
ulock->ho_wait = FALSE;
ulock_unlock(ulock);
/*
 *	NOTE(review): `unstable' is read after dropping the ulock
 *	lock — same pattern as lock_acquire; presumably safe
 *	because we are now the holder.
 */
return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
KERN_SUCCESS;
}
}
/*
 *	Block the current thread until a handoff arrives.
 */
ulock->accept_wait = TRUE;
wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
LOCK_SET_HANDOFF,
THREAD_ABORTSAFE);
ulock_unlock(ulock);
if (wait_result == THREAD_WAITING)
wait_result = thread_block(THREAD_CONTINUE_NULL);
/*
 *	If the thread was woken-up via some action other than
 *	lock_handoff or lock_set_destroy (i.e. thread_terminate),
 *	then we need to clear the ulock's handoff state.
 */
switch (wait_result) {
case THREAD_AWAKENED:
return KERN_SUCCESS;
case THREAD_INTERRUPTED:
ulock_lock(ulock);
ulock->accept_wait = FALSE;
ulock_unlock(ulock);
return KERN_ABORTED;
case THREAD_RESTART:
goto retry;
default:
/* panic() does not return; control never falls out here */
panic("lock_handoff_accept");
}
}
/*
 *	ROUTINE:	lock_set_reference	[internal]
 *
 *	Take an additional reference on a lock set, keeping its memory
 *	alive until a matching lock_set_dereference().
 */
void
lock_set_reference(lock_set_t lock_set)
{
	lock_set_lock(lock_set);
	lock_set->ref_count += 1;
	lock_set_unlock(lock_set);
}
/*
 *	ROUTINE:	lock_set_dereference	[internal]
 *
 *	Drop one reference on a lock set; free the storage when the
 *	count reaches zero.  n_ulocks is read after the lock is dropped,
 *	which is safe only because ours was the last reference — no one
 *	else can reach the set any more.
 */
void
lock_set_dereference(lock_set_t lock_set)
{
int ref_count;
int size;
lock_set_lock(lock_set);
ref_count = --(lock_set->ref_count);
lock_set_unlock(lock_set);
if (ref_count == 0) {
/* must match the size computed in lock_set_create() */
size = sizeof(struct lock_set) +
(sizeof(struct ulock) * (lock_set->n_ulocks - 1));
kfree((vm_offset_t) lock_set, size);
}
}