#include <mach/mach_types.h>
#include <mach/lock_set_server.h>
#include <mach/task_server.h>
#include <kern/misc_protos.h>
#include <kern/kalloc.h>
#include <kern/sync_lock.h>
#include <kern/sched_prim.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_sync.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
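
/*
 *	Ulock ownership MACROS
 *
 *	Assumes: the ulock's internal lock is held by the caller.
 */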
#define ulock_ownership_set(ul, th) \
MACRO_BEGIN \
thread_mtx_lock(th); \
enqueue (&th->held_ulocks, (queue_entry_t) (ul)); \
thread_mtx_unlock(th); \
(ul)->holder = th; \
MACRO_END
#define ulock_ownership_clear(ul) \
MACRO_BEGIN \
thread_t th; \
th = (ul)->holder; \
if ((th)->active) { \
thread_mtx_lock(th); \
remqueue(&th->held_ulocks, \
(queue_entry_t) (ul)); \
thread_mtx_unlock(th); \
} else { \
remqueue(&th->held_ulocks, \
(queue_entry_t) (ul)); \
} \
(ul)->holder = THREAD_NULL; \
MACRO_END
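
/*
 *	Lock set ownership MACROS
 *
 *	The owning task's lock protects its lock_set_list.
 */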
#define lock_set_ownership_set(ls, t) \
MACRO_BEGIN \
task_lock((t)); \
enqueue_head(&(t)->lock_set_list, (queue_entry_t) (ls));\
(t)->lock_sets_owned++; \
task_unlock((t)); \
(ls)->owner = (t); \
MACRO_END
#define lock_set_ownership_clear(ls, t) \
MACRO_BEGIN \
task_lock((t)); \
remqueue(&(t)->lock_set_list, (queue_entry_t) (ls)); \
(t)->lock_sets_owned--; \
task_unlock((t)); \
MACRO_END
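
/*
 *	Events used as 64-bit wait-queue keys: the kernel addresses of
 *	these dummy variables uniquely identify "lock available" and
 *	"lock handoff" waits on a ulock's wait queue.
 */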
unsigned int lock_set_event;
#define LOCK_SET_EVENT CAST_EVENT64_T(&lock_set_event)
unsigned int lock_set_handoff;
#define LOCK_SET_HANDOFF CAST_EVENT64_T(&lock_set_handoff)
lck_attr_t lock_set_attr;
lck_grp_t lock_set_grp;
static lck_grp_attr_t lock_set_grp_attr;
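
/*
 *	ROUTINE:	lock_set_init		[private]
 *
 *	Initialize the lock_set subsystem (lock group and attributes).
 */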
void
lock_set_init(void)
{
lck_grp_attr_setdefault(&lock_set_grp_attr);
lck_grp_init(&lock_set_grp, "lock_set", &lock_set_grp_attr);
lck_attr_setdefault(&lock_set_attr);
}
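
/*
 *	ROUTINE:	lock_set_create		[exported]
 *
 *	Creates a lock set of n_ulocks ulocks, owned by the specified
 *	task, and allocates a kernel port to name it.
 *
 *	A minimal user-space usage sketch (assuming the MIG-generated
 *	interfaces declared in <mach/lock_set.h> and <mach/task.h>):
 *
 *		lock_set_t ls;
 *		kern_return_t kr;
 *
 *		kr = lock_set_create(mach_task_self(), &ls, 4,
 *				     SYNC_POLICY_FIFO);
 *		if (kr == KERN_SUCCESS) {
 *			lock_acquire(ls, 0);	   blocks until ulock 0 is held
 *			lock_release(ls, 0);
 *			lock_set_destroy(mach_task_self(), ls);
 *		}
 */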
kern_return_t
lock_set_create (
task_t task,
lock_set_t *new_lock_set,
int n_ulocks,
int policy)
{
lock_set_t lock_set = LOCK_SET_NULL;
ulock_t ulock;
vm_size_t size;
int x;
*new_lock_set = LOCK_SET_NULL;
if (task == TASK_NULL || n_ulocks <= 0 || policy > SYNC_POLICY_MAX)
return KERN_INVALID_ARGUMENT;
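	/*
	 *  Guard the size computation below against arithmetic
	 *  overflow for very large n_ulocks.
	 */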
if ((VM_MAX_ADDRESS - sizeof(struct lock_set))/sizeof(struct ulock) < (unsigned)n_ulocks)
return KERN_RESOURCE_SHORTAGE;
size = sizeof(struct lock_set) + (sizeof(struct ulock) * (n_ulocks-1));
lock_set = (lock_set_t) kalloc (size);
if (lock_set == LOCK_SET_NULL)
return KERN_RESOURCE_SHORTAGE;
lock_set_lock_init(lock_set);
lock_set->n_ulocks = n_ulocks;
	lock_set->ref_count = (task == kernel_task) ? 1 : 2; /* one for kernel, one for port */
lock_set->port = ipc_port_alloc_kernel();
if (lock_set->port == IP_NULL) {
kfree(lock_set, size);
return KERN_RESOURCE_SHORTAGE;
}
ipc_kobject_set (lock_set->port,
(ipc_kobject_t) lock_set,
IKOT_LOCK_SET);
for (x=0; x < n_ulocks; x++) {
ulock = (ulock_t) &lock_set->ulock_list[x];
ulock_lock_init(ulock);
ulock->lock_set = lock_set;
ulock->holder = THREAD_NULL;
ulock->blocked = FALSE;
ulock->unstable = FALSE;
ulock->ho_wait = FALSE;
ulock->accept_wait = FALSE;
wait_queue_init(&ulock->wait_queue, policy);
}
lock_set_ownership_set(lock_set, task);
lock_set->active = TRUE;
*new_lock_set = lock_set;
return KERN_SUCCESS;
}
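
/*
 *	ROUTINE:	lock_set_destroy	[exported]
 *
 *	Destroys a lock set.  Only the owning task may destroy it.
 *	All threads currently blocked on the set's ulocks are woken
 *	and return KERN_LOCK_SET_DESTROYED.
 */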
kern_return_t
lock_set_destroy (task_t task, lock_set_t lock_set)
{
ulock_t ulock;
int i;
if (task == TASK_NULL || lock_set == LOCK_SET_NULL)
return KERN_INVALID_ARGUMENT;
if (lock_set->owner != task)
return KERN_INVALID_RIGHT;
lock_set_lock(lock_set);
if (!lock_set->active) {
lock_set_unlock(lock_set);
return KERN_LOCK_SET_DESTROYED;
}
lock_set->active = FALSE;
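	/*
	 *  Wake up every thread blocked on any ulock in the set.
	 *  THREAD_RESTART sends them back through their retry paths,
	 *  where the now-inactive set yields KERN_LOCK_SET_DESTROYED.
	 */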
for (i = 0; i < lock_set->n_ulocks; i++) {
ulock = &lock_set->ulock_list[i];
ulock_lock(ulock);
if (ulock->accept_wait) {
ulock->accept_wait = FALSE;
wait_queue_wakeup64_one(&ulock->wait_queue,
LOCK_SET_HANDOFF,
THREAD_RESTART);
}
if (ulock->holder) {
if (ulock->blocked) {
ulock->blocked = FALSE;
wait_queue_wakeup64_all(&ulock->wait_queue,
LOCK_SET_EVENT,
THREAD_RESTART);
}
if (ulock->ho_wait) {
ulock->ho_wait = FALSE;
wait_queue_wakeup64_one(&ulock->wait_queue,
LOCK_SET_HANDOFF,
THREAD_RESTART);
}
ulock_ownership_clear(ulock);
}
ulock_unlock(ulock);
}
lock_set_unlock(lock_set);
lock_set_ownership_clear(lock_set, task);
lock_set_dereference(lock_set);
return KERN_SUCCESS;
}
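
/*
 *	ROUTINE:	lock_acquire		[exported]
 *
 *	Acquires the specified ulock, blocking (ABORTSAFE) until it is
 *	released or the lock set is destroyed.  Returns
 *	KERN_LOCK_UNSTABLE if a previous holder died while holding it.
 */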
kern_return_t
lock_acquire (lock_set_t lock_set, int lock_id)
{
ulock_t ulock;
if (lock_set == LOCK_SET_NULL)
return KERN_INVALID_ARGUMENT;
if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
return KERN_INVALID_ARGUMENT;
retry:
lock_set_lock(lock_set);
if (!lock_set->active) {
lock_set_unlock(lock_set);
return KERN_LOCK_SET_DESTROYED;
}
ulock = (ulock_t) &lock_set->ulock_list[lock_id];
ulock_lock(ulock);
lock_set_unlock(lock_set);
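	/*
	 *  Block the current thread if the lock is already held.
	 */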
if (ulock->holder != THREAD_NULL) {
int wait_result;
if (ulock->holder == current_thread()) {
ulock_unlock(ulock);
return KERN_LOCK_OWNED_SELF;
}
ulock->blocked = TRUE;
wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
LOCK_SET_EVENT,
THREAD_ABORTSAFE, 0);
ulock_unlock(ulock);
if (wait_result == THREAD_WAITING)
wait_result = thread_block(THREAD_CONTINUE_NULL);
		/*
		 *  Check why the thread was woken up; in every case it
		 *  has already been removed from the wait queue.
		 */
		switch (wait_result) {
		case THREAD_AWAKENED:
			/* The releaser transferred ownership to us. */
			return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
						   KERN_SUCCESS;
		case THREAD_INTERRUPTED:
			return KERN_ABORTED;
		case THREAD_RESTART:
			goto retry;	/* probably a dead lock_set */
		default:
			panic("lock_acquire");
		}
}
ulock_ownership_set(ulock, current_thread());
ulock_unlock(ulock);
return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}
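
/*
 *	ROUTINE:	lock_release		[exported]
 *
 *	Releases the specified ulock, which must be held by the
 *	calling thread.
 */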
kern_return_t
lock_release (lock_set_t lock_set, int lock_id)
{
ulock_t ulock;
if (lock_set == LOCK_SET_NULL)
return KERN_INVALID_ARGUMENT;
if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
return KERN_INVALID_ARGUMENT;
ulock = (ulock_t) &lock_set->ulock_list[lock_id];
return (ulock_release_internal(ulock, current_thread()));
}
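
/*
 *	ROUTINE:	lock_try		[exported]
 *
 *	Attempts to acquire the ulock without blocking, returning an
 *	ownership status (owned by self, owned by another thread, or
 *	now acquired).
 */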
kern_return_t
lock_try (lock_set_t lock_set, int lock_id)
{
ulock_t ulock;
if (lock_set == LOCK_SET_NULL)
return KERN_INVALID_ARGUMENT;
if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
return KERN_INVALID_ARGUMENT;
lock_set_lock(lock_set);
if (!lock_set->active) {
lock_set_unlock(lock_set);
return KERN_LOCK_SET_DESTROYED;
}
ulock = (ulock_t) &lock_set->ulock_list[lock_id];
ulock_lock(ulock);
lock_set_unlock(lock_set);
	/*
	 *  If the lock is already owned, return without blocking; the
	 *  status tells the caller whether it holds the lock itself or
	 *  another thread does.  Only the ulock lock is held here; the
	 *  lock_set lock was already dropped above.
	 */
	if (ulock->holder != THREAD_NULL) {
		if (ulock->holder == current_thread()) {
			ulock_unlock(ulock);
			return KERN_LOCK_OWNED_SELF;
		}
		ulock_unlock(ulock);
		return KERN_LOCK_OWNED;
	}
ulock_ownership_set(ulock, current_thread());
ulock_unlock(ulock);
return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
}
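
/*
 *	ROUTINE:	lock_make_stable	[exported]
 *
 *	Clears the unstable indication; only the current holder may
 *	declare the data protected by the ulock consistent again.
 */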
kern_return_t
lock_make_stable (lock_set_t lock_set, int lock_id)
{
ulock_t ulock;
if (lock_set == LOCK_SET_NULL)
return KERN_INVALID_ARGUMENT;
if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
return KERN_INVALID_ARGUMENT;
lock_set_lock(lock_set);
if (!lock_set->active) {
lock_set_unlock(lock_set);
return KERN_LOCK_SET_DESTROYED;
}
ulock = (ulock_t) &lock_set->ulock_list[lock_id];
ulock_lock(ulock);
lock_set_unlock(lock_set);
if (ulock->holder != current_thread()) {
ulock_unlock(ulock);
return KERN_INVALID_RIGHT;
}
ulock->unstable = FALSE;
ulock_unlock(ulock);
return KERN_SUCCESS;
}
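
/*
 *	ROUTINE:	lock_make_unstable	[internal]
 *
 *	Marks the ulock unstable on behalf of the given holder thread.
 *	All future acquisitions return KERN_LOCK_UNSTABLE until the
 *	lock is made stable again.
 */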
kern_return_t
lock_make_unstable (ulock_t ulock, thread_t thread)
{
lock_set_t lock_set;
lock_set = ulock->lock_set;
lock_set_lock(lock_set);
if (!lock_set->active) {
lock_set_unlock(lock_set);
return KERN_LOCK_SET_DESTROYED;
}
ulock_lock(ulock);
lock_set_unlock(lock_set);
if (ulock->holder != thread) {
ulock_unlock(ulock);
return KERN_INVALID_RIGHT;
}
ulock->unstable = TRUE;
ulock_unlock(ulock);
return KERN_SUCCESS;
}
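
/*
 *	ROUTINE:	ulock_release_internal	[internal]
 *
 *	Releases the ulock on behalf of the given thread.  If other
 *	threads are blocked on it, ownership is handed directly to
 *	the woken waiter.
 */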
kern_return_t
ulock_release_internal (ulock_t ulock, thread_t thread)
{
lock_set_t lock_set;
if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
return KERN_INVALID_ARGUMENT;
lock_set_lock(lock_set);
if (!lock_set->active) {
lock_set_unlock(lock_set);
return KERN_LOCK_SET_DESTROYED;
}
ulock_lock(ulock);
lock_set_unlock(lock_set);
if (ulock->holder != thread) {
ulock_unlock(ulock);
return KERN_INVALID_RIGHT;
}
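	/*
	 *  If there are waiters, wake one and transfer ownership to
	 *  it while still holding the ulock lock, so the lock never
	 *  appears free in between.
	 */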
if (ulock->blocked) {
wait_queue_t wq = &ulock->wait_queue;
thread_t wqthread;
spl_t s;
s = splsched();
wait_queue_lock(wq);
wqthread = wait_queue_wakeup64_identity_locked(wq,
LOCK_SET_EVENT,
THREAD_AWAKENED,
TRUE);
if (wqthread != THREAD_NULL) {
thread_unlock(wqthread);
splx(s);
ulock_ownership_clear(ulock);
ulock_ownership_set(ulock, wqthread);
ulock_unlock(ulock);
return KERN_SUCCESS;
} else {
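			/*
			 *  No waiter was actually found; the blocked
			 *  indication was stale, so clear it.
			 */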
ulock->blocked = FALSE;
splx(s);
}
}
ulock_ownership_clear(ulock);
ulock_unlock(ulock);
return KERN_SUCCESS;
}
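
/*
 *	ROUTINE:	lock_handoff		[exported]
 *
 *	Hands off a held ulock directly to an accepting thread.  The
 *	holder blocks until a thread calls lock_handoff_accept on the
 *	same ulock; whichever side arrives first waits for the other.
 */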
kern_return_t
lock_handoff (lock_set_t lock_set, int lock_id)
{
ulock_t ulock;
int wait_result;
if (lock_set == LOCK_SET_NULL)
return KERN_INVALID_ARGUMENT;
if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
return KERN_INVALID_ARGUMENT;
retry:
lock_set_lock(lock_set);
if (!lock_set->active) {
lock_set_unlock(lock_set);
return KERN_LOCK_SET_DESTROYED;
}
ulock = (ulock_t) &lock_set->ulock_list[lock_id];
ulock_lock(ulock);
lock_set_unlock(lock_set);
if (ulock->holder != current_thread()) {
ulock_unlock(ulock);
return KERN_INVALID_RIGHT;
}
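	/*
	 *  If a thread is already waiting to accept, wake it and
	 *  transfer ownership to it directly.
	 */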
if (ulock->accept_wait) {
wait_queue_t wq = &ulock->wait_queue;
thread_t thread;
spl_t s;
s = splsched();
wait_queue_lock(wq);
thread = wait_queue_wakeup64_identity_locked(
wq,
LOCK_SET_HANDOFF,
THREAD_AWAKENED,
TRUE);
if (thread != THREAD_NULL) {
thread_unlock(thread);
splx(s);
ulock_ownership_clear(ulock);
ulock_ownership_set(ulock, thread);
ulock->accept_wait = FALSE;
ulock_unlock(ulock);
return KERN_SUCCESS;
} else {
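			/*
			 *  The accepting thread must have been aborted and
			 *  is racing back to clear accept_wait; fall
			 *  through and wait for the next acceptor.
			 */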
splx(s);
}
}
ulock->ho_wait = TRUE;
wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
LOCK_SET_HANDOFF,
THREAD_ABORTSAFE, 0);
ulock_unlock(ulock);
if (wait_result == THREAD_WAITING)
wait_result = thread_block(THREAD_CONTINUE_NULL);
switch (wait_result) {
case THREAD_AWAKENED:
ulock_lock(ulock);
assert(ulock->holder != current_thread());
ulock_unlock(ulock);
return KERN_SUCCESS;
case THREAD_INTERRUPTED:
ulock_lock(ulock);
assert(ulock->holder == current_thread());
ulock->ho_wait = FALSE;
ulock_unlock(ulock);
return KERN_ABORTED;
case THREAD_RESTART:
goto retry;
}
panic("lock_handoff");
return KERN_FAILURE;
}
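
/*
 *	ROUTINE:	lock_handoff_accept	[exported]
 *
 *	Accepts a ulock handoff.  If the holder is already waiting to
 *	hand off, the transfer happens immediately; otherwise the
 *	caller blocks until the holder initiates the handoff.
 */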
kern_return_t
lock_handoff_accept (lock_set_t lock_set, int lock_id)
{
ulock_t ulock;
int wait_result;
if (lock_set == LOCK_SET_NULL)
return KERN_INVALID_ARGUMENT;
if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
return KERN_INVALID_ARGUMENT;
retry:
lock_set_lock(lock_set);
if (!lock_set->active) {
lock_set_unlock(lock_set);
return KERN_LOCK_SET_DESTROYED;
}
ulock = (ulock_t) &lock_set->ulock_list[lock_id];
ulock_lock(ulock);
lock_set_unlock(lock_set);
if (ulock->accept_wait) {
ulock_unlock(ulock);
return KERN_ALREADY_WAITING;
}
if (ulock->holder == current_thread()) {
ulock_unlock(ulock);
return KERN_LOCK_OWNED_SELF;
}
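	/*
	 *  If the holder is already waiting to hand off, wake it and
	 *  take ownership immediately.
	 */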
if (ulock->ho_wait) {
wait_queue_t wq = &ulock->wait_queue;
assert(ulock->holder != THREAD_NULL);
if (wait_queue_wakeup64_thread(wq,
LOCK_SET_HANDOFF,
ulock->holder,
THREAD_AWAKENED) == KERN_SUCCESS) {
ulock_ownership_clear(ulock);
ulock_ownership_set(ulock, current_thread());
ulock->ho_wait = FALSE;
ulock_unlock(ulock);
return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
KERN_SUCCESS;
}
}
ulock->accept_wait = TRUE;
wait_result = wait_queue_assert_wait64(&ulock->wait_queue,
LOCK_SET_HANDOFF,
THREAD_ABORTSAFE, 0);
ulock_unlock(ulock);
if (wait_result == THREAD_WAITING)
wait_result = thread_block(THREAD_CONTINUE_NULL);
switch (wait_result) {
case THREAD_AWAKENED:
ulock_lock(ulock);
assert(ulock->accept_wait == FALSE);
assert(ulock->holder == current_thread());
ulock_unlock(ulock);
return KERN_SUCCESS;
case THREAD_INTERRUPTED:
ulock_lock(ulock);
ulock->accept_wait = FALSE;
ulock_unlock(ulock);
return KERN_ABORTED;
case THREAD_RESTART:
goto retry;
}
panic("lock_handoff_accept");
return KERN_FAILURE;
}
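
/*
 *	ROUTINE:	lock_set_reference	[internal]
 *
 *	Takes out a reference on a lock set, keeping the data
 *	structure in existence even if the set is deactivated.
 */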
void
lock_set_reference(lock_set_t lock_set)
{
lock_set_lock(lock_set);
lock_set->ref_count++;
lock_set_unlock(lock_set);
}
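
/*
 *	ROUTINE:	lock_set_dereference	[internal]
 *
 *	Releases a reference on a lock set.  The last reference
 *	deallocates the naming port and the lock set itself.
 */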
void
lock_set_dereference(lock_set_t lock_set)
{
int ref_count;
int size;
lock_set_lock(lock_set);
ref_count = --(lock_set->ref_count);
lock_set_unlock(lock_set);
if (ref_count == 0) {
ipc_port_dealloc_kernel(lock_set->port);
size = (int)(sizeof(struct lock_set) +
(sizeof(struct ulock) * (lock_set->n_ulocks - 1)));
kfree(lock_set, size);
}
}
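
/*
 *	ROUTINE:	ulock_release_all	[internal]
 *
 *	Marks unstable and releases every ulock still held by a
 *	terminating thread, so waiters see KERN_LOCK_UNSTABLE.
 */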
void
ulock_release_all(
thread_t thread)
{
ulock_t ulock;
while (!queue_empty(&thread->held_ulocks)) {
ulock = (ulock_t)queue_first(&thread->held_ulocks);
lock_make_unstable(ulock, thread);
ulock_release_internal(ulock, thread);
}
}