#include <kern/kern_types.h>
#include <kern/simple_lock.h>
#include <kern/kalloc.h>
#include <kern/queue.h>
#include <kern/spl.h>
#include <mach/sync_policy.h>
#include <kern/sched_prim.h>
#include <kern/wait_queue.h>
/*
 *	Initialize a previously allocated wait queue.
 *	Only the FIFO ordering policy is accepted; anything else
 *	yields KERN_INVALID_ARGUMENT.
 */
kern_return_t
wait_queue_init(
	wait_queue_t wq,
	int policy)
{
	/* reject any ordering policy other than FIFO */
	if ((policy & SYNC_POLICY_ORDER_MASK) != SYNC_POLICY_FIFO)
		return KERN_INVALID_ARGUMENT;

	wq->wq_fifo = TRUE;
	wq->wq_type = _WAIT_QUEUE_inited;
	queue_init(&wq->wq_queue);
	hw_lock_init(&wq->wq_interlock);
	return KERN_SUCCESS;
}
wait_queue_t
wait_queue_alloc(
int policy)
{
wait_queue_t wq;
kern_return_t ret;
wq = (wait_queue_t) kalloc(sizeof(struct wait_queue));
if (wq != WAIT_QUEUE_NULL) {
ret = wait_queue_init(wq, policy);
if (ret != KERN_SUCCESS) {
kfree((vm_offset_t)wq, sizeof(struct wait_queue));
wq = WAIT_QUEUE_NULL;
}
}
return wq;
}
/*
 *	Free a wait queue previously obtained from wait_queue_alloc().
 *	Fails with KERN_INVALID_ARGUMENT for non-queues and with
 *	KERN_FAILURE if the queue still has elements on it.
 */
kern_return_t
wait_queue_free(
	wait_queue_t wq)
{
	kern_return_t kr = KERN_SUCCESS;

	if (!wait_queue_is_queue(wq))
		kr = KERN_INVALID_ARGUMENT;
	else if (!queue_empty(&wq->wq_queue))
		kr = KERN_FAILURE;	/* waiters or set links still present */
	else
		kfree((vm_offset_t)wq, sizeof(struct wait_queue));

	return kr;
}
/*
 *	Initialize a wait queue set: the embedded wait queue is set up
 *	first (carrying the ordering policy), then re-stamped as a set,
 *	with the prepost option recorded from the policy bits.
 */
kern_return_t
wait_queue_set_init(
	wait_queue_set_t wqset,
	int policy)
{
	kern_return_t result;

	result = wait_queue_init(&wqset->wqs_wait_queue, policy);
	if (result != KERN_SUCCESS)
		return result;

	wqset->wqs_wait_queue.wq_type = _WAIT_QUEUE_SET_inited;
	wqset->wqs_wait_queue.wq_isprepost =
		(policy & SYNC_POLICY_PREPOST) ? TRUE : FALSE;
	queue_init(&wqset->wqs_setlinks);
	wqset->wqs_refcount = 0;
	return KERN_SUCCESS;
}
/*
 *	Thin compatibility wrapper: identical in every way to
 *	wait_queue_set_init().
 */
kern_return_t
wait_queue_sub_init(
	wait_queue_set_t wqset,
	int policy)
{
	return wait_queue_set_init(wqset, policy);
}
wait_queue_set_t
wait_queue_set_alloc(
int policy)
{
wait_queue_set_t wq_set;
wq_set = (wait_queue_set_t) kalloc(sizeof(struct wait_queue_set));
if (wq_set != WAIT_QUEUE_SET_NULL) {
kern_return_t ret;
ret = wait_queue_set_init(wq_set, policy);
if (ret != KERN_SUCCESS) {
kfree((vm_offset_t)wq_set, sizeof(struct wait_queue_set));
wq_set = WAIT_QUEUE_SET_NULL;
}
}
return wq_set;
}
/*
 *	Free a wait queue set obtained from wait_queue_set_alloc().
 *	Fails if the argument is not a set or if its embedded queue is
 *	not empty.
 */
kern_return_t
wait_queue_set_free(
	wait_queue_set_t wq_set)
{
	kern_return_t kr = KERN_SUCCESS;

	if (!wait_queue_is_set(wq_set))
		kr = KERN_INVALID_ARGUMENT;
	else if (!queue_empty(&wq_set->wqs_wait_queue.wq_queue))
		kr = KERN_FAILURE;	/* still has elements queued */
	else
		kfree((vm_offset_t)wq_set, sizeof(struct wait_queue_set));

	return kr;
}
/*
 *	Reset the accumulated prepost reference count of a wait queue
 *	set back to zero, under the set lock.
 */
kern_return_t
wait_queue_sub_clearrefs(
	wait_queue_set_t wq_set)
{
	if (!wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	wqs_lock(wq_set);
	wq_set->wqs_refcount = 0;
	wqs_unlock(wq_set);

	return KERN_SUCCESS;
}
/* Report sizeof(WaitQueueSet) for clients that embed or allocate one. */
unsigned int
wait_queue_set_size(void)
{
	return sizeof(WaitQueueSet);
}
/* Report sizeof(WaitQueueLink) for clients that embed or allocate one. */
unsigned int
wait_queue_link_size(void)
{
	return sizeof(WaitQueueLink);
}
/*
 * Sentinel tags for the wqe_type / wql_type fields.  The addresses of
 * these two statics serve purely as unique, unforgeable markers that
 * distinguish a set-link element from an unlinked (stale) one.
 */
static unsigned int _wait_queue_link;
static unsigned int _wait_queue_unlinked;
#define WAIT_QUEUE_LINK ((void *)&_wait_queue_link)
#define WAIT_QUEUE_UNLINKED ((void *)&_wait_queue_unlinked)
/*
 * Check one element on a wait queue: it must point back at the queue,
 * and its neighbors' links must be mutually consistent.
 */
#define WAIT_QUEUE_ELEMENT_CHECK(wq, wqe) \
WQASSERT(((wqe)->wqe_queue == (wq) && \
queue_next(queue_prev((queue_t) (wqe))) == (queue_t)(wqe)), \
"wait queue element list corruption: wq=%#x, wqe=%#x", \
(wq), (wqe))
/*
 * Navigate the wqs_setlinks chain of a set.  The head of the chain is
 * the queue header itself, which is not embedded in a link structure,
 * so these helpers special-case it before offsetting into wql_setlinks.
 */
#define WQSPREV(wqs, wql) ((wait_queue_link_t)queue_prev( \
((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
(queue_t)(wql) : &(wql)->wql_setlinks)))
#define WQSNEXT(wqs, wql) ((wait_queue_link_t)queue_next( \
((&(wqs)->wqs_setlinks == (queue_t)(wql)) ? \
(queue_t)(wql) : &(wql)->wql_setlinks)))
/*
 * Check one link on a set's setlinks chain: correct type tag, back
 * pointer to this set, a properly initialized member queue, and
 * consistent prev/next chain links.
 */
#define WAIT_QUEUE_SET_LINK_CHECK(wqs, wql) \
WQASSERT((((wql)->wql_type == WAIT_QUEUE_LINK) && \
((wql)->wql_setqueue == (wqs)) && \
((wql)->wql_queue->wq_type == _WAIT_QUEUE_inited) && \
(WQSNEXT((wqs), WQSPREV((wqs),(wql))) == (wql))), \
"wait queue set links corruption: wqs=%#x, wql=%#x", \
(wqs), (wql))
/*
 * With _WAIT_QUEUE_DEBUG_ defined, the CHECK macros walk whole lists
 * and panic on corruption; otherwise they reduce to plain asserts or
 * nothing at all.
 */
#if defined(_WAIT_QUEUE_DEBUG_)
#define WQASSERT(e, s, p0, p1) ((e) ? 0 : panic(s, p0, p1))
#define WAIT_QUEUE_CHECK(wq) \
MACRO_BEGIN \
queue_t q2 = &(wq)->wq_queue; \
wait_queue_element_t wqe2 = (wait_queue_element_t) queue_first(q2); \
while (!queue_end(q2, (queue_entry_t)wqe2)) { \
WAIT_QUEUE_ELEMENT_CHECK((wq), wqe2); \
wqe2 = (wait_queue_element_t) queue_next((queue_t) wqe2); \
} \
MACRO_END
#define WAIT_QUEUE_SET_CHECK(wqs) \
MACRO_BEGIN \
queue_t q2 = &(wqs)->wqs_setlinks; \
wait_queue_link_t wql2 = (wait_queue_link_t) queue_first(q2); \
while (!queue_end(q2, (queue_entry_t)wql2)) { \
WAIT_QUEUE_SET_LINK_CHECK((wqs), wql2); \
wql2 = (wait_queue_link_t) wql2->wql_setlinks.next; \
} \
MACRO_END
#else
#define WQASSERT(e, s, p0, p1) assert(e)
#define WAIT_QUEUE_CHECK(wq)
#define WAIT_QUEUE_SET_CHECK(wqs)
#endif
/*
 *	Return TRUE if wq is linked into wq_set.  The caller must hold
 *	the wait queue lock; interrupts are assumed disabled.
 */
__private_extern__ boolean_t
wait_queue_member_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wqe;
	queue_t q = &wq->wq_queue;

	assert(wait_queue_held(wq));
	assert(wait_queue_is_set(wq_set));

	/* scan the queue for a link element pointing at wq_set */
	for (wqe = (wait_queue_element_t) queue_first(q);
	     !queue_end(q, (queue_entry_t)wqe);
	     wqe = (wait_queue_element_t) queue_next((queue_t) wqe)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wqe);
		if (wqe->wqe_type == WAIT_QUEUE_LINK &&
		    ((wait_queue_link_t)wqe)->wql_setqueue == wq_set)
			return TRUE;
	}
	return FALSE;
}
/*
 *	Unlocked wrapper around wait_queue_member_locked(): takes the
 *	queue lock at splsched for the duration of the membership scan.
 *	Returns FALSE outright if wq_set is not a valid set.
 */
boolean_t
wait_queue_member(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	boolean_t member;
	spl_t s;

	if (!wait_queue_is_set(wq_set))
		return FALSE;

	s = splsched();
	wait_queue_lock(wq);
	member = wait_queue_member_locked(wq, wq_set);
	wait_queue_unlock(wq);
	splx(s);

	return member;
}
/*
 *	Link a wait queue into a wait queue set using a caller-supplied
 *	link structure (no allocation is done here).
 *
 *	Returns:
 *	  KERN_INVALID_ARGUMENT  either object fails its type check
 *	  KERN_ALREADY_IN_SET    a link to wq_set already on the queue
 *	  KERN_SUCCESS           link installed on both lists
 *
 *	Lock ordering: queue interlock first, then the set lock, both
 *	taken at splsched.
 */
kern_return_t
wait_queue_link_noalloc(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set))
		return KERN_INVALID_ARGUMENT;

	s = splsched();
	wait_queue_lock(wq);
	wqs_lock(wq_set);

	/* scan the queue for an existing link to the same set */
	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK &&
		    ((wait_queue_link_t)wq_element)->wql_setqueue == wq_set) {
			/* duplicate: drop locks and reject */
			wqs_unlock(wq_set);
			wait_queue_unlock(wq);
			splx(s);
			return KERN_ALREADY_IN_SET;
		}
		wq_element = (wait_queue_element_t)
			queue_next((queue_t) wq_element);
	}

	WAIT_QUEUE_SET_CHECK(wq_set);

	/* thread the link onto both the queue and the set's link list */
	wql->wql_queue = wq;
	queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = wq_set;
	queue_enter(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
	wql->wql_type = WAIT_QUEUE_LINK;

	wqs_unlock(wq_set);
	wait_queue_unlock(wq);
	splx(s);
	return KERN_SUCCESS;
}
/*
 *	Allocating variant of wait_queue_link_noalloc(): kallocs the
 *	link structure, and frees it again if the link cannot be made.
 */
kern_return_t
wait_queue_link(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_link_t new_link;
	kern_return_t kr;

	new_link = (wait_queue_link_t) kalloc(sizeof(struct wait_queue_link));
	if (new_link == WAIT_QUEUE_LINK_NULL)
		return KERN_RESOURCE_SHORTAGE;

	kr = wait_queue_link_noalloc(wq, wq_set, new_link);
	if (kr != KERN_SUCCESS)
		kfree((vm_offset_t)new_link, sizeof(struct wait_queue_link));

	return kr;
}
/*
 *	Detach a link from both the wait queue and the set it joins.
 *	Both the queue and the set's embedded queue must be locked by
 *	the caller.  The link memory is NOT freed here; it is marked
 *	WAIT_QUEUE_UNLINKED and its back pointers are cleared so stale
 *	uses can be detected.
 */
static void
wait_queue_unlink_locked(
	wait_queue_t wq,
	wait_queue_set_t wq_set,
	wait_queue_link_t wql)
{
	assert(wait_queue_held(wq));
	assert(wait_queue_held(&wq_set->wqs_wait_queue));

	/* clear each back pointer, then remove from the matching list */
	wql->wql_queue = WAIT_QUEUE_NULL;
	queue_remove(&wq->wq_queue, wql, wait_queue_link_t, wql_links);
	wql->wql_setqueue = WAIT_QUEUE_SET_NULL;
	queue_remove(&wq_set->wqs_setlinks, wql, wait_queue_link_t, wql_setlinks);
	wql->wql_type = WAIT_QUEUE_UNLINKED;

	WAIT_QUEUE_CHECK(wq);
	WAIT_QUEUE_SET_CHECK(wq_set);
}
/*
 *	Remove the link joining wq to wq_set, freeing the link memory.
 *
 *	Returns KERN_INVALID_ARGUMENT on bad arguments, KERN_NOT_IN_SET
 *	if no link to wq_set is found on the queue, else KERN_SUCCESS.
 *
 *	Note: the link is kfree'd only after all locks are dropped.
 */
kern_return_t
wait_queue_unlink(
	wait_queue_t wq,
	wait_queue_set_t wq_set)
{
	wait_queue_element_t wq_element;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq) || !wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}
	s = splsched();
	wait_queue_lock(wq);

	/* scan the queue for the link element naming wq_set */
	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;
			if (wql->wql_setqueue == wq_set) {
				/* found it: unlink under both locks, free after */
				wqs_lock(wq_set);
				wait_queue_unlink_locked(wq, wq_set, wql);
				wqs_unlock(wq_set);
				wait_queue_unlock(wq);
				splx(s);
				kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
				return KERN_SUCCESS;
			}
		}
		wq_element = (wait_queue_element_t)
			queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	return KERN_NOT_IN_SET;
}
/*
 *	Remove every set link from wq without freeing the link
 *	structures (the caller owns them, e.g. embedded in another
 *	object).  Threads waiting on the queue are left untouched.
 *
 *	Fix: the original declared a local "links" queue and initialized
 *	it but never enqueued anything onto it (nothing is freed in the
 *	nofree variant) -- the dead locals are removed here.
 *
 *	Returns KERN_INVALID_ARGUMENT for non-queues, else KERN_SUCCESS.
 */
kern_return_t
wait_queue_unlinkall_nofree(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		/* capture the successor before unlinking the current element */
		wq_next_element = (wait_queue_element_t)
			queue_next((queue_t) wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);
	return(KERN_SUCCESS);
}
/*
 *	Remove and free every set link on wq.  Threads waiting on the
 *	queue are left untouched.
 *
 *	Links are collected onto a local "links" list under the locks,
 *	then kfree'd only after all locks are dropped and spl restored.
 *
 *	Returns KERN_INVALID_ARGUMENT for non-queues, else KERN_SUCCESS.
 */
kern_return_t
wait_queue_unlink_all(
	wait_queue_t wq)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wq_next_element;
	wait_queue_set_t wq_set;
	wait_queue_link_t wql;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;	/* deferred-free list */
	queue_t q;
	spl_t s;

	if (!wait_queue_is_queue(wq)) {
		return KERN_INVALID_ARGUMENT;
	}
	queue_init(links);
	s = splsched();
	wait_queue_lock(wq);
	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		/* capture the successor before unlinking the current element */
		wq_next_element = (wait_queue_element_t)
			queue_next((queue_t) wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wql = (wait_queue_link_t)wq_element;
			wq_set = wql->wql_setqueue;
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			/* stash the link for freeing once locks are dropped */
			enqueue(links, &wql->wql_links);
		}
		wq_element = wq_next_element;
	}
	wait_queue_unlock(wq);
	splx(s);

	/* now safe to free the collected links */
	while(!queue_empty(links)) {
		wql = (wait_queue_link_t) dequeue(links);
		kfree((vm_offset_t) wql, sizeof(struct wait_queue_link));
	}
	return(KERN_SUCCESS);
}
/*
 *	Remove every queue link from wq_set without freeing the link
 *	structures.
 *
 *	Fix: removed the unused local "kret" declared by the original.
 *
 *	Lock order elsewhere is queue-then-set, so here (holding the
 *	set lock first) each member queue may only be try-locked; on
 *	contention all locks are dropped and the whole scan restarts.
 *
 *	Returns KERN_INVALID_ARGUMENT for non-sets, else KERN_SUCCESS.
 */
kern_return_t
wait_queue_set_unlink_all_nofree(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}
retry:
	s = splsched();
	wqs_lock(wq_set);
	q = &wq_set->wqs_setlinks;
	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			/* unlink modified the setlinks list: restart the scan */
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			/* couldn't honor lock order; back off and retry */
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			goto retry;
		}
	}
	wqs_unlock(wq_set);
	splx(s);
	return(KERN_SUCCESS);
}
/*
 *	Thin wrapper: identical in every way to
 *	wait_queue_set_unlink_all_nofree().
 */
kern_return_t
wait_subqueue_unlink_all(
	wait_queue_set_t wq_set)
{
	return wait_queue_set_unlink_all_nofree(wq_set);
}
/*
 *	Remove and free every queue link on wq_set.
 *
 *	Fix: removed the unused local "kret" declared by the original.
 *
 *	Each member queue may only be try-locked here (lock order is
 *	queue-then-set elsewhere); on contention everything is dropped
 *	and the scan restarts.  Unlinked links are collected on a local
 *	list and kfree'd only after all locks are released.
 *
 *	Returns KERN_INVALID_ARGUMENT for non-sets, else KERN_SUCCESS.
 */
kern_return_t
wait_queue_set_unlink_all(
	wait_queue_set_t wq_set)
{
	wait_queue_link_t wql;
	wait_queue_t wq;
	queue_t q;
	queue_head_t links_queue_head;
	queue_t links = &links_queue_head;	/* deferred-free list */
	spl_t s;

	if (!wait_queue_is_set(wq_set)) {
		return KERN_INVALID_ARGUMENT;
	}
	queue_init(links);
retry:
	s = splsched();
	wqs_lock(wq_set);
	q = &wq_set->wqs_setlinks;
	wql = (wait_queue_link_t)queue_first(q);
	while (!queue_end(q, (queue_entry_t)wql)) {
		WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
		wq = wql->wql_queue;
		if (wait_queue_lock_try(wq)) {
			wait_queue_unlink_locked(wq, wq_set, wql);
			wait_queue_unlock(wq);
			/* stash the link; unlink modified the list, so rescan */
			enqueue(links, &wql->wql_links);
			wql = (wait_queue_link_t)queue_first(q);
		} else {
			/* couldn't honor lock order; back off and retry */
			wqs_unlock(wq_set);
			splx(s);
			delay(1);
			goto retry;
		}
	}
	wqs_unlock(wq_set);
	splx(s);

	/* now safe to free the collected links */
	while (!queue_empty (links)) {
		wql = (wait_queue_link_t) dequeue(links);
		kfree((vm_offset_t)wql, sizeof(struct wait_queue_link));
	}
	return(KERN_SUCCESS);
}
/*
 *	Remove and free the first set link found on wq, reporting which
 *	set it joined through *wq_setp.  If the queue has no set links,
 *	*wq_setp is set to WAIT_QUEUE_SET_NULL.
 *
 *	Note: the link is kfree'd only after all locks are dropped.
 */
void
wait_queue_unlink_one(
	wait_queue_t wq,
	wait_queue_set_t *wq_setp)
{
	wait_queue_element_t wq_element;
	queue_t q;
	spl_t s;

	s = splsched();
	wait_queue_lock(wq);
	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_set_t wq_set = wql->wql_setqueue;

			/* unlink under both locks, free after dropping them */
			wqs_lock(wq_set);
			wait_queue_unlink_locked(wq, wq_set, wql);
			wqs_unlock(wq_set);
			wait_queue_unlock(wq);
			splx(s);
			kfree((vm_offset_t)wql,sizeof(struct wait_queue_link));
			*wq_setp = wq_set;
			return;
		}
		wq_element = (wait_queue_element_t)
			queue_next((queue_t) wq_element);
	}
	wait_queue_unlock(wq);
	splx(s);
	*wq_setp = WAIT_QUEUE_SET_NULL;
}
/*
 *	Mark the current thread waiting on (wq, event).  The caller
 *	holds the wait queue lock at splsched; if "unlock" is TRUE the
 *	lock is dropped before returning.
 *
 *	If wq is actually a prepost-enabled set that already has pending
 *	preposts (wqs_refcount > 0), no wait is set up and
 *	THREAD_AWAKENED is returned immediately.
 *
 *	Otherwise returns the result of thread_mark_wait_locked();
 *	on THREAD_WAITING the thread has been queued (at the head for
 *	vm_privilege threads, at the tail otherwise) with its
 *	wait_event / wait_queue fields set.
 */
__private_extern__ wait_result_t
wait_queue_assert_wait64_locked(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible,
	boolean_t unlock)
{
	thread_t thread;
	wait_result_t wait_result;

	/* prepost short-circuit: consume nothing, just report awakened */
	if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
		wait_queue_set_t wqs = (wait_queue_set_t)wq;
		if (wqs->wqs_isprepost && wqs->wqs_refcount > 0) {
			if (unlock)
				wait_queue_unlock(wq);
			return(THREAD_AWAKENED);
		}
	}

	/* thread lock is nested inside the wait queue lock */
	thread = current_thread();
	thread_lock(thread);
	wait_result = thread_mark_wait_locked(thread, interruptible);
	if (wait_result == THREAD_WAITING) {
		/* vm-privileged threads jump the FIFO */
		if (thread->vm_privilege)
			enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
		else
			enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);
		thread->wait_event = event;
		thread->wait_queue = wq;
	}
	thread_unlock(thread);
	if (unlock)
		wait_queue_unlock(wq);
	return(wait_result);
}
/*
 *	Assert a wait on (wq, event) with a 32-bit event, widening it
 *	to 64 bits for the locked primitive.  An invalid queue forces
 *	an immediate THREAD_RESTART.
 */
wait_result_t
wait_queue_assert_wait(
	wait_queue_t wq,
	event_t event,
	wait_interrupt_t interruptible)
{
	wait_result_t wres;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		thread_t self = current_thread();

		self->wait_result = THREAD_RESTART;
		return self->wait_result;
	}

	s = splsched();
	wait_queue_lock(wq);
	/* the locked call drops the queue lock for us (unlock == TRUE) */
	wres = wait_queue_assert_wait64_locked(
			wq, (event64_t)((uint32_t)event),
			interruptible, TRUE);
	splx(s);
	return(wres);
}
/*
 *	64-bit-event variant of wait_queue_assert_wait().  An invalid
 *	queue forces an immediate THREAD_RESTART.
 */
wait_result_t
wait_queue_assert_wait64(
	wait_queue_t wq,
	event64_t event,
	wait_interrupt_t interruptible)
{
	wait_result_t wres;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		thread_t self = current_thread();

		self->wait_result = THREAD_RESTART;
		return self->wait_result;
	}

	s = splsched();
	wait_queue_lock(wq);
	/* the locked call drops the queue lock for us (unlock == TRUE) */
	wres = wait_queue_assert_wait64_locked(wq, event, interruptible, TRUE);
	splx(s);
	return(wres);
}
/*
 *	Select every thread on wq (and, recursively, on any linked set
 *	queues) waiting for "event", moving each onto wake_queue.
 *
 *	Caller holds the wait queue lock.  Set queues are locked inside
 *	(nested under wq's lock) for the recursive descent.  For a
 *	prepost-enabled set, wqs_refcount is bumped to record the
 *	prepost.
 *
 *	Each selected thread is thread_lock'ed and LEFT LOCKED on
 *	wake_queue; the caller is responsible for unlocking them.
 */
static void
_wait_queue_select64_all(
	wait_queue_t wq,
	event64_t event,
	queue_t wake_queue)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	queue_t q;

	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		/* capture the successor: we may remove the current element */
		wqe_next = (wait_queue_element_t)
			queue_next((queue_t) wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/* descend into the linked set queue */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (set_queue->wq_isprepost) {
				wait_queue_set_t wqs = (wait_queue_set_t)set_queue;

				/* record the prepost for future waiters */
				wqs->wqs_refcount++;
			}
			if (! wait_queue_empty(set_queue))
				_wait_queue_select64_all(set_queue, event, wake_queue);
			wait_queue_unlock(set_queue);
		} else {
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				/* move the thread to wake_queue, still locked */
				thread_lock(t);
				remqueue(q, (queue_entry_t) t);
				enqueue (wake_queue, (queue_entry_t) t);
				t->wait_queue = WAIT_QUEUE_NULL;
				t->wait_event = NO_EVENT64;
				t->at_safe_point = FALSE;
			}
		}
		wq_element = wqe_next;
	}
}
/*
 *	Wake every thread waiting on (wq, event).  The caller holds the
 *	wait queue lock; if "unlock" is TRUE it is dropped once the
 *	selection is complete, before any thread is started.
 *
 *	Returns KERN_NOT_WAITING if no thread matched, otherwise the
 *	result of the last thread_go_locked().
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_all_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	queue_head_t wake_queue_head;
	queue_t q = &wake_queue_head;
	kern_return_t res;

	assert(wait_queue_held(wq));
	queue_init(q);

	/* pull matching threads (left locked) onto the local queue */
	_wait_queue_select64_all(wq, event, q);
	if (unlock)
		wait_queue_unlock(wq);

	/* start each selected thread, then drop its lock */
	res = KERN_NOT_WAITING;
	while (!queue_empty (q)) {
		thread_t thread = (thread_t) dequeue(q);
		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
		thread_unlock(thread);
	}
	return res;
}
/*
 *	Wake every thread waiting on (wq, event) — 32-bit event variant.
 *	Returns KERN_INVALID_ARGUMENT for an invalid queue, otherwise
 *	the result of the locked wakeup.
 */
kern_return_t
wait_queue_wakeup_all(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	kern_return_t kr;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	/* the locked call drops the queue lock for us (unlock == TRUE) */
	kr = wait_queue_wakeup64_all_locked(
			wq, (event64_t)((uint32_t)event),
			result, TRUE);
	splx(s);
	return kr;
}
/*
 *	Wake every thread waiting on (wq, event) — 64-bit event variant.
 *	Returns KERN_INVALID_ARGUMENT for an invalid queue, otherwise
 *	the result of the locked wakeup.
 */
kern_return_t
wait_queue_wakeup64_all(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	kern_return_t kr;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	/* the locked call drops the queue lock for us (unlock == TRUE) */
	kr = wait_queue_wakeup64_all_locked(wq, event, result, TRUE);
	splx(s);
	return kr;
}
/*
 *	Select one thread waiting on (wq, event), recursing into linked
 *	set queues.  Caller holds the wait queue lock.
 *
 *	Fix: the inner "thread_t t" shadowed the outer "t" used for the
 *	recursive result (-Wshadow hazard); the inner variable is
 *	renamed to "waiter".  Behavior is unchanged.
 *
 *	The selected thread is dequeued, its wait fields cleared, and it
 *	is returned STILL thread_lock'ed; the caller must unlock it.
 *	Returns THREAD_NULL if no thread matched.
 */
static thread_t
_wait_queue_select64_one(
	wait_queue_t wq,
	event64_t event)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	thread_t t = THREAD_NULL;
	queue_t q;

	assert(wq->wq_fifo);

	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		/* capture the successor: we may remove the current element */
		wqe_next = (wait_queue_element_t)
			queue_next((queue_t) wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/* descend into the linked set queue */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				t = _wait_queue_select64_one(set_queue, event);
			}
			wait_queue_unlock(set_queue);
			if (t != THREAD_NULL)
				return t;
		} else {
			thread_t waiter = (thread_t)wq_element;

			if (waiter->wait_event == event) {
				thread_lock(waiter);
				remqueue(q, (queue_entry_t) waiter);
				waiter->wait_queue = WAIT_QUEUE_NULL;
				waiter->wait_event = NO_EVENT64;
				waiter->at_safe_point = FALSE;
				/* returned thread is left locked for the caller */
				return waiter;
			}
		}
		wq_element = wqe_next;
	}
	return THREAD_NULL;
}
/*
 *	Peek at a thread waiting on (wq, event) without dequeuing it,
 *	recursing into linked set queues.  Caller holds wq's lock.
 *
 *	Fix: removed the outer "thread_t t;" local, which was declared
 *	but never used (it was shadowed by the inner declaration).
 *	Behavior is unchanged.
 *
 *	On success *tp is the found thread, returned thread_lock'ed,
 *	and *wqp is the queue it actually sits on, which remains locked
 *	(it may be a nested set queue rather than wq itself).  On
 *	failure *tp is THREAD_NULL.
 */
__private_extern__ void
wait_queue_peek64_locked(
	wait_queue_t wq,
	event64_t event,
	thread_t *tp,
	wait_queue_t *wqp)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	queue_t q;

	assert(wq->wq_fifo);

	*tp = THREAD_NULL;
	q = &wq->wq_queue;
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			queue_next((queue_t) wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			/* descend into the linked set queue */
			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				wait_queue_peek64_locked(set_queue, event, tp, wqp);
			}
			if (*tp != THREAD_NULL) {
				/* keep *wqp locked for the caller; drop any other */
				if (*wqp != set_queue)
					wait_queue_unlock(set_queue);
				return;
			}
			wait_queue_unlock(set_queue);
		} else {
			thread_t t = (thread_t)wq_element;

			if (t->wait_event == event) {
				/* found: report it locked, leaving it queued */
				thread_lock(t);
				*tp = t;
				*wqp = wq;
				return;
			}
		}
		wq_element = wqe_next;
	}
}
/*
 *	Pull a specific thread off the wait queue it is committed to
 *	and clear its wait state.  The caller holds the wait queue lock
 *	(and, per the remqueue/field updates, must also hold the thread
 *	locked -- presumably; confirm against callers).  If "unlock" is
 *	TRUE the wait queue lock is dropped before returning.
 */
void
wait_queue_pull_thread_locked(
	wait_queue_t waitq,
	thread_t thread,
	boolean_t unlock)
{
	assert(thread->wait_queue == waitq);

	remqueue(&waitq->wq_queue, (queue_entry_t)thread );
	thread->wait_queue = WAIT_QUEUE_NULL;
	thread->wait_event = NO_EVENT64;
	thread->at_safe_point = FALSE;
	if (unlock)
		wait_queue_unlock(waitq);
}
/*
 *	Select a SPECIFIC thread if it is waiting on (wq, event),
 *	recursing into linked set queues.  Caller holds wq's lock.
 *
 *	Returns KERN_SUCCESS with the thread dequeued, its wait fields
 *	cleared, and the thread LEFT thread_lock'ed for the caller;
 *	KERN_NOT_WAITING otherwise (thread unlocked).
 */
static kern_return_t
_wait_queue_select64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread)
{
	wait_queue_element_t wq_element;
	wait_queue_element_t wqe_next;
	kern_return_t res = KERN_NOT_WAITING;
	queue_t q = &wq->wq_queue;

	/* fast path: the thread is committed directly to this queue */
	thread_lock(thread);
	if ((thread->wait_queue == wq) && (thread->wait_event == event)) {
		remqueue(q, (queue_entry_t) thread);
		thread->at_safe_point = FALSE;
		thread->wait_event = NO_EVENT64;
		thread->wait_queue = WAIT_QUEUE_NULL;
		/* deliberately NOT unlocked: caller expects the thread locked */
		return KERN_SUCCESS;
	}
	thread_unlock(thread);

	/* slow path: it may be waiting via one of the linked sets */
	wq_element = (wait_queue_element_t) queue_first(q);
	while (!queue_end(q, (queue_entry_t)wq_element)) {
		WAIT_QUEUE_ELEMENT_CHECK(wq, wq_element);
		wqe_next = (wait_queue_element_t)
			queue_next((queue_t) wq_element);
		if (wq_element->wqe_type == WAIT_QUEUE_LINK) {
			wait_queue_link_t wql = (wait_queue_link_t)wq_element;
			wait_queue_t set_queue;

			set_queue = (wait_queue_t)wql->wql_setqueue;
			wait_queue_lock(set_queue);
			if (! wait_queue_empty(set_queue)) {
				res = _wait_queue_select64_thread(set_queue,
								event,
								thread);
			}
			wait_queue_unlock(set_queue);
			if (res == KERN_SUCCESS)
				return KERN_SUCCESS;
		}
		wq_element = wqe_next;
	}
	return res;
}
/*
 *	Wake one thread waiting on (wq, event) and return its identity.
 *	The caller holds the wait queue lock; if "unlock" is TRUE it is
 *	dropped after selection.
 *
 *	Returns the woken thread STILL thread_lock'ed (the caller must
 *	unlock it), or THREAD_NULL if nobody was waiting.
 */
__private_extern__ thread_t
wait_queue_wakeup64_identity_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;
	thread_t thread;

	assert(wait_queue_held(wq));

	/* selected thread comes back locked */
	thread = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (thread) {
		res = thread_go_locked(thread, result);
		assert(res == KERN_SUCCESS);
	}
	return thread;  /* still locked if not null */
}
/*
 *	Wake exactly one thread waiting on (wq, event).  The caller
 *	holds the wait queue lock; if "unlock" is TRUE it is dropped
 *	after selection.  Returns KERN_NOT_WAITING if nobody matched.
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_one_locked(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t res;
	thread_t found;

	assert(wait_queue_held(wq));

	found = _wait_queue_select64_one(wq, event);
	if (unlock)
		wait_queue_unlock(wq);

	if (found == THREAD_NULL)
		return KERN_NOT_WAITING;

	/* the selected thread comes back locked: start it, then unlock */
	res = thread_go_locked(found, result);
	assert(res == KERN_SUCCESS);
	thread_unlock(found);
	return res;
}
/*
 *	Wake exactly one thread waiting on (wq, event) — 32-bit event
 *	variant.  Returns KERN_INVALID_ARGUMENT for an invalid queue,
 *	KERN_NOT_WAITING if nobody matched.
 */
kern_return_t
wait_queue_wakeup_one(
	wait_queue_t wq,
	event_t event,
	wait_result_t result)
{
	kern_return_t res;
	thread_t found;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	found = _wait_queue_select64_one(wq, (event64_t)((uint32_t)event));
	wait_queue_unlock(wq);

	if (found == THREAD_NULL) {
		splx(s);
		return KERN_NOT_WAITING;
	}

	/* the selected thread comes back locked: start it, then unlock */
	res = thread_go_locked(found, result);
	assert(res == KERN_SUCCESS);
	thread_unlock(found);
	splx(s);
	return res;
}
/*
 *	Wake exactly one thread waiting on (wq, event) — 64-bit event
 *	variant.  Returns KERN_INVALID_ARGUMENT for an invalid queue,
 *	KERN_NOT_WAITING if nobody matched.
 */
kern_return_t
wait_queue_wakeup64_one(
	wait_queue_t wq,
	event64_t event,
	wait_result_t result)
{
	kern_return_t res;
	thread_t found;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	found = _wait_queue_select64_one(wq, event);
	wait_queue_unlock(wq);

	if (found == THREAD_NULL) {
		splx(s);
		return KERN_NOT_WAITING;
	}

	/* the selected thread comes back locked: start it, then unlock */
	res = thread_go_locked(found, result);
	assert(res == KERN_SUCCESS);
	thread_unlock(found);
	splx(s);
	return res;
}
/*
 *	Wake a SPECIFIC thread if it is waiting on (wq, event).  The
 *	caller holds the wait queue lock; if "unlock" is TRUE it is
 *	dropped after the selection.  Returns KERN_NOT_WAITING if the
 *	thread was not waiting there.
 */
__private_extern__ kern_return_t
wait_queue_wakeup64_thread_locked(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result,
	boolean_t unlock)
{
	kern_return_t kr;

	assert(wait_queue_held(wq));

	/* pull the specific thread off this queue (or a linked set) */
	kr = _wait_queue_select64_thread(wq, event, thread);
	if (unlock)
		wait_queue_unlock(wq);

	if (kr != KERN_SUCCESS)
		return KERN_NOT_WAITING;

	/* the select left the thread locked: start it, then unlock */
	kr = thread_go_locked(thread, result);
	assert(kr == KERN_SUCCESS);
	thread_unlock(thread);
	return kr;
}
/*
 *	Wake a SPECIFIC thread waiting on (wq, event) — 32-bit event
 *	variant.  Returns KERN_INVALID_ARGUMENT for an invalid queue,
 *	KERN_NOT_WAITING if the thread was not waiting there.
 */
kern_return_t
wait_queue_wakeup_thread(
	wait_queue_t wq,
	event_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t kr;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	kr = _wait_queue_select64_thread(wq, (event64_t)((uint32_t)event), thread);
	wait_queue_unlock(wq);

	if (kr != KERN_SUCCESS) {
		splx(s);
		return KERN_NOT_WAITING;
	}

	/* the select left the thread locked: start it, then unlock */
	kr = thread_go_locked(thread, result);
	assert(kr == KERN_SUCCESS);
	thread_unlock(thread);
	splx(s);
	return kr;
}
/*
 *	Wake a SPECIFIC thread waiting on (wq, event) — 64-bit event
 *	variant.  Returns KERN_INVALID_ARGUMENT for an invalid queue,
 *	KERN_NOT_WAITING if the thread was not waiting there.
 */
kern_return_t
wait_queue_wakeup64_thread(
	wait_queue_t wq,
	event64_t event,
	thread_t thread,
	wait_result_t result)
{
	kern_return_t kr;
	spl_t s;

	if (!wait_queue_is_valid(wq)) {
		return KERN_INVALID_ARGUMENT;
	}

	s = splsched();
	wait_queue_lock(wq);
	kr = _wait_queue_select64_thread(wq, event, thread);
	wait_queue_unlock(wq);

	if (kr != KERN_SUCCESS) {
		splx(s);
		return KERN_NOT_WAITING;
	}

	/* the select left the thread locked: start it, then unlock */
	kr = thread_go_locked(thread, result);
	assert(kr == KERN_SUCCESS);
	thread_unlock(thread);
	splx(s);
	return kr;
}