/* pthread_cancelable.c */
#include "resolver.h"
#include "internal.h"
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <signal.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/ulock.h>
#include <machine/vmparam.h>
#include <mach/vm_statistics.h>
#ifndef BUILDING_VARIANT
// Atomically replace the bits selected by `mask` in thread->cancel_state
// with `state`, retrying the compare-and-swap (relaxed ordering) until it
// succeeds.  Returns the cancel_state value observed immediately before
// the successful update.
OS_ALWAYS_INLINE
static inline int
_pthread_update_cancel_state(pthread_t thread, int mask, int state)
{
uint16_t oldstate, newstate;
os_atomic_rmw_loop(&thread->cancel_state, oldstate, newstate, relaxed, {
newstate = oldstate;
newstate &= ~mask; // clear the field being updated
newstate |= state; // install the caller's replacement bits
});
return oldstate;
}
// Exit-path helper: in one atomic update, force the cancel state to
// "disabled, deferred" and set _PTHREAD_CANCEL_EXITING, so that a racing
// pthread_cancel() can only set the pending bit and no longer act on this
// thread.  `value_ptr` is accepted but not used by this function.
void
_pthread_setcancelstate_exit(pthread_t thread, void *value_ptr)
{
_pthread_update_cancel_state(thread,
_PTHREAD_CANCEL_STATE_MASK | _PTHREAD_CANCEL_TYPE_MASK,
PTHREAD_CANCEL_DISABLE | PTHREAD_CANCEL_DEFERRED |
_PTHREAD_CANCEL_EXITING);
}
// pthread_cancel(3): request cancelation of `thread`.
//
// Returns ESRCH if `thread` is not a valid registered pthread and ENOTSUP
// for workqueue threads, which are not cancelable.  Otherwise the pending
// bit is set atomically; if the target currently has cancelation enabled,
// its kernel thread port is also marked canceled via __pthread_markcancel
// (presumably to interrupt cancelable waits — defined elsewhere).
PTHREAD_NOEXPORT_VARIANT
int
pthread_cancel(pthread_t thread)
{
if (!_pthread_is_valid(thread, NULL)) {
return(ESRCH);
}
// Workqueue threads do not support cancelation.
if (thread->wqthread != 0) {
return(ENOTSUP);
}
int state = os_atomic_or(&thread->cancel_state, _PTHREAD_CANCEL_PENDING, relaxed);
if (state & PTHREAD_CANCEL_ENABLE) {
// kport may be MACH_PORT_NULL if the thread has no kernel port yet.
mach_port_t kport = _pthread_tsd_slot(thread, MACH_THREAD_SELF);
if (kport) __pthread_markcancel(kport);
}
return (0);
}
// pthread_setcancelstate(3): set the calling thread's cancelability state
// to PTHREAD_CANCEL_ENABLE or PTHREAD_CANCEL_DISABLE.
//
// The kernel side is notified first via __pthread_canceled() (argument 1
// for enable, 2 for disable — presumably a kernel-side state switch; see
// the syscall definition), then the userspace state bits are updated.
// The previous state bits are stored through `oldstateptr` when non-NULL.
// Returns EINVAL for an unrecognized `state`.
PTHREAD_NOEXPORT_VARIANT
int
pthread_setcancelstate(int state, int *oldstateptr)
{
pthread_t self = pthread_self();
_pthread_validate_signature(self);
switch (state) {
case PTHREAD_CANCEL_ENABLE:
__pthread_canceled(1);
break;
case PTHREAD_CANCEL_DISABLE:
__pthread_canceled(2);
break;
default:
return EINVAL;
}
int oldstate = _pthread_update_cancel_state(self, _PTHREAD_CANCEL_STATE_MASK, state);
if (oldstateptr) {
*oldstateptr = oldstate & _PTHREAD_CANCEL_STATE_MASK;
}
return 0;
}
// pthread_setcanceltype(3): set the calling thread's cancelation type to
// PTHREAD_CANCEL_DEFERRED or PTHREAD_CANCEL_ASYNCHRONOUS.  Unlike
// pthread_setcancelstate() above, no kernel call is made here; only the
// userspace type bits change.  The previous type bits are stored through
// `oldtype` when non-NULL.  Returns EINVAL for an invalid `type`.
PTHREAD_NOEXPORT_VARIANT
int
pthread_setcanceltype(int type, int *oldtype)
{
pthread_t self = pthread_self();
_pthread_validate_signature(self);
if ((type != PTHREAD_CANCEL_DEFERRED) &&
(type != PTHREAD_CANCEL_ASYNCHRONOUS))
return EINVAL;
int oldstate = _pthread_update_cancel_state(self, _PTHREAD_CANCEL_TYPE_MASK, type);
if (oldtype) {
*oldtype = oldstate & _PTHREAD_CANCEL_TYPE_MASK;
}
return (0);
}
// True when `thread` has an actionable cancel: both PTHREAD_CANCEL_ENABLE
// and _PTHREAD_CANCEL_PENDING are set in cancel_state.  The load is
// seq_cst so the check orders against concurrent cancel-state updates.
OS_ALWAYS_INLINE
static inline bool
_pthread_is_canceled(pthread_t thread)
{
const int flags = (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING);
int state = os_atomic_load(&thread->cancel_state, seq_cst);
return (state & flags) == flags;
}
// Exit value a joiner should observe for `thread`: PTHREAD_CANCELED when
// the thread has an actionable cancel pending, otherwise the value the
// thread passed to pthread_exit() (tl_exit_value).
OS_ALWAYS_INLINE
static inline void *
_pthread_get_exit_value(pthread_t thread)
{
if (os_unlikely(_pthread_is_canceled(thread))) {
return PTHREAD_CANCELED;
}
return thread->tl_exit_value;
}
// pthread_testcancel(3): explicit cancelation point.  If the calling
// thread has an actionable cancel pending, record that it was canceled
// and terminate it with the status PTHREAD_CANCELED.
void
pthread_testcancel(void)
{
pthread_t self = pthread_self();
if (os_unlikely(_pthread_is_canceled(self))) {
_pthread_validate_signature(self);
self->canceled = true;
pthread_exit(PTHREAD_CANCELED); // does not return
}
}
// If `thread` has an actionable cancel pending, propagate it to the
// kernel by marking its mach thread port `kport` canceled.
void
_pthread_markcancel_if_canceled(pthread_t thread, mach_port_t kport)
{
if (os_unlikely(_pthread_is_canceled(thread))) {
__pthread_markcancel(kport);
}
}
// Turn an interrupted cancelable syscall into thread termination when a
// cancel is actually pending.  Only acts when the low byte of `error` is
// EINTR and __pthread_canceled(0) returns 0 — argument 0 appears to be a
// "query pending cancel" mode; confirm against the syscall definition.
// The original error is stashed in cancel_error before exiting with
// PTHREAD_CANCELED.
void
_pthread_exit_if_canceled(int error)
{
if ((error & 0xff) == EINTR && __pthread_canceled(0) == 0) {
pthread_t self = pthread_self();
_pthread_validate_signature(self);
self->cancel_error = error;
self->canceled = true;
pthread_exit(PTHREAD_CANCELED); // does not return
}
}
/*
 * pthread_sigmask(3): adjust or query the calling thread's signal mask.
 * Thin wrapper over __pthread_sigmask(); per POSIX, returns 0 on success
 * or the errno value on failure (never -1, and errno is not relied upon
 * by callers).
 */
int
pthread_sigmask(int how, const sigset_t * set, sigset_t * oset)
{
	if (__pthread_sigmask(how, set, oset) == -1) {
		return errno;
	}
	return 0;
}
// Pre-post the wakeup for a joiner of `thread` on the thread's exit path.
//
// If the thread is still joinable, hand back the custom-stack semaphore
// stored in the join context and clear the joinable flag; otherwise the
// join was abandoned, so mark the context detached and unlink it from the
// thread.  In either case the exit value is published through the
// joiner's value_ptr.  Returns the semaphore to signal, or MACH_PORT_NULL.
// NOTE(review): callers presumably hold the lock protecting tl_join_ctx /
// tl_joinable — confirm at the call sites (not visible in this file).
semaphore_t
_pthread_joiner_prepost_wake(pthread_t thread)
{
pthread_join_context_t ctx = thread->tl_join_ctx;
semaphore_t sema = MACH_PORT_NULL;
if (thread->tl_joinable) {
sema = ctx->custom_stack_sema;
thread->tl_joinable = false;
} else {
ctx->detached = true;
thread->tl_join_ctx = NULL;
}
if (ctx->value_ptr) *ctx->value_ptr = _pthread_get_exit_value(thread);
return sema;
}
// Try to back out of an in-progress join (used when the joiner itself is
// canceled while waiting).  Under _pthread_list_lock: if the target has
// not detached the context and has not yet exited (its exit gate is not
// MACH_PORT_DEAD), unlink the context and reset the gate.  Returns true
// when the wait was aborted, false if the target's exit raced ahead.
static inline bool
_pthread_joiner_abort_wait(pthread_t thread, pthread_join_context_t ctx)
{
bool aborted = false;
_pthread_lock_lock(&_pthread_list_lock);
if (!ctx->detached && thread->tl_exit_gate != MACH_PORT_DEAD) {
PTHREAD_DEBUG_ASSERT(thread->tl_join_ctx == ctx);
thread->tl_join_ctx = NULL;
thread->tl_exit_gate = MACH_PORT_NULL;
aborted = true;
}
_pthread_lock_unlock(&_pthread_list_lock);
return aborted;
}
// Block the joiner until `thread` exits.
//
// The joiner parks on thread->tl_exit_gate with __ulock_wait(); the gate
// holds the target's kernel port while it runs and MACH_PORT_DEAD once it
// has exited.  When `conforming` is PTHREAD_CONFORM_UNIX03_CANCELABLE the
// wait is a cancelation point (ULF_WAIT_CANCEL_POINT): on EINTR with a
// pending cancel, the joiner aborts the wait and exits PTHREAD_CANCELED.
// After the gate opens, the joiner detaches the context under the list
// lock and deallocates the thread if it is responsible for cleanup.
// Always returns 0.
static int
_pthread_joiner_wait(pthread_t thread, pthread_join_context_t ctx,
pthread_conformance_t conforming)
{
uint32_t *exit_gate = &thread->tl_exit_gate;
int ulock_op = UL_UNFAIR_LOCK | ULF_NO_ERRNO;
if (conforming == PTHREAD_CONFORM_UNIX03_CANCELABLE) {
ulock_op |= ULF_WAIT_CANCEL_POINT;
}
for (;;) {
uint32_t cur = os_atomic_load(exit_gate, acquire);
if (cur == MACH_PORT_DEAD) {
// Target has exited and published the sentinel.
break;
}
if (os_unlikely(cur != ctx->kport)) {
// The gate must hold either the kport or MACH_PORT_DEAD.
PTHREAD_CLIENT_CRASH(cur, "pthread_join() state corruption");
}
int ret = __ulock_wait(ulock_op, exit_gate, ctx->kport, 0);
switch (-ret) {
case 0:
case EFAULT: // spurious wake / gate memory gone: re-check the value
break;
case EINTR:
// Cancelable variant: if the waiter was canceled, try to back
// out of the join; only exit if the abort won the race.
if (os_unlikely(conforming == PTHREAD_CONFORM_UNIX03_CANCELABLE &&
_pthread_is_canceled(ctx->waiter))) {
if (_pthread_joiner_abort_wait(thread, ctx)) {
ctx->waiter->canceled = true;
pthread_exit(PTHREAD_CANCELED);
}
}
break;
}
}
bool cleanup = false;
_pthread_lock_lock(&_pthread_list_lock);
if (!ctx->detached) {
PTHREAD_DEBUG_ASSERT(thread->tl_join_ctx == ctx);
thread->tl_join_ctx = NULL;
// The exiting thread tells us whether the joiner frees the struct.
cleanup = thread->tl_joiner_cleans_up;
}
_pthread_lock_unlock(&_pthread_list_lock);
if (cleanup) {
_pthread_deallocate(thread, false);
}
return 0;
}
// Core of pthread_join(): wait for `thread` to terminate and optionally
// collect its exit value into *value_ptr.
//
// Error cases (decided under _pthread_list_lock):
//   ESRCH   - thread is not a valid registered pthread
//   EINVAL  - thread is not joinable, or already has a join context
//   EDEADLK - joining self, or a two-thread join cycle
// If the target already exited (tl_exit_gate == MACH_PORT_DEAD) the value
// is collected immediately and the thread struct is freed here; otherwise
// a stack-allocated join context is installed and the joiner blocks in
// _pthread_joiner_wait().  Custom-stack threads additionally hand the
// joiner a semaphore so the exiting thread can finish with its stack
// before the joiner proceeds.
OS_NOINLINE
int
_pthread_join(pthread_t thread, void **value_ptr, pthread_conformance_t conforming)
{
pthread_t self = pthread_self();
pthread_join_context_s ctx = {
.waiter = self,
.value_ptr = value_ptr,
.kport = MACH_PORT_NULL,
.custom_stack_sema = MACH_PORT_NULL,
};
int res = 0;
kern_return_t kr;
// Validates `thread` and returns with the list lock held on success.
if (!_pthread_validate_thread_and_list_lock(thread)) {
return ESRCH;
}
_pthread_validate_signature(self);
if (!thread->tl_joinable || (thread->tl_join_ctx != NULL)) {
res = EINVAL;
} else if (thread == self ||
(self->tl_join_ctx && self->tl_join_ctx->waiter == thread)) {
// Self-join, or the target is already joining us.
res = EDEADLK;
} else if (thread->tl_exit_gate == MACH_PORT_DEAD) {
// Target already exited: reap it without blocking.
TAILQ_REMOVE(&__pthread_head, thread, tl_plist);
PTHREAD_DEBUG_ASSERT(thread->tl_joiner_cleans_up);
thread->tl_joinable = false;
if (value_ptr) *value_ptr = _pthread_get_exit_value(thread);
} else {
// Install our join context; the gate holds the target's kport
// until it exits (then it becomes MACH_PORT_DEAD).
ctx.kport = _pthread_tsd_slot(thread, MACH_THREAD_SELF);
thread->tl_exit_gate = ctx.kport;
thread->tl_join_ctx = &ctx;
if (thread->tl_has_custom_stack) {
ctx.custom_stack_sema = (semaphore_t)os_get_cached_semaphore();
}
}
_pthread_lock_unlock(&_pthread_list_lock);
if (res == 0) {
if (ctx.kport == MACH_PORT_NULL) {
// Already-exited path: no wait needed, just free the struct.
_pthread_deallocate(thread, false);
} else {
res = _pthread_joiner_wait(thread, &ctx, conforming);
}
}
if (res == 0 && ctx.custom_stack_sema && !ctx.detached) {
// Wait (non-cancelable) for the exiting thread to vacate its
// custom stack before returning to the caller.
do {
kr = __semwait_signal_nocancel(ctx.custom_stack_sema, 0, 0, 0, 0, 0);
} while (kr != KERN_SUCCESS);
}
if (ctx.custom_stack_sema) {
os_put_cached_semaphore(ctx.custom_stack_sema);
}
return res;
}
#endif
/*
 * Conformance level for this translation unit.  The cancelable variant of
 * this file is compiled with VARIANT_CANCELABLE defined, which makes the
 * wrappers below behave as cancelation points.
 */
static inline pthread_conformance_t
_pthread_conformance(void)
{
#ifdef VARIANT_CANCELABLE
	const pthread_conformance_t conformance = PTHREAD_CONFORM_UNIX03_CANCELABLE;
#else
	const pthread_conformance_t conformance = PTHREAD_CONFORM_UNIX03_NOCANCEL;
#endif
	return conformance;
}
// In the cancelable build variant this is a cancelation point; in the
// non-cancelable variant it compiles to nothing.
static inline void
_pthread_testcancel_if_cancelable_variant(void)
{
#ifdef VARIANT_CANCELABLE
pthread_testcancel();
#endif
}
int
pthread_join(pthread_t thread, void **value_ptr)
{
_pthread_testcancel_if_cancelable_variant();
return _pthread_join(thread, value_ptr, _pthread_conformance());
}
int
pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
return _pthread_cond_wait(cond, mutex, NULL, 0, _pthread_conformance());
}
int
pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
const struct timespec *abstime)
{
return _pthread_cond_wait(cond, mutex, abstime, 0, _pthread_conformance());
}
/*
 * sigwait(3): wait for one of the signals in `set` and store its number
 * in *sig.  Returns 0 on success or an errno value (never -1).  In the
 * cancelable variant, cancelation is checked before the wait and again
 * after a failed wait; an EINTR failure is reported as success, matching
 * the historical behavior of this wrapper.
 */
int
sigwait(const sigset_t * set, int * sig)
{
	_pthread_testcancel_if_cancelable_variant();
	if (__sigwait(set, sig) != -1) {
		return 0;
	}
	int err = errno;	/* capture before anything can clobber errno */
	_pthread_testcancel_if_cancelable_variant();
	if (err == EINTR) {
		err = 0;
	}
	return err;
}