#define __POSIX_LIB__
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <machine/vmparam.h>
#include <mach/vm_statistics.h>
#include "pthread_internals.h"
extern void _pthread_set_self(pthread_t);
extern void mig_init(int);
extern void set_malloc_singlethreaded(int);
extern pthread_lock_t reply_port_lock;
size_t _pthread_stack_size = 0;
int _spin_tries = 0;
#if !defined(__ppc__)
int _cpu_has_altivec = 0;
#endif
int __is_threaded = 0;
static semaphore_t *sem_pool = NULL;
static int sem_pool_count = 0;
static int sem_pool_current = 0;
static pthread_lock_t sem_pool_lock = LOCK_INITIALIZER;
static int default_priority;
static int max_priority;
static int min_priority;
extern mach_port_t thread_recycle_port;
#define STACK_LOWEST(sp) ((sp) & ~__pthread_stack_mask)
#define STACK_RESERVED (sizeof (struct _pthread))
#ifdef STACK_GROWS_UP
#define STACK_BASE(sp) STACK_LOWEST(sp)
#define STACK_START(stack_low) (STACK_BASE(stack_low) + STACK_RESERVED)
#define STACK_SELF(sp) STACK_BASE(sp)
#else
#define STACK_BASE(sp) (((sp) | __pthread_stack_mask) + 1)
#define STACK_START(stack_low) (STACK_BASE(stack_low) - STACK_RESERVED)
#define STACK_SELF(sp) STACK_START(sp)
#endif
/*
 * Allocate a stack for a new thread according to `attrs`.
 *
 * If the caller supplied its own stack via pthread_attr_setstackaddr(),
 * that address is returned unchanged (it must be page aligned).
 * Otherwise `attrs->stacksize` plus one extra page is vm_allocate()d and
 * the extra page is made inaccessible (VM_PROT_NONE) so a stack overflow
 * faults instead of silently corrupting adjacent memory.
 *
 * On return *stack is the value handed to _pthread_setup(): the low end
 * of the region when the stack grows up, the high end otherwise.
 *
 * Returns 0 on success, EAGAIN if the VM allocation fails.
 *
 * (A large, never-compiled `#if 1 ... #else` alternative that referenced
 * undefined variables was removed from this function.)
 */
static int
_pthread_allocate_stack(pthread_attr_t *attrs, vm_address_t *stack)
{
    kern_return_t kr;

    assert(attrs->stacksize >= PTHREAD_STACK_MIN);
    if (attrs->stackaddr != NULL) {
        /* Caller-owned stack: no guard page is added here. */
        assert(((vm_offset_t)(attrs->stackaddr) & (vm_page_size - 1)) == 0);
        *stack = (vm_address_t)attrs->stackaddr;
        return 0;
    }
    /* The tag marks the region as a thread stack for VM tools; the low
       bit (TRUE) is the "allocate anywhere" flag. */
    kr = vm_allocate(mach_task_self(), stack,
                     attrs->stacksize + vm_page_size,
                     VM_MAKE_TAG(VM_MEMORY_STACK) | TRUE);
    if (kr != KERN_SUCCESS) {
        return EAGAIN;
    }
#ifdef STACK_GROWS_UP
    /* Guard page sits above the usable region. */
    kr = vm_protect(mach_task_self(), *stack + attrs->stacksize,
                    vm_page_size, FALSE, VM_PROT_NONE);
#else
    /* Guard page sits at the low end; hand back the high end. */
    kr = vm_protect(mach_task_self(), *stack, vm_page_size,
                    FALSE, VM_PROT_NONE);
    *stack += attrs->stacksize + vm_page_size;
#endif
    /* NOTE(review): the vm_protect() result is not checked (historical
       behavior) — on failure the thread simply runs without a guard page. */
    return 0;
}
/*
 * Destroy a thread attribute object.  Nothing in the object is
 * dynamically allocated, so this only validates the signature; the
 * signature itself is left intact.
 */
int
pthread_attr_destroy(pthread_attr_t *attr)
{
    if (attr->sig != _PTHREAD_ATTR_SIG)
        return (EINVAL);
    return (ESUCCESS);
}
/*
 * Report whether threads created with these attributes start joinable
 * or detached.  Returns EINVAL for an uninitialized attribute object.
 */
int
pthread_attr_getdetachstate(const pthread_attr_t *attr,
                            int *detachstate)
{
    if (attr->sig != _PTHREAD_ATTR_SIG)
        return (EINVAL);
    *detachstate = attr->detached;
    return (ESUCCESS);
}
/*
 * Report whether new threads inherit scheduling from the creator or use
 * the explicit attribute values.  EINVAL for a bad attribute object.
 */
int
pthread_attr_getinheritsched(const pthread_attr_t *attr,
                             int *inheritsched)
{
    if (attr->sig != _PTHREAD_ATTR_SIG)
        return (EINVAL);
    *inheritsched = attr->inherit;
    return (ESUCCESS);
}
/*
 * Copy the scheduling parameters out of the attribute object.
 * EINVAL for a bad attribute object.
 */
int
pthread_attr_getschedparam(const pthread_attr_t *attr,
                           struct sched_param *param)
{
    if (attr->sig != _PTHREAD_ATTR_SIG)
        return (EINVAL);
    *param = attr->param;
    return (ESUCCESS);
}
/*
 * Report the scheduling policy recorded in the attribute object.
 * EINVAL for a bad attribute object.
 */
int
pthread_attr_getschedpolicy(const pthread_attr_t *attr,
                            int *policy)
{
    if (attr->sig != _PTHREAD_ATTR_SIG)
        return (EINVAL);
    *policy = attr->policy;
    return (ESUCCESS);
}
/* Default stack size for new threads (DFLSSIZ from <machine/vmparam.h>). */
static const size_t DEFAULT_STACK_SIZE = DFLSSIZ;

/*
 * Initialize an attribute object to the defaults: joinable, default
 * policy and priority, and an implementation-allocated stack of
 * DEFAULT_STACK_SIZE bytes that is freed when the thread exits.
 */
int
pthread_attr_init(pthread_attr_t *attr)
{
    attr->sig = _PTHREAD_ATTR_SIG;
    attr->stacksize = DEFAULT_STACK_SIZE;
    attr->stackaddr = NULL;
    attr->freeStackOnExit = TRUE;   /* we allocate the stack, so we free it */
    attr->policy = _PTHREAD_DEFAULT_POLICY;
    attr->param.sched_priority = default_priority;
    attr->param.quantum = 10;       /* default scheduler quantum */
    attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
    attr->detached = PTHREAD_CREATE_JOINABLE;
    return (ESUCCESS);
}
/*
 * Select whether threads created with these attributes start joinable
 * or detached.  EINVAL for a bad attribute object or state value.
 */
int
pthread_attr_setdetachstate(pthread_attr_t *attr,
                            int detachstate)
{
    if (attr->sig != _PTHREAD_ATTR_SIG)
        return (EINVAL);
    if (detachstate != PTHREAD_CREATE_JOINABLE &&
        detachstate != PTHREAD_CREATE_DETACHED)
        return (EINVAL);
    attr->detached = detachstate;
    return (ESUCCESS);
}
/*
 * Select whether new threads inherit scheduling from the creator or use
 * the explicit attribute values.  EINVAL for a bad object or value.
 */
int
pthread_attr_setinheritsched(pthread_attr_t *attr,
                             int inheritsched)
{
    if (attr->sig != _PTHREAD_ATTR_SIG)
        return (EINVAL);
    if (inheritsched != PTHREAD_INHERIT_SCHED &&
        inheritsched != PTHREAD_EXPLICIT_SCHED)
        return (EINVAL);
    attr->inherit = inheritsched;
    return (ESUCCESS);
}
/*
 * Record scheduling parameters in the attribute object.  The values are
 * not validated here; they are applied at thread creation time.
 * EINVAL for a bad attribute object.
 */
int
pthread_attr_setschedparam(pthread_attr_t *attr,
                           const struct sched_param *param)
{
    if (attr->sig != _PTHREAD_ATTR_SIG)
        return (EINVAL);
    attr->param = *param;
    return (ESUCCESS);
}
/*
 * Record the scheduling policy in the attribute object.  Only the three
 * POSIX policies are accepted.  EINVAL for a bad object or policy.
 */
int
pthread_attr_setschedpolicy(pthread_attr_t *attr,
                            int policy)
{
    if (attr->sig != _PTHREAD_ATTR_SIG)
        return (EINVAL);
    switch (policy) {
    case SCHED_OTHER:
    case SCHED_RR:
    case SCHED_FIFO:
        attr->policy = policy;
        return (ESUCCESS);
    default:
        return (EINVAL);
    }
}
/*
 * Set contention scope.  Only system scope is implemented; process
 * scope is recognized but unsupported (ENOTSUP).
 */
int
pthread_attr_setscope(pthread_attr_t *attr,
                      int scope)
{
    if (attr->sig != _PTHREAD_ATTR_SIG)
        return (EINVAL);
    switch (scope) {
    case PTHREAD_SCOPE_SYSTEM:
        return (ESUCCESS);      /* already the only supported scope */
    case PTHREAD_SCOPE_PROCESS:
        return (ENOTSUP);
    default:
        return (EINVAL);
    }
}
/*
 * Report the contention scope, which is always PTHREAD_SCOPE_SYSTEM in
 * this implementation.  EINVAL for a bad attribute object.
 */
int
pthread_attr_getscope(pthread_attr_t *attr,
                      int *scope)
{
    if (attr->sig != _PTHREAD_ATTR_SIG)
        return (EINVAL);
    *scope = PTHREAD_SCOPE_SYSTEM;
    return (ESUCCESS);
}
/*
 * Report the caller-supplied stack address (NULL when the library will
 * allocate the stack itself).  EINVAL for a bad attribute object.
 */
int
pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
{
    if (attr->sig != _PTHREAD_ATTR_SIG)
        return (EINVAL);
    *stackaddr = attr->stackaddr;
    return (ESUCCESS);
}
/*
 * Supply a caller-owned stack.  The address must be page aligned; the
 * library then never deallocates it (freeStackOnExit cleared).
 * EINVAL for a bad object or a misaligned address.
 */
int
pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
    int aligned = (((vm_offset_t)stackaddr & (vm_page_size - 1)) == 0);

    if (attr->sig != _PTHREAD_ATTR_SIG || !aligned)
        return (EINVAL);
    attr->stackaddr = stackaddr;
    attr->freeStackOnExit = FALSE;  /* caller owns this memory */
    return (ESUCCESS);
}
/*
 * Report the stack size recorded in the attribute object.
 * EINVAL for a bad attribute object.
 */
int
pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
    if (attr->sig != _PTHREAD_ATTR_SIG)
        return (EINVAL);
    *stacksize = attr->stacksize;
    return (ESUCCESS);
}
/*
 * Set the stack size.  It must be a whole number of pages and at least
 * PTHREAD_STACK_MIN.  EINVAL for a bad object or size.
 */
int
pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
    int acceptable = ((stacksize % vm_page_size) == 0) &&
                     (stacksize >= PTHREAD_STACK_MIN);

    if (attr->sig != _PTHREAD_ATTR_SIG || !acceptable)
        return (EINVAL);
    attr->stacksize = stacksize;
    return (ESUCCESS);
}
/*
 * Entry point for every new thread: bind the pthread_t to the running
 * thread, invoke the user start routine, then exit carrying its return
 * value (pthread_exit never returns).
 */
static void
_pthread_body(pthread_t self)
{
    void *result;

    _pthread_set_self(self);
    result = (self->fun)(self->arg);
    pthread_exit(result);
}
/*
 * Initialize an already-allocated struct _pthread for a thread that will
 * run on `stack` inside Mach thread `kernel_thread`.  Does not start the
 * thread.  For joinable threads two semaphores are created: `joiners`
 * (waited on by pthread_join, signalled at exit) and `death` (waited on
 * by the exiting thread until a joiner collects the exit value).
 * Returns ESUCCESS, or EINVAL if a semaphore cannot be created.
 */
int
_pthread_create(pthread_t t,
const pthread_attr_t *attrs,
vm_address_t stack,
const mach_port_t kernel_thread)
{
int res;
kern_return_t kern_res;
res = ESUCCESS;
do
{
/* Start from a zeroed structure, then copy the attribute values in. */
memset(t, 0, sizeof(*t));
t->stacksize = attrs->stacksize;
t->stackaddr = (void *)stack;
t->kernel_thread = kernel_thread;
t->detached = attrs->detached;
t->inherit = attrs->inherit;
t->policy = attrs->policy;
t->param = attrs->param;
t->freeStackOnExit = attrs->freeStackOnExit;
t->mutexes = (struct _pthread_mutex *)NULL;
t->sig = _PTHREAD_SIG;
t->reply_port = MACH_PORT_NULL;
t->cthread_self = NULL;
LOCK_INIT(t->lock);
t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
t->cleanup_stack = (struct _pthread_handler_rec *)NULL;
/* Apply the requested scheduling to the kernel thread (result ignored). */
pthread_setschedparam(t, t->policy, &t->param);
if (t->detached == PTHREAD_CREATE_JOINABLE)
{
/* Joinable threads need the death/joiners handshake semaphores. */
PTHREAD_MACH_CALL(semaphore_create(mach_task_self(),
&t->death,
SYNC_POLICY_FIFO,
0), kern_res);
if (kern_res != KERN_SUCCESS)
{
printf("Can't create 'death' semaphore: %d\n", kern_res);
res = EINVAL;
break;
}
PTHREAD_MACH_CALL(semaphore_create(mach_task_self(),
&t->joiners,
SYNC_POLICY_FIFO,
0), kern_res);
if (kern_res != KERN_SUCCESS)
{
/* NOTE(review): the 'death' semaphore created above is not
   destroyed on this error path. */
printf("Can't create 'joiners' semaphore: %d\n", kern_res);
res = EINVAL;
break;
}
t->num_joiners = 0;
} else
{
/* Detached threads never block in pthread_exit; no semaphores. */
t->death = MACH_PORT_NULL;
}
} while (0);
return (res);
}
/* Nonzero once the process has created at least one extra thread
   (set in _pthread_create_suspended); used by malloc locking et al. */
int
_pthread_is_threaded(void)
{
return __is_threaded;
}
/* Return the Mach thread port underlying a pthread (non-portable). */
mach_port_t
pthread_mach_thread_np(pthread_t t)
{
return t->kernel_thread;
}
/* Return the stack size recorded for a pthread (non-portable). */
size_t
pthread_get_stacksize_np(pthread_t t)
{
return t->stacksize;
}
/* Return the stack address recorded for a pthread (non-portable). */
void *
pthread_get_stackaddr_np(pthread_t t)
{
return t->stackaddr;
}
/* Return the thread's cached MIG reply port (MACH_PORT_NULL until one
   is lazily created, e.g. in _pthread_become_available). */
mach_port_t
_pthread_reply_port(pthread_t t)
{
return t->reply_port;
}
/*
 * Common worker for pthread_create() and pthread_create_suspended_np():
 * allocate a stack and a struct _pthread, create the Mach thread, wire
 * everything together with _pthread_create()/_pthread_setup(), and
 * (unless `suspended` is nonzero) resume the thread so it runs
 * _pthread_body().  Returns 0 on success or an errno-style code.
 *
 * Fix over the original: the malloc() result is now checked; on failure
 * the freshly allocated stack is released and EAGAIN is returned,
 * instead of dereferencing a NULL pthread_t.
 */
static int
_pthread_create_suspended(pthread_t *thread,
                          const pthread_attr_t *attr,
                          void *(*start_routine)(void *),
                          void *arg,
                          int suspended)
{
    pthread_attr_t _attr, *attrs;
    vm_address_t stack;
    int res;
    pthread_t t;
    kern_return_t kern_res;
    mach_port_t kernel_thread;

    if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
    {
        /* No attributes supplied: use the defaults. */
        attrs = &_attr;
        pthread_attr_init(attrs);
    } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
        return EINVAL;
    }
    res = ESUCCESS;
    do
    {
        if ((res = _pthread_allocate_stack(attrs, &stack)) != 0) {
            break;
        }
        t = (pthread_t)malloc(sizeof(struct _pthread));
        if (t == NULL) {
            /* Don't leak the stack we just allocated -- but only if it
               is ours (a caller-supplied stackaddr is caller-owned). */
            if (attrs->stackaddr == NULL) {
                vm_size_t size = (vm_size_t)attrs->stacksize + vm_page_size;
                vm_address_t addr = stack;
#if !defined(STACK_GROWS_UP)
                addr -= size;   /* `stack` points at the high end here */
#endif
                (void)vm_deallocate(mach_task_self(), addr, size);
            }
            res = EAGAIN;
            break;
        }
        *thread = t;
        PTHREAD_MACH_CALL(thread_create(mach_task_self(), &kernel_thread), kern_res);
        if (kern_res != KERN_SUCCESS)
        {
            printf("Can't create thread: %d\n", kern_res);
            res = EINVAL;
            break;
        }
        if ((res = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
        {
            break;
        }
        t->arg = arg;
        t->fun = start_routine;
        /* Point the new Mach thread's registers at _pthread_body(). */
        _pthread_setup(t, _pthread_body, stack);
        set_malloc_singlethreaded(0);
        __is_threaded = 1;
        if (suspended == 0) {
            PTHREAD_MACH_CALL(thread_resume(kernel_thread), kern_res);
        }
        /* When suspended, kern_res still holds thread_create's
           KERN_SUCCESS, so this check only fires for thread_resume. */
        if (kern_res != KERN_SUCCESS)
        {
            printf("Can't resume thread: %d\n", kern_res);
            res = EINVAL;
            break;
        }
    } while (0);
    return (res);
}
/* POSIX pthread_create: delegate to the common worker with the thread
   started immediately (suspended == 0). */
int
pthread_create(pthread_t *thread,
const pthread_attr_t *attr,
void *(*start_routine)(void *),
void *arg)
{
return _pthread_create_suspended(thread, attr, start_routine, arg, 0);
}
/* Non-portable variant: create the thread but leave its kernel thread
   suspended; the caller resumes it later. */
int
pthread_create_suspended_np(pthread_t *thread,
const pthread_attr_t *attr,
void *(*start_routine)(void *),
void *arg)
{
return _pthread_create_suspended(thread, attr, start_routine, arg, 1);
}
/*
 * Detach a thread so its resources are reclaimed at exit without a
 * join.  A joinable thread is switched to detached and its handshake
 * semaphores are destroyed (a waiting joiner is signalled first).  A
 * thread that has already exited is reaped right away via
 * pthread_join().  Returns ESUCCESS, EINVAL if the thread is neither
 * joinable nor exited, ESRCH if the argument is not a live pthread.
 */
int
pthread_detach(pthread_t thread)
{
kern_return_t kern_res;
int num_joiners;
mach_port_t death;
if (thread->sig == _PTHREAD_SIG)
{
LOCK(thread->lock);
if (thread->detached == PTHREAD_CREATE_JOINABLE)
{
thread->detached = PTHREAD_CREATE_DETACHED;
/* Snapshot state under the lock; the semaphore calls happen after
   UNLOCK to avoid holding the spin lock across kernel traps. */
num_joiners = thread->num_joiners;
death = thread->death;
thread->death = MACH_PORT_NULL;
UNLOCK(thread->lock);
if (num_joiners > 0)
{
/* Wake a joiner; it will find the thread no longer joinable. */
PTHREAD_MACH_CALL(semaphore_signal(thread->joiners), kern_res);
}
PTHREAD_MACH_CALL(semaphore_destroy(mach_task_self(),
thread->joiners), kern_res);
PTHREAD_MACH_CALL(semaphore_destroy(mach_task_self(),
death), kern_res);
return (ESUCCESS);
} else if (thread->detached == _PTHREAD_EXITED) {
UNLOCK(thread->lock);
/* Already exited: join collects and frees it immediately. */
pthread_join(thread, NULL);
return ESUCCESS;
} else
{
UNLOCK(thread->lock);
return (EINVAL);
}
} else
{
return (ESRCH);
}
}
/* Park a finished thread for recycling: send a message on the recycle
   port whose msgh_id smuggles the pthread_t pointer (consumed by
   _pthread_reap_threads), then block receiving on our reply port.  The
   final loop repeatedly suspends the kernel thread so it cannot run
   again before the reaper calls thread_terminate() on it. */
static void _pthread_become_available(pthread_t thread) {
mach_msg_empty_rcv_t msg = { { 0 } };
kern_return_t ret;
if (thread->reply_port == MACH_PORT_NULL) {
/* Lazily create the reply port the reaper will destroy for us. */
thread->reply_port = mach_reply_port();
}
msg.header.msgh_size = sizeof msg - sizeof msg.trailer;
msg.header.msgh_remote_port = thread_recycle_port;
msg.header.msgh_local_port = MACH_PORT_NULL;
/* The message id carries the pthread_t pointer itself. */
msg.header.msgh_id = (int)thread;
msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
ret = mach_msg(&msg.header, MACH_SEND_MSG | MACH_RCV_MSG,
msg.header.msgh_size, sizeof msg,
thread->reply_port, MACH_MSG_TIMEOUT_NONE,
MACH_PORT_NULL);
while (1) {
/* Never returns: keep the thread suspended until terminated. */
ret = thread_suspend(thread->kernel_thread);
}
}
/* Poll the recycle port (zero timeout) for a message from a thread that
   parked itself in _pthread_become_available().  Returns KERN_SUCCESS
   with *msg filled in, or MACH_RCV_TIMED_OUT when none is waiting. */
static kern_return_t _pthread_check_for_available_threads(mach_msg_empty_rcv_t *msg) {
return mach_msg(&msg->header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
sizeof(mach_msg_empty_rcv_t), thread_recycle_port, 0,
MACH_PORT_NULL);
}
/* Reap every thread currently parked on the recycle port: terminate its
   kernel thread, destroy its reply port, release its stack (if library-
   allocated) and free the struct _pthread.  Called from pthread_exit()
   so exiting threads clean up after their predecessors. */
static void _pthread_reap_threads(void) {
kern_return_t ret;
mach_msg_empty_rcv_t msg = { { 0 } };
while((ret = _pthread_check_for_available_threads(&msg)) == KERN_SUCCESS) {
/* msgh_id carries the pthread_t pointer (see _pthread_become_available). */
pthread_t th = (pthread_t)msg.header.msgh_id;
mach_port_t kernel_thread = th->kernel_thread;
mach_port_t reply_port = th->reply_port;
/* Stack region = usable stack + one guard page. */
vm_size_t size = (vm_size_t)th->stacksize + vm_page_size;
vm_address_t addr = (vm_address_t)th->stackaddr;
#if !defined(STACK_GROWS_UP)
/* stackaddr records the high end; deallocate from the low end. */
addr -= size;
#endif
ret = thread_terminate(kernel_thread);
if (ret != KERN_SUCCESS) {
fprintf(stderr, "thread_terminate() failed: %s\n",
mach_error_string(ret));
}
ret = mach_port_destroy(mach_task_self(), reply_port);
if (ret != KERN_SUCCESS) {
fprintf(stderr,
"mach_port_destroy(thread_reply) failed: %s\n",
mach_error_string(ret));
}
if (th->freeStackOnExit) {
ret = vm_deallocate(mach_task_self(), addr, size);
if (ret != KERN_SUCCESS) {
fprintf(stderr,
"vm_deallocate(stack) failed: %s\n",
mach_error_string(ret));
}
}
free(th);
}
/* The loop only ends when the poll times out (port drained). */
assert(ret == MACH_RCV_TIMED_OUT);
}
/* Internal alias for pthread_self(). */
pthread_t
_pthread_self() {
return pthread_self();
}
/*
 * Terminate the calling thread: run the cleanup-handler stack, release
 * thread-specific data, publish the exit value, and hand the kernel
 * thread over for recycling.  Never returns.  A joinable thread parks
 * on its `death` semaphore until a joiner has collected the value.
 */
void
pthread_exit(void *value_ptr)
{
pthread_t self = pthread_self();
struct _pthread_handler_rec *handler;
kern_return_t kern_res;
int num_joiners;
/* Pop and run cleanup handlers, newest first. */
while ((handler = self->cleanup_stack) != 0)
{
(handler->routine)(handler->arg);
self->cleanup_stack = handler->next;
}
/* Run thread-specific-data destructors. */
_pthread_tsd_cleanup(self);
LOCK(self->lock);
if (self->detached == PTHREAD_CREATE_JOINABLE)
{
self->detached = _PTHREAD_EXITED;
self->exit_value = value_ptr;
num_joiners = self->num_joiners;
UNLOCK(self->lock);
if (num_joiners > 0)
{
/* Wake a waiting joiner to come collect the exit value. */
PTHREAD_MACH_CALL(semaphore_signal(self->joiners), kern_res);
}
/* Block until the last joiner signals `death` (retry on abort). */
do {
PTHREAD_MACH_CALL(semaphore_wait(self->death), kern_res);
} while (kern_res == KERN_ABORTED);
} else
UNLOCK(self->lock);
if (self->death)
{
/* The join handshake is over; the semaphores can go. */
PTHREAD_MACH_CALL(semaphore_destroy(mach_task_self(), self->joiners), kern_res);
PTHREAD_MACH_CALL(semaphore_destroy(mach_task_self(), self->death), kern_res);
}
if (self->detached == _PTHREAD_CREATE_PARENT) {
/* Main thread exiting terminates the whole process. */
exit((int)(self->exit_value));
}
/* Recycle earlier threads, then park this one for the next reaper. */
_pthread_reap_threads();
_pthread_become_available(self);
}
/*
 * Wait for a joinable thread to exit and collect its exit value.  The
 * joiner waits on the target's `joiners` semaphore; the last joiner to
 * find the thread _PTHREAD_EXITED signals `death` so the exiting thread
 * can finish in pthread_exit().  Returns ESUCCESS, EINVAL if the thread
 * became detached, ESRCH if it is not a pthread or another joiner won.
 */
int
pthread_join(pthread_t thread,
void **value_ptr)
{
kern_return_t kern_res;
if (thread->sig == _PTHREAD_SIG)
{
LOCK(thread->lock);
if (thread->detached == PTHREAD_CREATE_JOINABLE)
{
thread->num_joiners++;
UNLOCK(thread->lock);
/* Sleep until the thread exits or is detached (retry on abort). */
do {
PTHREAD_MACH_CALL(semaphore_wait(thread->joiners), kern_res);
} while (kern_res == KERN_ABORTED);
LOCK(thread->lock);
thread->num_joiners--;
}
if (thread->detached == _PTHREAD_EXITED)
{
if (thread->num_joiners == 0)
{
/* Last joiner out: take the value and release the exiter. */
if (value_ptr)
{
*value_ptr = thread->exit_value;
}
UNLOCK(thread->lock);
PTHREAD_MACH_CALL(semaphore_signal(thread->death), kern_res);
return (ESUCCESS);
} else
{
/* Other joiners still pending; they get the value instead. */
UNLOCK(thread->lock);
return (ESRCH);
}
} else
{
/* Thread was detached while we waited. */
UNLOCK(thread->lock);
return (EINVAL);
}
} else
{
return (ESRCH);
}
}
/*
 * Report the scheduling policy and parameters cached in the pthread
 * structure.  ESRCH if the argument is not a live pthread.
 */
int
pthread_getschedparam(pthread_t thread,
                      int *policy,
                      struct sched_param *param)
{
    if (thread->sig != _PTHREAD_SIG)
        return (ESRCH);
    *policy = thread->policy;
    *param = thread->param;
    return (ESUCCESS);
}
/*
 * Apply a POSIX scheduling policy and priority to a thread by
 * translating them into the corresponding Mach policy base and calling
 * thread_policy().  Returns ESUCCESS, EINVAL for an unknown policy or a
 * kernel rejection, ESRCH for a bad thread.
 *
 * Fix over the original: thread->policy / thread->param are now updated
 * only after thread_policy() succeeds, so a failed call no longer
 * leaves pthread_getschedparam() reporting values the kernel rejected.
 */
int
pthread_setschedparam(pthread_t thread,
                      int policy,
                      const struct sched_param *param)
{
    policy_base_data_t bases;
    policy_base_t base;
    mach_msg_type_number_t count;
    kern_return_t ret;

    if (thread->sig != _PTHREAD_SIG)
        return (ESRCH);
    switch (policy)
    {
    case SCHED_OTHER:
        bases.ts.base_priority = param->sched_priority;
        base = (policy_base_t)&bases.ts;
        count = POLICY_TIMESHARE_BASE_COUNT;
        break;
    case SCHED_FIFO:
        bases.fifo.base_priority = param->sched_priority;
        base = (policy_base_t)&bases.fifo;
        count = POLICY_FIFO_BASE_COUNT;
        break;
    case SCHED_RR:
        bases.rr.base_priority = param->sched_priority;
        bases.rr.quantum = param->quantum;
        base = (policy_base_t)&bases.rr;
        count = POLICY_RR_BASE_COUNT;
        break;
    default:
        return (EINVAL);
    }
    ret = thread_policy(thread->kernel_thread, policy, base, count, TRUE);
    if (ret != KERN_SUCCESS)
    {
        return (EINVAL);
    }
    /* Commit the cached values only once the kernel accepted them. */
    thread->policy = policy;
    thread->param = *param;
    return (ESUCCESS);
}
/* Lowest priority usable with pthread_setschedparam.  The `policy`
   argument is ignored; the range is centred on the host's default
   priority as queried in pthread_init(). */
int
sched_get_priority_min(int policy)
{
return default_priority - 16;
}
/* Highest priority usable with pthread_setschedparam; see
   sched_get_priority_min() for the symmetric lower bound. */
int
sched_get_priority_max(int policy)
{
return default_priority + 16;
}
/* Two pthread_t handles name the same thread iff they are the same
   pointer. */
int
pthread_equal(pthread_t t1,
pthread_t t2)
{
return (t1 == t2);
}
/*
 * cthreads compatibility: record the cthread self pointer.  Once a real
 * pthread exists it is stored in the pthread structure; before that
 * (bootstrap) it is installed directly as the per-thread self value.
 */
void
cthread_set_self(void *cself)
{
    pthread_t self = pthread_self();

    if (self != (pthread_t)NULL && self->sig == _PTHREAD_SIG) {
        self->cthread_self = cself;
        return;
    }
    /* No initialized pthread yet: fall back to the raw slot. */
    _pthread_set_self(cself);
}
/*
 * cthreads compatibility: return the recorded cthread self pointer, or
 * the raw per-thread value when no initialized pthread exists yet.
 */
void *
ur_cthread_self(void) {
    pthread_t self = pthread_self();

    if (self != (pthread_t)NULL && self->sig == _PTHREAD_SIG)
        return self->cthread_self;
    /* Bootstrap case: the raw value (possibly NULL) is the answer. */
    return (void *)self;
}
/*
 * Run init_routine exactly once per once_control; concurrent callers
 * block on the control's lock until the first caller finishes.
 * NOTE(review): the lock is held while init_routine runs, so a
 * recursive pthread_once on the same control would deadlock.
 */
int
pthread_once(pthread_once_t *once_control,
void (*init_routine)(void))
{
LOCK(once_control->lock);
if (once_control->sig == _PTHREAD_ONCE_SIG_init)
{
(*init_routine)();
once_control->sig = _PTHREAD_ONCE_SIG;
}
UNLOCK(once_control->lock);
return (ESUCCESS);
}
/*
 * Request cancellation of a thread by setting its pending-cancel bit;
 * the target acts on it at its next cancellation point (see
 * _pthread_testcancel).  Returns ESUCCESS, or ESRCH for a bad thread.
 *
 * Fix over the original: cancel_state is read-modify-written under
 * thread->lock, matching _pthread_testcancel() and
 * pthread_setcancelstate(), which also touch it under the lock --
 * the unlocked |= could race them and lose an update.
 */
int
pthread_cancel(pthread_t thread)
{
    if (thread->sig == _PTHREAD_SIG)
    {
        LOCK(thread->lock);
        thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
        UNLOCK(thread->lock);
        return (ESUCCESS);
    } else
    {
        return (ESRCH);
    }
}
/* Act on a pending cancellation: if `thread` (always the calling
   thread -- every caller passes self, and the exit below terminates the
   caller) has cancellation enabled and a cancel pending, exit now. */
static void
_pthread_testcancel(pthread_t thread)
{
LOCK(thread->lock);
if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
(PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
{
UNLOCK(thread->lock);
pthread_exit(0);
}
UNLOCK(thread->lock);
}
/*
 * Explicit cancellation point: terminate the calling thread now if it
 * has an enabled, pending cancellation request.
 */
void
pthread_testcancel(void)
{
    _pthread_testcancel(pthread_self());
}
/*
 * Set the calling thread's cancelability state (ENABLE or DISABLE),
 * returning the previous state in *oldstate, then act on any pending
 * cancel that the new state exposes.
 *
 * Fix over the original: the new state is installed with the state
 * field cleared first (& ~_PTHREAD_CANCEL_STATE_MASK).  The original
 * used & _PTHREAD_CANCEL_STATE_MASK, which -- given that the same mask
 * extracts the state for *oldstate above -- kept the stale state bits
 * OR'ed in and discarded the type/pending bits of cancel_state.
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
    pthread_t self = pthread_self();
    int err = ESUCCESS;
    LOCK(self->lock);
    *oldstate = self->cancel_state & _PTHREAD_CANCEL_STATE_MASK;
    if ((state == PTHREAD_CANCEL_ENABLE) || (state == PTHREAD_CANCEL_DISABLE))
    {
        /* Clear the state field, preserve type and pending bits. */
        self->cancel_state = (self->cancel_state & ~_PTHREAD_CANCEL_STATE_MASK) | state;
    } else
    {
        err = EINVAL;
    }
    UNLOCK(self->lock);
    _pthread_testcancel(self);  /* an enabled pending cancel fires here */
    return (err);
}
/*
 * Set the calling thread's cancelability type (DEFERRED or
 * ASYNCHRONOUS), returning the previous type in *oldtype, then act on
 * any pending cancel.
 *
 * Fix over the original: the new type is installed with the type field
 * cleared first (& ~_PTHREAD_CANCEL_TYPE_MASK); the original's
 * & _PTHREAD_CANCEL_TYPE_MASK kept the stale type bits OR'ed in and
 * discarded the state/pending bits (same defect as the original
 * pthread_setcancelstate).
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
    pthread_t self = pthread_self();
    int err = ESUCCESS;
    LOCK(self->lock);
    *oldtype = self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK;
    if ((type == PTHREAD_CANCEL_DEFERRED) || (type == PTHREAD_CANCEL_ASYNCHRONOUS))
    {
        /* Clear the type field, preserve state and pending bits. */
        self->cancel_state = (self->cancel_state & ~_PTHREAD_CANCEL_TYPE_MASK) | type;
    } else
    {
        err = EINVAL;
    }
    UNLOCK(self->lock);
    _pthread_testcancel(self);
    return (err);
}
/* The initial (main) thread's statically allocated pthread structure. */
static struct _pthread _thread = {0};

/*
 * One-time library initialization, installed as _cthread_init_routine
 * below so the C runtime runs it at startup: query the host's priority
 * range, bootstrap the main thread's pthread structure, size the
 * spin-lock retry count from the CPU count, detect AltiVec, and
 * initialize MIG reply-port handling.
 */
static int
pthread_init(void)
{
pthread_attr_t _attr, *attrs;
pthread_t thread;
kern_return_t kr;
host_basic_info_data_t basic_info;
host_priority_info_data_t priority_info;
host_info_t info;
host_flavor_t flavor;
mach_msg_type_number_t count;
int mib[2];
size_t len;
int hasvectorunit, numcpus;
/* Learn the host's default/min/max scheduling priorities. */
count = HOST_PRIORITY_INFO_COUNT;
info = (host_info_t)&priority_info;
flavor = HOST_PRIORITY_INFO;
kr = host_info(mach_host_self(), flavor, info, &count);
if (kr != KERN_SUCCESS)
printf("host_info failed (%d); probably need privilege.\n", kr);
else {
default_priority = priority_info.user_priority;
min_priority = priority_info.minimum_priority;
max_priority = priority_info.maximum_priority;
}
/* Turn the statically allocated _thread into the main thread's
   pthread, running on the stack set up by the kernel (USRSTACK). */
attrs = &_attr;
pthread_attr_init(attrs);
_pthread_set_self(&_thread);
_pthread_create(&_thread, attrs, USRSTACK, mach_thread_self());
thread = &_thread;
thread->detached = _PTHREAD_CREATE_PARENT;
/* Spinning before blocking only pays off on multiprocessors. */
mib[0] = CTL_HW;
mib[1] = HW_NCPU;
len = sizeof(numcpus);
if (sysctl(mib, 2, &numcpus, &len, NULL, 0) == 0) {
if (numcpus > 1) {
_spin_tries = MP_SPIN_TRIES;
}
} else {
/* sysctl unavailable: fall back to Mach host_info. */
count = HOST_BASIC_INFO_COUNT;
info = (host_info_t)&basic_info;
flavor = HOST_BASIC_INFO;
kr = host_info(mach_host_self(), flavor, info, &count);
if (kr != KERN_SUCCESS)
printf("host_info failed (%d)\n", kr);
else {
if (basic_info.avail_cpus > 1)
_spin_tries = MP_SPIN_TRIES;
/* 7400-class PowerPC and later have AltiVec. */
if (basic_info.cpu_subtype >= CPU_SUBTYPE_POWERPC_7400)
_cpu_has_altivec = 1;
}
}
/* Prefer the authoritative vector-unit sysctl when available. */
mib[0] = CTL_HW;
mib[1] = HW_VECTORUNIT;
len = sizeof(hasvectorunit);
if (sysctl(mib, 2, &hasvectorunit, &len, NULL, 0) == 0) {
_cpu_has_altivec = hasvectorunit;
}
mig_init(1);
return 0;
}
/* Yield the processor via the Mach swtch_pri() trap. */
int sched_yield(void)
{
swtch_pri(0);
return 0;
}
/* Hook consumed by the C runtime's startup code so pthread_init() runs
   once before user code (see crt/cthreads bootstrap). */
int (*_cthread_init_routine)(void) = pthread_init;
/*
 * Hand out a semaphore from the process-wide pool, growing the pool by
 * 16 pre-created semaphores whenever it runs dry.
 *
 * Fix over the original: the pool is grown through a temporary pointer,
 * so a failed realloc() no longer leaks the old array and then writes
 * through a NULL sem_pool; on failure the existing pool and counters
 * are left untouched.
 */
__private_extern__ semaphore_t new_sem_from_pool(void) {
    kern_return_t res;
    semaphore_t sem;
    int i;

    LOCK(sem_pool_lock);
    if (sem_pool_current == sem_pool_count) {
        int new_count = sem_pool_count + 16;
        semaphore_t *new_pool = realloc(sem_pool, new_count * sizeof(semaphore_t));
        if (new_pool != NULL) {
            sem_pool = new_pool;
            sem_pool_count = new_count;
            /* Pre-create the new batch of semaphores. */
            for (i = sem_pool_current; i < sem_pool_count; i++) {
                PTHREAD_MACH_CALL(semaphore_create(mach_task_self(), &sem_pool[i], SYNC_POLICY_FIFO, 0), res);
            }
        }
    }
    sem = sem_pool[sem_pool_current++];
    UNLOCK(sem_pool_lock);
    return sem;
}
/* Return a semaphore previously handed out by new_sem_from_pool(). */
__private_extern__ void restore_sem_to_pool(semaphore_t sem) {
    LOCK(sem_pool_lock);
    --sem_pool_current;
    sem_pool[sem_pool_current] = sem;
    UNLOCK(sem_pool_lock);
}
/* Discard the semaphore pool (used after fork, where the parent's
   semaphore rights are not usable in the child).
   NOTE(review): the array itself is dropped without free() -- a
   one-time leak per fork; confirm it is intentional before changing. */
static void sem_pool_reset(void) {
LOCK(sem_pool_lock);
sem_pool_count = 0;
sem_pool_current = 0;
sem_pool = NULL;
UNLOCK(sem_pool_lock);
}
/* Fork-child hook: the child inherits whatever lock state the parent
   had, so force sem_pool_lock unlocked before resetting the pool. */
__private_extern__ void _pthread_fork_child(void) {
UNLOCK(sem_pool_lock);
sem_pool_reset();
}