#include "pthread_internals.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>	/* memset() */
#include <strings.h>	/* bzero() */
#include <errno.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/syscall.h>
#include <machine/vmparam.h>
#include <mach/vm_statistics.h>
#define __APPLE_API_PRIVATE
#include <machine/cpu_capabilities.h>
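/*
 * Core POSIX thread support: thread creation and attributes, join and
 * detach, cancellation, and library initialization, layered on Mach
 * threads, ports, and semaphores.
 */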
#ifndef BUILDING_VARIANT
__private_extern__ struct __pthread_list __pthread_head = LIST_HEAD_INITIALIZER(&__pthread_head);
extern void _pthread_set_self(pthread_t);
extern void mig_init(int);
__private_extern__ void _init_cpu_capabilities(void);
extern void set_malloc_singlethreaded(int);
extern pthread_lock_t reply_port_lock;
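/*
 * Message sent to thread_recycle_port when a thread terminates, so a
 * later thread can reap its kernel port and stack.
 */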
typedef struct _pthread_reap_msg_t {
mach_msg_header_t header;
pthread_t thread;
mach_msg_trailer_t trailer;
} pthread_reap_msg_t;
static struct _pthread _thread = {0};	/* the initial (main) thread */
int __is_threaded = 0;			/* set once a second thread exists */
static int _pthread_count = 1;		/* number of live threads */
int __unix_conforming = 0;
__private_extern__ pthread_lock_t _pthread_list_lock = LOCK_INITIALIZER;
int _spin_tries = 0;
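/*
 * Spin-lock slow path: spin up to _spin_tries times, then depress our
 * priority with thread_switch() before spinning again.  _spin_tries
 * stays 0 on uniprocessors, so they yield immediately.
 */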
__private_extern__ void _spin_lock_retry(pthread_lock_t *lock)
{
int tries = _spin_tries;
do {
if (tries-- > 0)
continue;
syscall_thread_switch(THREAD_NULL, SWITCH_OPTION_DEPRESS, 1);
tries = _spin_tries;
} while(!_spin_lock_try(lock));
}
extern mach_port_t thread_recycle_port;
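/*
 * Pool of Mach semaphores recycled across pthread_join() "death"
 * waits, guarded by sem_pool_lock.
 */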
static semaphore_t *sem_pool = NULL;
static int sem_pool_count = 0;
static int sem_pool_current = 0;
static pthread_lock_t sem_pool_lock = LOCK_INITIALIZER;
static int default_priority;		/* set from host_info() in pthread_init() */
static int max_priority;
static int min_priority;
static int pthread_concurrency;
static void _pthread_exit(pthread_t self, void *value_ptr);
size_t _pthread_stack_size = 0;
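/*
 * Stack layout helpers for the legacy allocator below: stacks are
 * aligned to (__pthread_stack_mask + 1), so the owning thread
 * structure can be located from any stack pointer within it.
 */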
#define STACK_LOWEST(sp) ((sp) & ~__pthread_stack_mask)
#define STACK_RESERVED (sizeof (struct _pthread))
#define STACK_BASE(sp) (((sp) | __pthread_stack_mask) + 1)
#define STACK_START(stack_low) (STACK_BASE(stack_low) - STACK_RESERVED)
#define STACK_SELF(sp) STACK_START(sp)
#if defined(__ppc__) || defined(__ppc64__)
static const vm_address_t PTHREAD_STACK_HINT = 0xF0000000;
#elif defined(__i386__) || defined(__x86_64__)
static const vm_address_t PTHREAD_STACK_HINT = 0xB0000000;
#else
#error Need to define a stack address hint for this architecture
#endif
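/*
 * Allocate a thread stack (plus guard area) with vm_map(), trying the
 * architecture-specific hint first; *stack receives the stack's top
 * (highest) address.
 */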
static int
_pthread_allocate_stack(pthread_attr_t *attrs, void **stack)
{
kern_return_t kr;
vm_address_t stackaddr;
size_t guardsize;
#if 1
assert(attrs->stacksize >= PTHREAD_STACK_MIN);
if (attrs->stackaddr != NULL) {
assert(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
*stack = attrs->stackaddr;
return 0;
}
guardsize = attrs->guardsize;
stackaddr = PTHREAD_STACK_HINT;
kr = vm_map(mach_task_self(), &stackaddr,
attrs->stacksize + guardsize,
vm_page_size-1,	/* alignment mask */
VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE , MEMORY_OBJECT_NULL,
0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
VM_INHERIT_DEFAULT);
if (kr != KERN_SUCCESS) {
/* The hinted mapping failed; take any address. */
kr = vm_allocate(mach_task_self(),
&stackaddr, attrs->stacksize + guardsize,
VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE);
}
if (kr != KERN_SUCCESS) {
return EAGAIN;
}
if (guardsize) {
/* The guard page sits at the lowest address of the region. */
kr = vm_protect(mach_task_self(), stackaddr, guardsize, FALSE, VM_PROT_NONE);
}
*stack = (void *)(stackaddr + attrs->stacksize + guardsize);	/* top of stack */
#else	/* legacy free-list stack allocator, compiled out by the "#if 1" above */
vm_address_t cur_stack = (vm_address_t)0;
if (free_stacks == 0)
{
#ifndef NO_GUARD_PAGES
# define GUARD_SIZE(a) (2*(a))
# define GUARD_MASK(a) (((a)<<1) | 1)
#else
# define GUARD_SIZE(a) (a)
# define GUARD_MASK(a) (a)
#endif
while (lowest_stack > GUARD_SIZE(__pthread_stack_size))
{
lowest_stack -= GUARD_SIZE(__pthread_stack_size);
kr = vm_allocate(mach_task_self(),
&lowest_stack,
GUARD_SIZE(__pthread_stack_size),
FALSE);
#ifndef NO_GUARD_PAGES
if (kr == KERN_SUCCESS) {
kr = vm_protect(mach_task_self(),
lowest_stack,
__pthread_stack_size,
FALSE, VM_PROT_NONE);
lowest_stack += __pthread_stack_size;
if (kr == KERN_SUCCESS)
break;
}
#else
if (kr == KERN_SUCCESS)
break;
#endif
}
if (lowest_stack > 0)
free_stacks = (vm_address_t *)lowest_stack;
else
{
kr = vm_map(mach_task_self(), &lowest_stack,
GUARD_SIZE(__pthread_stack_size),
GUARD_MASK(__pthread_stack_mask),
TRUE , MEMORY_OBJECT_NULL,
0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
VM_INHERIT_DEFAULT);
#ifndef NO_GUARD_PAGES
if (kr == KERN_SUCCESS) {
kr = vm_protect(mach_task_self(),
lowest_stack,
__pthread_stack_size,
FALSE, VM_PROT_NONE);
lowest_stack += __pthread_stack_size;
}
#endif
free_stacks = (vm_address_t *)lowest_stack;
lowest_stack = 0;
}
*free_stacks = 0;
}
cur_stack = STACK_START((vm_address_t) free_stacks);
free_stacks = (vm_address_t *)*free_stacks;
cur_stack = _adjust_sp(cur_stack);
#endif
return 0;
}
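/* Default attributes; filled in by pthread_attr_init() from pthread_init(). */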
static pthread_attr_t _pthread_attr_default = {0};
int
pthread_attr_destroy(pthread_attr_t *attr)
{
if (attr->sig == _PTHREAD_ATTR_SIG)
{
return (ESUCCESS);
} else
{
return (EINVAL);
}
}
int
pthread_attr_getdetachstate(const pthread_attr_t *attr,
int *detachstate)
{
if (attr->sig == _PTHREAD_ATTR_SIG)
{
*detachstate = attr->detached;
return (ESUCCESS);
} else
{
return (EINVAL);
}
}
int
pthread_attr_getinheritsched(const pthread_attr_t *attr,
int *inheritsched)
{
if (attr->sig == _PTHREAD_ATTR_SIG)
{
*inheritsched = attr->inherit;
return (ESUCCESS);
} else
{
return (EINVAL);
}
}
int
pthread_attr_getschedparam(const pthread_attr_t *attr,
struct sched_param *param)
{
if (attr->sig == _PTHREAD_ATTR_SIG)
{
*param = attr->param;
return (ESUCCESS);
} else
{
return (EINVAL);
}
}
int
pthread_attr_getschedpolicy(const pthread_attr_t *attr,
int *policy)
{
if (attr->sig == _PTHREAD_ATTR_SIG)
{
*policy = attr->policy;
return (ESUCCESS);
} else
{
return (EINVAL);
}
}
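/* Defaults installed by pthread_attr_init(). */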
static const size_t DEFAULT_STACK_SIZE = (512*1024);
int
pthread_attr_init(pthread_attr_t *attr)
{
attr->stacksize = DEFAULT_STACK_SIZE;
attr->stackaddr = NULL;
attr->sig = _PTHREAD_ATTR_SIG;
attr->param.sched_priority = default_priority;
attr->param.quantum = 10;	/* quantum isn't exposed in the public API */
attr->detached = PTHREAD_CREATE_JOINABLE;
attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
attr->policy = _PTHREAD_DEFAULT_POLICY;
attr->freeStackOnExit = TRUE;
attr->guardsize = vm_page_size;
return (ESUCCESS);
}
int
pthread_attr_setdetachstate(pthread_attr_t *attr,
int detachstate)
{
if (attr->sig == _PTHREAD_ATTR_SIG)
{
if ((detachstate == PTHREAD_CREATE_JOINABLE) ||
(detachstate == PTHREAD_CREATE_DETACHED))
{
attr->detached = detachstate;
return (ESUCCESS);
} else
{
return (EINVAL);
}
} else
{
return (EINVAL);
}
}
int
pthread_attr_setinheritsched(pthread_attr_t *attr,
int inheritsched)
{
if (attr->sig == _PTHREAD_ATTR_SIG)
{
if ((inheritsched == PTHREAD_INHERIT_SCHED) ||
(inheritsched == PTHREAD_EXPLICIT_SCHED))
{
attr->inherit = inheritsched;
return (ESUCCESS);
} else
{
return (EINVAL);
}
} else
{
return (EINVAL);
}
}
int
pthread_attr_setschedparam(pthread_attr_t *attr,
const struct sched_param *param)
{
if (attr->sig == _PTHREAD_ATTR_SIG)
{
attr->param = *param;
return (ESUCCESS);
} else
{
return (EINVAL);
}
}
int
pthread_attr_setschedpolicy(pthread_attr_t *attr,
int policy)
{
if (attr->sig == _PTHREAD_ATTR_SIG)
{
if ((policy == SCHED_OTHER) ||
(policy == SCHED_RR) ||
(policy == SCHED_FIFO))
{
attr->policy = policy;
return (ESUCCESS);
} else
{
return (EINVAL);
}
} else
{
return (EINVAL);
}
}
int
pthread_attr_setscope(pthread_attr_t *attr,
int scope)
{
if (attr->sig == _PTHREAD_ATTR_SIG) {
if (scope == PTHREAD_SCOPE_SYSTEM) {
return (ESUCCESS);
} else if (scope == PTHREAD_SCOPE_PROCESS) {
return (ENOTSUP);
}
}
return (EINVAL);
}
int
pthread_attr_getscope(pthread_attr_t *attr,
int *scope)
{
if (attr->sig == _PTHREAD_ATTR_SIG) {
*scope = PTHREAD_SCOPE_SYSTEM;
return (ESUCCESS);
}
return (EINVAL);
}
int
pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
{
if (attr->sig == _PTHREAD_ATTR_SIG) {
*stackaddr = attr->stackaddr;
return (ESUCCESS);
} else {
return (EINVAL);
}
}
int
pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
if ((attr->sig == _PTHREAD_ATTR_SIG) && (((uintptr_t)stackaddr % vm_page_size) == 0)) {
attr->stackaddr = stackaddr;
attr->freeStackOnExit = FALSE;
return (ESUCCESS);
} else {
return (EINVAL);
}
}
int
pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
if (attr->sig == _PTHREAD_ATTR_SIG) {
*stacksize = attr->stacksize;
return (ESUCCESS);
} else {
return (EINVAL);
}
}
int
pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
if ((attr->sig == _PTHREAD_ATTR_SIG) && ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
attr->stacksize = stacksize;
return (ESUCCESS);
} else {
return (EINVAL);
}
}
int
pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t * stacksize)
{
if (attr->sig == _PTHREAD_ATTR_SIG) {
/* stackaddr stores the base (highest address); POSIX reports the lowest. */
*stackaddr = (void *)((uintptr_t)attr->stackaddr - attr->stacksize);
*stacksize = attr->stacksize;
return (ESUCCESS);
} else {
return (EINVAL);
}
}
int
pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
{
if ((attr->sig == _PTHREAD_ATTR_SIG) &&
(((uintptr_t)stackaddr % vm_page_size) == 0) &&
((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
attr->stackaddr = (void *)((uintptr_t)stackaddr + stacksize);	/* store the base */
attr->stacksize = stacksize;
attr->freeStackOnExit = FALSE;
return (ESUCCESS);
} else {
return (EINVAL);
}
}
int
pthread_attr_setguardsize(pthread_attr_t *attr,
size_t guardsize)
{
if (attr->sig == _PTHREAD_ATTR_SIG) {
if ((guardsize % vm_page_size) == 0) {
attr->guardsize = guardsize;
return (ESUCCESS);
} else
return(EINVAL);
}
return (EINVAL);
}
int
pthread_attr_getguardsize(const pthread_attr_t *attr,
size_t *guardsize)
{
if (attr->sig == _PTHREAD_ATTR_SIG) {
*guardsize = attr->guardsize;
return (ESUCCESS);
}
return (EINVAL);
}
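/*
 * Entry trampoline for new threads: bind the pthread_t to the thread,
 * run the user routine, and pass its return value to _pthread_exit().
 */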
static void
_pthread_body(pthread_t self)
{
_pthread_set_self(self);
_pthread_exit(self, (self->fun)(self->arg));
}
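/*
 * Fill in an already-allocated thread structure from the attributes;
 * this does not create the Mach thread itself.
 */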
int
_pthread_create(pthread_t t,
const pthread_attr_t *attrs,
void *stack,
const mach_port_t kernel_thread)
{
int res;
res = ESUCCESS;
do
{
memset(t, 0, sizeof(*t));
t->tsd[0] = t;	/* TSD slot 0 holds the thread's self pointer */
t->stacksize = attrs->stacksize;
t->stackaddr = (void *)stack;
t->guardsize = attrs->guardsize;
t->kernel_thread = kernel_thread;
t->detached = attrs->detached;
t->inherit = attrs->inherit;
t->policy = attrs->policy;
t->param = attrs->param;
t->freeStackOnExit = attrs->freeStackOnExit;
t->mutexes = (struct _pthread_mutex *)NULL;
t->sig = _PTHREAD_SIG;
t->reply_port = MACH_PORT_NULL;
t->cthread_self = NULL;
LOCK_INIT(t->lock);
t->plist.le_next = (struct _pthread *)0;
t->plist.le_prev = (struct _pthread **)0;
t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
t->__cleanup_stack = (struct __darwin_pthread_handler_rec *)NULL;
t->death = SEMAPHORE_NULL;
if (kernel_thread != MACH_PORT_NULL)
pthread_setschedparam(t, t->policy, &t->param);
} while (0);
return (res);
}
int
_pthread_is_threaded(void)
{
return __is_threaded;
}
int
pthread_is_threaded_np(void)
{
return (__is_threaded);
}
mach_port_t
pthread_mach_thread_np(pthread_t t)
{
thread_t kernel_thread;
/* Wait for the creator to initialize the port. */
while ((kernel_thread = t->kernel_thread) == MACH_PORT_NULL)
sched_yield();
return kernel_thread;
}
size_t
pthread_get_stacksize_np(pthread_t t)
{
return t->stacksize;
}
void *
pthread_get_stackaddr_np(pthread_t t)
{
return t->stackaddr;
}
mach_port_t
_pthread_reply_port(pthread_t t)
{
return t->reply_port;
}
int
pthread_main_np(void)
{
pthread_t self = pthread_self();
return ((self->detached & _PTHREAD_CREATE_PARENT) == _PTHREAD_CREATE_PARENT);
}
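/*
 * Common path behind pthread_create() and pthread_create_suspended_np().
 * Threads with a non-default policy or priority are created suspended
 * so their scheduling can be set before they first run.
 */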
static int
_pthread_create_suspended(pthread_t *thread,
const pthread_attr_t *attr,
void *(*start_routine)(void *),
void *arg,
int suspended)
{
pthread_attr_t *attrs;
void *stack;
int res;
pthread_t t;
kern_return_t kern_res;
mach_port_t kernel_thread = MACH_PORT_NULL;
int needresume;
if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
{
attrs = &_pthread_attr_default;
} else if (attrs->sig != _PTHREAD_ATTR_SIG) {
return EINVAL;
}
res = ESUCCESS;
if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
(attrs->param.sched_priority != default_priority)) && (suspended == 0)) {
needresume = 1;
suspended = 1;
} else
needresume = 0;
do
{
if ((res = _pthread_allocate_stack(attrs, &stack)) != 0) {
break;
}
t = (pthread_t)malloc(sizeof(struct _pthread));
if (t == NULL) {	/* malloc can fail; bail out rather than crash */
res = EAGAIN;
break;
}
*thread = t;
if (suspended) {
PTHREAD_MACH_CALL(thread_create(mach_task_self(), &kernel_thread), kern_res);
if (kern_res != KERN_SUCCESS)
{
printf("Can't create thread: %d\n", kern_res);
res = EINVAL;
break;
}
}
if ((res = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
{
break;
}
set_malloc_singlethreaded(0);
__is_threaded = 1;
t->arg = arg;
t->fun = start_routine;
LOCK(_pthread_list_lock);
LIST_INSERT_HEAD(&__pthread_head, t, plist);
_pthread_count++;
UNLOCK(_pthread_list_lock);
_pthread_setup(t, _pthread_body, stack, suspended, needresume);
} while (0);
return (res);
}
int
pthread_create(pthread_t *thread,
const pthread_attr_t *attr,
void *(*start_routine)(void *),
void *arg)
{
return _pthread_create_suspended(thread, attr, start_routine, arg, 0);
}
int
pthread_create_suspended_np(pthread_t *thread,
const pthread_attr_t *attr,
void *(*start_routine)(void *),
void *arg)
{
return _pthread_create_suspended(thread, attr, start_routine, arg, 1);
}
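/*
 * Detach a joinable thread; if it has already exited, reap it now via
 * pthread_join().
 */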
int
pthread_detach(pthread_t thread)
{
if (thread->sig == _PTHREAD_SIG)
{
LOCK(thread->lock);
if (thread->detached & PTHREAD_CREATE_JOINABLE)
{
if (thread->detached & _PTHREAD_EXITED) {
UNLOCK(thread->lock);
pthread_join(thread, NULL);
return ESUCCESS;
} else {
semaphore_t death = thread->death;
thread->detached &= ~PTHREAD_CREATE_JOINABLE;
thread->detached |= PTHREAD_CREATE_DETACHED;
UNLOCK(thread->lock);
if (death)
(void) semaphore_signal(death);
return (ESUCCESS);
}
} else {
UNLOCK(thread->lock);
return (EINVAL);
}
} else {
return (ESRCH);
}
}
extern int __pthread_kill(mach_port_t, int);
int
pthread_kill (
pthread_t th,
int sig)
{
int error = 0;
if ((sig < 0) || (sig >= NSIG))
return(EINVAL);
if (th && (th->sig == _PTHREAD_SIG)) {
error = __pthread_kill(pthread_mach_thread_np(th), sig);
if (error == -1)
error = errno;
return(error);
}
else
return(ESRCH);
}
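/*
 * Queue a terminated thread on the recycle port; the message moves the
 * kernel thread's send right so a reaper can finish the cleanup.
 */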
static
void _pthread_become_available(pthread_t thread, mach_port_t kernel_thread) {
pthread_reap_msg_t msg;
kern_return_t ret;
msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,
MACH_MSG_TYPE_MOVE_SEND);
msg.header.msgh_size = sizeof msg - sizeof msg.trailer;
msg.header.msgh_remote_port = thread_recycle_port;
msg.header.msgh_local_port = kernel_thread;
msg.header.msgh_id = 0x44454144;	/* ASCII "DEAD" */
msg.thread = thread;
ret = mach_msg_send(&msg.header);
assert(ret == MACH_MSG_SUCCESS);
}
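/*
 * Reclaim a terminated thread's resources: kernel port, reply port,
 * stack (when we allocated it), and the structure itself.  Returns
 * EAGAIN while the kernel thread has not finished terminating.
 */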
__private_extern__
int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr) {
mach_port_type_t ptype;
kern_return_t ret;
task_t self;
self = mach_task_self();
if (kernel_thread != MACH_PORT_DEAD) {
ret = mach_port_type(self, kernel_thread, &ptype);
if (ret == KERN_SUCCESS && ptype != MACH_PORT_TYPE_DEAD_NAME) {
return EAGAIN;
}
ret = mach_port_deallocate(self, kernel_thread);
if (ret != KERN_SUCCESS) {
fprintf(stderr,
"mach_port_deallocate(kernel_thread) failed: %s\n",
mach_error_string(ret));
}
}
if (th->reply_port != MACH_PORT_NULL) {
ret = mach_port_mod_refs(self, th->reply_port,
MACH_PORT_RIGHT_RECEIVE, -1);
if (ret != KERN_SUCCESS) {
fprintf(stderr,
"mach_port_mod_refs(reply_port) failed: %s\n",
mach_error_string(ret));
}
}
if (th->freeStackOnExit) {
vm_address_t addr = (vm_address_t)th->stackaddr;
vm_size_t size;
size = (vm_size_t)th->stacksize + th->guardsize;
addr -= size;
ret = vm_deallocate(self, addr, size);
if (ret != KERN_SUCCESS) {
fprintf(stderr,
"vm_deallocate(stack) failed: %s\n",
mach_error_string(ret));
}
}
if (value_ptr)
*value_ptr = th->exit_value;
if (th != &_thread)
free(th);
return ESUCCESS;
}
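/*
 * Drain the recycle port without blocking, reaping terminated threads;
 * a thread still winding down is put back on the port.
 */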
static
void _pthread_reap_threads(void)
{
pthread_reap_msg_t msg;
kern_return_t ret;
ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
sizeof msg, thread_recycle_port,
MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
while (ret == MACH_MSG_SUCCESS) {
mach_port_t kernel_thread = msg.header.msgh_remote_port;
pthread_t thread = msg.thread;
if (_pthread_reap_thread(thread, kernel_thread, (void **)0) == EAGAIN)
{
_pthread_become_available(thread, kernel_thread);
return;
}
ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
sizeof msg, thread_recycle_port,
MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
}
}
pthread_t
_pthread_self() {
return pthread_self();
}
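/*
 * Terminate the calling thread: run cleanup handlers and TSD
 * destructors, then either signal a joiner or queue ourselves for
 * reaping, and finally terminate the Mach thread.
 */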
static void
_pthread_exit(pthread_t self, void *value_ptr)
{
struct __darwin_pthread_handler_rec *handler;
kern_return_t kern_res;
int thread_count;
/* Block further signal delivery to this thread (__disable_threadsignal). */
syscall(331,1);
while ((handler = self->__cleanup_stack) != 0)
{
(handler->__routine)(handler->__arg);
self->__cleanup_stack = handler->__next;
}
_pthread_tsd_cleanup(self);
_pthread_reap_threads();
LOCK(self->lock);
self->detached |= _PTHREAD_EXITED;
if (self->detached & PTHREAD_CREATE_JOINABLE) {
mach_port_t death = self->death;
self->exit_value = value_ptr;
UNLOCK(self->lock);
if (death) {
PTHREAD_MACH_CALL(semaphore_signal(death), kern_res);
if (kern_res != KERN_SUCCESS)
fprintf(stderr,
"semaphore_signal(death) failed: %s\n",
mach_error_string(kern_res));
}
LOCK(_pthread_list_lock);
thread_count = --_pthread_count;
UNLOCK(_pthread_list_lock);
} else {
UNLOCK(self->lock);
LOCK(_pthread_list_lock);
LIST_REMOVE(self, plist);
thread_count = --_pthread_count;
UNLOCK(_pthread_list_lock);
_pthread_become_available(self, pthread_mach_thread_np(self));
}
/* The last thread out ends the process. */
if (thread_count <= 0)
exit(0);
PTHREAD_MACH_CALL(thread_terminate(mach_thread_self()), kern_res);
fprintf(stderr, "thread_terminate(mach_thread_self()) failed: %s\n",
mach_error_string(kern_res));
abort();
}
void
pthread_exit(void *value_ptr)
{
_pthread_exit(pthread_self(), value_ptr);
}
int
pthread_getschedparam(pthread_t thread,
int *policy,
struct sched_param *param)
{
if (thread->sig == _PTHREAD_SIG)
{
*policy = thread->policy;
*param = thread->param;
return (ESUCCESS);
} else
{
return (ESRCH);
}
}
int
pthread_setschedparam(pthread_t thread,
int policy,
const struct sched_param *param)
{
policy_base_data_t bases;
policy_base_t base;
mach_msg_type_number_t count;
kern_return_t ret;
if (thread->sig == _PTHREAD_SIG)
{
switch (policy)
{
case SCHED_OTHER:
bases.ts.base_priority = param->sched_priority;
base = (policy_base_t)&bases.ts;
count = POLICY_TIMESHARE_BASE_COUNT;
break;
case SCHED_FIFO:
bases.fifo.base_priority = param->sched_priority;
base = (policy_base_t)&bases.fifo;
count = POLICY_FIFO_BASE_COUNT;
break;
case SCHED_RR:
bases.rr.base_priority = param->sched_priority;
bases.rr.quantum = param->quantum;
base = (policy_base_t)&bases.rr;
count = POLICY_RR_BASE_COUNT;
break;
default:
return (EINVAL);
}
ret = thread_policy(pthread_mach_thread_np(thread), policy, base, count, TRUE);
if (ret != KERN_SUCCESS)
{
return (EINVAL);
}
thread->policy = policy;
thread->param = *param;
return (ESUCCESS);
} else
{
return (ESRCH);
}
}
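/*
 * The schedulable range is reported as +/-16 around the default
 * timeshare priority, independent of the policy argument.
 */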
int
sched_get_priority_min(int policy)
{
return default_priority - 16;
}
int
sched_get_priority_max(int policy)
{
return default_priority + 16;
}
int
pthread_equal(pthread_t t1,
pthread_t t2)
{
return (t1 == t2);
}
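/*
 * Install p as the current thread's self pointer; p == 0 resets and
 * binds the static main-thread structure.
 */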
__private_extern__ void
_pthread_set_self(pthread_t p)
{
extern void __pthread_set_self(pthread_t);
if (p == 0) {
bzero(&_thread, sizeof(struct _pthread));
p = &_thread;
}
p->tsd[0] = p;
__pthread_set_self(p);
}
void
cthread_set_self(void *cself)
{
pthread_t self = pthread_self();
if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
_pthread_set_self(cself);
return;
}
self->cthread_self = cself;
}
void *
ur_cthread_self(void) {
pthread_t self = pthread_self();
if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
return (void *)self;
}
return self->cthread_self;
}
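/*
 * One-shot initialization.  Typical use:
 *
 *	static pthread_once_t once = PTHREAD_ONCE_INIT;
 *	pthread_once(&once, init_routine);
 *
 * Later callers block on the control's spin lock until the first
 * caller's init_routine has returned.
 */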
int
pthread_once(pthread_once_t *once_control,
void (*init_routine)(void))
{
_spin_lock(&once_control->lock);
if (once_control->sig == _PTHREAD_ONCE_SIG_init)
{
(*init_routine)();
once_control->sig = _PTHREAD_ONCE_SIG;
}
_spin_unlock(&once_control->lock);
return (ESUCCESS);
}
__private_extern__ void
_pthread_testcancel(pthread_t thread, int isconforming)
{
LOCK(thread->lock);
if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
(PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
{
UNLOCK(thread->lock);
if (isconforming)
pthread_exit(PTHREAD_CANCELED);
else
pthread_exit(0);
}
UNLOCK(thread->lock);
}
int
pthread_getconcurrency(void)
{
return(pthread_concurrency);
}
int
pthread_setconcurrency(int new_level)
{
pthread_concurrency = new_level;
return(ESUCCESS);
}
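/*
 * One-time library initialization, run before main(): set up the main
 * thread's structure and stack bounds, query scheduling priorities and
 * the CPU count, and initialize MIG.
 */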
static int
pthread_init(void)
{
pthread_attr_t *attrs;
pthread_t thread;
kern_return_t kr;
host_basic_info_data_t basic_info;
host_priority_info_data_t priority_info;
host_info_t info;
host_flavor_t flavor;
host_t host;
mach_msg_type_number_t count;
int mib[2];
size_t len;
int numcpus;
void *stackaddr;
count = HOST_PRIORITY_INFO_COUNT;
info = (host_info_t)&priority_info;
flavor = HOST_PRIORITY_INFO;
host = mach_host_self();
kr = host_info(host, flavor, info, &count);
if (kr != KERN_SUCCESS)
printf("host_info failed (%d); probably need privilege.\n", kr);
else {
default_priority = priority_info.user_priority;
min_priority = priority_info.minimum_priority;
max_priority = priority_info.maximum_priority;
}
attrs = &_pthread_attr_default;
pthread_attr_init(attrs);
LIST_INIT(&__pthread_head);
LOCK_INIT(_pthread_list_lock);
thread = &_thread;
LIST_INSERT_HEAD(&__pthread_head, thread, plist);
_pthread_set_self(thread);
mib[0] = CTL_KERN;
mib[1] = KERN_USRSTACK;
len = sizeof (stackaddr);
if (sysctl (mib, 2, &stackaddr, &len, NULL, 0) != 0)
stackaddr = (void *)USRSTACK;
_pthread_create(thread, attrs, stackaddr, mach_thread_self());
thread->detached = PTHREAD_CREATE_JOINABLE|_PTHREAD_CREATE_PARENT;
mib[0] = CTL_HW;
mib[1] = HW_NCPU;
len = sizeof(numcpus);
if (sysctl(mib, 2, &numcpus, &len, NULL, 0) == 0) {
if (numcpus > 1) {
_spin_tries = MP_SPIN_TRIES;
}
} else {
count = HOST_BASIC_INFO_COUNT;
info = (host_info_t)&basic_info;
flavor = HOST_BASIC_INFO;
kr = host_info(host, flavor, info, &count);
if (kr != KERN_SUCCESS)
printf("host_info failed (%d)\n", kr);
else {
if (basic_info.avail_cpus > 1)
_spin_tries = MP_SPIN_TRIES;
}
}
mach_port_deallocate(mach_task_self(), host);
_init_cpu_capabilities();
#if defined(_OBJC_PAGE_BASE_ADDRESS)
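/* Reserve the Objective-C runtime's fixed page range; the result can
   safely be ignored. */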
{
vm_address_t objcRTPage = (vm_address_t)_OBJC_PAGE_BASE_ADDRESS;
kr = vm_map(mach_task_self(),
&objcRTPage, vm_page_size * 4, vm_page_size - 1,
VM_FLAGS_FIXED | VM_MAKE_TAG(0), MACH_PORT_NULL,
(vm_address_t)0, FALSE,
(vm_prot_t)0, VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE,
VM_INHERIT_DEFAULT);
}
#endif
mig_init(1);
return 0;
}
int sched_yield(void)
{
swtch_pri(0);
return 0;
}
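/* The C runtime startup calls through this hook before main(). */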
int (*_cthread_init_routine)(void) = pthread_init;
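/*
 * Take a semaphore from the pool, growing the pool 16 entries at a
 * time when it runs dry.
 */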
__private_extern__ semaphore_t new_sem_from_pool(void) {
kern_return_t res;
semaphore_t sem;
int i;
LOCK(sem_pool_lock);
if (sem_pool_current == sem_pool_count) {
sem_pool_count += 16;
/* NB: a failed realloc() is not handled here. */
sem_pool = realloc(sem_pool, sem_pool_count * sizeof(semaphore_t));
for (i = sem_pool_current; i < sem_pool_count; i++) {
PTHREAD_MACH_CALL(semaphore_create(mach_task_self(), &sem_pool[i], SYNC_POLICY_FIFO, 0), res);
}
}
sem = sem_pool[sem_pool_current++];
UNLOCK(sem_pool_lock);
return sem;
}
__private_extern__ void restore_sem_to_pool(semaphore_t sem) {
LOCK(sem_pool_lock);
sem_pool[--sem_pool_current] = sem;
UNLOCK(sem_pool_lock);
}
static void sem_pool_reset(void) {
LOCK(sem_pool_lock);
sem_pool_count = 0;
sem_pool_current = 0;
sem_pool = NULL;
UNLOCK(sem_pool_lock);
}
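/*
 * Reset pthread state in the child of fork(): only the forking thread
 * survives, and Mach semaphores do not carry across, so the pool is
 * abandoned and rebuilt lazily.
 */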
__private_extern__ void _pthread_fork_child(pthread_t p) {
/* In case the lock was held across the fork... */
UNLOCK(sem_pool_lock);
sem_pool_reset();
LIST_INIT(&__pthread_head);
LOCK_INIT(_pthread_list_lock);
LIST_INSERT_HEAD(&__pthread_head, p, plist);
_pthread_count = 1;
}
#else
extern int __unix_conforming;
extern pthread_lock_t _pthread_list_lock;
extern void _pthread_testcancel(pthread_t thread, int isconforming);
extern int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr);
#endif
#if __DARWIN_UNIX03
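/*
 * Cancellation cleanup handler pushed around the join wait: if the
 * target has not exited, withdraw as its joiner; if it has, finish
 * reaping it.  Either way the death semaphore goes back to the pool.
 */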
static void __posix_join_cleanup(void *arg)
{
pthread_t thread = (pthread_t)arg;
int already_exited, res;
void * dummy;
semaphore_t death;
LOCK(thread->lock);
death = thread->death;
already_exited = (thread->detached & _PTHREAD_EXITED);
if (!already_exited){
thread->joiner = (struct _pthread *)NULL;
UNLOCK(thread->lock);
restore_sem_to_pool(death);
} else {
UNLOCK(thread->lock);
while ((res = _pthread_reap_thread(thread,
thread->kernel_thread,
&dummy)) == EAGAIN)
{
sched_yield();
}
restore_sem_to_pool(death);
}
}
#endif
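/*
 * Wait for a joinable thread to exit: install a "death" semaphore in
 * the target, wait for pthread_exit() to signal it, then unlink and
 * reap the thread.
 */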
int
pthread_join(pthread_t thread,
void **value_ptr)
{
kern_return_t kern_res;
int res = ESUCCESS;
#if __DARWIN_UNIX03
if (__unix_conforming == 0)
__unix_conforming = 1;
#endif
if (thread->sig == _PTHREAD_SIG)
{
semaphore_t death = new_sem_from_pool();	/* in case we need it */
LOCK(thread->lock);
if ((thread->detached & PTHREAD_CREATE_JOINABLE) &&
thread->death == SEMAPHORE_NULL)
{
pthread_t self = pthread_self();
assert(thread->joiner == NULL);
/* Guard against self-join and two threads joining each other. */
if (thread != self && (self == NULL || self->joiner != thread))
{
int already_exited = (thread->detached & _PTHREAD_EXITED);
thread->death = death;
thread->joiner = self;
UNLOCK(thread->lock);
if (!already_exited)
{
#if __DARWIN_UNIX03
pthread_cleanup_push(__posix_join_cleanup, (void *)thread);
do {
res = __semwait_signal(death, 0, 0, 0, 0, 0);	/* a cancellation point */
} while ((res < 0) && (errno == EINTR));
pthread_cleanup_pop(0);
#else
do {
PTHREAD_MACH_CALL(semaphore_wait(death), kern_res);
} while (kern_res != KERN_SUCCESS);
#endif
}
#if __DARWIN_UNIX03
else {
if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) == (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
res = PTHREAD_CANCELED;
}
#endif
LOCK(_pthread_list_lock);
LIST_REMOVE(thread, plist);
UNLOCK(_pthread_list_lock);
while ((res = _pthread_reap_thread(thread,
thread->kernel_thread,
value_ptr)) == EAGAIN)
{
sched_yield();
}
} else {
UNLOCK(thread->lock);
res = EDEADLK;
}
} else {
UNLOCK(thread->lock);
res = EINVAL;
}
restore_sem_to_pool(death);
return res;
}
return ESRCH;
}
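/*
 * Request cancellation of a thread.  Under UNIX03 the kernel is also
 * told, so blocked cancellation points can notice.
 */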
int
pthread_cancel(pthread_t thread)
{
#if __DARWIN_UNIX03
if (__unix_conforming == 0)
__unix_conforming = 1;
#endif
if (thread->sig == _PTHREAD_SIG)
{
#if __DARWIN_UNIX03
int state;
LOCK(thread->lock);
state = thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
UNLOCK(thread->lock);
if (state & PTHREAD_CANCEL_ENABLE)
__pthread_markcancel(thread->kernel_thread);
#else
thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
#endif
return (ESUCCESS);
} else
{
return (ESRCH);
}
}
void
pthread_testcancel(void)
{
pthread_t self = pthread_self();
#if __DARWIN_UNIX03
if (__unix_conforming == 0)
__unix_conforming = 1;
_pthread_testcancel(self, 1);
#else
_pthread_testcancel(self, 0);
#endif
}
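/*
 * Set the calling thread's cancelability state; the previous state is
 * returned through *oldstate when non-NULL.
 */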
int
pthread_setcancelstate(int state, int *oldstate)
{
pthread_t self = pthread_self();
#if __DARWIN_UNIX03
if (__unix_conforming == 0)
__unix_conforming = 1;
#endif
switch (state) {
case PTHREAD_CANCEL_ENABLE:
#if __DARWIN_UNIX03
__pthread_canceled(1);
#endif
break;
case PTHREAD_CANCEL_DISABLE:
#if __DARWIN_UNIX03
__pthread_canceled(2);
#endif
break;
default:
return EINVAL;
}
self = pthread_self();
LOCK(self->lock);
if (oldstate)
*oldstate = self->cancel_state & _PTHREAD_CANCEL_STATE_MASK;
self->cancel_state &= ~_PTHREAD_CANCEL_STATE_MASK;
self->cancel_state |= state;
UNLOCK(self->lock);
#if !__DARWIN_UNIX03
_pthread_testcancel(self, 0);
#endif
return (0);
}
int
pthread_setcanceltype(int type, int *oldtype)
{
pthread_t self = pthread_self();
#if __DARWIN_UNIX03
if (__unix_conforming == 0)
__unix_conforming = 1;
#endif
if ((type != PTHREAD_CANCEL_DEFERRED) &&
(type != PTHREAD_CANCEL_ASYNCHRONOUS))
return EINVAL;
self = pthread_self();
LOCK(self->lock);
if (oldtype)
*oldtype = self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK;
self->cancel_state &= ~_PTHREAD_CANCEL_TYPE_MASK;
self->cancel_state |= type;
UNLOCK(self->lock);
#if !__DARWIN_UNIX03
_pthread_testcancel(self, 0);
#endif
return (0);
}