#include <cpus.h>
#include <task_swapper.h>
#include <mach/kern_return.h>
#include <mach/alert.h>
#include <kern/etap_macros.h>
#include <kern/mach_param.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
#include <kern/thread_swap.h>
#include <kern/task.h>
#include <kern/task_swap.h>
#include <kern/thread_act.h>
#include <kern/thread_pool.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/exception.h>
#include <kern/ipc_mig.h>
#include <kern/ipc_tt.h>
#include <kern/profile.h>
#include <kern/machine.h>
#include <kern/spl.h>
#include <kern/syscall_subr.h>
#include <kern/sync_lock.h>
#include <kern/mk_sp.h>
#include <kern/processor.h>
#include <mach_prof.h>
#include <mach/rpc.h>
#if MACH_ASSERT
/* Debug trace bitmask: WA_* flags gate printf tracing throughout this file. */
unsigned int watchacts = 0
;
#endif
/* Debug/statistics counter — incremented when a freed act had to be swapped in. */
int act_free_swapin = 0;
/*
 * Forward declarations of this file's internal routines.
 */
kern_return_t act_abort( thread_act_t, int);
void special_handler(ReturnHandler *, thread_act_t);
void nudge(thread_act_t);
kern_return_t act_set_state_locked(thread_act_t, int,
thread_state_t,
mach_msg_type_number_t);
kern_return_t act_get_state_locked(thread_act_t, int,
thread_state_t,
mach_msg_type_number_t *);
void act_set_apc(thread_act_t);
void act_clr_apc(thread_act_t);
void act_user_to_kernel(thread_act_t);
void act_ulock_release_all(thread_act_t thr_act);
void install_special_handler_locked(thread_act_t);
/* Zone from which all thread_activation structures are allocated (see act_init). */
static zone_t thr_act_zone;
/*
 * thread_terminate_internal:
 *
 * Terminate the given activation: mark it inactive, abort any wait
 * its shuttle is in and, on MP, stop the shuttle so the abort takes
 * effect before we return.  Caller must hold a reference on thr_act.
 *
 * Returns KERN_TERMINATED if the act was already dead, KERN_ABORTED
 * if the shuttle could not be stopped, otherwise the result of
 * act_abort().
 *
 * Improvement over the original: the unused locals `task` and
 * `iplock` have been removed; no behavioral change.
 */
kern_return_t
thread_terminate_internal(
	register thread_act_t	thr_act)
{
	thread_t	thread;
	kern_return_t	ret;

#if	THREAD_SWAPPER
	thread_swap_disable(thr_act);
#endif	/* THREAD_SWAPPER */

	thread = act_lock_thread(thr_act);
	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return (KERN_TERMINATED);
	}

	act_disable_task_locked(thr_act);
	ret = act_abort(thr_act, FALSE);

#if	NCPUS > 1
	/*
	 * If the target is not the current thread, hold it suspended
	 * and wait for it to actually stop so the abort cannot race
	 * with it running on another processor.
	 */
	if (thread != current_thread()) {
		thread_hold(thr_act);
		act_unlock_thread(thr_act);
		if (thread_stop_wait(thread))
			thread_unstop(thread);
		else
			ret = KERN_ABORTED;
		(void)act_lock_thread(thr_act);
		thread_release(thr_act);
	}
#endif	/* NCPUS > 1 */

	act_unlock_thread(thr_act);
	return (ret);
}
/*
 * thread_terminate:
 *
 * Kill the given activation.  Kernel (or kernel-loaded) activations
 * may only be terminated by themselves; a self-terminating kernel
 * activation never returns from ast_taken() below, so reaching the
 * panic indicates a kernel bug.
 */
kern_return_t
thread_terminate(
	register thread_act_t	thr_act)
{
	task_t		task;
	kern_return_t	ret;

	if (thr_act == THR_ACT_NULL)
		return KERN_INVALID_ARGUMENT;

	task = thr_act->task;
	/* Only the activation itself may kill a kernel/kernel-loaded act. */
	if (((task == kernel_task) || (thr_act->kernel_loaded == TRUE))
	    && (current_act() != thr_act)) {
		return(KERN_FAILURE);
	}

	/* Serialize teardown with task-level operations. */
	task_lock(task);
	ret = thread_terminate_internal(thr_act);
	task_unlock(task);

	/*
	 * A self-terminating kernel activation must die inside
	 * ast_taken(); returning from it is fatal.
	 */
	if ( ( thr_act->task == kernel_task ||
	       thr_act->kernel_loaded == TRUE ) &&
	     current_act() == thr_act ) {
		ast_taken(AST_APC, FALSE);
		panic("thread_terminate(): returning from ast_taken() for %x kernel activation\n", thr_act);
	}

	return ret;
}
/*
 * thread_hold:
 *
 * Take a kernel-level suspension on the activation.  The first hold
 * queues the special handler so the activation will stop itself, and
 * nudges it in case it is waiting.
 */
void
thread_hold(
	register thread_act_t	thr_act)
{
	if (thr_act->suspend_count++ != 0)
		return;		/* already held */

	install_special_handler(thr_act);
	nudge(thr_act);
}
/*
 * thread_release:
 *
 * Drop one kernel-level suspension from the activation; when the
 * last hold is released, wake the activation so it can resume.
 */
void
thread_release(
	register thread_act_t	thr_act)
{
	if (thr_act->suspend_count == 0)
		return;		/* nothing held */

	if (--thr_act->suspend_count == 0)
		nudge(thr_act);
}
/*
 * thread_suspend:
 *
 * User-level suspend of an activation.  The first user stop also
 * takes the kernel-level hold: note the && below short-circuits, so
 * suspend_count is bumped only when user_stop_count was zero — which
 * is the intent (subsequent user stops reuse the same kernel hold).
 * If the target is a running top activation other than ourselves,
 * wait for it to actually stop before returning.
 */
kern_return_t
thread_suspend(
	register thread_act_t	thr_act)
{
	thread_t	thread;

	if (thr_act == THR_ACT_NULL) {
		return(KERN_INVALID_ARGUMENT);
	}
	thread = act_lock_thread(thr_act);
	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return(KERN_TERMINATED);
	}
	if (thr_act->user_stop_count++ == 0 &&
	    thr_act->suspend_count++ == 0 ) {
		install_special_handler(thr_act);
		if (thread &&
		    thr_act == thread->top_act && thread != current_thread()) {
			nudge(thr_act);
			act_unlock_thread(thr_act);
			/* wait for the target to actually stop */
			(void)thread_wait(thread);
		}
		else {
			/* target is not running; no need to wait */
			act_unlock_thread(thr_act);
		}
	}
	else {
		/* already suspended — counts updated above */
		act_unlock_thread(thr_act);
	}
	return(KERN_SUCCESS);
}
/*
 * thread_resume:
 *
 * Drop one user-level suspension from the activation.  Releasing the
 * last user stop also drops the kernel hold taken by thread_suspend()
 * and wakes the target.
 *
 * Returns KERN_FAILURE if the act was not user-suspended and
 * KERN_TERMINATED if it is no longer active.
 *
 * Improvement over the original: the unused locals `s` (spl_t) and
 * `thread` have been removed; no behavioral change.
 */
kern_return_t
thread_resume(
	register thread_act_t	thr_act)
{
	register kern_return_t	ret;

	if (thr_act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	(void) act_lock_thread(thr_act);
	ret = KERN_SUCCESS;

	if (thr_act->active) {
		if (thr_act->user_stop_count > 0) {
			/*
			 * The last user stop also releases the kernel
			 * hold taken in thread_suspend().
			 */
			if (--thr_act->user_stop_count == 0) {
				--thr_act->suspend_count;
				nudge(thr_act);
			}
		}
		else
			ret = KERN_FAILURE;	/* not user-suspended */
	}
	else
		ret = KERN_TERMINATED;

	act_unlock_thread(thr_act);
	return ret;
}
/*
 * post_alert:
 *
 * Post the given alert bits to thr_act and every activation above it
 * in the RPC chain, forcing each through its special handler so the
 * alert is noticed.  Caller must hold the appropriate act locking
 * (uses the _locked handler-install variant).
 *
 * Improvement over the original: the unused local `thread` has been
 * removed; no behavioral change.
 */
kern_return_t
post_alert(
	register thread_act_t	thr_act,
	unsigned		alert_bits)
{
	thread_act_t	next;

	/* Mark each activation from here to the top of the chain. */
	for (next = thr_act; next != THR_ACT_NULL; next = next->higher) {
		next->alerts |= alert_bits;
		install_special_handler_locked(next);
	}

	return(KERN_SUCCESS);
}
/*
 * thread_depress_abort:
 *
 * Prematurely end any scheduling-priority depression on the
 * activation's shuttle.  Fails with KERN_TERMINATED when the
 * activation is no longer active.
 */
kern_return_t
thread_depress_abort(
	register thread_act_t	thr_act)
{
	register thread_t	thread;
	kern_return_t		result = KERN_TERMINATED;

	if (thr_act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	if (thr_act->active)
		result = _mk_sp_thread_depress_abort(thread, FALSE);
	act_unlock_thread(thr_act);

	return (result);
}
/*
 * act_abort:
 *
 * Abort an activation.  If it is the top of its shuttle's chain,
 * post TH_ABORT to the shuttle and interrupt any wait; otherwise the
 * shuttle cannot be aborted directly, so alert the chain above the
 * target instead.  Called and returns with the caller's act/thread
 * locking held.
 *
 * BUG FIX over the original: when the top_act re-check under the
 * thread lock failed, the original returned with the thread still
 * locked and interrupts still disabled (splsched) — a lock/spl leak.
 * The else arm now releases both.  The unused locals `thread` and
 * `kr` have also been removed.
 */
kern_return_t
act_abort( thread_act_t thr_act, int chain_break )
{
	spl_t			spl;
	struct ipc_port		*iplock = thr_act->pool_port;
	thread_act_t		orphan;
	etap_data_t		probe_data;

	ETAP_DATA_LOAD(probe_data[0], thr_act);
	ETAP_DATA_LOAD(probe_data[1], thr_act->thread);
	ETAP_PROBE_DATA(ETAP_P_ACT_ABORT,
			0,
			current_thread(),
			&probe_data,
			ETAP_DATA_ENTRY*2);

	/*
	 * Not the top activation: alert the chain above the target
	 * rather than aborting the shuttle itself.
	 */
	if ( thr_act->thread->top_act != thr_act ) {
		install_special_handler(thr_act);
#ifdef AGRESSIVE_ABORT
		/* Unwind the activation and unlink it from the chain. */
		if (unwind_invoke_state(thr_act) != KERN_SUCCESS) {
			panic("unwind_invoke_state failure");
		}
		if (thr_act->lower != THR_ACT_NULL) {
			if (unwind_invoke_state(thr_act->lower)
			    != KERN_SUCCESS) {
				panic("unwind_invoke_state failure");
			}
		}
		if ( thr_act->lower == THR_ACT_NULL ) {
			thr_act->higher->lower = THR_ACT_NULL;
		} else {
			thr_act->higher->lower = thr_act->lower;
			thr_act->lower->higher = thr_act->higher;
			thr_act->lower->alerts |= SERVER_TERMINATED;
		}
		orphan = thr_act->higher;
		act_locked_act_set_thread_pool(thr_act, IP_NULL);
		if (iplock != IP_NULL) ip_unlock(iplock);
		if (iplock == IP_NULL) act_locked_act_deallocate(thr_act);
		assert(thr_act->ref_count == 1);
#else
		if (thr_act->lower != THR_ACT_NULL) {
			thr_act->lower->alerts |= SERVER_TERMINATED;
		}
		orphan = thr_act->higher;
#endif	/* AGRESSIVE_ABORT */
		/* Tell the client side of the chain it lost its server. */
		orphan->alerts |= CLIENT_TERMINATED;
		post_alert(orphan, ORPHANED);
		nudge(thr_act->thread->top_act);
		return (KERN_SUCCESS);
	}

	spl = splsched();
	thread_lock(thr_act->thread);
	if (thr_act->thread->top_act == thr_act) {
		thr_act->thread->state |= TH_ABORT;
		clear_wait_internal(thr_act->thread, THREAD_INTERRUPTED);
		thread_unlock(thr_act->thread);
		splx(spl);
		install_special_handler(thr_act);
		nudge( thr_act );
	} else {
		/*
		 * Lost the race: the target is no longer top_act.
		 * Release the thread lock and restore spl (the
		 * original leaked both here).
		 */
		thread_unlock(thr_act->thread);
		splx(spl);
	}
	return KERN_SUCCESS;
}
/*
 * thread_abort:
 *
 * Abort another activation, interrupting any wait its shuttle is in.
 * An activation may not abort itself through this entry point.
 */
kern_return_t
thread_abort(
	register thread_act_t	thr_act)
{
	kern_return_t	result = KERN_INVALID_ARGUMENT;

	if (thr_act != THR_ACT_NULL && thr_act != current_act()) {
		(void) act_lock_thread(thr_act);
		if (thr_act->active)
			result = act_abort(thr_act, FALSE);
		else
			result = KERN_TERMINATED;
		act_unlock_thread(thr_act);
	}
	return result;
}
/*
 * thread_abort_safely:
 *
 * Abort an activation only if its shuttle is currently waiting at a
 * point marked abort-safe; otherwise return KERN_FAILURE and leave
 * the target undisturbed.  An activation may not target itself.
 */
kern_return_t
thread_abort_safely(
	register thread_act_t	thr_act)
{
	thread_t	thread;
	spl_t		s;

	if (thr_act == THR_ACT_NULL || thr_act == current_act())
		return(KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return(KERN_TERMINATED);
	}
	/* only the top activation of a shuttle can be aborted this way */
	if (thread->top_act != thr_act) {
		act_unlock_thread(thr_act);
		return(KERN_FAILURE);
	}

	s = splsched();
	thread_lock(thread);
	if ( thread->at_safe_point ) {
		/* target is parked at an abort-safe wait; interrupt it */
		clear_wait_internal(thread, THREAD_INTERRUPTED);
		thread_unlock(thread);
		act_unlock_thread(thr_act);
		splx(s);
		return KERN_SUCCESS;
	}
	thread_unlock(thread);
	act_unlock_thread(thr_act);
	splx(s);

	return KERN_FAILURE;
}
#include <mach/thread_info.h>
#include <mach/thread_special_ports.h>
#include <ipc/ipc_port.h>
#include <mach/thread_act_server.h>
/*
 * thread_info:
 *
 * Return status information of the requested flavor for the given
 * activation, provided it is still alive.
 */
kern_return_t
thread_info(
	thread_act_t		thr_act,
	thread_flavor_t		flavor,
	thread_info_t		thread_info_out,
	mach_msg_type_number_t	*thread_info_count)
{
	kern_return_t	result = KERN_TERMINATED;

	if (thr_act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	(void) act_lock_thread(thr_act);
	if (thr_act->active)
		result = thread_info_shuttle(thr_act, flavor,
				thread_info_out, thread_info_count);
	act_unlock_thread(thr_act);

	return (result);
}
/*
 * thread_get_special_port:
 *
 * Return a send right for one of the activation's special ports.
 * Only THREAD_KERNEL_PORT (the act's advertised self port,
 * ith_sself) is supported.
 */
kern_return_t
thread_get_special_port(
	thread_act_t	thr_act,
	int		which,
	ipc_port_t	*portp)
{
	ipc_port_t	*whichp;
	ipc_port_t	port;
	thread_t	thread;

#if	MACH_ASSERT
	if (watchacts & WA_PORT)
		printf("thread_get_special_port(thr_act=%x, which=%x port@%x=%x\n",
			thr_act, which, portp, (portp ? *portp : 0));
#endif	/* MACH_ASSERT */

	if (!thr_act)
		return KERN_INVALID_ARGUMENT;
	thread = act_lock_thread(thr_act);
	switch (which) {
	case THREAD_KERNEL_PORT:
		whichp = &thr_act->ith_sself;
		break;

	default:
		act_unlock_thread(thr_act);
		return KERN_INVALID_ARGUMENT;
	}

	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return KERN_FAILURE;
	}

	/* copy a send right while the act lock still protects *whichp */
	port = ipc_port_copy_send(*whichp);
	act_unlock_thread(thr_act);
	*portp = port;
	return KERN_SUCCESS;
}
/*
 * thread_set_special_port:
 *
 * Replace one of the activation's special ports, consuming the new
 * send right and releasing the old one (outside the lock).
 *
 * BUG FIX over the original: THREAD_KERNEL_PORT now writes
 * ith_sself — the field thread_get_special_port() reads — instead of
 * ith_self.  With the original, a set was never visible to a
 * subsequent get, and the act's real kernel port (ith_self) was
 * clobbered by a caller-supplied right.  (Also removed the unused
 * local `thread`.)
 */
kern_return_t
thread_set_special_port(
	thread_act_t	thr_act,
	int		which,
	ipc_port_t	port)
{
	ipc_port_t	*whichp;
	ipc_port_t	old;

#if	MACH_ASSERT
	if (watchacts & WA_PORT)
		printf("thread_set_special_port(thr_act=%x,which=%x,port=%x\n",
			thr_act, which, port);
#endif	/* MACH_ASSERT */

	if (thr_act == 0)
		return KERN_INVALID_ARGUMENT;

	(void) act_lock_thread(thr_act);
	switch (which) {
	case THREAD_KERNEL_PORT:
		/* advertised self port — must match the getter's field */
		whichp = &thr_act->ith_sself;
		break;

	default:
		act_unlock_thread(thr_act);
		return KERN_INVALID_ARGUMENT;
	}

	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return KERN_FAILURE;
	}

	/* swap in the new port; release the old right after unlocking */
	old = *whichp;
	*whichp = port;
	act_unlock_thread(thr_act);

	if (IP_VALID(old))
		ipc_port_release_send(old);
	return KERN_SUCCESS;
}
/*
 * thread_get_state:
 *
 * Fetch the machine state of a (different) activation.  The target
 * is held and its shuttle stopped so the state cannot change while
 * we read it.  The while loop copes with the shuttle changing
 * identity while the act lock was dropped to stop it: undo the stop
 * and retry until the stopped shuttle is still the act's shuttle.
 */
kern_return_t
thread_get_state(
	register thread_act_t	thr_act,
	int			flavor,
	thread_state_t		state,
	mach_msg_type_number_t	*state_count)
{
	kern_return_t	ret;
	thread_t	thread, nthread;

	if (thr_act == THR_ACT_NULL || thr_act == current_act())
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return(KERN_TERMINATED);
	}

	thread_hold(thr_act);
	while (1) {
		/* only a top activation's state lives in the shuttle */
		if (!thread || thr_act != thread->top_act)
			break;
		act_unlock_thread(thr_act);
		(void)thread_stop_wait(thread);
		nthread = act_lock_thread(thr_act);
		if (nthread == thread)
			break;
		/* shuttle changed while unlocked: undo the stop, retry */
		thread_unstop(thread);
		thread = nthread;
	}

	ret = act_machine_get_state(thr_act, flavor,
					state, state_count);
	if (thread && thr_act == thread->top_act)
		thread_unstop(thread);
	thread_release(thr_act);
	act_unlock_thread(thr_act);

	return(ret);
}
/*
 * thread_set_state:
 *
 * Change the machine state of a (different) activation.  Same
 * hold/stop/retry discipline as thread_get_state(): the target is
 * held and its shuttle stopped, and the loop retries when the
 * shuttle changes identity while the act lock was dropped.
 */
kern_return_t
thread_set_state(
	register thread_act_t	thr_act,
	int			flavor,
	thread_state_t		state,
	mach_msg_type_number_t	state_count)
{
	kern_return_t	ret;
	thread_t	thread, nthread;

	if (thr_act == THR_ACT_NULL || thr_act == current_act())
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return(KERN_TERMINATED);
	}

	thread_hold(thr_act);
	while (1) {
		/* only a top activation's state lives in the shuttle */
		if (!thread || thr_act != thread->top_act)
			break;
		act_unlock_thread(thr_act);
		(void)thread_stop_wait(thread);
		nthread = act_lock_thread(thr_act);
		if (nthread == thread)
			break;
		/* shuttle changed while unlocked: undo the stop, retry */
		thread_unstop(thread);
		thread = nthread;
	}

	ret = act_machine_set_state(thr_act, flavor,
					state, state_count);
	if (thread && thr_act == thread->top_act)
		thread_unstop(thread);
	thread_release(thr_act);
	act_unlock_thread(thr_act);

	return(ret);
}
/*
 * thread_dup:
 *
 * Copy the machine state of source_thr_act onto target_thr_act.
 * Only the target is validated, held, and stopped; the caller is
 * responsible for the stability of the source.  Uses the same
 * stop/retry loop as thread_get_state().
 */
kern_return_t
thread_dup(
	thread_act_t	source_thr_act,
	thread_act_t	target_thr_act)
{
	kern_return_t	ret;
	thread_t	thread, nthread;

	if (target_thr_act == THR_ACT_NULL || target_thr_act == current_act())
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(target_thr_act);
	if (!target_thr_act->active) {
		act_unlock_thread(target_thr_act);
		return(KERN_TERMINATED);
	}

	thread_hold(target_thr_act);
	while (1) {
		/* only a top activation's state lives in the shuttle */
		if (!thread || target_thr_act != thread->top_act)
			break;
		act_unlock_thread(target_thr_act);
		(void)thread_stop_wait(thread);
		nthread = act_lock_thread(target_thr_act);
		if (nthread == thread)
			break;
		/* shuttle changed while unlocked: undo the stop, retry */
		thread_unstop(thread);
		thread = nthread;
	}

	ret = act_thread_dup(source_thr_act, target_thr_act);
	if (thread && target_thr_act == thread->top_act)
		thread_unstop(thread);
	thread_release(target_thr_act);
	act_unlock_thread(target_thr_act);

	return(ret);
}
/*
 * thread_setstatus:
 *
 * Kernel-internal entry for setting the machine state of an
 * activation that must currently be on top of its shuttle (asserted).
 */
kern_return_t
thread_setstatus(
	thread_act_t		thr_act,
	int			flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	count)
{
	thread_t	shuttle;
	kern_return_t	result;

	shuttle = act_lock_thread(thr_act);
	assert(shuttle);
	assert(shuttle->top_act == thr_act);
	result = act_machine_set_state(thr_act, flavor, tstate, count);
	act_unlock_thread(thr_act);
	return (result);
}
/*
 * thread_getstatus:
 *
 * Kernel-internal entry for fetching the machine state of an
 * activation that must currently be on top of its shuttle (asserted).
 */
kern_return_t
thread_getstatus(
	thread_act_t		thr_act,
	int			flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count)
{
	thread_t	shuttle;
	kern_return_t	result;

	shuttle = act_lock_thread(thr_act);
	assert(shuttle);
	assert(shuttle->top_act == thr_act);
	result = act_machine_get_state(thr_act, flavor, tstate, count);
	act_unlock_thread(thr_act);
	return (result);
}
/*
 * act_init:
 *
 * One-time initialization of the activation layer: create the zone
 * from which thread_activation structures are allocated, then let
 * the machine-dependent layer initialize itself.
 */
void
act_init()
{
	thr_act_zone = zinit(
			sizeof(struct thread_activation),
			ACT_MAX * sizeof(struct thread_activation),	/* zone max */
			ACT_CHUNK * sizeof(struct thread_activation),	/* alloc chunk */
			"activations");
	act_machine_init();
}
/*
 * act_create:
 *
 * Allocate and initialize a new activation in the given task,
 * returning it in *new_act with two references: one for the caller
 * and one for the activation being alive.
 *
 * Returns KERN_RESOURCE_SHORTAGE if the zone (or, with MACH_BSD, the
 * uthread) allocation fails.
 *
 * Improvement over the original: the unused local `rc` has been
 * removed; no behavioral change.
 */
kern_return_t
act_create(task_t task,
	   thread_act_t *new_act)
{
	thread_act_t	thr_act;
	vm_map_t	map;

	thr_act = (thread_act_t)zalloc(thr_act_zone);
	if (thr_act == 0)
		return(KERN_RESOURCE_SHORTAGE);

#if	MACH_ASSERT
	if (watchacts & WA_ACT_LNK)
		printf("act_create(task=%x,thr_act@%x=%x)\n",
			task, new_act, thr_act);
#endif	/* MACH_ASSERT */

	/* Start fully zeroed. */
	bzero((char *)thr_act, sizeof(*thr_act));

#ifdef MACH_BSD
	{
		/*
		 * Allocate the uthread early so the zalloc'd memory
		 * can be cleaned up on failure.
		 */
		extern void *uthread_alloc(void);
		thr_act->uthread = uthread_alloc();
		if (thr_act->uthread == 0) {
			zfree(thr_act_zone, (vm_offset_t)thr_act);
			return(KERN_RESOURCE_SHORTAGE);
		}
	}
#endif	/* MACH_BSD */

	act_lock_init(thr_act);
	thr_act->ref_count = 2;		/* one for caller, one for being alive */

	/* Latch onto the task. */
	thr_act->task = task;
	task_reference(task);

	/* Initialize the signal buffer descriptor. */
	thr_act->r_sigbufp = (routine_descriptor_t) &thr_act->r_sigbuf;
	thr_act->r_sigbuf_size = sizeof(thr_act->r_sigbuf);

#if	THREAD_SWAPPER
	thr_act->swap_state = TH_SW_IN;
#if	MACH_ASSERT
	thr_act->kernel_stack_swapped_in = TRUE;
#endif	/* MACH_ASSERT */
#endif	/* THREAD_SWAPPER */

	/* special_handler is always the last ReturnHandler on the list */
	thr_act->special_handler.next = 0;
	thr_act->special_handler.handler = special_handler;

#if	MACH_PROF
	thr_act->act_profiled = FALSE;
	thr_act->act_profiled_own = FALSE;
	thr_act->profil_buffer = NULLPROFDATA;
#endif	/* MACH_PROF */

	queue_init(&thr_act->held_ulocks);
	act_prof_init(thr_act, task);
	ipc_thr_act_init(task, thr_act);
	act_machine_create(task, thr_act);

	if (task->kernel_loaded) {
		act_user_to_kernel(thr_act);
	}

	/* Cache the task's map and take an extra reference on it. */
	map = task->map;
	thr_act->map = map;
	mutex_lock(&map->s_lock);
#if	TASK_SWAPPER
	assert(map->res_count > 0);
	assert(map->ref_count >= map->res_count);
#endif	/* TASK_SWAPPER */
	map->ref_count++;
	mutex_unlock(&map->s_lock);

	*new_act = thr_act;
	return KERN_SUCCESS;
}
#if	MACH_ASSERT
/* When set, act_free() scribbles zeroes over a freed activation to
 * catch stale references (see act_free). */
int	dangerous_bzero = 1;	/* paranoid by default */
#endif	/* MACH_ASSERT */
/*
 * act_free:
 *
 * Final destruction of an activation, run when its last reference is
 * dropped.  Credits the shuttle's CPU time to the task, unlinks the
 * act from the task, tears down its subsystems, and releases the
 * references it held on its task, shuttle, and address map.
 */
void
act_free(thread_act_t thr_act)
{
	task_t		task;
	thread_t	thr;
	vm_map_t	map;
	unsigned int	ref;

#if	MACH_ASSERT
	if (watchacts & WA_EXIT)
		printf("act_free(%x(%d)) thr=%x tsk=%x(%d) pport=%x%sactive\n",
			thr_act, thr_act->ref_count, thr_act->thread,
			thr_act->task,
			thr_act->task ? thr_act->task->ref_count : 0,
			thr_act->pool_port,
			thr_act->active ? " " : " !");
#endif	/* MACH_ASSERT */

#if	THREAD_SWAPPER
	assert(thr_act->kernel_stack_swapped_in);
#endif	/* THREAD_SWAPPER */

	assert(!thr_act->active);
	assert(!thr_act->pool_port);

	task = thr_act->task;
	task_lock(task);

	if (thr = thr_act->thread) {
		time_value_t	user_time, system_time;

		/* credit the act's CPU time to its task before it goes */
		thread_read_times(thr, &user_time, &system_time);
		time_value_add(&task->total_user_time, &user_time);
		time_value_add(&task->total_system_time, &system_time);

		/* Unlink the act from the task's activation list. */
		queue_remove(&task->thr_acts, thr_act, thread_act_t, thr_acts);
		thr_act->thr_acts.next = NULL;
		task->thr_act_count--;

#if	THREAD_SWAPPER
		assert(thr_act->swap_state == TH_SW_UNSWAPPABLE ||
		       !thread_swap_unwire_stack);
#endif	/* THREAD_SWAPPER */

		task->res_act_count--;
		task_unlock(task);
		task_deallocate(task);
		thread_deallocate(thr);
		act_machine_destroy(thr_act);
	} else {
		/* never attached to a shuttle — just drop the task ref */
		task_unlock(task);
		task_deallocate(task);
	}

	sigbuf_dealloc(thr_act);
	act_prof_deallocate(thr_act);
	ipc_thr_act_terminate(thr_act);

	/*
	 * Drop the act's reference on its address map, destroying the
	 * map if this was the last one.
	 */
	map = thr_act->map;
	mutex_lock(&map->s_lock);
#if	TASK_SWAPPER
	assert(map->res_count >= 0);
	assert(map->ref_count > map->res_count);
#endif	/* TASK_SWAPPER */
	ref = --map->ref_count;
	mutex_unlock(&map->s_lock);
	if (ref == 0)
		vm_map_destroy(map);

#ifdef MACH_BSD
	{
		/* free the uthread BEFORE the scribble below */
		extern void uthread_free(void *);
		void *ut = thr_act->uthread;
		thr_act->uthread = 0;
		uthread_free(ut);
	}
#endif	/* MACH_BSD */

#if	MACH_ASSERT
	/* scribble over the act to catch stale references */
	if (dangerous_bzero)
		bzero((char *)thr_act, sizeof(*thr_act));
#endif	/* MACH_ASSERT */

	zfree(thr_act_zone, (vm_offset_t)thr_act);
}
/*
 * act_attach:
 *
 * Push the activation onto the top of the shuttle's activation
 * stack.  Takes a reference on the act on behalf of the shuttle and
 * propagates alerts (filtered by init_alert_mask) down from the
 * previous top activation.  The assert documents the caller's
 * obligation: the thread is either the current one or has no top
 * activation yet.
 */
void
act_attach(
	thread_act_t	thr_act,
	thread_t	thread,
	unsigned	init_alert_mask)
{
	thread_act_t	lower;

#if	MACH_ASSERT
	assert(thread == current_thread() || thread->top_act == THR_ACT_NULL);
	if (watchacts & WA_ACT_LNK)
		printf("act_attach(thr_act %x(%d) thread %x(%d) mask %d)\n",
		       thr_act, thr_act->ref_count, thread, thread->ref_count,
		       init_alert_mask);
#endif	/* MACH_ASSERT */

	/* the shuttle holds a reference on the act */
	thr_act->ref_count++;
	thr_act->thread = thread;
	thr_act->higher = THR_ACT_NULL;
	thr_act->alerts = 0;
	thr_act->alert_mask = init_alert_mask;
	lower = thr_act->lower = thread->top_act;

	if (lower != THR_ACT_NULL) {
		lower->higher = thr_act;
		/* inherit whatever alerts the mask lets through */
		thr_act->alerts = (lower->alerts & init_alert_mask);
	}

	thread->top_act = thr_act;
}
/*
 * act_detach:
 *
 * Pop the given activation off the top of its shuttle's stack and
 * drop the shuttle's reference on it.  The assert documents that the
 * caller must still hold at least one other reference.  The act is
 * then handed back to its thread pool, if any.
 */
void
act_detach(
	thread_act_t	cur_act)
{
	thread_t	cur_thread = cur_act->thread;

#if	MACH_ASSERT
	if (watchacts & (WA_EXIT|WA_ACT_LNK))
		printf("act_detach: thr_act %x(%d), thrd %x(%d) task=%x(%d)\n",
		       cur_act, cur_act->ref_count,
		       cur_thread, cur_thread->ref_count,
		       cur_act->task,
		       cur_act->task ? cur_act->task->ref_count : 0);
#endif	/* MACH_ASSERT */

	/* unlink the activation from the thread's stack */
	cur_thread->top_act = cur_act->lower;
	cur_act->thread = 0;

	/* drop the shuttle's reference (caller must hold another) */
	cur_act->ref_count--;
	assert(cur_act->ref_count > 0);

	/* give the act back to its pool, if it belongs to one */
	thread_pool_put_act(cur_act);

#if	MACH_ASSERT
	/* poison the chain links for easier debugging */
	cur_act->lower = cur_act->higher = THR_ACT_NULL;
	if (cur_thread->top_act)
		cur_thread->top_act->higher = THR_ACT_NULL;
#endif	/* MACH_ASSERT */

	return;
}
/*
 * act_lock_thread:
 *
 * Acquire the act lock, its pool port's lock (if any), and its
 * shuttle's RPC lock, backing off completely and retrying
 * (mutex_pause) whenever a trylock fails, so the fixed lock ordering
 * is never violated.  Returns the act's shuttle pointer with all
 * three locks held.
 */
thread_t
act_lock_thread(
	thread_act_t	thr_act)
{
	ipc_port_t	pport;

	while (1) {
		act_lock(thr_act);
		pport = thr_act->pool_port;
		if (!pport || ip_lock_try(pport)) {
			if (!thr_act->thread)
				break;
			if (rpc_lock_try(thr_act->thread))
				break;
			/* couldn't get the RPC lock; undo the port lock */
			if (pport)
				ip_unlock(pport);
		}
		/* back off entirely and retry */
		act_unlock(thr_act);
		mutex_pause();
	}
	return (thr_act->thread);
}
/*
 * act_unlock_thread:
 *
 * Release the locks taken by act_lock_thread(): the shuttle's RPC
 * lock, the pool port lock, and finally the act lock itself.
 */
void
act_unlock_thread(thread_act_t thr_act)
{
	thread_t	shuttle = thr_act->thread;
	ipc_port_t	pport = thr_act->pool_port;

	if (shuttle)
		rpc_unlock(shuttle);
	if (pport)
		ip_unlock(pport);
	act_unlock(thr_act);
}
/*
 * thread_lock_act:
 *
 * Starting from a shuttle, take its RPC lock, then try to lock its
 * top activation and that act's pool port.  On any trylock failure,
 * release everything, pause, and retry, so the fixed lock ordering
 * is respected.  Returns the locked top activation, or NULL if the
 * shuttle has none.
 */
thread_act_t
thread_lock_act(
	thread_t	thread)
{
	thread_act_t	thr_act;

	while (1) {
		rpc_lock(thread);
		thr_act = thread->top_act;
		if (!thr_act)
			break;
		if (!act_lock_try(thr_act)) {
			/* back off and retry */
			rpc_unlock(thread);
			mutex_pause();
			continue;
		}
		if (thr_act->pool_port &&
			!ip_lock_try(thr_act->pool_port)) {
			/* back off and retry */
			rpc_unlock(thread);
			act_unlock(thr_act);
			mutex_pause();
			continue;
		}
		break;
	}
	return (thr_act);
}
/*
 * thread_unlock_act:
 *
 * Release the locks taken by thread_lock_act(): the top activation's
 * pool port and act lock (when a top act exists), then the shuttle's
 * RPC lock.
 */
void
thread_unlock_act(
	thread_t	thread)
{
	thread_act_t	top = thread->top_act;

	if (top) {
		if (top->pool_port)
			ip_unlock(top->pool_port);
		act_unlock(top);
	}
	rpc_unlock(thread);
}
/*
 * switch_act:
 *
 * Switch the current thread to a new activation: push the given act
 * onto the chain, or — when act is NULL — pop to the activation
 * below the current top.  Runs with preemption disabled.  Returns
 * the previous top activation.
 *
 * Improvement over the original: the unused local `spl` has been
 * removed; no behavioral change.
 */
thread_act_t
switch_act(
	thread_act_t	act)
{
	thread_t	thread;
	thread_act_t	old, new;
	unsigned	cpu;

	disable_preemption();

	cpu = cpu_number();
	thread = current_thread();

	/* Find the old and new activations for the switch. */
	old = thread->top_act;
	if (act) {
		new = act;
		new->thread = thread;
	}
	else {
		new = old->lower;
	}
	assert(new != THR_ACT_NULL);
#if	THREAD_SWAPPER
	assert(new->swap_state != TH_SW_OUT &&
	       new->swap_state != TH_SW_COMING_IN);
#endif	/* THREAD_SWAPPER */

	assert(cpu_data[cpu].active_thread == thread);
	active_kloaded[cpu] = (new->kernel_loaded) ? new : 0;

	/* This is where the machine-dependent work happens. */
	machine_switch_act(thread, old, new, cpu);

	/* Push or pop the activation on the chain. */
	if (act) {
		act_attach(new, thread, 0);
	}
	else {
		act_detach(old);
	}

	enable_preemption();

	return(old);
}
/*
 * install_special_handler:
 *
 * Ensure the activation's special handler will run the next time it
 * heads back toward user mode.  Takes the thread lock at splsched
 * and defers the real work to install_special_handler_locked().
 */
void
install_special_handler(
	thread_act_t	thr_act)
{
	spl_t		s;
	thread_t	shuttle = thr_act->thread;

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("act_%x: install_special_hdlr(%x)\n",current_act(),thr_act);
#endif	/* MACH_ASSERT */

	s = splsched();
	thread_lock(shuttle);
	install_special_handler_locked(thr_act);
	thread_unlock(shuttle);
	splx(s);
}
/*
 * install_special_handler_locked:
 *
 * As install_special_handler(), but called with the act's thread
 * already locked at splsched.  Appends the act's special handler to
 * the END of its return-handler list (unless it is already there),
 * temporarily undoes any priority depression so the handler gets a
 * chance to run, and posts AST_APC.
 */
void
install_special_handler_locked(
	thread_act_t	thr_act)
{
	ReturnHandler	**rh;
	thread_t	thread = thr_act->thread;

	/* The special handler must always be last on the list. */
	for (rh = &thr_act->handlers; *rh; rh = &(*rh)->next)
		;
	if (rh != &thr_act->special_handler.next) {
		*rh = &thr_act->special_handler;
	}

	if (thread && thr_act == thread->top_act) {
		/*
		 * Temporarily undepress so the target can do the
		 * locking required to block itself in special_handler().
		 */
		if (thread->depress_priority >= 0) {
			thread->priority = thread->depress_priority;
			thread->depress_priority = -2;
			compute_priority(thread, FALSE);
		}
	}

	act_set_apc(thr_act);
}
extern thread_apc_handler_t bsd_ast;
/*
 * thread_apc_set:
 *
 * Register the BSD AST handler on the activation (only bsd_ast is
 * supported, as asserted) and, when targeting the current act,
 * propagate the AST immediately.
 */
kern_return_t
thread_apc_set(
	thread_act_t		thr_act,
	thread_apc_handler_t	apc)
{
	assert(apc == bsd_ast);

	thread_ast_set(thr_act, AST_BSD);
	if (thr_act != current_act())
		return KERN_SUCCESS;

	ast_propagate(thr_act->ast);
	return KERN_SUCCESS;
}
/*
 * thread_apc_clear:
 *
 * Withdraw the BSD AST handler from the activation (only bsd_ast is
 * supported, as asserted), turning the pending AST off immediately
 * when targeting the current act.
 */
kern_return_t
thread_apc_clear(
	thread_act_t		thr_act,
	thread_apc_handler_t	apc)
{
	assert(apc == bsd_ast);

	thread_ast_clear(thr_act, AST_BSD);
	if (thr_act != current_act())
		return KERN_SUCCESS;

	ast_off(AST_BSD);
	return KERN_SUCCESS;
}
/*
 * act_set_thread_pool:
 *
 * Bind the activation to the thread pool of pool_port (taking a
 * reference on the act for the pool), or — when pool_port is null —
 * unbind it from its current pool and drop that reference.  Fails
 * with KERN_FAILURE if the act already belongs to a different pool.
 *
 * BUG FIX over the original: the debug printf read the local
 * `thread_pool` before it was ever assigned (an uninitialized read —
 * undefined behavior); it now prints the new pool_port instead.
 */
kern_return_t act_set_thread_pool(
	thread_act_t	thr_act,
	ipc_port_t	pool_port)
{
	thread_pool_t	thread_pool;

#if	MACH_ASSERT
	if (watchacts & WA_ACT_LNK)
		printf("act_set_thread_pool: %x(%d) -> %x\n",
			thr_act, thr_act->ref_count, pool_port);
#endif	/* MACH_ASSERT */

	if (pool_port == 0) {
		thread_act_t	*lact;

		if (thr_act->pool_port == 0)
			return KERN_SUCCESS;
		thread_pool = &thr_act->pool_port->ip_thread_pool;

		/* unlink the act from its pool's available list */
		for (lact = &thread_pool->thr_acts; *lact;
		     lact = &((*lact)->thread_pool_next)) {
			if (thr_act == *lact) {
				*lact = thr_act->thread_pool_next;
				break;
			}
		}

		act_lock(thr_act);
		thr_act->pool_port = 0;
		thr_act->thread_pool_next = 0;
		act_unlock(thr_act);
		act_deallocate(thr_act);	/* drop the pool's reference */
		return KERN_SUCCESS;
	}

	if (thr_act->pool_port != pool_port) {
		thread_pool = &pool_port->ip_thread_pool;
		/* an act may belong to at most one pool */
		if (thr_act->pool_port != 0) {
#if	MACH_ASSERT
			if (watchacts & WA_ACT_LNK)
				printf("act_set_thread_pool found %x!\n",
					thr_act->pool_port);
#endif	/* MACH_ASSERT */
			return(KERN_FAILURE);
		}
		act_lock(thr_act);
		thr_act->pool_port = pool_port;
		act_locked_act_reference(thr_act);	/* pool holds a ref */

		/* idle acts go straight onto the pool's available list */
		if ((thr_act->thread == 0) && (thr_act->suspend_count == 0)) {
			thr_act->thread_pool_next = thread_pool->thr_acts;
			pool_port->ip_thread_pool.thr_acts = thr_act;
			if (thread_pool->waiting)
				thread_pool_wakeup(thread_pool);
		}
		act_unlock(thr_act);
	}
	return KERN_SUCCESS;
}
/*
 * act_locked_act_set_thread_pool:
 *
 * As act_set_thread_pool(), but called with the act already locked;
 * uses the _locked reference/deallocate variants and does no act
 * locking of its own.
 *
 * BUG FIX over the original: the debug printf read the local
 * `thread_pool` before it was ever assigned (an uninitialized read —
 * undefined behavior); it now prints the new pool_port instead.
 */
kern_return_t act_locked_act_set_thread_pool(
	thread_act_t	thr_act,
	ipc_port_t	pool_port)
{
	thread_pool_t	thread_pool;

#if	MACH_ASSERT
	if (watchacts & WA_ACT_LNK)
		printf("act_set_thread_pool: %x(%d) -> %x\n",
			thr_act, thr_act->ref_count, pool_port);
#endif	/* MACH_ASSERT */

	if (pool_port == 0) {
		thread_act_t	*lact;

		if (thr_act->pool_port == 0)
			return KERN_SUCCESS;
		thread_pool = &thr_act->pool_port->ip_thread_pool;

		/* unlink the act from its pool's available list */
		for (lact = &thread_pool->thr_acts; *lact;
		     lact = &((*lact)->thread_pool_next)) {
			if (thr_act == *lact) {
				*lact = thr_act->thread_pool_next;
				break;
			}
		}

		thr_act->pool_port = 0;
		thr_act->thread_pool_next = 0;
		act_locked_act_deallocate(thr_act);	/* drop the pool's ref */
		return KERN_SUCCESS;
	}

	if (thr_act->pool_port != pool_port) {
		thread_pool = &pool_port->ip_thread_pool;
		/* an act may belong to at most one pool */
		if (thr_act->pool_port != 0) {
#if	MACH_ASSERT
			if (watchacts & WA_ACT_LNK)
				printf("act_set_thread_pool found %x!\n",
					thr_act->pool_port);
#endif	/* MACH_ASSERT */
			return(KERN_FAILURE);
		}
		thr_act->pool_port = pool_port;
		act_locked_act_reference(thr_act);	/* pool holds a ref */

		/* idle acts go straight onto the pool's available list */
		if ((thr_act->thread == 0) && (thr_act->suspend_count == 0)) {
			thr_act->thread_pool_next = thread_pool->thr_acts;
			pool_port->ip_thread_pool.thr_acts = thr_act;
			if (thread_pool->waiting)
				thread_pool_wakeup(thread_pool);
		}
	}
	return KERN_SUCCESS;
}
/*
 * act_execute_returnhandlers:
 *
 * Run every ReturnHandler queued on the current activation, one at a
 * time.  Each handler is dequeued under the thread lock at splsched
 * and invoked with no locks held; the loop exits when the list is
 * empty.
 */
void act_execute_returnhandlers(
	void)
{
	spl_t		s;
	thread_t	thread;
	thread_act_t	thr_act = current_act();

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("execute_rtn_hdlrs: thr_act=%x\n", thr_act);
#endif	/* MACH_ASSERT */

	s = splsched();
	act_clr_apc(thr_act);
	spllo();
	while (1) {
		ReturnHandler	*rh;

		/* Grab the next handler off the act's list. */
		thread = act_lock_thread(thr_act);
		(void)splsched();
		thread_lock(thread);
		rh = thr_act->handlers;
		if (!rh) {
			/* list drained — restore spl and return */
			thread_unlock(thread);
			splx(s);
			act_unlock_thread(thr_act);
			return;
		}
		thr_act->handlers = rh->next;
		thread_unlock(thread);
		spllo();
		act_unlock_thread(thr_act);

#if	MACH_ASSERT
		if (watchacts & WA_ACT_HDLR)
			printf( (rh == &thr_act->special_handler) ?
				"\tspecial_handler\n" : "\thandler=%x\n",
				rh->handler);
#endif	/* MACH_ASSERT */

		/* Execute the handler with no locks held. */
		(*rh->handler)(rh, thr_act);
	}
}
/*
 * special_handler_continue:
 *
 * Continuation for special_handler(): if the act is still suspended,
 * re-queue the special handler so the suspend wait is re-entered;
 * otherwise re-apply any priority depression that
 * install_special_handler() temporarily undid.  Either way, resume
 * toward user mode via thread_exception_return().
 */
void
special_handler_continue(void)
{
	thread_act_t	cur_act = current_act();
	thread_t	thread = cur_act->thread;
	spl_t		s;

	if (cur_act->suspend_count)
		install_special_handler(cur_act);
	else {
		s = splsched();
		thread_lock(thread);
		if (thread->depress_priority == -2) {
			/*
			 * We were temporarily undepressed by
			 * install_special_handler; restore the
			 * depression.
			 */
			thread->depress_priority = thread->priority;
			thread->priority = thread->sched_pri = DEPRESSPRI;
		}
		thread_unlock(thread);
		splx(s);
	}
	thread_exception_return();
}
/*
 * special_handler:
 *
 * The handler that is always last on an activation's return-handler
 * list.  Clears the shuttle's abort flag, exits with KERN_TERMINATED
 * if the act has been disabled, delivers pending alert bits as an
 * EXC_RPC_ALERT exception under the callout RPC model, and blocks
 * the thread on its suspend count while suspended (resuming via
 * special_handler_continue).
 */
void
special_handler(
	ReturnHandler	*rh,
	thread_act_t	cur_act)
{
	spl_t		s;
	thread_t	lthread;
	thread_t	thread = act_lock_thread(cur_act);
	unsigned	alert_bits;
	exception_data_type_t
			codes[EXCEPTION_CODE_MAX];
	kern_return_t	kr;
	kern_return_t	exc_kr;

	assert(thread != THREAD_NULL);
#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("\t\tspecial_handler(thr_act=%x(%d))\n", cur_act,
			(cur_act ? cur_act->ref_count : 0));
#endif	/* MACH_ASSERT */

	/* Running the handler consumes any pending abort. */
	s = splsched();
	thread_lock(thread);
	thread->state &= ~TH_ABORT;
	thread_unlock(thread);
	splx(s);

	/*
	 * If someone has killed this activation, return to user mode
	 * with a terminated status; act_machine_return does not come
	 * back.
	 */
	if (!cur_act->active) {
		act_unlock_thread(cur_act);
		act_machine_return(KERN_TERMINATED);
	}

#ifdef CALLOUT_RPC_MODEL
	/*
	 * Deliver any pending alerts (other than SERVER_TERMINATED)
	 * as an EXC_RPC_ALERT exception before handling suspension.
	 */
	alert_bits = cur_act->alerts & (~SERVER_TERMINATED);
	cur_act->alerts &= ~SERVER_TERMINATED;

	if ( alert_bits ) {
		/* the exception upcall must run with no act locks held */
		act_unlock_thread(cur_act);
		codes[0] = alert_bits;
		/*
		 * NOTE(review): exception() assumes a handler exists and
		 * may terminate the thread on a broken reply — confirm
		 * before relying on a return here.
		 */
		exc_kr = exception( EXC_RPC_ALERT, codes, 1 );

		/* clear the orphaned and time-constraint indications */
		cur_act->alerts &= ~(ORPHANED | TIME_CONSTRAINT_UNSATISFIED);

		/* terminate this activation, if the handler so decided */
		if (exc_kr == KERN_RPC_TERMINATE_ORPHAN) {
#if	THREAD_SWAPPER
			thread_swap_disable(cur_act);
#endif	/* THREAD_SWAPPER */

			task_lock(cur_act->task);
			act_lock_thread(cur_act);
			kr = act_disable_task_locked(cur_act);
			assert( kr == KERN_SUCCESS );
			task_unlock(cur_act->task);
		}
		else {
			/*
			 * Re-take the act lock dropped for the upcall and
			 * restore any priority depression that
			 * install_special_handler temporarily undid.
			 */
			act_lock_thread(cur_act);
			s = splsched();
			thread_lock(thread);
			if (thread->depress_priority == -2) {
				thread->depress_priority = thread->priority;
				thread->priority = thread->sched_pri = DEPRESSPRI;
			}
			thread_unlock(thread);
			splx(s);
		}
	}
#endif /* CALLOUT_RPC_MODEL */

	/*
	 * If we're suspended, go to sleep and wait for someone to
	 * wake us up (nudge).
	 */
	if (cur_act->suspend_count) {
		if( cur_act->handlers == NULL ) {
			assert_wait((event_t)&cur_act->suspend_count,
					THREAD_ABORTSAFE);
			act_unlock_thread(cur_act);
			thread_block(special_handler_continue);
			/* NOTREACHED */
		}
		/* other handlers pending: don't block; recheck inline */
		special_handler_continue();
	}

	act_unlock_thread(cur_act);
}
/*
 * nudge:
 *
 * Wake anything waiting on the activation's suspend count.  A no-op
 * unless the act is the top activation of a live shuttle.
 */
void
nudge(thread_act_t thr_act)
{
	thread_t	shuttle = thr_act->thread;

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("\tact_%x: nudge(%x)\n", current_act(), thr_act);
#endif	/* MACH_ASSERT */

	if (shuttle == THREAD_NULL || shuttle->top_act != thr_act)
		return;

	thread_wakeup((event_t)&thr_act->suspend_count);
}
/*
 * act_user_to_kernel:
 *
 * Convert a user activation into a kernel-loaded one: switch its PCB
 * to kernel form and flag that kernel loading is in progress.
 */
void
act_user_to_kernel(
	thread_act_t	thr_act)
{
	pcb_user_to_kernel(thr_act);
	thr_act->kernel_loading = TRUE;
}
/*
 * act_disable_task_locked:
 *
 * Mark the activation dead.  Called with the task and act locking
 * held.  Disables the act's IPC access, force-releases any ulocks it
 * still holds, queues the special handler so the thread notices it
 * is terminated, nudges it out of a suspension wait if necessary,
 * and drops the reference the act held for being active.
 */
kern_return_t
act_disable_task_locked(
	thread_act_t	thr_act)
{
	thread_t	thread = thr_act->thread;
	task_t		task = thr_act->task;

#if	MACH_ASSERT
	if (watchacts & WA_EXIT) {
		printf("act_%x: act_disable_tl(thr_act=%x(%d))%sactive task=%x(%d)",
			current_act(), thr_act, thr_act->ref_count,
			(thr_act->active ? " " : " !"),
			thr_act->task, thr_act->task? thr_act->task->ref_count : 0);
		if (thr_act->pool_port)
			printf(", pool_port %x", thr_act->pool_port);
		printf("\n");
		(void) dump_act(thr_act);
	}
#endif	/* MACH_ASSERT */

	/* no more control operations on this act */
	thr_act->active = 0;
	ipc_thr_act_disable(thr_act);

	/* clean up ulocks acquired but never released or handed off */
	act_ulock_release_all(thr_act);

	/* the special handler will see !active and exit immediately */
	install_special_handler(thr_act);

	/* if the target happens to be suspended, wake it so it can exit */
	if (thr_act->suspend_count)
		nudge(thr_act);

	/*
	 * Drop the reference taken for being active (at least the
	 * caller's reference remains); inline deallocate because the
	 * act is locked.
	 */
	act_locked_act_deallocate(thr_act);

	return(KERN_SUCCESS);
}
/*
 * act_alert:
 *
 * Post alert bits to the chain of activations ABOVE thr_act.  The
 * bits are AND-ed with each activation's alert_mask as they
 * propagate, so propagation stops once the remaining bits (or the
 * chain) run out.
 */
kern_return_t
act_alert(thread_act_t thr_act, unsigned alerts)
{
	thread_t	thread = act_lock_thread(thr_act);

#if	MACH_ASSERT
	if (watchacts & WA_ACT_LNK)
		printf("act_alert %x: %x\n", thr_act, alerts);
#endif	/* MACH_ASSERT */

	if (thread) {
		thread_act_t	act_up = thr_act;
		/* walk upward, filtering through each act's alert mask */
		while ((alerts) && (act_up != thread->top_act)) {
			act_up = act_up->higher;
			alerts &= act_up->alert_mask;
			act_up->alerts |= alerts;
		}
	}
	act_unlock_thread(thr_act);
	return KERN_SUCCESS;
}
/*
 * act_alert_mask:
 *
 * Set the activation's alert mask.  Not yet implemented — always
 * panics if called.
 */
kern_return_t act_alert_mask(thread_act_t thr_act, unsigned alert_mask)
{
	panic("act_alert_mask NOT YET IMPLEMENTED\n");
	/* unreachable in practice; keeps the compiler satisfied */
	return KERN_SUCCESS;
}
/*
 * Request record for the get/set-state machinery: a ReturnHandler
 * extended with the request parameters and a result slot, so the
 * target thread can perform the operation on itself and wake the
 * requester (see get_set_state()).
 */
typedef struct GetSetState {
	struct ReturnHandler rh;	/* must be first: handlers cast rh back */
	int flavor;			/* requested state flavor */
	void *state;			/* in/out state buffer */
	int *pcount;			/* in/out state word count */
	int result;			/* KERN_ABORTED until the handler runs */
} GetSetState;

/* Local forward declarations for the get/set-state machinery. */
kern_return_t get_set_state(
	thread_act_t thr_act, int flavor,
	thread_state_t state, int *pcount,
	void (*handler)(ReturnHandler *rh, thread_act_t thr_act));
void get_state_handler(ReturnHandler *rh, thread_act_t thr_act);
void set_state_handler(ReturnHandler *rh, thread_act_t thr_act);
/*
 * get_set_state:
 *
 * Queue a get/set-state ReturnHandler on the target activation, then
 * wait until the target has executed it.  The wait is abort-safe; if
 * it is interrupted (result still KERN_ABORTED) we run our own
 * pending handlers and retry.  Called with the act locking held; the
 * locking is dropped while blocked and re-taken on each retry.
 */
kern_return_t
get_set_state(thread_act_t thr_act, int flavor, thread_state_t state, int *pcount,
		void (*handler)(ReturnHandler *rh, thread_act_t thr_act))
{
	GetSetState	gss;
	spl_t		s;

	/* Initialize the request record. */
	gss.rh.handler = handler;
	gss.flavor = flavor;
	gss.state = state;
	gss.pcount = pcount;
	gss.result = KERN_ABORTED;	/* still set iff the wait is interrupted */

	/* Push it onto the act's return-handler list. */
	gss.rh.next = thr_act->handlers;
	thr_act->handlers = &gss.rh;

	s = splsched();
	act_set_apc(thr_act);
	splx(s);

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR) {
		printf("act_%x: get_set_state(thr_act=%x flv=%x state=%x ptr@%x=%x)",
			current_act(), thr_act, flavor, state,
			pcount, (pcount ? *pcount : 0));
		printf((handler == get_state_handler ? "get_state_hdlr\n" :
			(handler == set_state_handler ? "set_state_hdlr\n" :
			"hndler=%x\n")), handler);
	}
#endif	/* MACH_ASSERT */

	/* Callers must guarantee an attached shuttle and a different act. */
	assert(thr_act->thread);
	assert(thr_act != current_act());
	for (;;) {
		nudge(thr_act);
		/*
		 * The wait must be interruptible to avoid deadlock
		 * (e.g. with task_suspend) when the caller and target
		 * are in the same task.
		 */
		assert_wait((event_t)&gss, THREAD_ABORTSAFE);
		act_unlock_thread(thr_act);
		thread_block((void (*)(void))0);
		if (gss.result != KERN_ABORTED)
			break;
		/* interrupted: run our own handlers, re-lock, retry */
		if (current_act()->handlers)
			act_execute_returnhandlers();
		act_lock_thread(thr_act);
	}

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("act_%x: get_set_state returns %x\n",
			current_act(), gss.result);
#endif	/* MACH_ASSERT */

	return gss.result;
}
/*
 * set_state_handler:
 *
 * ReturnHandler body run on the target activation's own thread:
 * performs the machine-state store requested by get_set_state() and
 * wakes the requester.
 */
void
set_state_handler(ReturnHandler *rh, thread_act_t thr_act)
{
	GetSetState	*req = (GetSetState *)rh;

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("act_%x: set_state_handler(rh=%x,thr_act=%x)\n",
			current_act(), rh, thr_act);
#endif	/* MACH_ASSERT */

	req->result = act_machine_set_state(thr_act, req->flavor,
						req->state, *req->pcount);
	thread_wakeup((event_t)req);
}
/*
 * get_state_handler:
 *
 * ReturnHandler body run on the target activation's own thread:
 * performs the machine-state fetch requested by get_set_state() and
 * wakes the requester.
 */
void
get_state_handler(ReturnHandler *rh, thread_act_t thr_act)
{
	GetSetState	*req = (GetSetState *)rh;

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("act_%x: get_state_handler(rh=%x,thr_act=%x)\n",
			current_act(), rh, thr_act);
#endif	/* MACH_ASSERT */

	req->result = act_machine_get_state(thr_act, req->flavor,
				req->state,
				(mach_msg_type_number_t *) req->pcount);
	thread_wakeup((event_t)req);
}
/*
 * act_get_state_locked:
 *
 * Fetch machine state from an act whose locking is already held, by
 * way of the get_set_state() handler machinery (which drops the
 * locking before returning).
 */
kern_return_t
act_get_state_locked(thread_act_t thr_act, int flavor, thread_state_t state,
			mach_msg_type_number_t *pcount)
{
#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("act_%x: act_get_state_L(thr_act=%x,flav=%x,st=%x,pcnt@%x=%x)\n",
			current_act(), thr_act, flavor, state, pcount,
			(pcount? *pcount : 0));
#endif	/* MACH_ASSERT */

	return get_set_state(thr_act, flavor, state,
				(int *)pcount, get_state_handler);
}
/*
 * act_set_state_locked:
 *
 * Store machine state into an act whose locking is already held, by
 * way of the get_set_state() handler machinery (which drops the
 * locking before returning).
 */
kern_return_t
act_set_state_locked(thread_act_t thr_act, int flavor, thread_state_t state,
			mach_msg_type_number_t count)
{
#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("act_%x: act_set_state_L(thr_act=%x,flav=%x,st=%x,pcnt@%x=%x)\n",
			current_act(), thr_act, flavor, state, count, count);
#endif	/* MACH_ASSERT */

	return get_set_state(thr_act, flavor, state,
				(int *)&count, set_state_handler);
}
/*
 * act_set_state:
 *
 * Externally-callable set-state: validates the target (must not be
 * null or the current act), takes the act locking, and defers to
 * act_set_state_locked(), which releases it.
 */
kern_return_t
act_set_state(thread_act_t thr_act, int flavor, thread_state_t state,
		mach_msg_type_number_t count)
{
	if (thr_act == THR_ACT_NULL || thr_act == current_act())
		return (KERN_INVALID_ARGUMENT);

	(void) act_lock_thread(thr_act);
	return act_set_state_locked(thr_act, flavor, state, count);
}
/*
 * act_get_state:
 *
 * Externally-callable get-state: validates the target (must not be
 * null or the current act), takes the act locking, and defers to
 * act_get_state_locked(), which releases it.
 */
kern_return_t
act_get_state(thread_act_t thr_act, int flavor, thread_state_t state,
		mach_msg_type_number_t *pcount)
{
	if (thr_act == THR_ACT_NULL || thr_act == current_act())
		return (KERN_INVALID_ARGUMENT);

	(void) act_lock_thread(thr_act);
	return act_get_state_locked(thr_act, flavor, state, pcount);
}
/*
 * act_set_apc:
 *
 * Post AST_APC to the activation.  When targeting the current act,
 * propagate the AST directly; otherwise, if the act's shuttle is
 * currently running on some processor, poke that processor with an
 * AST check.  Preemption is disabled across the cpu_data inspection
 * to keep it coherent.
 */
void
act_set_apc(thread_act_t thr_act)
{
	processor_t	prssr;
	thread_t	thread;

	mp_disable_preemption();
	thread_ast_set(thr_act, AST_APC);
	if (thr_act == current_act()) {
		ast_propagate(thr_act->ast);
		mp_enable_preemption();
		return;
	}
	thread = thr_act->thread;
	/* NOTE(review): assumes thread is non-NULL here — callers appear
	 * to guarantee an attached shuttle; confirm before relying on it. */
	prssr = thread->last_processor;
	if(prssr && (cpu_data[prssr->slot_num].active_thread == thread)) {
		cause_ast_check(prssr);
	}
	mp_enable_preemption();
}
/*
 * act_clr_apc:
 *
 * Withdraw a pending AST_APC from the activation.
 */
void
act_clr_apc(thread_act_t thr_act)
{
	thread_ast_clear(thr_act, AST_APC);
}
/*
 * act_ulock_release_all:
 *
 * Drain the activation's held-ulock queue, marking each lock
 * unstable (its holder is going away) before force-releasing it.
 */
void
act_ulock_release_all(thread_act_t thr_act)
{
	ulock_t	lock;

	for (;;) {
		if (queue_empty(&thr_act->held_ulocks))
			break;
		lock = (ulock_t) queue_first(&thr_act->held_ulocks);
		(void) lock_make_unstable(lock, thr_act);
		(void) lock_release_internal(lock, thr_act);
	}
}
#undef current_act
/*
 * current_act:
 *
 * Out-of-line version of the current_act_fast() macro, for callers
 * that need a real function.
 */
thread_act_t
current_act(void)
{
	return current_act_fast();
}
/*
 * thread_self:
 *
 * Return the current activation with a new reference taken on it.
 */
thread_act_t
thread_self(void)
{
	thread_act_t	cur = current_act_fast();

	act_reference(cur);
	return cur;
}
/*
 * mach_thread_self:
 *
 * Kernel trap form of thread_self(): return the current activation
 * with a new reference taken on it.
 */
thread_act_t
mach_thread_self(void)
{
	thread_act_t	cur = current_act_fast();

	act_reference(cur);
	return cur;
}
#undef act_reference
/*
 * act_reference:
 *
 * Out-of-line version of the act_reference_fast() macro, for callers
 * that need a real function.
 */
void
act_reference(
	thread_act_t	thr_act)
{
	act_reference_fast(thr_act);
}
#undef act_deallocate
/*
 * act_deallocate:
 *
 * Out-of-line version of the act_deallocate_fast() macro, for
 * callers that need a real function.
 */
void
act_deallocate(
	thread_act_t	thr_act)
{
	act_deallocate_fast(thr_act);
}