#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_param.h>
#include <mach/task_special_ports.h>
#include <mach/thread_special_ports.h>
#include <mach/thread_status.h>
#include <mach/exception_types.h>
#include <mach/memory_object_types.h>
#include <mach/mach_traps.h>
#include <mach/task_server.h>
#include <mach/thread_act_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/vm_map_server.h>
#include <kern/kern_types.h>
#include <kern/host.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_tt.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <security/mac_mach_internal.h>
task_t convert_port_to_locked_task(ipc_port_t port);
/*
 * Routine:	ipc_task_init
 * Purpose:
 *	Initialize a task's IPC state: create its IPC space, allocate
 *	its kernel (control) and name ports, and either clear (no
 *	parent) or inherit (from parent) the registered, special, and
 *	exception ports.  Panics on allocation failure (task-creation
 *	path; failure here is unrecoverable).
 * Note:
 *	The new ports are not yet bound to the task object, so the
 *	task is not reachable through them until ipc_task_enable().
 */
void
ipc_task_init(
task_t task,
task_t parent)
{
ipc_space_t space;
ipc_port_t kport;
ipc_port_t nport;
kern_return_t kr;
int i;
/* create the task's IPC space */
kr = ipc_space_create(&ipc_table_entries[0], &space);
if (kr != KERN_SUCCESS)
panic("ipc_task_init");
space->is_task = task;
/* kernel (control) port and name port */
kport = ipc_port_alloc_kernel();
if (kport == IP_NULL)
panic("ipc_task_init");
nport = ipc_port_alloc_kernel();
if (nport == IP_NULL)
panic("ipc_task_init");
itk_lock_init(task);
task->itk_self = kport;
task->itk_nself = nport;
/* the send right handed out as the task's "self" port */
task->itk_sself = ipc_port_make_send(kport);
task->itk_space = space;
space->is_fast = FALSE;
#if CONFIG_MACF_MACH
/* associate MAC labels with the new task; inherit from parent if any */
if (parent)
mac_task_label_associate(parent, task, &parent->maclabel,
&task->maclabel, &kport->ip_label);
else
mac_task_label_associate_kernel(task, &task->maclabel, &kport->ip_label);
#endif
if (parent == TASK_NULL) {
/* no parent: everything starts null except the host port */
ipc_port_t port;
for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
task->exc_actions[i].port = IP_NULL;
}
kr = host_get_host_port(host_priv_self(), &port);
assert(kr == KERN_SUCCESS);
task->itk_host = port;
task->itk_bootstrap = IP_NULL;
task->itk_seatbelt = IP_NULL;
task->itk_gssd = IP_NULL;
task->itk_automountd = IP_NULL;
task->itk_task_access = IP_NULL;
for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
task->itk_registered[i] = IP_NULL;
} else {
/* inherit copies of the parent's send rights; snapshot is taken
 * under the parent's itk lock so it is internally consistent */
itk_lock(parent);
assert(parent->itk_self != IP_NULL);
for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
task->itk_registered[i] =
ipc_port_copy_send(parent->itk_registered[i]);
for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
task->exc_actions[i].port =
ipc_port_copy_send(parent->exc_actions[i].port);
task->exc_actions[i].flavor =
parent->exc_actions[i].flavor;
task->exc_actions[i].behavior =
parent->exc_actions[i].behavior;
task->exc_actions[i].privileged =
parent->exc_actions[i].privileged;
}
task->itk_host =
ipc_port_copy_send(parent->itk_host);
task->itk_bootstrap =
ipc_port_copy_send(parent->itk_bootstrap);
task->itk_seatbelt =
ipc_port_copy_send(parent->itk_seatbelt);
task->itk_gssd =
ipc_port_copy_send(parent->itk_gssd);
task->itk_automountd =
ipc_port_copy_send(parent->itk_automountd);
task->itk_task_access =
ipc_port_copy_send(parent->itk_task_access);
itk_unlock(parent);
}
}
/*
 * Routine:	ipc_task_enable
 * Purpose:
 *	Bind the task's kernel and name ports to the task object so
 *	the task becomes reachable through them.  Both bindings are
 *	made under the task's itk lock.
 */
void
ipc_task_enable(
	task_t task)
{
	ipc_port_t self_port;
	ipc_port_t name_port;

	itk_lock(task);

	self_port = task->itk_self;
	name_port = task->itk_nself;

	if (self_port != IP_NULL) {
		ipc_kobject_set(self_port, (ipc_kobject_t) task, IKOT_TASK);
	}
	if (name_port != IP_NULL) {
		ipc_kobject_set(name_port, (ipc_kobject_t) task, IKOT_TASK_NAME);
	}

	itk_unlock(task);
}
/*
 * Routine:	ipc_task_disable
 * Purpose:
 *	Detach the task object from its kernel and name ports so the
 *	task can no longer be reached through them.  The ports
 *	themselves survive until ipc_task_terminate().
 */
void
ipc_task_disable(
	task_t task)
{
	ipc_port_t self_port;
	ipc_port_t name_port;

	itk_lock(task);

	self_port = task->itk_self;
	name_port = task->itk_nself;

	if (self_port != IP_NULL) {
		ipc_kobject_set(self_port, IKO_NULL, IKOT_NONE);
	}
	if (name_port != IP_NULL) {
		ipc_kobject_set(name_port, IKO_NULL, IKOT_NONE);
	}

	itk_unlock(task);
}
/*
 * Routine:	ipc_task_terminate
 * Purpose:
 *	Tear down a task's IPC state at termination: null out and
 *	destroy the kernel and name ports and release every send
 *	right held in the task structure.  Idempotent: a second call
 *	finds itk_self == IP_NULL and returns immediately.
 */
void
ipc_task_terminate(
task_t task)
{
ipc_port_t kport;
ipc_port_t nport;
int i;
itk_lock(task);
kport = task->itk_self;
if (kport == IP_NULL) {
/* already terminated; nothing to release */
itk_unlock(task);
return;
}
/* nulling itk_self marks the task terminated; other paths that
 * take the itk lock and see IP_NULL back off */
task->itk_self = IP_NULL;
nport = task->itk_nself;
assert(nport != IP_NULL);
task->itk_nself = IP_NULL;
itk_unlock(task);
/* release rights with the lock dropped; the null itk_self keeps
 * concurrent setters from racing with the teardown */
if (IP_VALID(task->itk_sself))
ipc_port_release_send(task->itk_sself);
for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
if (IP_VALID(task->exc_actions[i].port)) {
ipc_port_release_send(task->exc_actions[i].port);
}
}
if (IP_VALID(task->itk_host))
ipc_port_release_send(task->itk_host);
if (IP_VALID(task->itk_bootstrap))
ipc_port_release_send(task->itk_bootstrap);
if (IP_VALID(task->itk_seatbelt))
ipc_port_release_send(task->itk_seatbelt);
if (IP_VALID(task->itk_gssd))
ipc_port_release_send(task->itk_gssd);
if (IP_VALID(task->itk_automountd))
ipc_port_release_send(task->itk_automountd);
if (IP_VALID(task->itk_task_access))
ipc_port_release_send(task->itk_task_access);
for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
if (IP_VALID(task->itk_registered[i]))
ipc_port_release_send(task->itk_registered[i]);
/* NOTE(review): unlike every port above, the ledger ports are
 * released without an IP_VALID guard — confirm they are always
 * valid send rights by the time a task terminates */
ipc_port_release_send(task->wired_ledger_port);
ipc_port_release_send(task->paged_ledger_port);
/* finally destroy the kernel ports themselves (receive rights) */
ipc_port_dealloc_kernel(kport);
ipc_port_dealloc_kernel(nport);
}
/*
 * Routine:	ipc_task_reset
 * Purpose:
 *	Replace the task's kernel (control) port with a freshly
 *	allocated one, invalidating all outstanding rights to the old
 *	port, and clear all non-privileged exception ports.  Used on
 *	security-relevant transitions (e.g. exec of a setuid binary).
 *	Privileged exception port registrations are preserved.
 */
void
ipc_task_reset(
task_t task)
{
ipc_port_t old_kport, new_kport;
ipc_port_t old_sself;
ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
int i;
/* allocate the replacement port before taking the itk lock */
new_kport = ipc_port_alloc_kernel();
if (new_kport == IP_NULL)
panic("ipc_task_reset");
itk_lock(task);
old_kport = task->itk_self;
if (old_kport == IP_NULL) {
/* task already terminated; discard the unused new port */
itk_unlock(task);
ipc_port_dealloc_kernel(new_kport);
return;
}
task->itk_self = new_kport;
old_sself = task->itk_sself;
task->itk_sself = ipc_port_make_send(new_kport);
/* detach the task from the old port, attach it to the new one */
ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);
/* capture non-privileged exception ports for release; privileged
 * registrations survive the reset */
for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
if (!task->exc_actions[i].privileged) {
old_exc_actions[i] = task->exc_actions[i].port;
task->exc_actions[i].port = IP_NULL;
} else {
old_exc_actions[i] = IP_NULL;
}
}
itk_unlock(task);
/* drop captured rights with the lock released */
if (IP_VALID(old_sself))
ipc_port_release_send(old_sself);
for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
if (IP_VALID(old_exc_actions[i])) {
ipc_port_release_send(old_exc_actions[i]);
}
}
/* destroy the old kernel port, invalidating existing rights */
ipc_port_dealloc_kernel(old_kport);
}
/*
 * Routine:	ipc_thread_init
 * Purpose:
 *	Initialize a thread's IPC state: allocate its kernel port,
 *	create its "self" send right, clear the exception ports, bind
 *	the port to the thread object, and set up the message queue
 *	and reply-port slot.  Panics on allocation failure.
 */
void
ipc_thread_init(
	thread_t thread)
{
	ipc_port_t port;
	int n;

	port = ipc_port_alloc_kernel();
	if (port == IP_NULL)
		panic("ipc_thread_init");

	thread->ith_self = port;
	thread->ith_sself = ipc_port_make_send(port);

	for (n = FIRST_EXCEPTION; n < EXC_TYPES_COUNT; n++)
		thread->exc_actions[n].port = IP_NULL;

	ipc_kobject_set(port, (ipc_kobject_t) thread, IKOT_THREAD);

	ipc_kmsg_queue_init(&thread->ith_messages);
	thread->ith_rpc_reply = IP_NULL;
}
/*
 * Routine:	ipc_thread_disable
 * Purpose:
 *	Detach the thread object from its kernel port so the thread
 *	can no longer be reached through it.
 */
void
ipc_thread_disable(
	thread_t thread)
{
	ipc_port_t port = thread->ith_self;

	if (port == IP_NULL)
		return;

	ipc_kobject_set(port, IKO_NULL, IKOT_NONE);
}
/*
 * Routine:	ipc_thread_terminate
 * Purpose:
 *	Release a thread's IPC state at termination: the self send
 *	right, the exception-port send rights, the kernel port itself,
 *	and any outstanding RPC reply port.  The port cleanup is
 *	skipped if the thread's kernel port was never set up.
 */
void
ipc_thread_terminate(
thread_t thread)
{
ipc_port_t kport = thread->ith_self;
if (kport != IP_NULL) {
int i;
if (IP_VALID(thread->ith_sself))
ipc_port_release_send(thread->ith_sself);
/* mark the thread's IPC state as torn down */
thread->ith_sself = thread->ith_self = IP_NULL;
for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
if (IP_VALID(thread->exc_actions[i].port))
ipc_port_release_send(thread->exc_actions[i].port);
}
/* destroy the kernel port (receive right) */
ipc_port_dealloc_kernel(kport);
}
/* no messages may remain queued on a terminating thread */
assert(ipc_kmsg_queue_empty(&thread->ith_messages));
if (thread->ith_rpc_reply != IP_NULL)
ipc_port_dealloc_reply(thread->ith_rpc_reply);
thread->ith_rpc_reply = IP_NULL;
}
/*
 * Routine:	ipc_thread_reset
 * Purpose:
 *	Replace the thread's kernel port with a freshly allocated one,
 *	invalidating all outstanding rights to the old port, and clear
 *	all non-privileged exception ports.  Mirrors ipc_task_reset()
 *	for threads.
 */
void
ipc_thread_reset(
	thread_t thread)
{
	ipc_port_t old_kport, new_kport;
	ipc_port_t old_sself;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	int i;

	/* allocate the replacement port before taking the thread mutex */
	new_kport = ipc_port_alloc_kernel();
	if (new_kport == IP_NULL)
		panic("ipc_thread_reset");	/* fixed: previously said "ipc_task_reset" (copy/paste) */

	thread_mtx_lock(thread);

	old_kport = thread->ith_self;

	if (old_kport == IP_NULL) {
		/* thread already terminated; discard the unused new port */
		thread_mtx_unlock(thread);
		ipc_port_dealloc_kernel(new_kport);
		return;
	}

	thread->ith_self = new_kport;
	old_sself = thread->ith_sself;
	thread->ith_sself = ipc_port_make_send(new_kport);

	/* detach the thread from the old port, attach it to the new one */
	ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
	ipc_kobject_set(new_kport, (ipc_kobject_t) thread, IKOT_THREAD);

	/* capture non-privileged exception ports for release; privileged
	 * registrations survive the reset */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (!thread->exc_actions[i].privileged) {
			old_exc_actions[i] = thread->exc_actions[i].port;
			thread->exc_actions[i].port = IP_NULL;
		} else {
			old_exc_actions[i] = IP_NULL;
		}
	}

	thread_mtx_unlock(thread);

	/* drop captured rights with the mutex released */
	if (IP_VALID(old_sself))
		ipc_port_release_send(old_sself);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(old_exc_actions[i])) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}

	/* destroy the old kernel port, invalidating existing rights */
	ipc_port_dealloc_kernel(old_kport);
}
/*
 * Routine:	retrieve_task_self_fast
 * Purpose:
 *	Return a send right for the current task's self port.
 *	Fast path: when itk_sself still equals itk_self (nothing has
 *	interposed on the kernel port), mint the send right by bumping
 *	ip_srights directly under the port lock rather than going
 *	through ipc_port_copy_send().
 * Conditions:
 *	Caller must be the task itself (asserted).
 */
ipc_port_t
retrieve_task_self_fast(
register task_t task)
{
register ipc_port_t port;
assert(task == current_task());
itk_lock(task);
assert(task->itk_self != IP_NULL);
if ((port = task->itk_sself) == task->itk_self) {
/* fast path: take a reference and a send right directly */
ip_lock(port);
assert(ip_active(port));
ip_reference(port);
port->ip_srights++;
ip_unlock(port);
} else
/* someone substituted a different self port; copy its send right */
port = ipc_port_copy_send(port);
itk_unlock(task);
return port;
}
/*
 * Routine:	retrieve_thread_self_fast
 * Purpose:
 *	Return a send right for the current thread's self port.
 *	Same fast path as retrieve_task_self_fast(): when ith_sself
 *	still equals ith_self, mint the right by bumping ip_srights
 *	directly under the port lock.
 * Conditions:
 *	Caller must be the thread itself (asserted).
 */
ipc_port_t
retrieve_thread_self_fast(
thread_t thread)
{
register ipc_port_t port;
assert(thread == current_thread());
thread_mtx_lock(thread);
assert(thread->ith_self != IP_NULL);
if ((port = thread->ith_sself) == thread->ith_self) {
/* fast path: take a reference and a send right directly */
ip_lock(port);
assert(ip_active(port));
ip_reference(port);
port->ip_srights++;
ip_unlock(port);
}
else
/* substituted self port; copy its send right instead */
port = ipc_port_copy_send(port);
thread_mtx_unlock(thread);
return port;
}
/*
 * Routine:	task_self_trap [mach trap]
 * Purpose:
 *	Give the caller a send right for its own task's self port,
 *	copied out into the caller's IPC space as a port name.
 */
mach_port_name_t
task_self_trap(
	__unused struct task_self_trap_args *args)
{
	task_t self = current_task();
	ipc_port_t sright = retrieve_task_self_fast(self);

	return ipc_port_copyout_send(sright, self->itk_space);
}
/*
 * Routine:	thread_self_trap [mach trap]
 * Purpose:
 *	Give the caller a send right for its own thread's self port,
 *	copied out into the caller's IPC space as a port name.
 */
mach_port_name_t
thread_self_trap(
	__unused struct thread_self_trap_args *args)
{
	thread_t self = current_thread();
	ipc_port_t sright = retrieve_thread_self_fast(self);

	return ipc_port_copyout_send(sright, self->task->itk_space);
}
/*
 * Routine:	mach_reply_port [mach trap]
 * Purpose:
 *	Allocate a fresh port in the caller's IPC space, for use as a
 *	reply port, and return its name (MACH_PORT_NULL on failure).
 */
mach_port_name_t
mach_reply_port(
	__unused struct mach_reply_port_args *args)
{
	ipc_port_t port;
	mach_port_name_t name;
	kern_return_t kr;

	kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
	if (kr != KERN_SUCCESS)
		return MACH_PORT_NULL;

	/* ipc_port_alloc returns the port locked */
	ip_unlock(port);
	return name;
}
/*
 * Routine:	thread_get_special_port
 * Purpose:
 *	Return a send right for one of the thread's special ports
 *	(only THREAD_KERNEL_PORT is defined).  Fails with KERN_FAILURE
 *	if the thread is no longer active.
 */
kern_return_t
thread_get_special_port(
	thread_t thread,
	int which,
	ipc_port_t *portp)
{
	ipc_port_t *slot;
	kern_return_t result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	switch (which) {
	case THREAD_KERNEL_PORT:
		slot = &thread->ith_sself;
		break;
	default:
		return (KERN_INVALID_ARGUMENT);
	}

	result = KERN_SUCCESS;

	thread_mtx_lock(thread);
	if (!thread->active)
		result = KERN_FAILURE;
	else
		*portp = ipc_port_copy_send(*slot);
	thread_mtx_unlock(thread);

	return (result);
}
/*
 * Routine:	thread_set_special_port
 * Purpose:
 *	Install @port as one of the thread's special ports (only
 *	THREAD_KERNEL_PORT is defined), consuming the caller's right
 *	and releasing the previously installed right, if any.
 */
kern_return_t
thread_set_special_port(
	thread_t thread,
	int which,
	ipc_port_t port)
{
	ipc_port_t *slot;
	ipc_port_t previous = IP_NULL;
	kern_return_t result;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	switch (which) {
	case THREAD_KERNEL_PORT:
		slot = &thread->ith_sself;
		break;
	default:
		return (KERN_INVALID_ARGUMENT);
	}

	result = KERN_SUCCESS;

	thread_mtx_lock(thread);
	if (!thread->active) {
		result = KERN_FAILURE;
	} else {
		previous = *slot;
		*slot = port;
	}
	thread_mtx_unlock(thread);

	/* drop the displaced right outside the mutex */
	if (IP_VALID(previous))
		ipc_port_release_send(previous);

	return (result);
}
/*
 * Routine:	task_get_special_port
 * Purpose:
 *	Return a send right for one of the task's special ports.
 *	Fails with KERN_FAILURE if the task has terminated (itk_self
 *	null).  Note: TASK_NAME_PORT mints a new send right from the
 *	receive right (make_send); every other case copies the
 *	existing send right (copy_send).
 */
kern_return_t
task_get_special_port(
task_t task,
int which,
ipc_port_t *portp)
{
ipc_port_t port;
if (task == TASK_NULL)
return KERN_INVALID_ARGUMENT;
itk_lock(task);
if (task->itk_self == IP_NULL) {
/* task terminated */
itk_unlock(task);
return KERN_FAILURE;
}
switch (which) {
case TASK_KERNEL_PORT:
port = ipc_port_copy_send(task->itk_sself);
break;
case TASK_NAME_PORT:
/* name port: a fresh send right from the kernel-held receive right */
port = ipc_port_make_send(task->itk_nself);
break;
case TASK_HOST_PORT:
port = ipc_port_copy_send(task->itk_host);
break;
case TASK_BOOTSTRAP_PORT:
port = ipc_port_copy_send(task->itk_bootstrap);
break;
case TASK_WIRED_LEDGER_PORT:
port = ipc_port_copy_send(task->wired_ledger_port);
break;
case TASK_PAGED_LEDGER_PORT:
port = ipc_port_copy_send(task->paged_ledger_port);
break;
case TASK_SEATBELT_PORT:
port = ipc_port_copy_send(task->itk_seatbelt);
break;
case TASK_GSSD_PORT:
port = ipc_port_copy_send(task->itk_gssd);
break;
case TASK_ACCESS_PORT:
port = ipc_port_copy_send(task->itk_task_access);
break;
case TASK_AUTOMOUNTD_PORT:
port = ipc_port_copy_send(task->itk_automountd);
break;
default:
itk_unlock(task);
return KERN_INVALID_ARGUMENT;
}
itk_unlock(task);
*portp = port;
return KERN_SUCCESS;
}
/*
 * Routine:	task_set_special_port
 * Purpose:
 *	Install @port as one of the task's special ports, consuming
 *	the caller's right and releasing the previously installed
 *	right, if any.
 * Restrictions:
 *	The seatbelt and task-access ports are set-once: overwriting
 *	an already-valid right returns KERN_NO_ACCESS.  With MACF, the
 *	"set_special_port" service check can also deny the operation.
 */
kern_return_t
task_set_special_port(
task_t task,
int which,
ipc_port_t port)
{
ipc_port_t *whichp;
ipc_port_t old;
if (task == TASK_NULL)
return KERN_INVALID_ARGUMENT;
/* select the slot before taking the lock */
switch (which) {
case TASK_KERNEL_PORT:
whichp = &task->itk_sself;
break;
case TASK_HOST_PORT:
whichp = &task->itk_host;
break;
case TASK_BOOTSTRAP_PORT:
whichp = &task->itk_bootstrap;
break;
case TASK_WIRED_LEDGER_PORT:
whichp = &task->wired_ledger_port;
break;
case TASK_PAGED_LEDGER_PORT:
whichp = &task->paged_ledger_port;
break;
case TASK_SEATBELT_PORT:
whichp = &task->itk_seatbelt;
break;
case TASK_GSSD_PORT:
whichp = &task->itk_gssd;
break;
case TASK_ACCESS_PORT:
whichp = &task->itk_task_access;
break;
case TASK_AUTOMOUNTD_PORT:
whichp = &task->itk_automountd;
break;
default:
return KERN_INVALID_ARGUMENT;
}
itk_lock(task);
if (task->itk_self == IP_NULL) {
/* task terminated */
itk_unlock(task);
return KERN_FAILURE;
}
/* security-sensitive ports may only be set once */
if ((TASK_SEATBELT_PORT == which || TASK_ACCESS_PORT == which)
&& IP_VALID(*whichp)) {
itk_unlock(task);
return KERN_NO_ACCESS;
}
#if CONFIG_MACF_MACH
if (mac_task_check_service(current_task(), task, "set_special_port")) {
itk_unlock(task);
return KERN_NO_ACCESS;
}
#endif
old = *whichp;
*whichp = port;
itk_unlock(task);
/* drop the displaced right outside the lock */
if (IP_VALID(old))
ipc_port_release_send(old);
return KERN_SUCCESS;
}
/*
 * Routine:	mach_ports_register
 * Purpose:
 *	Install up to TASK_PORT_REGISTER_MAX send rights as the task's
 *	registered ports, releasing any previously registered rights.
 *	Slots beyond portsCnt are cleared to IP_NULL.
 * Ownership:
 *	Consumes the incoming rights and, on success, frees the
 *	MIG-provided @memory array (only if portsCnt != 0).
 */
kern_return_t
mach_ports_register(
task_t task,
mach_port_array_t memory,
mach_msg_type_number_t portsCnt)
{
ipc_port_t ports[TASK_PORT_REGISTER_MAX];
unsigned int i;
if ((task == TASK_NULL) ||
(portsCnt > TASK_PORT_REGISTER_MAX))
return KERN_INVALID_ARGUMENT;
/* copy the incoming rights to a local array, padding with nulls */
for (i = 0; i < portsCnt; i++)
ports[i] = memory[i];
for (; i < TASK_PORT_REGISTER_MAX; i++)
ports[i] = IP_NULL;
itk_lock(task);
if (task->itk_self == IP_NULL) {
/* task terminated */
itk_unlock(task);
return KERN_INVALID_ARGUMENT;
}
/* swap new rights in, collecting the old ones in ports[] */
for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
ipc_port_t old;
old = task->itk_registered[i];
task->itk_registered[i] = ports[i];
ports[i] = old;
}
itk_unlock(task);
/* release the displaced rights outside the lock */
for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
if (IP_VALID(ports[i]))
ipc_port_release_send(ports[i]);
/* free the MIG out-of-line array now that its rights are consumed */
if (portsCnt != 0)
kfree(memory,
(vm_size_t) (portsCnt * sizeof(mach_port_t)));
return KERN_SUCCESS;
}
/*
 * Routine:	mach_ports_lookup
 * Purpose:
 *	Return copies of the task's registered-port send rights in a
 *	freshly kalloc'd array.  The caller owns the array and the
 *	rights in it; the count returned is always
 *	TASK_PORT_REGISTER_MAX.
 */
kern_return_t
mach_ports_lookup(
	task_t task,
	mach_port_array_t *portsp,
	mach_msg_type_number_t *portsCnt)
{
	ipc_port_t *array;
	vm_size_t bytes;
	int idx;

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	bytes = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));
	array = (ipc_port_t *) kalloc(bytes);
	if (array == 0)
		return KERN_RESOURCE_SHORTAGE;

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		/* task terminated; free the unused array */
		itk_unlock(task);
		kfree(array, bytes);
		return KERN_INVALID_ARGUMENT;
	}

	for (idx = 0; idx < TASK_PORT_REGISTER_MAX; idx++)
		array[idx] = ipc_port_copy_send(task->itk_registered[idx]);
	itk_unlock(task);

	*portsp = (mach_port_array_t) array;
	*portsCnt = TASK_PORT_REGISTER_MAX;
	return KERN_SUCCESS;
}
/*
 * Routine:	convert_port_to_locked_task
 * Purpose:
 *	Convert a port into the task it names, returning the task
 *	LOCKED (no extra reference taken).  Because the port lock must
 *	not be held while blocking on the task lock, the task lock is
 *	only tried; on contention the port is unlocked, we back off
 *	via mutex_pause(), and retry from scratch (the port may have
 *	died or changed kobject in the meantime).
 * Returns:
 *	The locked task, or TASK_NULL if the port is invalid, dead,
 *	or not a task port.
 */
task_t
convert_port_to_locked_task(ipc_port_t port)
{
int try_failed_count = 0;
while (IP_VALID(port)) {
task_t task;
ip_lock(port);
if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
ip_unlock(port);
return TASK_NULL;
}
task = (task_t) port->ip_kobject;
assert(task != TASK_NULL);
/* try-lock to avoid a port-lock/task-lock ordering deadlock */
if (task_lock_try(task)) {
ip_unlock(port);
return(task);
}
/* contention: back off (increasingly) and retry */
try_failed_count++;
ip_unlock(port);
mutex_pause(try_failed_count);
}
return TASK_NULL;
}
/*
 * Routine:	convert_port_to_task
 * Purpose:
 *	Convert a port into the task it names, taking a task
 *	reference for the caller.  Returns TASK_NULL if the port is
 *	invalid, dead, or not a task port.  Does not consume the
 *	port right.
 */
task_t
convert_port_to_task(
	ipc_port_t port)
{
	task_t result = TASK_NULL;

	if (!IP_VALID(port))
		return (TASK_NULL);

	ip_lock(port);
	if (ip_active(port) && ip_kotype(port) == IKOT_TASK) {
		result = (task_t) port->ip_kobject;
		assert(result != TASK_NULL);
		task_reference_internal(result);
	}
	ip_unlock(port);

	return (result);
}
/*
 * Routine:	convert_port_to_task_name
 * Purpose:
 *	Convert a port into the task it names, accepting either a
 *	task control port or a task name port, and take a reference
 *	for the caller.  Returns TASK_NULL on failure.  Does not
 *	consume the port right.
 */
task_name_t
convert_port_to_task_name(
	ipc_port_t port)
{
	task_name_t result = TASK_NULL;

	if (!IP_VALID(port))
		return (TASK_NULL);

	ip_lock(port);
	if (ip_active(port) &&
	    (ip_kotype(port) == IKOT_TASK ||
	     ip_kotype(port) == IKOT_TASK_NAME)) {
		result = (task_name_t) port->ip_kobject;
		assert(result != TASK_NAME_NULL);
		task_reference_internal(result);
	}
	ip_unlock(port);

	return (result);
}
/*
 * Routine:	convert_port_to_space
 * Purpose:
 *	Convert a task port into a referenced IPC space.  Returns
 *	IPC_SPACE_NULL if the port does not name an active task.
 *	Does not consume the port right.
 */
ipc_space_t
convert_port_to_space(
	ipc_port_t port)
{
	ipc_space_t space = IPC_SPACE_NULL;
	task_t task;

	task = convert_port_to_locked_task(port);
	if (task == TASK_NULL)
		return (IPC_SPACE_NULL);

	if (task->active) {
		space = task->itk_space;
		is_reference(space);
	}

	task_unlock(task);
	return (space);
}
/*
 * Routine:	convert_port_to_map
 * Purpose:
 *	Convert a task port into a referenced VM map.  Returns
 *	VM_MAP_NULL if the port does not name an active task.
 *	Does not consume the port right.
 */
vm_map_t
convert_port_to_map(
	ipc_port_t port)
{
	vm_map_t map = VM_MAP_NULL;
	task_t task;

	task = convert_port_to_locked_task(port);
	if (task == TASK_NULL)
		return (VM_MAP_NULL);

	if (task->active) {
		map = task->map;
		vm_map_reference_swap(map);
	}

	task_unlock(task);
	return (map);
}
/*
 * Routine:	convert_port_to_thread
 * Purpose:
 *	Convert a port into the thread it names, taking a thread
 *	reference for the caller.  Returns THREAD_NULL if the port is
 *	invalid, dead, or not a thread port.  Does not consume the
 *	port right.
 */
thread_t
convert_port_to_thread(
	ipc_port_t port)
{
	thread_t result = THREAD_NULL;

	if (!IP_VALID(port))
		return (THREAD_NULL);

	ip_lock(port);
	if (ip_active(port) && ip_kotype(port) == IKOT_THREAD) {
		result = (thread_t) port->ip_kobject;
		assert(result != THREAD_NULL);
		thread_reference_internal(result);
	}
	ip_unlock(port);

	return (result);
}
/*
 * Routine:	port_name_to_thread
 * Purpose:
 *	Look up a port name in the current space and convert it into
 *	a referenced thread.  The send right copied in during lookup
 *	is released before returning.  Returns THREAD_NULL on any
 *	failure.
 */
thread_t
port_name_to_thread(
	mach_port_name_t name)
{
	ipc_port_t port;
	thread_t result;

	if (!MACH_PORT_VALID(name))
		return (THREAD_NULL);

	if (ipc_object_copyin(current_space(), name,
	    MACH_MSG_TYPE_COPY_SEND,
	    (ipc_object_t *) &port) != KERN_SUCCESS)
		return (THREAD_NULL);

	result = convert_port_to_thread(port);

	/* drop the right obtained by the copyin */
	if (IP_VALID(port))
		ipc_port_release_send(port);

	return (result);
}
/*
 * Routine:	port_name_to_task
 * Purpose:
 *	Look up a port name in the current space and convert it into
 *	a referenced task.  The send right copied in during lookup is
 *	released before returning.  Returns TASK_NULL on any failure.
 */
task_t
port_name_to_task(
	mach_port_name_t name)
{
	ipc_port_t port;
	task_t result;

	if (!MACH_PORT_VALID(name))
		return TASK_NULL;

	if (ipc_object_copyin(current_space(), name,
	    MACH_MSG_TYPE_COPY_SEND,
	    (ipc_object_t *) &port) != KERN_SUCCESS)
		return TASK_NULL;

	result = convert_port_to_task(port);

	/* drop the right obtained by the copyin */
	if (IP_VALID(port))
		ipc_port_release_send(port);

	return result;
}
/*
 * Routine:	convert_task_to_port
 * Purpose:
 *	Convert a task into a send right for its kernel port,
 *	consuming the caller's task reference.  Returns IP_NULL if
 *	the task has terminated.
 */
ipc_port_t
convert_task_to_port(
	task_t task)
{
	ipc_port_t sright = IP_NULL;

	itk_lock(task);
	if (task->itk_self != IP_NULL)
		sright = ipc_port_make_send(task->itk_self);
	itk_unlock(task);

	/* the conversion consumes the task reference */
	task_deallocate(task);
	return sright;
}
/*
 * Routine:	convert_task_name_to_port
 * Purpose:
 *	Convert a task into a send right for its name port, consuming
 *	the caller's task-name reference.  Returns IP_NULL if the
 *	task has terminated.
 */
ipc_port_t
convert_task_name_to_port(
	task_name_t task_name)
{
	ipc_port_t sright = IP_NULL;

	itk_lock(task_name);
	if (task_name->itk_nself != IP_NULL)
		sright = ipc_port_make_send(task_name->itk_nself);
	itk_unlock(task_name);

	/* the conversion consumes the task-name reference */
	task_name_deallocate(task_name);
	return sright;
}
/*
 * Routine:	convert_thread_to_port
 * Purpose:
 *	Convert a thread into a send right for its kernel port,
 *	consuming the caller's thread reference.  Returns IP_NULL if
 *	the thread has terminated.
 */
ipc_port_t
convert_thread_to_port(
	thread_t thread)
{
	ipc_port_t sright = IP_NULL;

	thread_mtx_lock(thread);
	if (thread->ith_self != IP_NULL)
		sright = ipc_port_make_send(thread->ith_self);
	thread_mtx_unlock(thread);

	/* the conversion consumes the thread reference */
	thread_deallocate(thread);
	return (sright);
}
/*
 * Routine:	space_deallocate
 * Purpose:
 *	Release one reference on an IPC space; a null space is a
 *	no-op.
 */
void
space_deallocate(
	ipc_space_t space)
{
	if (space == IS_NULL)
		return;

	is_release(space);
}
/*
 * Routine:	thread_set_exception_ports
 * Purpose:
 *	Install (port, behavior, flavor) as the handler for every
 *	exception type selected by @exception_mask on the thread,
 *	releasing the previously installed rights.  The registration
 *	is marked privileged when the calling task is root (security
 *	token 0), so it survives ipc_thread_reset().
 * Ownership:
 *	Consumes the caller's right to @new_port (a copy is stored
 *	per selected exception type).
 */
kern_return_t
thread_set_exception_ports(
thread_t thread,
exception_mask_t exception_mask,
ipc_port_t new_port,
exception_behavior_t new_behavior,
thread_state_flavor_t new_flavor)
{
ipc_port_t old_port[EXC_TYPES_COUNT];
boolean_t privileged = current_task()->sec_token.val[0] == 0;
register int i;
if (thread == THREAD_NULL)
return (KERN_INVALID_ARGUMENT);
if (exception_mask & ~EXC_MASK_ALL)
return (KERN_INVALID_ARGUMENT);
/* validate behavior and flavor before touching any state */
if (IP_VALID(new_port)) {
switch (new_behavior & ~MACH_EXCEPTION_CODES) {
case EXCEPTION_DEFAULT:
case EXCEPTION_STATE:
case EXCEPTION_STATE_IDENTITY:
break;
default:
return (KERN_INVALID_ARGUMENT);
}
}
if (!VALID_THREAD_STATE_FLAVOR(new_flavor))
return (KERN_INVALID_ARGUMENT);
thread_mtx_lock(thread);
if (!thread->active) {
thread_mtx_unlock(thread);
return (KERN_FAILURE);
}
/* exception type i is selected by bit (1 << i) of the mask */
for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
if (exception_mask & (1 << i)) {
old_port[i] = thread->exc_actions[i].port;
thread->exc_actions[i].port = ipc_port_copy_send(new_port);
thread->exc_actions[i].behavior = new_behavior;
thread->exc_actions[i].flavor = new_flavor;
thread->exc_actions[i].privileged = privileged;
}
else
old_port[i] = IP_NULL;
}
thread_mtx_unlock(thread);
/* drop displaced rights and the consumed caller right outside the mutex */
for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
if (IP_VALID(old_port[i]))
ipc_port_release_send(old_port[i]);
if (IP_VALID(new_port))
ipc_port_release_send(new_port);
return (KERN_SUCCESS);
}
/*
 * Routine:	task_set_exception_ports
 * Purpose:
 *	Install (port, behavior, flavor) as the handler for every
 *	exception type selected by @exception_mask on the task,
 *	releasing the previously installed rights.  The registration
 *	is marked privileged when the calling task is root (security
 *	token 0), so it survives ipc_task_reset().
 * Ownership:
 *	Consumes the caller's right to @new_port (a copy is stored
 *	per selected exception type).
 */
kern_return_t
task_set_exception_ports(
task_t task,
exception_mask_t exception_mask,
ipc_port_t new_port,
exception_behavior_t new_behavior,
thread_state_flavor_t new_flavor)
{
ipc_port_t old_port[EXC_TYPES_COUNT];
boolean_t privileged = current_task()->sec_token.val[0] == 0;
register int i;
if (task == TASK_NULL)
return (KERN_INVALID_ARGUMENT);
if (exception_mask & ~EXC_MASK_ALL)
return (KERN_INVALID_ARGUMENT);
/* validate behavior before touching any state */
if (IP_VALID(new_port)) {
switch (new_behavior & ~MACH_EXCEPTION_CODES) {
case EXCEPTION_DEFAULT:
case EXCEPTION_STATE:
case EXCEPTION_STATE_IDENTITY:
break;
default:
return (KERN_INVALID_ARGUMENT);
}
}
itk_lock(task);
if (task->itk_self == IP_NULL) {
/* task terminated */
itk_unlock(task);
return (KERN_FAILURE);
}
/* exception type i is selected by bit (1 << i) of the mask */
for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
if (exception_mask & (1 << i)) {
old_port[i] = task->exc_actions[i].port;
task->exc_actions[i].port =
ipc_port_copy_send(new_port);
task->exc_actions[i].behavior = new_behavior;
task->exc_actions[i].flavor = new_flavor;
task->exc_actions[i].privileged = privileged;
}
else
old_port[i] = IP_NULL;
}
itk_unlock(task);
/* drop displaced rights and the consumed caller right outside the lock */
for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
if (IP_VALID(old_port[i]))
ipc_port_release_send(old_port[i]);
if (IP_VALID(new_port))
ipc_port_release_send(new_port);
return (KERN_SUCCESS);
}
/*
 * Routine:	thread_swap_exception_ports
 * Purpose:
 *	Atomically install a new (port, behavior, flavor) handler for
 *	every exception type selected by @exception_mask and return
 *	the previous handlers, deduplicated: identical (port,
 *	behavior, flavor) triples are merged into one output entry
 *	whose mask covers all the exception types they handled.
 * Fixes over the previous version:
 *	- old_port[] is pre-filled with IP_NULL, so the release loop
 *	  can never read uninitialized entries after an early break.
 *	- The capacity check now runs BEFORE recording a new output
 *	  entry (and uses >=), so the caller's arrays can no longer be
 *	  written one element past *CountCnt.
 * Ownership:
 *	Consumes the caller's right to @new_port; returned ports are
 *	copied send rights owned by the caller.
 */
kern_return_t
thread_swap_exception_ports(
	thread_t thread,
	exception_mask_t exception_mask,
	ipc_port_t new_port,
	exception_behavior_t new_behavior,
	thread_state_flavor_t new_flavor,
	exception_mask_array_t masks,
	mach_msg_type_number_t *CountCnt,
	exception_port_array_t ports,
	exception_behavior_array_t behaviors,
	thread_state_flavor_array_t flavors)
{
	ipc_port_t old_port[EXC_TYPES_COUNT];
	boolean_t privileged = current_task()->sec_token.val[0] == 0;
	unsigned int i, j, count;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_ALL)
		return (KERN_INVALID_ARGUMENT);

	/* validate behavior before touching any state */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_CODES) {

		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return (KERN_INVALID_ARGUMENT);
		}
	}

	/* pre-fill so the release loop below is safe even if we stop
	 * early when the caller's output arrays fill up */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
		old_port[i] = IP_NULL;

	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_FAILURE);
	}

	count = 0;
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if ((exception_mask & (1 << i)) == 0)
			continue;

		/* merge with an already-recorded identical handler */
		for (j = 0; j < count; ++j) {
			if (	thread->exc_actions[i].port == ports[j]			&&
				thread->exc_actions[i].behavior == behaviors[j]	&&
				thread->exc_actions[i].flavor == flavors[j]		) {
				masks[j] |= (1 << i);
				break;
			}
		}

		if (j == count) {
			/* new entry: check capacity BEFORE writing (the old
			 * code checked `count > *CountCnt` after writing,
			 * allowing a store at index *CountCnt) */
			if (count >= *CountCnt)
				break;

			masks[j] = (1 << i);
			ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
			behaviors[j] = thread->exc_actions[i].behavior;
			flavors[j] = thread->exc_actions[i].flavor;
			++count;
		}

		/* swap in the new handler for this exception type */
		old_port[i] = thread->exc_actions[i].port;
		thread->exc_actions[i].port = ipc_port_copy_send(new_port);
		thread->exc_actions[i].behavior = new_behavior;
		thread->exc_actions[i].flavor = new_flavor;
		thread->exc_actions[i].privileged = privileged;
	}

	thread_mtx_unlock(thread);

	/* drop displaced rights and the consumed caller right */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);

	if (IP_VALID(new_port))
		ipc_port_release_send(new_port);

	*CountCnt = count;

	return (KERN_SUCCESS);
}
/*
 * Routine:	task_swap_exception_ports
 * Purpose:
 *	Atomically install a new (port, behavior, flavor) handler for
 *	every exception type selected by @exception_mask and return
 *	the previous handlers, deduplicated: identical (port,
 *	behavior, flavor) triples are merged into one output entry
 *	whose mask covers all the exception types they handled.
 * Fixes over the previous version:
 *	- old_port[] is pre-filled with IP_NULL, so the release loop
 *	  can never read uninitialized entries after an early break.
 *	- The capacity check now runs BEFORE recording a new output
 *	  entry (and uses >=), so the caller's arrays can no longer be
 *	  written one element past *CountCnt.
 * Ownership:
 *	Consumes the caller's right to @new_port; returned ports are
 *	copied send rights owned by the caller.
 */
kern_return_t
task_swap_exception_ports(
	task_t task,
	exception_mask_t exception_mask,
	ipc_port_t new_port,
	exception_behavior_t new_behavior,
	thread_state_flavor_t new_flavor,
	exception_mask_array_t masks,
	mach_msg_type_number_t *CountCnt,
	exception_port_array_t ports,
	exception_behavior_array_t behaviors,
	thread_state_flavor_array_t flavors)
{
	ipc_port_t old_port[EXC_TYPES_COUNT];
	boolean_t privileged = current_task()->sec_token.val[0] == 0;
	unsigned int i, j, count;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_ALL)
		return (KERN_INVALID_ARGUMENT);

	/* validate behavior before touching any state */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_CODES) {

		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return (KERN_INVALID_ARGUMENT);
		}
	}

	/* pre-fill so the release loop below is safe even if we stop
	 * early when the caller's output arrays fill up */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
		old_port[i] = IP_NULL;

	itk_lock(task);

	if (task->itk_self == IP_NULL) {
		itk_unlock(task);

		return (KERN_FAILURE);
	}

	count = 0;
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if ((exception_mask & (1 << i)) == 0)
			continue;

		/* merge with an already-recorded identical handler */
		for (j = 0; j < count; j++) {
			if (	task->exc_actions[i].port == ports[j]			&&
				task->exc_actions[i].behavior == behaviors[j]	&&
				task->exc_actions[i].flavor == flavors[j]		) {
				masks[j] |= (1 << i);
				break;
			}
		}

		if (j == count) {
			/* new entry: check capacity BEFORE writing (the old
			 * code checked `count > *CountCnt` after writing,
			 * allowing a store at index *CountCnt) */
			if (count >= *CountCnt)
				break;

			masks[j] = (1 << i);
			ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
			behaviors[j] = task->exc_actions[i].behavior;
			flavors[j] = task->exc_actions[i].flavor;
			++count;
		}

		/* swap in the new handler for this exception type */
		old_port[i] = task->exc_actions[i].port;
		task->exc_actions[i].port = ipc_port_copy_send(new_port);
		task->exc_actions[i].behavior = new_behavior;
		task->exc_actions[i].flavor = new_flavor;
		task->exc_actions[i].privileged = privileged;
	}

	itk_unlock(task);

	/* drop displaced rights and the consumed caller right */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);

	if (IP_VALID(new_port))
		ipc_port_release_send(new_port);

	*CountCnt = count;

	return (KERN_SUCCESS);
}
/*
 * Routine:	thread_get_exception_ports
 * Purpose:
 *	Return the thread's handlers for the exception types selected
 *	by @exception_mask, deduplicated: identical (port, behavior,
 *	flavor) triples are merged into one output entry whose mask
 *	covers all the exception types they handle.  At most *CountCnt
 *	entries are written; *CountCnt is updated to the actual count.
 * Ownership:
 *	Returned ports are copied send rights owned by the caller.
 */
kern_return_t
thread_get_exception_ports(
thread_t thread,
exception_mask_t exception_mask,
exception_mask_array_t masks,
mach_msg_type_number_t *CountCnt,
exception_port_array_t ports,
exception_behavior_array_t behaviors,
thread_state_flavor_array_t flavors)
{
unsigned int i, j, count;
if (thread == THREAD_NULL)
return (KERN_INVALID_ARGUMENT);
if (exception_mask & ~EXC_MASK_ALL)
return (KERN_INVALID_ARGUMENT);
thread_mtx_lock(thread);
if (!thread->active) {
thread_mtx_unlock(thread);
return (KERN_FAILURE);
}
count = 0;
for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
if (exception_mask & (1 << i)) {
/* merge with an already-recorded identical handler */
for (j = 0; j < count; ++j) {
if ( thread->exc_actions[i].port == ports[j] &&
thread->exc_actions[i].behavior ==behaviors[j] &&
thread->exc_actions[i].flavor == flavors[j] ) {
masks[j] |= (1 << i);
break;
}
}
if (j == count) {
masks[j] = (1 << i);
ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
behaviors[j] = thread->exc_actions[i].behavior;
flavors[j] = thread->exc_actions[i].flavor;
++count;
/* >= keeps every write within the caller's arrays */
if (count >= *CountCnt)
break;
}
}
}
thread_mtx_unlock(thread);
*CountCnt = count;
return (KERN_SUCCESS);
}
/*
 * Routine:	task_get_exception_ports
 * Purpose:
 *	Return the task's handlers for the exception types selected
 *	by @exception_mask, deduplicated: identical (port, behavior,
 *	flavor) triples are merged into one output entry whose mask
 *	covers all the exception types they handle.  At most *CountCnt
 *	entries are written; *CountCnt is updated to the actual count.
 * Ownership:
 *	Returned ports are copied send rights owned by the caller.
 */
kern_return_t
task_get_exception_ports(
	task_t task,
	exception_mask_t exception_mask,
	exception_mask_array_t masks,
	mach_msg_type_number_t *CountCnt,
	exception_port_array_t ports,
	exception_behavior_array_t behaviors,
	thread_state_flavor_array_t flavors)
{
	unsigned int i, j, count;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_ALL)
		return (KERN_INVALID_ARGUMENT);

	itk_lock(task);

	if (task->itk_self == IP_NULL) {
		itk_unlock(task);

		return (KERN_FAILURE);
	}

	count = 0;
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			/* merge with an already-recorded identical handler */
			for (j = 0; j < count; ++j) {
				if (	task->exc_actions[i].port == ports[j]			&&
					task->exc_actions[i].behavior == behaviors[j]	&&
					task->exc_actions[i].flavor == flavors[j]		) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
				behaviors[j] = task->exc_actions[i].behavior;
				flavors[j] = task->exc_actions[i].flavor;
				++count;
				/* fixed off-by-one: was `count > *CountCnt`, which let
				 * the next new entry be written at index *CountCnt,
				 * one past the caller's arrays; `>=` matches
				 * thread_get_exception_ports() */
				if (count >= *CountCnt)
					break;
			}
		}
	}

	itk_unlock(task);

	*CountCnt = count;

	return (KERN_SUCCESS);
}