/* chud_thread_i386.c */
#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>
#include <i386/misc_protos.h>
#include <i386/proc_reg.h>
#include <i386/mp_desc.h>
#pragma mark **** thread state ****
/*
 * Report whether user register state can be sampled for this thread.
 * On i386 the saved user state is always available, so this returns
 * KERN_SUCCESS unconditionally regardless of the thread argument.
 */
__private_extern__ kern_return_t
chudxnu_thread_user_state_available(thread_t thread)
{
#pragma unused (thread)
	return KERN_SUCCESS;
}
/*
 * Fetch register state for a thread.
 *
 * If user_only is TRUE, only the user-mode portion is returned; this
 * fails for kernel threads, which have no user state.  Otherwise the
 * state most representative of where the thread currently is gets
 * returned: when sampling the current thread from interrupt context,
 * the interrupted frame decides whether user or kernel state applies.
 */
__private_extern__ kern_return_t
chudxnu_thread_get_state(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count,
	boolean_t user_only)
{
	if (user_only) {
		/* Kernel threads have no user state to report. */
		if (thread->task == kernel_task)
			return KERN_FAILURE;
		return machine_thread_get_state(thread, flavor, tstate, count);
	}

	/*
	 * Not restricted to user state.  Unless we are looking at the
	 * current thread while an interrupt frame is live on this CPU,
	 * the thread's saved state is the right answer.
	 */
	if (thread != current_thread() || !current_cpu_datap()->cpu_int_state)
		return machine_thread_get_state(thread, flavor, tstate, count);

	if (USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
	    current_cpu_datap()->cpu_interrupt_level == 1) {
		/* Interrupted straight out of user mode: user state is current. */
		return machine_thread_get_state(thread, flavor, tstate, count);
	}

	/* Interrupted while executing in the kernel: report kernel state. */
	return machine_thread_get_kern_state(thread, flavor, tstate, count);
}
/*
 * Set register state for a thread.
 *
 * user_only is accepted for API symmetry with chudxnu_thread_get_state()
 * but is ignored; the request is passed straight through to
 * machine_thread_set_state().
 */
__private_extern__ kern_return_t
chudxnu_thread_set_state(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count,
	boolean_t user_only)
{
#pragma unused (user_only)
	return machine_thread_set_state(thread, flavor, tstate, count);
}
#pragma mark **** task memory read/write ****
/*
 * Copy 'size' bytes from user address 'usraddr' in 'task' into kernel
 * buffer 'kernaddr'.  Must not be called from interrupt context, since
 * the copy may fault; interrupts are enabled for the duration of the
 * copy and restored afterwards.
 *
 * Returns KERN_SUCCESS, or KERN_FAILURE if called at interrupt level
 * or the copy faults.
 */
__private_extern__ kern_return_t
chudxnu_task_read(
	task_t task,
	void *kernaddr,
	uint64_t usraddr,
	vm_size_t size)
{
	kern_return_t result = KERN_SUCCESS;
	boolean_t old_level;

	/* User copies can fault; refuse at interrupt level. */
	if (ml_at_interrupt_context())
		return KERN_FAILURE;

	old_level = ml_set_interrupts_enabled(TRUE);

	if (current_task() == task) {
		/* Target is the current address space: plain copyin suffices. */
		if (copyin(usraddr, kernaddr, size) != 0)
			result = KERN_FAILURE;
	} else {
		/* Cross-task read: go through the target task's VM map. */
		result = vm_map_read_user(get_task_map(task), usraddr, kernaddr, size);
	}

	ml_set_interrupts_enabled(old_level);
	return result;
}
/*
 * Copy 'size' bytes from kernel buffer 'kernaddr' to user address
 * 'useraddr' in 'task'.  Must not be called from interrupt context;
 * interrupts are enabled for the duration of the copy and restored
 * afterwards.
 *
 * Returns KERN_SUCCESS, or KERN_FAILURE if called at interrupt level
 * or the copy faults.
 */
__private_extern__ kern_return_t
chudxnu_task_write(
	task_t task,
	uint64_t useraddr,
	void *kernaddr,
	vm_size_t size)
{
	kern_return_t result = KERN_SUCCESS;
	boolean_t old_level;

	/* User copies can fault; refuse at interrupt level. */
	if (ml_at_interrupt_context())
		return KERN_FAILURE;

	old_level = ml_set_interrupts_enabled(TRUE);

	if (current_task() == task) {
		/* Target is the current address space: plain copyout suffices. */
		if (copyout(kernaddr, useraddr, size) != 0)
			result = KERN_FAILURE;
	} else {
		/* Cross-task write: go through the target task's VM map. */
		result = vm_map_write_user(get_task_map(task), kernaddr, useraddr, size);
	}

	ml_set_interrupts_enabled(old_level);
	return result;
}
/*
 * Fault-safe read of 'size' bytes of kernel memory from 'srcaddr'
 * into 'dstaddr'.  Safe at interrupt level.
 */
__private_extern__ kern_return_t
chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
{
	/* ml_nofault_copy returns the number of bytes actually copied. */
	if (ml_nofault_copy(srcaddr, (vm_offset_t) dstaddr, size) != size)
		return KERN_FAILURE;

	return KERN_SUCCESS;
}
/*
 * Fault-safe write of 'size' bytes of kernel memory from 'srcaddr'
 * to 'dstaddr'.  Safe at interrupt level.
 */
__private_extern__ kern_return_t
chudxnu_kern_write(
	vm_offset_t dstaddr,
	void *srcaddr,
	vm_size_t size)
{
	/* ml_nofault_copy returns the number of bytes actually copied. */
	if (ml_nofault_copy((vm_offset_t) srcaddr, dstaddr, size) != size)
		return KERN_FAILURE;

	return KERN_SUCCESS;
}
/*
 * Frame-pointer validation used by the stack walkers.
 *
 * For supervisor (kernel) frames the address must lie within the
 * thread's kernel stack [minKernAddr, maxKernAddr].  For 32-bit user
 * frames no range check is done here; bad pointers are caught when the
 * subsequent user-space read fails.
 */
#define VALID_STACK_ADDRESS(supervisor, addr, minKernAddr, maxKernAddr) (supervisor ? (addr>=minKernAddr && addr<=maxKernAddr) : TRUE)
/*
 * 64-bit variant: user-mode frame pointers must additionally be
 * non-zero and canonical for x86-64 (below 0x00007FFFFFFFFFFF or at or
 * above 0xFFFF800000000000).
 */
#define VALID_STACK_ADDRESS64(supervisor, addr, minKernAddr, maxKernAddr) \
(supervisor ? (addr >= minKernAddr && addr <= maxKernAddr) : \
(addr != 0 && (addr <= 0x00007FFFFFFFFFFFULL || addr >= 0xFFFF800000000000ULL)))

/* Layout of a 64-bit call frame: saved RBP, return address, then args. */
typedef struct _cframe64_t {
	uint64_t	prevFP;		/* caller's saved frame pointer */
	uint64_t	caller;		/* return address */
	uint64_t	args[0];	/* call arguments (flexible tail) */
} cframe64_t;

/* Layout of a 32-bit call frame: saved EBP, return address, then args. */
typedef struct _cframe_t {
	struct _cframe_t	*prev;	/* caller's saved frame pointer */
	uint32_t		caller;	/* return address */
	uint32_t		args[0];	/* call arguments (flexible tail) */
} cframe_t;

/* Provided by the machine layer; not declared in an included header. */
extern void * find_user_regs(thread_t);
extern x86_saved_state32_t *find_kern_regs(thread_t);
/*
 * Walk a 32-bit frame-pointer chain starting from 'regs' and append
 * return addresses (widened to 64 bits) to frames[].
 *
 * On entry *start_idx is the first free slot in frames[]; on return it
 * is updated to the new fill level.  max_idx is the capacity of
 * frames[].  'supervisor' selects fault-safe kernel reads vs. reads
 * from 'task's user address space.
 *
 * Returns KERN_RESOURCE_SHORTAGE if frames[] fills before the walk
 * terminates, KERN_SUCCESS otherwise (a failed read simply ends the
 * walk).
 *
 * Fix vs. original: the read of the saved frame pointer (fp->prev) was
 * never checked for failure, so on a faulting read the stale contents
 * of tmpWord -- the caller address just read -- were reused as the next
 * frame pointer, producing a garbage frame and a bogus continuation of
 * the walk.  We now stop cleanly when that read fails.  The write-only
 * local prevPC was also removed.
 */
static kern_return_t do_backtrace32(
	task_t task,
	thread_t thread,
	x86_saved_state32_t *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx,
	boolean_t supervisor)
{
	uint32_t tmpWord = 0UL;
	uint64_t currPC = (uint64_t) regs->eip;
	uint64_t currFP = (uint64_t) regs->ebp;
	uint64_t prevFP = 0ULL;
	uint64_t kernStackMin = thread->kernel_stack;
	uint64_t kernStackMax = kernStackMin + KERNEL_STACK_SIZE;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

	if(ct >= max_idx)
		return KERN_RESOURCE_SHORTAGE;	/* no room for even one frame */

	/* The interrupted PC is always the first entry. */
	frames[ct++] = currPC;

	while(VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) {
		cframe_t *fp = (cframe_t *) (uint32_t) currFP;

		if(!currFP) {
			currPC = 0;
			break;
		}

		if(ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* Read the caller's return address from this frame. */
		if(supervisor) {
			kr = chudxnu_kern_read(&tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
		} else {
			kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
		}
		if(kr != KERN_SUCCESS) {
			currPC = 0ULL;
			break;
		}
		currPC = (uint64_t) tmpWord;

		/* Read the saved frame pointer linking to the previous frame. */
		prevFP = 0;
		if(supervisor) {
			kr = chudxnu_kern_read(&tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
		} else {
			kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
		}
		if(kr != KERN_SUCCESS) {
			/* Frame link unreadable: stop rather than reuse stale tmpWord. */
			break;
		}
		prevFP = (uint64_t) tmpWord;

		if(prevFP) {
			frames[ct++] = currPC;
		}
		/* Frame pointers must strictly increase toward the stack base. */
		if(prevFP < currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}
/*
 * Walk a 64-bit frame-pointer chain starting from 'regs' and append
 * return addresses to frames[].
 *
 * On entry *start_idx is the first free slot in frames[]; on return it
 * is updated to the new fill level.  max_idx is the capacity of
 * frames[].  Supervisor (kernel) reads are not implemented for 64-bit
 * frames here: the supervisor paths force KERN_FAILURE, so a kernel
 * walk records only the initial PC.
 *
 * Returns KERN_RESOURCE_SHORTAGE if frames[] fills before the walk
 * terminates, KERN_SUCCESS otherwise (a failed read simply ends the
 * walk).
 *
 * Fix vs. original: the read of the saved frame pointer was never
 * checked for failure; we now stop explicitly instead of relying on
 * prevFP's pre-zeroed value surviving a failed/partial read.  The
 * write-only local prevPC was also removed.
 */
static kern_return_t do_backtrace64(
	task_t task,
	thread_t thread,
	x86_saved_state64_t *regs,
	uint64_t *frames,
	mach_msg_type_number_t *start_idx,
	mach_msg_type_number_t max_idx,
	boolean_t supervisor)
{
	uint64_t currPC = regs->isf.rip;
	uint64_t currFP = regs->rbp;
	uint64_t prevFP = 0ULL;
	uint64_t kernStackMin = (uint64_t)thread->kernel_stack;
	uint64_t kernStackMax = (uint64_t)kernStackMin + KERNEL_STACK_SIZE;
	mach_msg_type_number_t ct = *start_idx;
	kern_return_t kr = KERN_FAILURE;

	if(*start_idx >= max_idx)
		return KERN_RESOURCE_SHORTAGE;	/* no room for even one frame */

	/* The interrupted PC is always the first entry. */
	frames[ct++] = currPC;

	while(VALID_STACK_ADDRESS64(supervisor, currFP, kernStackMin, kernStackMax)) {
		/* The return address sits just above the saved frame pointer. */
		uint64_t caller = currFP + sizeof(uint64_t);

		if(!currFP) {
			currPC = 0;
			break;
		}

		if(ct >= max_idx) {
			*start_idx = ct;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* Read the caller's return address from this frame. */
		if(supervisor) {
			kr = KERN_FAILURE;	/* 64-bit kernel frame reads unsupported */
		} else {
			kr = chudxnu_task_read(task, &currPC, caller, sizeof(uint64_t));
		}
		if(kr != KERN_SUCCESS) {
			currPC = 0ULL;
			break;
		}

		/* Read the saved frame pointer linking to the previous frame. */
		prevFP = 0;
		if(supervisor) {
			kr = KERN_FAILURE;	/* 64-bit kernel frame reads unsupported */
		} else {
			kr = chudxnu_task_read(task, &prevFP, currFP, sizeof(uint64_t));
		}
		if(kr != KERN_SUCCESS) {
			/* Frame link unreadable: stop the walk cleanly. */
			break;
		}

		if(VALID_STACK_ADDRESS64(supervisor, prevFP, kernStackMin, kernStackMax)) {
			frames[ct++] = currPC;
		}
		/* Frame pointers must strictly increase toward the stack base. */
		if(prevFP < currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	*start_idx = ct;
	return KERN_SUCCESS;
}
/*
 * Capture a callstack (64-bit wide entries) for 'thread'.
 *
 * On entry *count is the capacity of callstack[]; on return it holds
 * the number of entries written.  If user_only is TRUE only user-mode
 * state may be walked; sampling user state from interrupt context, or
 * from a thread currently in supervisor mode, fails.
 *
 * Returns the result of the underlying backtrace walk, KERN_FAILURE if
 * no usable register state or PC could be found, or
 * KERN_RESOURCE_SHORTAGE if callstack[] has no room at all.
 */
__private_extern__
kern_return_t chudxnu_thread_get_callstack64(
	thread_t thread,
	uint64_t *callstack,
	mach_msg_type_number_t *count,
	boolean_t user_only)
{
	kern_return_t kr = KERN_FAILURE;
	task_t task = thread->task;
	uint64_t currPC = 0;
	boolean_t supervisor = FALSE;
	mach_msg_type_number_t bufferIndex = 0;
	mach_msg_type_number_t bufferMaxIndex = *count;
	/* tagged_regs: the saved-state blob we will classify as 32/64-bit. */
	x86_saved_state_t *tagged_regs = NULL;
	x86_saved_state64_t *regs64 = NULL;	/* supervisor-mode 64-bit state */
	x86_saved_state32_t *regs32 = NULL;	/* supervisor-mode 32-bit state */
	x86_saved_state32_t *u_regs32 = NULL;	/* user-mode 32-bit state */
	x86_saved_state64_t *u_regs64 = NULL;	/* user-mode 64-bit state */

	if(ml_at_interrupt_context()) {
		if(user_only) {
			/* User stacks cannot be walked from interrupt context. */
			return KERN_FAILURE;
		}
		/*
		 * Prefer the interrupted state this CPU saved, when sampling
		 * the currently running thread.
		 */
		if(thread == current_thread() && current_cpu_datap()->cpu_int_state) {
			tagged_regs = current_cpu_datap()->cpu_int_state;
			if(is_saved_state64(tagged_regs)) {
				regs64 = saved_state64(tagged_regs);
				/* CS privilege level distinguishes kernel from user. */
				supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
			} else {
				regs32 = saved_state32(tagged_regs);
				supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
			}
		}
	}
	if(!tagged_regs) {
		/* Fall back to the thread's saved (user) state. */
		tagged_regs = USER_STATE(thread);
		if(is_saved_state64(tagged_regs)) {
			regs64 = saved_state64(tagged_regs);
			supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U);
		} else {
			regs32 = saved_state32(tagged_regs);
			supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U);
		}
	}

	*count = 0;

	if(supervisor) {
		if(user_only) {
			/* Asked for user state but the thread is in the kernel. */
			return KERN_FAILURE;
		}
	} else {
		/* Re-tag the state as user-mode; kernel pointers stay NULL. */
		u_regs32 = regs32;
		u_regs64 = regs64;
		regs32 = NULL;
		regs64 = NULL;
	}

	if (user_only) {
		if(!(u_regs32 || u_regs64)) {
			return KERN_FAILURE;
		}
	}

	/* Pick the starting PC from whichever state was selected above. */
	if(regs64) {
		currPC = regs64->isf.rip;
	} else if(regs32) {
		currPC = (uint64_t) regs32->eip;
	} else if(u_regs64) {
		currPC = u_regs64->isf.rip;
	} else if(u_regs32) {
		currPC = (uint64_t) u_regs32->eip;
	}

	if(!currPC) {
		return KERN_FAILURE;
	}

	bufferIndex = 0;

	if(bufferMaxIndex < 1) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	/*
	 * Walk the chosen frame chain, then append the word sitting at the
	 * top of the stack.  NOTE(review): presumably that word catches the
	 * return address of a leaf function that has not set up a frame --
	 * confirm against other CHUD platform backends.
	 */
	if(regs64) {
		/*
		 * NOTE(review): do_backtrace64 forces KERN_FAILURE on its
		 * supervisor read paths, so this kernel 64-bit walk records
		 * only the initial PC.
		 */
		uint64_t rsp = 0ULL;
		kr = do_backtrace64(task, thread, regs64, callstack, &bufferIndex,
			bufferMaxIndex, TRUE);
		if(KERN_SUCCESS == chudxnu_kern_read(&rsp, (addr64_t) regs64->isf.rsp, sizeof(uint64_t)) &&
			bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = rsp;
		}
	} else if(regs32) {
		uint32_t esp = 0UL;
		kr = do_backtrace32(task, thread, regs32, callstack, &bufferIndex,
			bufferMaxIndex, TRUE);
		if(KERN_SUCCESS == chudxnu_kern_read(&esp, (addr64_t) regs32->uesp, sizeof(uint32_t)) &&
			bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t) esp;
		}
	} else if(u_regs64) {
		uint64_t rsp = 0ULL;
		kr = do_backtrace64(task, thread, u_regs64, callstack, &bufferIndex,
			bufferMaxIndex, FALSE);
		if(KERN_SUCCESS == chudxnu_task_read(task, &rsp, (addr64_t) u_regs64->isf.rsp, sizeof(uint64_t)) &&
			bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = rsp;
		}
	} else if(u_regs32) {
		uint32_t esp = 0UL;
		kr = do_backtrace32(task, thread, u_regs32, callstack, &bufferIndex,
			bufferMaxIndex, FALSE);
		if(KERN_SUCCESS == chudxnu_task_read(task, &esp, (addr64_t) u_regs32->uesp, sizeof(uint32_t)) &&
			bufferIndex < bufferMaxIndex) {
			callstack[bufferIndex++] = (uint64_t) esp;
		}
	}

	*count = bufferIndex;
	return kr;
}
#pragma mark **** DEPRECATED ****
/*
 * DEPRECATED 32-bit callstack capture (see chudxnu_thread_get_callstack64).
 *
 * On entry *count is the capacity of callStack[]; on return it holds
 * the number of entries written.  If user_only is TRUE the thread's
 * saved user registers are walked (fails for kernel tasks); otherwise
 * the current CPU's interrupt-time saved state is walked.
 *
 * Returns KERN_SUCCESS, KERN_FAILURE if no register state is available,
 * or KERN_RESOURCE_SHORTAGE when callStack[] fills up.
 */
__private_extern__ kern_return_t
chudxnu_thread_get_callstack(
	thread_t thread,
	uint32_t *callStack,
	mach_msg_type_number_t *count,
	boolean_t user_only)
{
	kern_return_t kr;
	task_t task = thread->task;
	uint32_t currPC;
	uint32_t currFP;
	uint32_t prevFP = 0;
	uint32_t prevPC = 0;	/* NOTE(review): written but never read */
	uint32_t esp = 0;
	uint32_t kernStackMin = thread->kernel_stack;
	uint32_t kernStackMax = kernStackMin + KERNEL_STACK_SIZE;
	uint32_t *buffer = callStack;
	int bufferIndex = 0;
	int bufferMaxIndex = *count;
	boolean_t supervisor;
	x86_saved_state32_t *regs = NULL;

	if (user_only) {
		/* Kernel threads have no user state to walk. */
		if (task == kernel_task) {
			return KERN_FAILURE;
		}
		regs = USER_REGS32(thread);
	} else {
		/*
		 * NOTE(review): if cpu_int_state is NULL, saved_state32() is
		 * applied to NULL before the regs==NULL check below --
		 * verify that saved_state32(NULL) yields NULL here.
		 */
		regs = saved_state32(current_cpu_datap()->cpu_int_state);
	}

	if (regs == NULL) {
		*count = 0;
		return KERN_FAILURE;
	}

	/* CS privilege level distinguishes kernel from user frames. */
	supervisor = ((regs->cs & SEL_PL) != SEL_PL_U);

	currPC = regs->eip;
	currFP = regs->ebp;

	bufferIndex = 0;
	if(!supervisor)
		bufferMaxIndex -= 1;	/* reserve a slot for the ESP word appended after the loop */
	/*
	 * NOTE(review): only the decrement above is guarded by the if; this
	 * capacity check always runs.  The original source packed both
	 * statements onto one line, which reads misleadingly.
	 */
	if (bufferMaxIndex < 1) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}

	/* The interrupted PC is always the first entry. */
	buffer[bufferIndex++] = currPC;

	while (VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) {
		cframe_t *fp = (cframe_t *) currFP;

		if (bufferIndex >= bufferMaxIndex) {
			*count = bufferMaxIndex;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* Read the caller's return address from this frame. */
		if (supervisor) {
			kr = chudxnu_kern_read(
				&currPC,
				(vm_offset_t) &fp->caller,
				sizeof(currPC));
		} else {
			kr = chudxnu_task_read(
				task,
				&currPC,
				(vm_offset_t) &fp->caller,
				sizeof(currPC));
		}
		if (kr != KERN_SUCCESS)
			break;

		/* Read the saved frame pointer linking to the previous frame. */
		prevFP = 0;
		if (supervisor) {
			kr = chudxnu_kern_read(
				&prevFP,
				(vm_offset_t) &fp->prev,
				sizeof(prevFP));
		} else {
			kr = chudxnu_task_read(
				task,
				&prevFP,
				(vm_offset_t) &fp->prev,
				sizeof(prevFP));
		}
		/*
		 * NOTE(review): kr is not checked here; a failed read leaves
		 * prevFP at its pre-zeroed value, which terminates the loop
		 * below, but this relies on the read not partially writing.
		 */
		if (prevFP) {
			buffer[bufferIndex++] = currPC;
			prevPC = currPC;
		}
		/* Frame pointers must strictly increase toward the stack base. */
		if (prevFP < currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	/*
	 * Append the word at the user stack pointer.  The slot for it was
	 * reserved by the bufferMaxIndex decrement above, so this cannot
	 * overflow callStack[].
	 */
	if(!supervisor) {
		kr = chudxnu_task_read(task, &esp, regs->uesp, sizeof(uint32_t));
		if(kr == KERN_SUCCESS) {
			buffer[bufferIndex++] = esp;
		}
	}

	*count = bufferIndex;
	return KERN_SUCCESS;
}