/* chud_thread_i386.c */
#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>
#include <i386/misc_protos.h>
#include <i386/proc_reg.h>
#include <i386/mp_desc.h>
#pragma mark **** thread state ****
/*
 * Report whether user-mode register state can be sampled for `thread`.
 * On i386 this is always possible, so the call unconditionally succeeds;
 * the thread argument exists only for cross-architecture API symmetry.
 */
__private_extern__ kern_return_t
chudxnu_thread_user_state_available(thread_t thread)
{
	(void)thread;	/* unused on this architecture */
	return KERN_SUCCESS;
}
/*
 * Fetch machine register state for a thread.
 *
 * user_only == TRUE requests user-mode state only; this fails for
 * threads belonging to the kernel task (they have none).  Otherwise,
 * when sampling the current thread from inside an interrupt, decide
 * whether the interrupt landed on user or kernel code and return the
 * matching saved state.
 */
__private_extern__ kern_return_t
chudxnu_thread_get_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count,
	boolean_t		user_only)
{
	if (user_only) {
		/* Kernel-task threads have no user state to report. */
		if (thread->task == kernel_task)
			return KERN_FAILURE;
		return machine_thread_get_state(thread, flavor, tstate, count);
	}

	if (thread == current_thread() && current_cpu_datap()->cpu_int_state) {
		/*
		 * We're in an interrupt on this thread.  If the interrupted
		 * state is the thread's user state at interrupt level 1, the
		 * interrupt hit user code; otherwise it hit kernel code.
		 */
		boolean_t interrupted_user =
		    (USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
		     current_cpu_datap()->cpu_interrupt_level == 1);

		if (interrupted_user)
			return machine_thread_get_state(thread, flavor, tstate, count);
		return machine_thread_get_kern_state(thread, flavor, tstate, count);
	}

	/* Not interrupt context: the generic path handles it. */
	return machine_thread_get_state(thread, flavor, tstate, count);
}
/*
 * Set machine register state for a thread.
 * The user_only flag is accepted for symmetry with
 * chudxnu_thread_get_state() but is not consulted here;
 * machine_thread_set_state() covers both cases.
 */
__private_extern__ kern_return_t
chudxnu_thread_set_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	count,
	boolean_t		user_only)
{
	(void)user_only;	/* intentionally ignored */
	return machine_thread_set_state(thread, flavor, tstate, count);
}
#pragma mark **** task memory read/write ****
/*
 * Read `size` bytes at user address `usraddr` in `task` into the kernel
 * buffer `kernaddr`.  Returns KERN_SUCCESS, or KERN_FAILURE when the
 * copy fails or when asked to copyin from interrupt context.
 */
__private_extern__ kern_return_t
chudxnu_task_read(
	task_t		task,
	void		*kernaddr,
	uint64_t	usraddr,
	vm_size_t	size)
{
	if (current_task() == task) {
		/* Target is the current address space: copyin() applies,
		 * but is illegal at interrupt context. */
		if (ml_at_interrupt_context())
			return KERN_FAILURE;
		return copyin(usraddr, kernaddr, size) ? KERN_FAILURE : KERN_SUCCESS;
	}

	/* Foreign address space: go through the task's vm_map. */
	return vm_map_read_user(get_task_map(task), usraddr, kernaddr, size);
}
/*
 * Write `size` bytes from the kernel buffer `kernaddr` to user address
 * `useraddr` in `task`.  Returns KERN_SUCCESS, or KERN_FAILURE when the
 * copy fails or when asked to copyout from interrupt context.
 */
__private_extern__ kern_return_t
chudxnu_task_write(
	task_t		task,
	uint64_t	useraddr,
	void		*kernaddr,
	vm_size_t	size)
{
	if (current_task() == task) {
		/* Writing into the current map uses copyout(), which must
		 * not run at interrupt context. */
		if (ml_at_interrupt_context())
			return KERN_FAILURE;
		return copyout(kernaddr, useraddr, size) ? KERN_FAILURE : KERN_SUCCESS;
	}

	/* Another task's map: use the vm_map user-write path. */
	return vm_map_write_user(get_task_map(task), kernaddr, useraddr, size);
}
/*
 * Copy `size` bytes of mapped kernel virtual memory starting at
 * `srcaddr` into `dstaddr`, by translating each address through the
 * kernel pmap and issuing physical reads.  Fails if any source page is
 * unmapped or the physical address falls outside installed memory.
 *
 * Fix: the original unit-selection conditions (`size==1` / `size<=2`)
 * allowed a 4-byte read when only 3 bytes remained at a 4-byte-aligned
 * address, overrunning `dstaddr` by one byte and underflowing the
 * unsigned `size` (runaway loop).  Using `size < 2` / `size < 4`
 * guarantees each read fits the remaining length.
 */
__private_extern__ kern_return_t
chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size)
{
	while(size > 0) {
		ppnum_t pp;
		addr64_t phys_addr;

		/* Translate the kernel VA; an unmapped page aborts the copy. */
		pp = pmap_find_phys(kernel_pmap, srcaddr);
		if(!pp) {
			return KERN_FAILURE;
		}

		/* page frame number + offset within the 4K page */
		phys_addr = ((addr64_t)pp << 12) |
			(srcaddr & 0x0000000000000FFFULL);

		/* Reject addresses at or beyond the end of physical memory. */
		if(phys_addr >= mem_actual) {
			return KERN_FAILURE;
		}

		/* Copy in the widest naturally-aligned unit that fits the
		 * remaining byte count. */
		if((phys_addr & 0x1) || size < 2) {
			*((uint8_t *)dstaddr) =
				ml_phys_read_byte_64(phys_addr);
			dstaddr = ((uint8_t *)dstaddr) + 1;
			srcaddr += sizeof(uint8_t);
			size -= sizeof(uint8_t);
		} else if((phys_addr & 0x3) || size < 4) {
			*((uint16_t *)dstaddr) =
				ml_phys_read_half_64(phys_addr);
			dstaddr = ((uint16_t *)dstaddr) + 1;
			srcaddr += sizeof(uint16_t);
			size -= sizeof(uint16_t);
		} else {
			*((uint32_t *)dstaddr) =
				ml_phys_read_word_64(phys_addr);
			dstaddr = ((uint32_t *)dstaddr) + 1;
			srcaddr += sizeof(uint32_t);
			size -= sizeof(uint32_t);
		}
	}
	return KERN_SUCCESS;
}
/*
 * Copy `size` bytes from `srcaddr` into mapped kernel virtual memory at
 * `dstaddr`, translating each address through the kernel pmap and
 * issuing physical writes.  Fails if any destination page is unmapped
 * or the physical address falls outside installed memory.
 *
 * Fixes: (1) the bound check used `>` where chudxnu_kern_read() uses
 * `>=` — an address equal to mem_actual is one past the end of physical
 * memory and must be rejected; (2) the original unit-selection
 * conditions (`size==1` / `size<=2`) allowed a 4-byte write with only
 * 3 bytes remaining at a 4-byte-aligned address, over-reading `srcaddr`
 * and underflowing the unsigned `size` (runaway loop).
 */
__private_extern__ kern_return_t
chudxnu_kern_write(
	vm_offset_t	dstaddr,
	void		*srcaddr,
	vm_size_t	size)
{
	while(size > 0) {
		ppnum_t pp;
		addr64_t phys_addr;

		/* Translate the kernel VA; an unmapped page aborts the copy. */
		pp = pmap_find_phys(kernel_pmap, dstaddr);
		if(!pp) {
			return KERN_FAILURE;
		}

		/* page frame number + offset within the 4K page */
		phys_addr = ((addr64_t)pp << 12) |
			(dstaddr & 0x0000000000000FFFULL);

		/* Reject addresses at or beyond the end of physical memory. */
		if(phys_addr >= mem_actual) {
			return KERN_FAILURE;
		}

		/* Copy in the widest naturally-aligned unit that fits the
		 * remaining byte count. */
		if((phys_addr & 0x1) || size < 2) {
			ml_phys_write_byte_64(phys_addr, *((uint8_t *)srcaddr));
			srcaddr = ((uint8_t *)srcaddr) + 1;
			dstaddr += sizeof(uint8_t);
			size -= sizeof(uint8_t);
		} else if((phys_addr & 0x3) || size < 4) {
			ml_phys_write_half_64(phys_addr, *((uint16_t *)srcaddr));
			srcaddr = ((uint16_t *)srcaddr) + 1;
			dstaddr += sizeof(uint16_t);
			size -= sizeof(uint16_t);
		} else {
			ml_phys_write_word_64(phys_addr, *((uint32_t *)srcaddr));
			srcaddr = ((uint32_t *)srcaddr) + 1;
			dstaddr += sizeof(uint32_t);
			size -= sizeof(uint32_t);
		}
	}
	return KERN_SUCCESS;
}
/*
 * Sanity checks for candidate frame pointers while walking a stack.
 * Supervisor frames must lie within the kernel stack bounds; user-mode
 * 64-bit frames are only required to be non-NULL canonical x86-64
 * addresses, while user-mode 32-bit frames are accepted unchecked.
 *
 * Fix: every macro argument is now parenthesized in the expansion so
 * that expression arguments (e.g. `fp + 8`) cannot change meaning via
 * operator precedence.
 */
#define VALID_STACK_ADDRESS(supervisor, addr, minKernAddr, maxKernAddr) \
	((supervisor) ? ((addr) >= (minKernAddr) && (addr) <= (maxKernAddr)) : TRUE)

#define VALID_STACK_ADDRESS64(supervisor, addr, minKernAddr, maxKernAddr) \
	((supervisor) ? ((addr) >= (minKernAddr) && (addr) <= (maxKernAddr)) : \
	 ((addr) != 0ULL && ((addr) <= 0x00007FFFFFFFFFFFULL || (addr) >= 0xFFFF800000000000ULL)))

/*
 * Stack frame layouts: saved frame pointer, return address, then the
 * callee's arguments.  The trailing member uses a standard C99 flexible
 * array member instead of the non-standard zero-length array `args[0]`.
 */
typedef struct _cframe64_t {
	uint64_t	prevFP;
	uint64_t	caller;
	uint64_t	args[];
} cframe64_t;

typedef struct _cframe_t {
	struct _cframe_t	*prev;
	uint32_t		caller;
	uint32_t		args[];
} cframe_t;
/*
 * Harvest a call stack for `thread` into `callstack` as 64-bit values.
 *
 * On entry *count is the capacity of `callstack` (in entries); on
 * success it is updated to the number of entries written.  user_only
 * selects the thread's saved user-mode state; otherwise the CPU's
 * interrupt-time state is used.  The walk follows the frame-pointer
 * chain, validating each candidate FP, and finally appends the word at
 * the top of the stack (useful as a leaf return-address heuristic).
 */
__private_extern__
kern_return_t chudxnu_thread_get_callstack64(
	thread_t thread,
	uint64_t *callstack,
	mach_msg_type_number_t *count,
	boolean_t user_only)
{
	kern_return_t kr = KERN_FAILURE;
	kern_return_t ret = KERN_SUCCESS;
	task_t task = thread->task;
	uint64_t currPC = 0;
	uint64_t prevPC = 0;
	uint64_t currFP = 0;
	uint64_t prevFP = 0;
	uint64_t rsp = 0;
	/* bounds for validating supervisor-mode frame pointers */
	uint64_t kernStackMin = min_valid_stack_address();
	uint64_t kernStackMax = max_valid_stack_address();
	uint64_t *buffer = callstack;
	int bufferIndex = 0;
	int bufferMaxIndex = *count;
	boolean_t supervisor = FALSE;
	boolean_t is64bit = FALSE;
	void * t_regs;

	if (user_only) {
		/* kernel-task threads have no user stack to sample */
		if (task == kernel_task) {
			return KERN_FAILURE;
		}
		t_regs = USER_STATE(thread);

		if(is_saved_state64(t_regs)) {
			void *int_state = current_cpu_datap()->cpu_int_state;
			x86_saved_state64_t *s64 = saved_state64(t_regs);
			/*
			 * Inside an interrupt: the sample is user-mode only if
			 * the interrupted state *is* the thread's user state at
			 * interrupt level 1; any deeper nesting means kernel.
			 */
			if(int_state) { supervisor = !(t_regs == int_state && current_cpu_datap()->cpu_interrupt_level == 1);
			} else {
				if(s64) {
					/* privilege level from the saved CS selector */
					supervisor = ((s64->isf.cs & SEL_PL) != SEL_PL_U);
				} else {
					supervisor = FALSE;
				}
			}
			is64bit = TRUE;
		} else {
			/* 32-bit saved state: CS selector decides the mode */
			x86_saved_state32_t *regs;
			regs = saved_state32(t_regs);
			supervisor = ((regs->cs & SEL_PL) != SEL_PL_U);
			is64bit = FALSE;
		}
	} else {
		/*
		 * Full-state sample: use the interrupt-time register state.
		 * NOTE(review): this path assumes a 32-bit interrupt frame and
		 * that cpu_int_state is non-NULL here — verify against callers.
		 */
		t_regs = current_cpu_datap()->cpu_int_state;
		x86_saved_state32_t *regs;
		regs = saved_state32(t_regs);
		supervisor = ((regs->cs & SEL_PL) != SEL_PL_U);
		is64bit = FALSE;
	}

	if(is64bit) {
		/* ---- 64-bit frame walk ---- */
		x86_saved_state64_t *regs = saved_state64(t_regs);
		if(user_only) {
			if(task == kernel_task) {
				return KERN_FAILURE;
			}
			regs = USER_REGS64(thread);
		}

		currPC = regs->isf.rip;
		currFP = regs->rbp;

		if(!currPC)
		{
			*count = 0;
			return KERN_FAILURE;
		}

		bufferIndex = 0;
		/* reserve one slot for the stack-top word appended below */
		if(!supervisor)
			bufferMaxIndex = bufferMaxIndex - 1;

		if(bufferMaxIndex < 1) {
			*count = 0;
			return KERN_RESOURCE_SHORTAGE;
		}
		buffer[bufferIndex++] = currPC;

		/* walk the frame-pointer chain while it stays plausible */
		while (VALID_STACK_ADDRESS64(supervisor, currFP, kernStackMin, kernStackMax))
		{
			/* return address sits one word above the saved FP */
			uint64_t caller = currFP + sizeof(uint64_t);
			if(!currFP) {
				currPC = 0;
				break;
			}

			if(bufferIndex >= bufferMaxIndex) {
				*count = bufferMaxIndex;
				return KERN_RESOURCE_SHORTAGE;
			}

			/* fetch the return address of this frame */
			kr = chudxnu_task_read(task, &currPC, caller, sizeof(uint64_t));
			if(kr != KERN_SUCCESS) {
				currPC = 0;
				break;
			}

			/* fetch the saved (previous) frame pointer */
			prevFP = 0;
			kr = chudxnu_task_read(task, &prevFP, currFP, sizeof(uint64_t));
			if(kr != KERN_SUCCESS) {
				currPC = 0;
				break;
			}

			/* record the PC only when the next frame also looks sane */
			if(VALID_STACK_ADDRESS64(supervisor, prevFP, kernStackMin, kernStackMax)) {
				buffer[bufferIndex++] = currPC;
				prevPC = currPC;
			}
			/* stacks grow down; a non-increasing FP means corruption */
			if(prevFP < currFP) {
				break;
			} else {
				currFP = prevFP;
			}
		}

		/*
		 * Append the word at the top of the stack.
		 * NOTE(review): unlike the 32-bit path below this is not gated
		 * on !supervisor, and no slot was reserved for the supervisor
		 * case — bufferIndex can reach bufferMaxIndex+1 here; confirm
		 * whether this can overrun the caller's buffer.
		 */
		kr = chudxnu_task_read(task, &rsp, (addr64_t) regs->isf.rsp, sizeof(uint64_t));
		if(kr == KERN_SUCCESS) {
			buffer[bufferIndex++] = rsp;
		}
	} else {
		/* ---- 32-bit frame walk (values widened to 64-bit) ---- */
		uint32_t tmpWord = 0;
		x86_saved_state32_t *regs = NULL;

		if(user_only) {
			/*
			 * NOTE(review): raw 0x11/0x12 returned instead of KERN_*
			 * codes — presumably distinguishable failure markers;
			 * verify callers only compare against KERN_SUCCESS.
			 */
			if(task == kernel_task || supervisor) {
				return 0x11;
			}
			regs = USER_REGS32(thread);
		} else {
			regs = saved_state32(current_cpu_datap()->cpu_int_state);
		}

		if(regs == NULL) {
			*count = 0;
			return 0x12;
		}

		currPC = (uint64_t) regs->eip;
		currFP = (uint64_t) regs->ebp;

		bufferIndex = 0;
		if(bufferMaxIndex < 1) {
			*count = 0;
			return KERN_RESOURCE_SHORTAGE;
		}
		buffer[bufferIndex++] = currPC;

		while (VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax))
		{
			cframe_t *fp = (cframe_t *) (uint32_t) currFP;

			if(bufferIndex >= bufferMaxIndex) {
				*count = bufferMaxIndex;
				return KERN_RESOURCE_SHORTAGE;
			}

			/* read this frame's return address */
			if(supervisor) {
				kr = chudxnu_kern_read(&tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
			} else {
				kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t));
			}

			if(kr != KERN_SUCCESS) {
				currPC = 0;
				break;
			}

			currPC = (uint64_t) tmpWord;

			/* read the saved (previous) frame pointer */
			prevFP = 0;
			if(supervisor) {
				kr = chudxnu_kern_read(&tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
			} else {
				kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t));
			}
			/*
			 * NOTE(review): kr is not checked after this read; on
			 * failure tmpWord may hold stale data from the previous
			 * read rather than 0 — confirm intended.
			 */
			prevFP = (uint64_t) tmpWord;

			if(prevFP) {
				buffer[bufferIndex++] = currPC;
				prevPC = currPC;
			}
			if(prevFP < currFP) {
				break;
			} else {
				currFP = prevFP;
			}
		}

		/* for user samples, append the word at the user stack pointer */
		if(!supervisor) {
			kr = chudxnu_task_read(task, &tmpWord, regs->uesp, sizeof(uint32_t));
			if(kr == KERN_SUCCESS) {
				rsp = (uint64_t) tmpWord; buffer[bufferIndex++] = rsp;
			}
		}
	}

	*count = bufferIndex;
	return ret;
}
/*
 * Legacy 32-bit variant of chudxnu_thread_get_callstack64(): harvest a
 * call stack for `thread` into `callStack` as 32-bit values.
 *
 * On entry *count is the capacity of `callStack` (in entries); on
 * success it is updated to the number of entries written.  user_only
 * selects the thread's saved user-mode registers; otherwise the CPU's
 * interrupt-time state is used.
 */
__private_extern__ kern_return_t
chudxnu_thread_get_callstack(
	thread_t thread,
	uint32_t *callStack,
	mach_msg_type_number_t *count,
	boolean_t user_only)
{
	kern_return_t kr;
	task_t task = thread->task;
	uint32_t currPC;
	uint32_t currFP;
	uint32_t prevFP = 0;
	uint32_t prevPC = 0;
	uint32_t esp = 0;
	/* bounds for validating supervisor-mode frame pointers */
	uint32_t kernStackMin = min_valid_stack_address();
	uint32_t kernStackMax = max_valid_stack_address();
	uint32_t *buffer = callStack;
	int bufferIndex = 0;
	int bufferMaxIndex = *count;
	boolean_t supervisor;
	x86_saved_state32_t *regs = NULL;

	if (user_only) {
		/* kernel-task threads have no user stack to sample */
		if (task == kernel_task) {
			return KERN_FAILURE;
		}
		regs = USER_REGS32(thread);
	} else {
		regs = saved_state32(current_cpu_datap()->cpu_int_state);
	}

	if (regs == NULL) {
		*count = 0;
		return KERN_FAILURE;
	}

	/* privilege level from the saved CS selector */
	supervisor = ((regs->cs & SEL_PL) != SEL_PL_U);

	currPC = regs->eip;
	currFP = regs->ebp;

	bufferIndex = 0;
	/* reserve one slot for the stack-top word appended for user samples */
	if(!supervisor)
		bufferMaxIndex -= 1; if (bufferMaxIndex < 1) {
		*count = 0;
		return KERN_RESOURCE_SHORTAGE;
	}
	buffer[bufferIndex++] = currPC;

	/* walk the frame-pointer chain while it stays plausible */
	while (VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) {
		cframe_t *fp = (cframe_t *) currFP;

		if (bufferIndex >= bufferMaxIndex) {
			*count = bufferMaxIndex;
			return KERN_RESOURCE_SHORTAGE;
		}

		/* read this frame's return address */
		if (supervisor) {
			kr = chudxnu_kern_read(
				&currPC,
				(vm_offset_t) &fp->caller,
				sizeof(currPC));
		} else {
			kr = chudxnu_task_read(
				task,
				&currPC,
				(vm_offset_t) &fp->caller,
				sizeof(currPC));
		}
		if (kr != KERN_SUCCESS)
			break;

		/* read the saved (previous) frame pointer */
		prevFP = 0;
		if (supervisor) {
			kr = chudxnu_kern_read(
				&prevFP,
				(vm_offset_t) &fp->prev,
				sizeof(prevFP));
		} else {
			kr = chudxnu_task_read(
				task,
				&prevFP,
				(vm_offset_t) &fp->prev,
				sizeof(prevFP));
		}
		/*
		 * NOTE(review): kr is not checked after the prevFP read; the
		 * walk relies on prevFP remaining 0 on failure so the next
		 * iteration's VALID_STACK_ADDRESS / prevFP<currFP checks stop
		 * it — confirm the read cannot leave prevFP partially written.
		 */
		if (prevFP) {
			buffer[bufferIndex++] = currPC;
			prevPC = currPC;
		}
		/* stacks grow down; a non-increasing FP means corruption */
		if (prevFP < currFP) {
			break;
		} else {
			currFP = prevFP;
		}
	}

	/* for user samples, append the word at the user stack pointer */
	if(!supervisor) {
		kr = chudxnu_task_read(task, &esp, regs->uesp, sizeof(uint32_t));
		if(kr == KERN_SUCCESS) {
			buffer[bufferIndex++] = esp;
		}
	}

	*count = bufferIndex;
	return KERN_SUCCESS;
}
#pragma mark **** DEPRECATED ****
/*
 * Deprecated: bind the calling thread to processor `cpu`.
 * Thin convenience wrapper over chudxnu_bind_thread().
 */
__private_extern__
kern_return_t chudxnu_bind_current_thread(int cpu)
{
	thread_t self = current_thread();

	return chudxnu_bind_thread(self, cpu);
}
kern_return_t chudxnu_unbind_current_thread(void)
{
return chudxnu_unbind_thread(current_thread());
}
/*
 * Deprecated: harvest a 32-bit call stack for the calling thread.
 * Convenience wrapper over chudxnu_thread_get_callstack().
 */
__private_extern__
kern_return_t chudxnu_current_thread_get_callstack(
	uint32_t		*callStack,
	mach_msg_type_number_t	*count,
	boolean_t		user_only)
{
	thread_t self = current_thread();

	return chudxnu_thread_get_callstack(self, callStack, count, user_only);
}
/*
 * Deprecated: historical alias ("activation") for the current thread.
 * Simply forwards to chudxnu_current_thread().
 */
__private_extern__
thread_t chudxnu_current_act(void)
{
	return chudxnu_current_thread();
}