#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <kern/kalloc.h>
#include <kern/kern_types.h>
#include <kern/host.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <ppc/PseudoKernel.h>
#include <ppc/exception.h>
#include <ppc/misc_protos.h>
#include <ppc/proc_reg.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
void bbSetRupt(ReturnHandler *rh, thread_t ct);
kern_return_t syscall_notify_interrupt ( void ) {
UInt32 interruptState;
task_t task;
thread_t act, fact;
bbRupt *bbr;
BTTD_t *bttd;
int i;
task = current_task();
task_lock(task);
fact = (thread_t)task->threads.next;
act = 0;
for(i = 0; i < task->thread_count; i++) {
if(fact->machine.bbDescAddr) {
bttd = (BTTD_t *)(fact->machine.bbDescAddr & -PAGE_SIZE);
if(bttd->InterruptVector) {
act = fact;
break;
}
}
fact = (thread_t)fact->task_threads.next;
}
if(!act) {
task_unlock(task);
return KERN_FAILURE;
}
thread_reference(act);
task_unlock(task);
thread_mtx_lock(act);
if ( (unsigned int)act == (unsigned int)current_thread() ) {
bttd->InterruptControlWord = bttd->InterruptControlWord |
((bttd->postIntMask >> kCR2ToBackupShift) & kBackupCR2Mask);
thread_mtx_unlock(act);
thread_deallocate(act);
return KERN_SUCCESS;
}
if(act->machine.emPendRupts >= 16) {
thread_mtx_unlock(act);
thread_deallocate(act);
return KERN_RESOURCE_SHORTAGE;
}
if(!(bbr = (bbRupt *)kalloc(sizeof(bbRupt)))) {
thread_mtx_unlock(act);
thread_deallocate(act);
return KERN_RESOURCE_SHORTAGE;
}
(void)hw_atomic_add(&act->machine.emPendRupts, 1);
bbr->rh.handler = bbSetRupt;
bbr->rh.next = act->handlers;
act->handlers = &bbr->rh;
act_set_apc(act);
thread_mtx_unlock(act);
thread_deallocate(act);
return KERN_SUCCESS;
}
/*
 * bbSetRupt - return handler queued by syscall_notify_interrupt().
 * Runs on the target Blue Box thread (act) as it heads back to user
 * mode and actually posts the pending interrupt into its user state.
 * rh is the ReturnHandler embedded in the kalloc'd bbRupt block, which
 * is freed here on every path.
 */
void bbSetRupt(ReturnHandler *rh, thread_t act) {
	savearea	*sv;
	BTTD_t		*bttd;
	bbRupt		*bbr;
	UInt32		interruptState;

	bbr = (bbRupt *)rh;		/* Recover our control block from the handler */

	if(!(act->machine.bbDescAddr)) {	/* Blue Box since disabled on this thread? */
		kfree(bbr, sizeof(bbRupt));	/* Nothing to do; just release the block */
		return;
	}

	(void)hw_atomic_sub(&act->machine.emPendRupts, 1);	/* Uncount this 'rupt */

	if(!(sv = find_user_regs(act))) {	/* Locate the user-state savearea */
		kfree(bbr, sizeof(bbRupt));	/* Couldn't find it; release the block */
		return;
	}

	/* Descriptor table is at the base of its (page-aligned) page */
	bttd = (BTTD_t *)(act->machine.bbDescAddr & -PAGE_SIZE);

	interruptState = (bttd->InterruptControlWord & kInterruptStateMask) >> kInterruptStateShift;

	switch (interruptState) {

	case kInSystemContext:
		sv->save_cr |= bttd->postIntMask;	/* Post the interrupt directly in CR */
		break;

	case kInAlternateContext:
		/* Switch the descriptor's state to pseudo-kernel */
		bttd->InterruptControlWord = (bttd->InterruptControlWord & ~kInterruptStateMask) |
			(kInPseudoKernel << kInterruptStateShift);

		/* Save the interrupted PC/stack/MSR into the exception info
		   and redirect the thread into its Blue Box interrupt handler */
		bttd->exceptionInfo.srr0 = (unsigned int)sv->save_srr0;	/* Remember the return point */
		sv->save_srr0 = (uint64_t)act->machine.bbInterrupt;	/* Resume at the interrupt vector */
		bttd->exceptionInfo.sprg1 = (unsigned int)sv->save_r1;	/* Remember the old stack */
		sv->save_r1 = (uint64_t)bttd->exceptionInfo.sprg0;	/* Switch to the pseudo-kernel stack */
		bttd->exceptionInfo.srr1 = (unsigned int)sv->save_srr1;	/* Remember the old MSR */
		sv->save_srr1 &= ~(MASK(MSR_BE)|MASK(MSR_SE));	/* Kill branch/single-step on entry */
		act->machine.specFlags &= ~bbNoMachSC;	/* Re-enable Mach system calls */
		disable_preemption();			/* Pin to this processor... */
		getPerProc()->spcFlags = act->machine.specFlags;	/* ...while mirroring flags to per_proc */
		enable_preemption();

		/* FALL THROUGH: also post the interrupt in backup CR2 */

	case kInExceptionHandler:
	case kInPseudoKernel:
	case kOutsideBlue:
		/* Post the interrupt in backup CR2 within the control word */
		bttd->InterruptControlWord = bttd->InterruptControlWord |
			((bttd->postIntMask >> kCR2ToBackupShift) & kBackupCR2Mask);
		break;

	default:
		break;	/* Unknown state: drop the interrupt on the floor */
	}

	kfree(bbr, sizeof(bbRupt));	/* Release the control block */
	return;
}
/*
 * enable_bluebox - turn the current thread into a Blue Box (Classic)
 * thread.
 *
 * Wires the user's descriptor-table page into physical memory, double
 * maps it into the kernel at kerndescaddr, and records the vectors and
 * flags on the thread's machine state.  Requires superuser and a
 * host port; fails if the thread is already a Blue Box thread or the
 * table pointers are null.
 *
 * taskID          opaque Blue Box task identifier (stored verbatim)
 * TWI_TableStart  user address of the TWI table
 * Desc_TableStart user address of the descriptor table (BTTD_t)
 */
kern_return_t enable_bluebox(
	host_t host,
	void *taskID,
	void *TWI_TableStart,
	char *Desc_TableStart
	) {
	thread_t	th;
	vm_offset_t	kerndescaddr, origdescoffset;
	kern_return_t	ret;
	ppnum_t		physdescpage;
	BTTD_t		*bttd;

	th = current_thread();	/* Defeat the ever-present kernel protection racket */

	if ( host == HOST_NULL ) return KERN_INVALID_HOST;
	if ( ! is_suser() ) return KERN_FAILURE;		/* Must be superuser */
	if ( th->machine.bbDescAddr ) return KERN_FAILURE;	/* Already a Blue thread */
	if ( ! (unsigned int) Desc_TableStart ) return KERN_FAILURE;	/* Need a descriptor table */
	if ( ! TWI_TableStart ) return KERN_FAILURE;		/* Need a TWI table */

	/* Remember the in-page offset, then page-align the user address */
	origdescoffset = (vm_offset_t)Desc_TableStart & (PAGE_SIZE - 1);
	Desc_TableStart = (char *)((vm_offset_t)Desc_TableStart & -PAGE_SIZE);

	/* Wire the descriptor page so it can't be paged out from under us */
	ret = vm_map_wire(th->map,
		(vm_offset_t)Desc_TableStart,
		(vm_offset_t)Desc_TableStart + PAGE_SIZE,
		VM_PROT_READ | VM_PROT_WRITE,
		FALSE);

	if(ret != KERN_SUCCESS) {
		return KERN_FAILURE;
	}

	/* Physical page backing the (now wired) user page */
	physdescpage =
		pmap_find_phys(th->map->pmap, (addr64_t)Desc_TableStart);

	/* Grab a kernel virtual page to double map the descriptor into */
	ret = kmem_alloc_pageable(kernel_map, &kerndescaddr, PAGE_SIZE);
	if(ret != KERN_SUCCESS) {
		/* Undo the wire before bailing */
		(void) vm_map_unwire(th->map,
			(vm_offset_t)Desc_TableStart,
			(vm_offset_t)Desc_TableStart + PAGE_SIZE,
			TRUE);
		return KERN_FAILURE;
	}

	/* Map the user's physical page at the kernel address (wired) */
	(void) pmap_enter(kernel_pmap,
		kerndescaddr, physdescpage, VM_PROT_READ|VM_PROT_WRITE,
		VM_WIMG_USE_DEFAULT, TRUE);

	bttd = (BTTD_t *)kerndescaddr;	/* Kernel view of the descriptor table */

	/* Kernel address of the descriptor (original offset restored),
	   plus the user addresses needed for later teardown */
	th->machine.bbDescAddr = (unsigned int)kerndescaddr+origdescoffset;
	th->machine.bbUserDA = (unsigned int)Desc_TableStart;
	th->machine.bbTableStart = (unsigned int)TWI_TableStart;
	th->machine.bbTaskID = (unsigned int)taskID;
	th->machine.bbTaskEnv = 0;	/* Task environment set later via bb_settaskenv */
	th->machine.emPendRupts = 0;	/* No pending interrupts yet */
	th->machine.bbTrap = bttd->TrapVector;
	th->machine.bbSysCall = bttd->SysCallVector;
	th->machine.bbInterrupt = bttd->InterruptVector;
	th->machine.bbPending = bttd->PendingIntVector;
	th->machine.specFlags &= ~(bbNoMachSC | bbPreemptive);	/* Mach SCs on, non-preemptive */
	th->machine.specFlags |= bbThread;			/* Mark as a Blue thread */

	/* No interrupt vector means this thread is preemptive */
	if(!(bttd->InterruptVector)) {
		th->machine.specFlags |= bbPreemptive;
	}

	disable_preemption();			/* Mirror flags to per_proc without migrating */
	getPerProc()->spcFlags = th->machine.specFlags;
	enable_preemption();

	{
		/* Mark the BSD proc as Blue Box enabled */
		extern void tbeproc(void *proc);
		tbeproc(th->task->bsd_info);
	}

	return KERN_SUCCESS;
}
/*
 * disable_bluebox - tear down Blue Box state on the current thread.
 * Requires a valid host port and superuser; fails if the calling
 * thread is not a Blue Box thread.
 */
kern_return_t disable_bluebox( host_t host ) {
	thread_t cur = current_thread();

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;
	if (!is_suser())
		return KERN_FAILURE;
	if (!cur->machine.bbDescAddr)
		return KERN_FAILURE;	/* Not a Blue thread */

	disable_bluebox_internal(cur);	/* Do the real teardown */
	return KERN_SUCCESS;
}
/*
 * disable_bluebox_internal - release the wired user descriptor page,
 * free the kernel double map, and wipe all Blue Box state from the
 * thread's machine area, mirroring the cleared flags into per_proc.
 */
void disable_bluebox_internal(thread_t act) {
	vm_offset_t user_page = (vm_offset_t)act->machine.bbUserDA;

	/* Unwire the user page, then give back our kernel alias of it */
	(void) vm_map_unwire(act->map, user_page, user_page + PAGE_SIZE, FALSE);
	kmem_free(kernel_map,
		(vm_offset_t)act->machine.bbDescAddr & -PAGE_SIZE,
		PAGE_SIZE);

	/* Scrub every trace of Blue Box from the thread */
	act->machine.bbDescAddr = 0;
	act->machine.bbUserDA = 0;
	act->machine.bbTableStart = 0;
	act->machine.bbTaskID = 0;
	act->machine.bbTaskEnv = 0;
	act->machine.emPendRupts = 0;
	act->machine.specFlags &= ~(bbNoMachSC | bbPreemptive | bbThread);

	disable_preemption();	/* Keep per_proc consistent while we update it */
	getPerProc()->spcFlags = act->machine.specFlags;
	enable_preemption();
}
/*
 * bb_enable_bluebox - trap shim: unpack the savearea registers and
 * forward to enable_bluebox(), returning the result in r3.
 */
int bb_enable_bluebox( struct savearea *save )
{
	save->save_r3 = enable_bluebox((host_t)0xFFFFFFFF,
		(void *)save->save_r3,		/* taskID */
		(void *)save->save_r4,		/* TWI table */
		(char *)save->save_r5);		/* descriptor table */
	return 1;	/* Return code goes back in r3 */
}
/*
 * bb_disable_bluebox - trap shim: forward to disable_bluebox() and
 * return the result in r3.
 */
int bb_disable_bluebox( struct savearea *save )
{
	save->save_r3 = disable_bluebox((host_t)0xFFFFFFFF);
	return 1;	/* Return code goes back in r3 */
}
/*
 * bb_settaskenv - trap: set the Blue Box "task environment" word for
 * the Blue thread whose bbTaskID matches r3, storing r4 as the new
 * environment.  Returns 0 in r3 on success, -1 if no matching active
 * Blue thread exists.  Uses the same lock dance as
 * syscall_notify_interrupt: task lock to scan, thread reference to
 * survive the unlock, thread mutex to mutate.
 */
int bb_settaskenv( struct savearea *save )
{
	int		i;
	task_t		task;
	thread_t	act, fact;

	task = current_task();			/* Figure out who our task is */

	task_lock(task);			/* Lock our task */
	fact = (thread_t)task->threads.next;	/* Get the first thread on the task */
	act = 0;				/* Pretend we didn't find it yet */

	for(i = 0; i < task->thread_count; i++) {	/* Scan the whole thread list */
		if(fact->machine.bbDescAddr) {		/* Is this a Blue thread? */
			if ( fact->machine.bbTaskID == save->save_r3 ) {	/* Matching task ID? */
				act = fact;		/* Yeah... */
				break;			/* Found it, bail the loop */
			}
		}
		fact = (thread_t)fact->task_threads.next;	/* Step to the next thread */
	}

	if ( !act || !act->active) {		/* No match, or it's being terminated */
		task_unlock(task);		/* Release task lock */
		save->save_r3 = -1;		/* Return failure */
		return 1;
	}

	thread_reference(act);			/* Keep the thread from going away */
	task_unlock(task);			/* Safe to release now that we hold a ref */

	thread_mtx_lock(act);
	act->machine.bbTaskEnv = save->save_r4;	/* Set the new environment word */
	if(act == current_thread()) {		/* Are we setting our own? */
		disable_preemption();		/* Mirror it to per_proc without migrating */
		getPerProc()->ppbbTaskEnv = act->machine.bbTaskEnv;
		enable_preemption();
	}
	thread_mtx_unlock(act);
	thread_deallocate(act);			/* Drop our reference */

	save->save_r3 = 0;			/* Return success */
	return 1;
}