#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <kern/host.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <ppc/PseudoKernel.h>
#include <ppc/exception.h>
#include <ppc/misc_protos.h>
#include <ppc/proc_reg.h>
#include <vm/vm_kern.h>
void bbSetRupt(ReturnHandler *rh, thread_act_t ct);
kern_return_t syscall_notify_interrupt ( void ) {
UInt32 interruptState;
task_t task;
spl_t s;
thread_act_t act, fact;
thread_t thread;
bbRupt *bbr;
BTTD_t *bttd;
int i;
task = current_task();
task_lock(task);
fact = (thread_act_t)task->thr_acts.next;
act = 0;
for(i = 0; i < task->thr_act_count; i++) {
if(fact->mact.bbDescAddr) {
bttd = (BTTD_t *)(fact->mact.bbDescAddr & -PAGE_SIZE);
if(bttd->InterruptVector) {
act = fact;
break;
}
}
fact = (thread_act_t)fact->thr_acts.next;
}
if(!act) {
task_unlock(task);
return KERN_FAILURE;
}
act_lock_thread(act);
task_unlock(task);
if ( (unsigned int)act == (unsigned int)current_act() ) {
bttd->InterruptControlWord = bttd->InterruptControlWord |
((bttd->postIntMask >> kCR2ToBackupShift) & kBackupCR2Mask);
act_unlock_thread(act);
return KERN_SUCCESS;
}
if(act->mact.emPendRupts >= 16) {
act_unlock_thread(act);
return KERN_RESOURCE_SHORTAGE;
}
if(!(bbr = (bbRupt *)kalloc(sizeof(bbRupt)))) {
act_unlock_thread(act);
return KERN_RESOURCE_SHORTAGE;
}
(void)hw_atomic_add(&act->mact.emPendRupts, 1);
bbr->rh.handler = bbSetRupt;
bbr->rh.next = act->handlers;
act->handlers = &bbr->rh;
s = splsched();
act_set_apc(act);
splx(s);
act_unlock_thread(act);
return KERN_SUCCESS;
}
/*
 * bbSetRupt -- return handler that delivers a queued blue box
 * pseudo-interrupt to its target thread.
 *
 * Queued on the act's return-handler chain by syscall_notify_interrupt();
 * runs in the context of the target activation.  Delivery depends on the
 * blue box interrupt state recorded in the descriptor's interrupt control
 * word.  The bbRupt block is freed on every path.
 */
void bbSetRupt(ReturnHandler *rh, thread_act_t act) {
savearea *sv;
BTTD_t *bttd;
bbRupt *bbr;
UInt32 interruptState;
/* The ReturnHandler is embedded at the start of the bbRupt, so recover it */
bbr = (bbRupt *)rh;
/* Blue box may have been disabled since the interrupt was queued; if so,
   there is nothing to deliver -- just release the block. */
if(!(act->mact.bbDescAddr)) {
kfree((vm_offset_t)bbr, sizeof(bbRupt));
return;
}
(void)hw_atomic_sub(&act->mact.emPendRupts, 1); /* no longer pending */
/* Need the user-state save area to touch user registers */
if(!(sv = (savearea *)find_user_regs(act))) {
kfree((vm_offset_t)bbr, sizeof(bbRupt));
return;
}
/* Kernel mapping of the descriptor table page */
bttd = (BTTD_t *)(act->mact.bbDescAddr & -PAGE_SIZE);
interruptState = (bttd->InterruptControlWord & kInterruptStateMask) >> kInterruptStateShift;
switch (interruptState) {
case kInSystemContext:
/* Thread is in the host context: post directly into the user CR */
sv->save_cr |= bttd->postIntMask;
break;
case kInAlternateContext:
/* Thread is running the alternate (blue box) context: move it into the
   pseudo-kernel and vector it to the registered interrupt handler,
   saving the interrupted PC/R1/MSR in the descriptor's exception info. */
bttd->InterruptControlWord = (bttd->InterruptControlWord & ~kInterruptStateMask) |
(kInPseudoKernel << kInterruptStateShift);
bttd->exceptionInfo.srr0 = sv->save_srr0; /* save interrupted PC */
sv->save_srr0 = bttd->InterruptVector;    /* resume at the interrupt vector */
bttd->exceptionInfo.sprg1 = sv->save_r1;  /* save interrupted R1 */
sv->save_r1 = bttd->exceptionInfo.sprg0;  /* switch to the pseudo-kernel R1 */
bttd->exceptionInfo.srr1 = sv->save_srr1; /* save interrupted MSR */
sv->save_srr1 &= ~(MASK(MSR_BE)|MASK(MSR_SE)); /* clear trace bits (BE/SE) */
act->mact.specFlags &= ~bbNoMachSC; /* re-enable Mach system calls */
disable_preemption(); /* per-CPU update must not migrate */
per_proc_info[cpu_number()].spcFlags = act->mact.specFlags;
enable_preemption();
/* NOTE(review): no break here -- control falls through and ALSO pends
   the interrupt in the backup CR2 bits below.  This looks deliberate
   (mirror the pending state in the ICW), but there is no fallthrough
   marker; confirm against the pseudo-kernel ICW protocol. */
case kInExceptionHandler:
case kInPseudoKernel:
case kOutsideBlue:
/* Cannot deliver right now: pend the interrupt in the backup CR2 bits */
bttd->InterruptControlWord = bttd->InterruptControlWord |
((bttd->postIntMask >> kCR2ToBackupShift) & kBackupCR2Mask);
break;
default:
break;
}
kfree((vm_offset_t)bbr, sizeof(bbRupt)); /* done with the handler block */
return;
}
/*
 * enable_bluebox -- turn the current thread into a blue box (Classic) thread.
 *
 * Wires the user page holding the descriptor table, double-maps it into the
 * kernel map, and records the table addresses and task ID in the thread's
 * machine-dependent state.  Requires superuser and a valid host port; fails
 * if the thread is already a blue box thread or either table pointer is 0.
 *
 * Returns KERN_SUCCESS, or KERN_INVALID_HOST / KERN_FAILURE on the checks
 * and on wire/allocate failures (user page is unwired on the unwind path).
 */
kern_return_t enable_bluebox(
host_t host,
void *taskID,          /* opaque ID stashed in mact.bbTaskID -- kernel does not interpret it */
void *TWI_TableStart,  /* user address of the TWI table */
char *Desc_TableStart  /* user address of the descriptor table (BTTD_t) */
) {
thread_t th;
vm_offset_t kerndescaddr, physdescaddr, origdescoffset;
kern_return_t ret;
th = current_thread();
/* Validate caller and arguments */
if ( host == HOST_NULL ) return KERN_INVALID_HOST;
if ( ! is_suser() ) return KERN_FAILURE;               /* root only */
if ( th->top_act->mact.bbDescAddr ) return KERN_FAILURE;   /* already enabled */
if ( ! (unsigned int) Desc_TableStart ) return KERN_FAILURE;
if ( ! TWI_TableStart ) return KERN_FAILURE;
/* Split the descriptor address into page base + offset within the page */
origdescoffset = (vm_offset_t)Desc_TableStart & (PAGE_SIZE - 1);
Desc_TableStart = (char *)((vm_offset_t)Desc_TableStart & -PAGE_SIZE);
/* Wire the descriptor page so it can be touched from interrupt level */
ret = vm_map_wire(th->top_act->map,
(vm_offset_t)Desc_TableStart,
(vm_offset_t)Desc_TableStart + PAGE_SIZE,
VM_PROT_READ | VM_PROT_WRITE,
FALSE);
if(ret != KERN_SUCCESS) {
return KERN_FAILURE;
}
/* Physical address of the now-wired page */
physdescaddr =
pmap_extract(th->top_act->map->pmap, (vm_offset_t) Desc_TableStart);
/* Reserve a kernel VA and map the same physical page there (double map) */
ret = kmem_alloc_pageable(kernel_map, &kerndescaddr, PAGE_SIZE);
if(ret != KERN_SUCCESS) {
/* Unwind: unwire the user page before failing */
(void) vm_map_unwire(th->top_act->map,
(vm_offset_t)Desc_TableStart,
(vm_offset_t)Desc_TableStart + PAGE_SIZE,
TRUE);
return KERN_FAILURE;
}
(void) pmap_enter(kernel_pmap,
kerndescaddr, physdescaddr, VM_PROT_READ|VM_PROT_WRITE,
TRUE);
/* Record blue box state in the thread's machine-dependent area */
th->top_act->mact.bbDescAddr = (unsigned int)kerndescaddr+origdescoffset; /* kernel view of the table */
th->top_act->mact.bbUserDA = (unsigned int)Desc_TableStart;  /* user page base, for later unwire */
th->top_act->mact.bbTableStart = (unsigned int)TWI_TableStart;
th->top_act->mact.bbTaskID = (unsigned int)taskID;
th->top_act->mact.bbTaskEnv = 0;
th->top_act->mact.emPendRupts = 0;   /* no pseudo-interrupts pending yet */
th->top_act->mact.specFlags &= ~(bbNoMachSC | bbPreemptive);
th->top_act->mact.specFlags |= bbThread;
/* No interrupt vector registered means the thread runs preemptively */
if(!(((BTTD_t *)kerndescaddr)->InterruptVector)) {
th->top_act->mact.specFlags |= bbPreemptive;
}
disable_preemption();   /* per-CPU flag copy must not migrate */
per_proc_info[cpu_number()].spcFlags = th->top_act->mact.specFlags;
enable_preemption();
{
/* Tell the BSD side this proc hosts the blue box */
extern void tbeproc(void *proc);
tbeproc(th->top_act->task->bsd_info);
}
return KERN_SUCCESS;
}
/*
 * disable_bluebox -- tear down blue box state for the current thread.
 *
 * Caller must be superuser, present a valid host port, and actually be a
 * blue box thread; the real work is done by disable_bluebox_internal().
 *
 * Returns KERN_SUCCESS, KERN_INVALID_HOST for a null host, or
 * KERN_FAILURE for a non-root caller / non-blue-box thread.
 */
kern_return_t disable_bluebox( host_t host ) {
	thread_act_t me = current_act();

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;
	if (!is_suser())
		return KERN_FAILURE;           /* root only */
	if (!me->mact.bbDescAddr)
		return KERN_FAILURE;           /* not a blue box thread */

	disable_bluebox_internal(me);
	return KERN_SUCCESS;
}
/*
 * disable_bluebox_internal -- release all blue box resources held by an
 * activation: unwire the user descriptor page, free its kernel double
 * mapping, and clear every blue-box field and flag.
 */
void disable_bluebox_internal(thread_act_t act) {
/* Unwire the user page that was wired by enable_bluebox() */
(void) vm_map_unwire(act->map,
(vm_offset_t)act->mact.bbUserDA,
(vm_offset_t)act->mact.bbUserDA + PAGE_SIZE,
FALSE);
/* Release the kernel double map (page-aligned base of bbDescAddr) */
kmem_free(kernel_map, (vm_offset_t)act->mact.bbDescAddr & -PAGE_SIZE, PAGE_SIZE);
/* Wipe all blue box state */
act->mact.bbDescAddr = 0;
act->mact.bbUserDA = 0;
act->mact.bbTableStart = 0;
act->mact.bbTaskID = 0;
act->mact.bbTaskEnv = 0;
act->mact.emPendRupts = 0;
act->mact.specFlags &= ~(bbNoMachSC | bbPreemptive | bbThread);
disable_preemption(); /* per-CPU flag copy must not migrate */
per_proc_info[cpu_number()].spcFlags = act->mact.specFlags;
enable_preemption();
return;
}
/*
 * bb_enable_bluebox -- firmware/syscall glue for enable_bluebox().
 *
 * Unpacks taskID (r3), TWI table (r4) and descriptor table (r5) from the
 * trap save area and returns the kern_return_t result in r3.
 * Always returns 1 (syscall handled).
 */
int bb_enable_bluebox( struct savearea *save )
{
	save->save_r3 = enable_bluebox( (host_t)0xFFFFFFFF,
									(void *)save->save_r3,
									(void *)save->save_r4,
									(char *)save->save_r5 );
	return 1;
}
/*
 * bb_disable_bluebox -- firmware/syscall glue for disable_bluebox().
 *
 * Returns the kern_return_t result in r3; always returns 1 (handled).
 */
int bb_disable_bluebox( struct savearea *save )
{
	save->save_r3 = disable_bluebox( (host_t)0xFFFFFFFF );
	return 1;
}
int bb_settaskenv( struct savearea *save )
{
int i;
task_t task;
thread_act_t act, fact;
task = current_task();
task_lock(task);
fact = (thread_act_t)task->thr_acts.next;
act = 0;
for(i = 0; i < task->thr_act_count; i++) {
if(fact->mact.bbDescAddr) {
if ( fact->mact.bbTaskID == save->save_r3 ) {
act = fact;
break;
}
}
fact = (thread_act_t)fact->thr_acts.next;
}
if ( !act || !act->active) {
task_unlock(task);
goto failure;
}
act_lock_thread(act);
task_unlock(task);
act->mact.bbTaskEnv = save->save_r4;
if(act == current_act()) {
disable_preemption();
per_proc_info[cpu_number()].spcFlags = act->mact.specFlags;
enable_preemption();
}
act_unlock_thread(act);
save->save_r3 = 0;
return 1;
failure:
save->save_r3 = -1;
return 1;
}