#include <cpus.h>
#include <debug.h>
#include <types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/thread_swap.h>
#include <mach/thread_status.h>
#include <vm/vm_kern.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <ppc/misc_protos.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>
#include <kern/spl.h>
#include <ppc/pmap.h>
#include <ppc/trap.h>
#include <ppc/mappings.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/asm.h>
#include <ppc/thread_act.h>
#include <ppc/vmachmon.h>
#include <ppc/low_trace.h>
#include <sys/kdebug.h>
extern int real_ncpus;						/* Number of processors actually running (set at boot) */
extern struct Saveanchor saveanchor;		/* Global anchor for the savearea free pool */
void machine_act_terminate(thread_act_t act);	/* Forward declaration; defined below */
/* Size of a fabricated kernel stack frame: frame header + argument area + red zone */
#define KF_SIZE (FM_SIZE+ARG_SIZE+FM_REDZONE)
#if DEBUG
/* Debug-only counters for FPU/vector facility traps and context switches */
int fpu_trap_count = 0;
int fpu_switch_count = 0;
int vec_trap_count = 0;
int vec_switch_count = 0;
#endif
/*
 * consider_machine_collect: machine-dependent hook invoked when the VM
 * system wants to reclaim machine-private resources.  Nothing to collect
 * on this platform, so this is a no-op.
 */
void
consider_machine_collect()
{
}
/*
 * consider_machine_adjust: machine-dependent hook invoked periodically so
 * the platform can rebalance its resources.  Here it simply asks the
 * mapping subsystem to adjust its free-mapping pool.
 */
void
consider_machine_adjust()
{
consider_mapping_adjust();
}
/*
 * machine_switch_context: machine-dependent part of switching from thread
 * 'old' to thread 'new', optionally resuming 'old' later at 'continuation'.
 * Saves live FP/vector state (on SMP), propagates virtual-machine flags,
 * switches the address space, and performs the low-level register/stack
 * switch.  Returns the thread we switched away from (Switch_context's
 * return value).
 */
thread_t
machine_switch_context(
thread_t old,
thread_continue_t continuation,
thread_t new)
{
register thread_act_t old_act = old->top_act, new_act = new->top_act;
register thread_t retval;
pmap_t new_pmap;
facility_context *fowner;
struct per_proc_info *ppinfo;
/* Switching a thread to itself is a scheduler bug. */
if (old == new)
panic("machine_switch_context");
ppinfo = getPerProc();	/* Get our processor block */
ppinfo->old_thread = (unsigned int)old;
ppinfo->cpu_flags &= ~traceBE;	/* Turn off branch tracing while we switch */
check_simple_locks();
/*
 * On multiprocessors, flush any live floating-point or vector context
 * owned by the outgoing activation back to its savearea, so another
 * CPU can resume the thread without losing register state.
 */
if(real_ncpus > 1) {
fowner = ppinfo->FPU_owner;	/* Cache the owner; it may change underneath us */
if(fowner) {	/* Is there any live FP context on this CPU? */
if(fowner->facAct == old->top_act) {	/* Does it belong to the outgoing activation? */
fpu_save(fowner);	/* Yes, save it */
}
}
fowner = ppinfo->VMX_owner;	/* Cache the owner; it may change underneath us */
if(fowner) {	/* Is there any live vector context on this CPU? */
if(fowner->facAct == old->top_act) {	/* Does it belong to the outgoing activation? */
vec_save(fowner);	/* Yes, save it */
}
}
}
/*
 * If the outgoing activation was running a virtual machine, capture the
 * current per-processor protection-key / FAM-mode bits back into it.
 */
if(old_act->mact.specFlags & runningVM) {
old_act->mact.specFlags &= ~(userProtKey|FamVMmode);
old_act->mact.specFlags |= (ppinfo->spcFlags) & (userProtKey|FamVMmode);
}
old_act->mact.specFlags &= ~OnProc;	/* Old activation is no longer on a processor */
new_act->mact.specFlags |= OnProc;	/* New activation now is */
/*
 * Switch address spaces: into the VM's pmap if the new activation is
 * running a virtual machine, otherwise into the new task's pmap when it
 * differs from the old one (or when we are leaving a VM context).
 */
if(new_act->mact.specFlags & runningVM) {
pmap_switch(new_act->mact.vmmCEntry->vmmPmap);
ppinfo->VMMareaPhys = new_act->mact.vmmCEntry->vmmContextPhys;
ppinfo->VMMXAFlgs = new_act->mact.vmmCEntry->vmmXAFlgs;
ppinfo->FAMintercept = new_act->mact.vmmCEntry->vmmFAMintercept;
}
else {
new_pmap = new_act->task->map->pmap;
if ((old_act->task->map->pmap != new_pmap) || (old_act->mact.specFlags & runningVM)) {
pmap_switch(new_pmap);
}
}
/*
 * If the old activation has a valid copyin/copyout space, mark it as
 * switched away and invalidate the segments used for copy I/O.
 */
if(old_act->mact.cioSpace != invalSpace) {
old_act->mact.cioSpace |= cioSwitchAway;
hw_blow_seg(copyIOaddr);
hw_blow_seg(copyIOaddr + 0x10000000ULL);
}
KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
old->reason, (int)new, old->sched_pri, new->sched_pri, 0);
retval = Switch_context(old, continuation, new);	/* Low-level register/stack switch */
assert(retval != (struct thread_shuttle*)NULL);
/*
 * We may resume on a different processor: refetch the per-proc block
 * before re-enabling branch tracing.
 */
if (branch_tracing_enabled()) {
ppinfo = getPerProc();
ppinfo->cpu_flags |= traceBE;
}
return retval;
}
/*
 * machine_thread_create: initialize the machine-dependent state of a new
 * thread.  Allocates and zeroes the initial general-register savearea,
 * links it as the thread's pcb/upcb, points the facility context at the
 * thread's own facility save block, and seeds the user MSR and the
 * vector/floating-point status registers.
 *
 * Always returns KERN_SUCCESS (save_alloc does not return failure here).
 */
kern_return_t
machine_thread_create(
thread_t thread,
task_t task)
{
savearea *sv;		/* The new thread's initial savearea */
/* Account for the saveareas we expect this activation to need. */
hw_atomic_add((uint32_t *)&saveanchor.savetarget, 4);
assert(thread->mact.pcb == (savearea *)0);	/* Must not already have a pcb */
sv = save_alloc();	/* Get a savearea for the thread */
/* Zero everything past the common header. */
bzero((char *)((unsigned int)sv + sizeof(savearea_comm)), (sizeof(savearea) - sizeof(savearea_comm)));
sv->save_hdr.save_prev = 0;	/* First (and only) savearea in the chain */
sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* Mark as general state */
sv->save_hdr.save_act = (struct thread_activation *)thread;	/* Back-pointer to owner */
thread->mact.pcb = sv;
thread->mact.curctx = &thread->mact.facctx;	/* Facility context lives in the thread itself */
thread->mact.facctx.facAct = thread;
thread->mact.cioSpace = invalSpace;	/* No copyin/copyout address space yet */
thread->mact.preemption_count = 0;
thread->mact.upcb = sv;	/* Initial user pcb is the same savearea */
sv->save_srr1 = (uint64_t)MSR_EXPORT_MASK_SET;	/* Initial user-mode MSR */
sv->save_fpscr = 0;	/* Clear FP status/control */
sv->save_vrsave = 0;	/* No live vector registers */
sv->save_vscr[0] = 0x00000000;
sv->save_vscr[1] = 0x00000000;
sv->save_vscr[2] = 0x00000000;
sv->save_vscr[3] = 0x00010000;	/* VSCR: non-Java mode on, saturation off */
return(KERN_SUCCESS);
}
/*
 * machine_thread_destroy: tear down the machine-dependent state of a dying
 * thread.  Terminates any bluebox/VM state, discards live FP and vector
 * context, releases every savearea on the vector, floating-point, and
 * general pcb chains, and gives back the savearea quota taken at create.
 */
void
machine_thread_destroy(
thread_t thread)
{
register savearea *pcb, *ppsv;
register savearea_vec *vsv, *vpsv;
register savearea_fpu *fsv, *fpsv;
machine_act_terminate(thread);	/* Shut down bluebox and virtual-machine state, if any */
/* Release the vector savearea chain. */
toss_live_vec(thread->mact.curctx);	/* Dump any live vector context */
vsv = thread->mact.curctx->VMXsave;
while(vsv) {
vpsv = vsv;
vsv = CAST_DOWN(savearea_vec *, vsv->save_hdr.save_prev);	/* Step to the previous savearea */
save_release((savearea *)vpsv);
}
thread->mact.curctx->VMXsave = 0;	/* Chain is gone */
/* Release the floating-point savearea chain. */
toss_live_fpu(thread->mact.curctx);	/* Dump any live FP context */
fsv = thread->mact.curctx->FPUsave;
while(fsv) {
fpsv = fsv;
fsv = CAST_DOWN(savearea_fpu *, fsv->save_hdr.save_prev);	/* Step to the previous savearea */
save_release((savearea *)fpsv);
}
thread->mact.curctx->FPUsave = 0;	/* Chain is gone */
/* Release the general savearea (pcb) chain. */
pcb = thread->mact.pcb;
while(pcb) {
ppsv = pcb;
pcb = CAST_DOWN(savearea *, pcb->save_hdr.save_prev);	/* Step to the previous savearea */
save_release(ppsv);
}
/* Give back the savearea quota reserved in machine_thread_create. */
hw_atomic_sub((uint32_t *)&saveanchor.savetarget, 4);
}
int switch_act_swapins = 0;	/* Counter; not referenced in this file — presumably updated/read elsewhere, verify before removing */
/*
 * machine_switch_act: machine-dependent part of switching the activation
 * on 'thread' from 'old' to 'new' (same shuttle, different activation).
 * Saves live FP/vector state on SMP, retargets the AST context, and
 * switches the address space if needed.
 */
void
machine_switch_act(
thread_t thread,
thread_act_t old,
thread_act_t new)
{
pmap_t new_pmap;
facility_context *fowner;
struct per_proc_info *ppinfo;
ppinfo = getPerProc();	/* Get our processor block */
/*
 * On multiprocessors, flush any live floating-point or vector context
 * owned by the outgoing activation back to its savearea.
 */
if(real_ncpus > 1) {
fowner = ppinfo->FPU_owner;	/* Cache the owner; it may change underneath us */
if(fowner) {	/* Is there any live FP context on this CPU? */
if(fowner->facAct == old) {	/* Does it belong to the outgoing activation? */
fpu_save(fowner);	/* Yes, save it */
}
}
fowner = ppinfo->VMX_owner;	/* Cache the owner; it may change underneath us */
if(fowner) {	/* Is there any live vector context on this CPU? */
if(fowner->facAct == old) {	/* Does it belong to the outgoing activation? */
vec_save(fowner);	/* Yes, save it */
}
}
}
old->mact.cioSpace |= cioSwitchAway;	/* Mark the copyin/out space as switched away */
ast_context(new, cpu_number());	/* Retarget pending ASTs at the new activation */
/*
 * Switch address spaces: into the VM's pmap if the new activation runs a
 * virtual machine, otherwise into the new task's pmap when it differs
 * from the old one (or when we are leaving a VM context).
 */
if(new->mact.specFlags & runningVM) {
pmap_switch(new->mact.vmmCEntry->vmmPmap);
}
else {
new_pmap = new->task->map->pmap;
if ((old->task->map->pmap != new_pmap) || (old->mact.specFlags & runningVM)) {
pmap_switch(new_pmap);
}
}
}
/*
 * act_machine_sv_free: release the saveareas of an activation that are no
 * longer needed, keeping only the most recent user state.
 *
 * For the vector and floating-point chains: if the context is above base
 * level, discard any live register state, then (under the facility sync
 * lock, so another processor cannot be mid-save) unhook everything above
 * the first committed savearea (save_level == 0) and release it.  For the
 * general pcb chain: release kernel saveareas until the most recent user
 * savearea (MSR[PR] set in save_srr1) is found, which becomes the pcb.
 */
void
act_machine_sv_free(thread_act_t act)
{
register savearea *pcb, *userpcb;
register savearea_vec *vsv, *vpst, *vsvt;
register savearea_fpu *fsv, *fpst, *fsvt;
register savearea *svp;
/* Trim the vector savearea chain. */
if(act->mact.curctx->VMXlevel) {	/* Is the vector context above base level? */
toss_live_vec(act->mact.curctx);	/* Discard any live vector registers */
vsv = act->mact.curctx->VMXsave;	/* Top of the chain */
/* Find the first committed savearea (save_level == 0), if any. */
while(vsv && vsv->save_hdr.save_level) vsv = (savearea_vec *)vsv->save_hdr.save_prev;
/* Take the sync lock so no other processor is saving into the chain. */
if(!hw_lock_to((hw_lock_t)&act->mact.curctx->VMXsync, LockTimeOut)) {
panic("act_machine_sv_free - timeout getting VMX sync lock\n");
}
vsvt = act->mact.curctx->VMXsave;	/* Remember where the chain to free starts */
act->mact.curctx->VMXsave = vsv;	/* Chain now begins at the committed entry */
act->mact.curctx->VMXlevel = 0;	/* Context is back at base level */
hw_lock_unlock((hw_lock_t)&act->mact.curctx->VMXsync);
/* Release everything above the committed entry. */
while(vsvt) {
if (vsvt == vsv) break;	/* Stop at the entry we kept */
vpst = vsvt;
vsvt = (savearea_vec *)vsvt->save_hdr.save_prev;
save_ret((savearea *)vpst);
}
}
/* Trim the floating-point savearea chain (same scheme as above). */
if(act->mact.curctx->FPUlevel) {	/* Is the FP context above base level? */
toss_live_fpu(act->mact.curctx);	/* Discard any live FP registers */
fsv = act->mact.curctx->FPUsave;	/* Top of the chain */
/* Find the first committed savearea (save_level == 0), if any. */
while(fsv && fsv->save_hdr.save_level) fsv = (savearea_fpu *)fsv->save_hdr.save_prev;
/* Take the sync lock so no other processor is saving into the chain. */
if(!hw_lock_to((hw_lock_t)&act->mact.curctx->FPUsync, LockTimeOut)) {
panic("act_machine_sv_free - timeout getting FPU sync lock\n");
}
fsvt = act->mact.curctx->FPUsave;	/* Remember where the chain to free starts */
act->mact.curctx->FPUsave = fsv;	/* Chain now begins at the committed entry */
act->mact.curctx->FPUlevel = 0;	/* Context is back at base level */
hw_lock_unlock((hw_lock_t)&act->mact.curctx->FPUsync);
/* Release everything above the committed entry. */
while(fsvt) {
if (fsvt == fsv) break;	/* Stop at the entry we kept */
fpst = fsvt;
fsvt = (savearea_fpu *)fsvt->save_hdr.save_prev;
save_ret((savearea *)fpst);
}
}
/*
 * Walk the general pcb chain, releasing kernel saveareas until the most
 * recent user state is found; keep that one as the activation's pcb.
 */
pcb = act->mact.pcb;
userpcb = 0;
while(pcb) {
if (pcb->save_srr1 & MASK(MSR_PR)) {	/* User state (problem state)? Keep it. */
userpcb = pcb;
break;
}
svp = pcb;
pcb = CAST_DOWN(savearea *, pcb->save_hdr.save_prev);
save_ret(svp);	/* Release the kernel savearea */
}
act->mact.pcb = userpcb;	/* May be 0 if no user state existed */
}
/*
 * machine_thread_set_current: make 'thread' the current thread on this
 * processor by installing its top activation as the current activation.
 */
void
machine_thread_set_current(thread_t thread)
{
set_machine_current_act(thread->top_act);
}
/*
 * machine_act_terminate: shut down machine-dependent facilities attached
 * to an activation: the bluebox (Classic) descriptor if present, and all
 * virtual-machine contexts if the activation had vmm control state.
 */
void
machine_act_terminate(
thread_act_t act)
{
if(act->mact.bbDescAddr) {	/* Bluebox enabled for this activation? */
disable_bluebox_internal(act);	/* Tear it down */
}
if(act->mact.vmmControl) {	/* Any virtual-machine state? */
vmm_tear_down_all(act);	/* Tear down every VM context */
}
}
/*
 * machine_thread_terminate_self: terminate the machine-dependent
 * facilities of the currently running activation.
 */
void
machine_thread_terminate_self(void)
{
machine_act_terminate(current_act());
}
/*
 * machine_thread_init: machine-dependent thread-subsystem initialization.
 * Only contains a compile/boot-time sanity check that a kernel stack fits
 * in a single page when machine-managed stacks are in use.
 */
void
machine_thread_init(void)
{
#ifdef MACHINE_STACK
#if KERNEL_STACK_SIZE > PPC_PGBYTES
panic("KERNEL_STACK_SIZE can't be greater than PPC_PGBYTES\n");
#endif
#endif
}
#if MACH_ASSERT
/*
 * dump_thread: debug helper — print the address of a thread.
 * (Prints a pointer with %x; fine on 32-bit PPC, would truncate on LP64.)
 */
void
dump_thread(thread_t th)
{
printf(" thread @ 0x%x:\n", th);
}
/*
 * dump_act: debug helper — print an activation's identity, reference
 * counts, and scheduling-related fields.  Returns the activation pointer
 * as an int (0 if the argument was NULL).
 *
 * NOTE(review): the second printf previously passed six arguments (two
 * literal zeros first) to a four-conversion format, so the "hi"/"lo"
 * labels actually printed suspend_count/active and higher/lower were
 * silently dropped.  The argument list now matches the labels.
 */
int
dump_act(thread_act_t thr_act)
{
if (!thr_act)
return(0);
printf("thr_act(0x%x)(%d): thread=%x(%d) task=%x(%d)\n",
thr_act, thr_act->ref_count,
thr_act->thread, thr_act->thread ? thr_act->thread->ref_count:0,
thr_act->task, thr_act->task ? thr_act->task->ref_count : 0);
printf("\tsusp=%x active=%x hi=%x lo=%x\n",
thr_act->suspend_count, thr_act->active,
thr_act->higher, thr_act->lower);
return((int)thr_act);
}
#endif
/*
 * get_useraddr: return the user PC (SRR0) from the current activation's
 * user pcb.  NOTE(review): save_srr0 appears to be 64 bits elsewhere in
 * this file (save_srr1 is uint64_t); the return truncates to 32 bits —
 * presumably fine for 32-bit user spaces, verify for 64-bit tasks.
 */
unsigned int
get_useraddr()
{
return(current_act()->mact.upcb->save_srr0);
}
/*
 * machine_stack_detach: detach and return the kernel stack of a thread.
 * Frees the activation's disposable saveareas first (keeping only user
 * state), then clears and returns the thread's kernel stack pointer.
 */
vm_offset_t
machine_stack_detach(
thread_t thread)
{
vm_offset_t stack;
KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_DETACH),
thread, thread->priority,
thread->sched_pri, 0, 0);
if (thread->top_act)	/* Only if there is an activation to clean up */
act_machine_sv_free(thread->top_act);	/* Release kernel saveareas, keep user state */
stack = thread->kernel_stack;
thread->kernel_stack = 0;	/* Thread no longer owns the stack */
return(stack);
}
/*
 * machine_stack_attach: attach a kernel stack to a thread and build the
 * savearea needed to start (or resume) it at 'start'.  Fabricates a fresh
 * general savearea on top of the pcb chain with SRR0 set to the start
 * routine, R1 set to a frame at the top of the new stack, and a
 * supervisor-mode, interrupts-off MSR.
 */
void
machine_stack_attach(
thread_t thread,
vm_offset_t stack,
void (*start)(thread_t))
{
thread_act_t thr_act;
unsigned int *kss;	/* Top (initial stack pointer) of the kernel stack */
struct savearea *sv;
KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_ATTACH),
thread, thread->priority,
thread->sched_pri, start,
0);
assert(stack);
kss = (unsigned int *)STACK_IKS(stack);	/* Locate the top of the usable stack */
thread->kernel_stack = stack;
if ((thr_act = thread->top_act) != 0) {	/* Only build state if there is an activation */
sv = save_get();	/* Grab a fresh savearea */
sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* Mark as general state */
sv->save_hdr.save_act = (struct thread_activation *)thr_act;	/* Back-pointer to owner */
sv->save_hdr.save_prev = (addr64_t)((uintptr_t)thr_act->mact.pcb);	/* Push onto the pcb chain */
thr_act->mact.pcb = sv;
sv->save_srr0 = (unsigned int) start;	/* Resume at the start routine */
sv->save_r1 = (vm_offset_t)((int)kss - KF_SIZE);	/* Initial stack frame below the top */
sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;	/* Supervisor mode, interrupts disabled */
sv->save_fpscr = 0;	/* Clear FP status/control */
sv->save_vrsave = 0;	/* No live vector registers */
sv->save_vscr[3] = 0x00010000;	/* VSCR: non-Java mode on */
*(CAST_DOWN(int *, sv->save_r1)) = 0;	/* Zero the back chain so stack walks terminate */
thr_act->mact.ksp = 0;	/* Mark the kernel stack as in use */
}
return;
}
/*
 * machine_stack_handoff: hand the kernel stack of 'old' directly to 'new'
 * without a full context switch.  Detaches old's stack, gives it to new
 * (swapping reserved stacks if needed), saves live FP/vector state on
 * SMP, propagates VM flags, switches the address space, and installs the
 * new thread's per-processor state.
 */
void
machine_stack_handoff(
thread_t old,
thread_t new)
{
vm_offset_t stack;
pmap_t new_pmap;
facility_context *fowner;
mapping *mp;
struct per_proc_info *ppinfo;
assert(new->top_act);
assert(old->top_act);
/* Handing a stack to the same thread is a scheduler bug. */
if (old == new)
panic("machine_stack_handoff");
stack = machine_stack_detach(old);	/* Take the stack from the old thread */
new->kernel_stack = stack;	/* ...and give it to the new one */
/* If the stack handed over was the old thread's reserved stack, swap
 * reserved stacks so each thread still owns one. */
if (stack == old->reserved_stack) {
assert(new->reserved_stack);
old->reserved_stack = new->reserved_stack;
new->reserved_stack = stack;
}
ppinfo = getPerProc();	/* Get our processor block */
ppinfo->cpu_flags &= ~traceBE;	/* Turn off branch tracing while we switch */
/*
 * On multiprocessors, flush any live floating-point or vector context
 * owned by the outgoing activation back to its savearea.
 */
if(real_ncpus > 1) {
fowner = ppinfo->FPU_owner;	/* Cache the owner; it may change underneath us */
if(fowner) {	/* Is there any live FP context on this CPU? */
if(fowner->facAct == old->top_act) {	/* Does it belong to the outgoing activation? */
fpu_save(fowner);	/* Yes, save it */
}
}
fowner = ppinfo->VMX_owner;	/* Cache the owner; it may change underneath us */
if(fowner) {	/* Is there any live vector context on this CPU? */
if(fowner->facAct == old->top_act) {	/* Does it belong to the outgoing activation? */
vec_save(fowner);	/* Yes, save it */
}
}
}
/*
 * If the outgoing activation was running a virtual machine, capture the
 * current per-processor protection-key / FAM-mode bits back into it.
 */
if(old->top_act->mact.specFlags & runningVM) {
old->top_act->mact.specFlags &= ~(userProtKey|FamVMmode);
old->top_act->mact.specFlags |= (ppinfo->spcFlags) & (userProtKey|FamVMmode);
}
old->top_act->mact.specFlags &= ~OnProc;	/* Old activation is no longer on a processor */
new->top_act->mact.specFlags |= OnProc;	/* New activation now is */
KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
old->reason, (int)new, old->sched_pri, new->sched_pri, 0);
/*
 * Switch address spaces: into the VM's pmap if the new activation runs a
 * virtual machine, otherwise into the new task's pmap when it differs
 * from the old one (or when we are leaving a VM context).
 */
if(new->top_act->mact.specFlags & runningVM) {
pmap_switch(new->top_act->mact.vmmCEntry->vmmPmap);
ppinfo->VMMareaPhys = new->top_act->mact.vmmCEntry->vmmContextPhys;
ppinfo->VMMXAFlgs = new->top_act->mact.vmmCEntry->vmmXAFlgs;
ppinfo->FAMintercept = new->top_act->mact.vmmCEntry->vmmFAMintercept;
}
else {
new_pmap = new->top_act->task->map->pmap;
if ((old->top_act->task->map->pmap != new_pmap) || (old->top_act->mact.specFlags & runningVM)) {
pmap_switch(new_pmap);
}
}
machine_thread_set_current(new);	/* Make the new thread current */
/* Install the new activation's per-processor values. */
ppinfo->Uassist = new->top_act->mact.cthread_self;
ppinfo->ppbbTaskEnv = new->top_act->mact.bbTaskEnv;
ppinfo->spcFlags = new->top_act->mact.specFlags;
old->top_act->mact.cioSpace |= cioSwitchAway;	/* Mark the copy I/O space as switched away */
mp = (mapping *)&ppinfo->ppCIOmp;
mp->mpSpace = invalSpace;	/* Invalidate the per-proc copy I/O mapping */
if (branch_tracing_enabled())
ppinfo->cpu_flags |= traceBE;	/* Re-enable branch tracing if configured */
if(trcWork.traceMask) dbgTrace(0x12345678, (unsigned int)old->top_act, (unsigned int)new->top_act, 0);	/* Low-level trace of the handoff */
return;
}
/*
 * call_continuation: reset the current thread's stack pointer to a fresh
 * frame at the top of its kernel stack and jump to 'continuation' there.
 * Does not return.
 */
void
call_continuation(void (*continuation)(void) )
{
unsigned int *kss;	/* Top (initial stack pointer) of the kernel stack */
vm_offset_t tsp;	/* Fabricated frame for the continuation */
assert(current_thread()->kernel_stack);
kss = (unsigned int *)STACK_IKS(current_thread()->kernel_stack);
assert(continuation);
tsp = (vm_offset_t)((int)kss - KF_SIZE);	/* Carve a frame below the top of the stack */
assert(tsp);
*((int *)tsp) = 0;	/* Zero the back chain so stack walks terminate */
Call_continuation(continuation, tsp);	/* Switch to the new frame and run; no return */
return;
}