#include <cpus.h>
#include <debug.h>
#include <types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/thread_swap.h>
#include <mach/thread_status.h>
#include <vm/vm_kern.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <ppc/misc_protos.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>
#include <kern/spl.h>
#include <ppc/pmap.h>
#include <ppc/trap.h>
#include <ppc/mappings.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/asm.h>
#include <ppc/thread_act.h>
#include <ppc/vmachmon.h>
#include <ppc/low_trace.h>
#include <sys/kdebug.h>
extern int real_ncpus;
extern struct Saveanchor saveanchor;
#define KF_SIZE (FM_SIZE+ARG_SIZE+FM_REDZONE)
#if DEBUG
int fpu_trap_count = 0;
int fpu_switch_count = 0;
int vec_trap_count = 0;
int vec_switch_count = 0;
#endif
extern struct thread_shuttle *Switch_context(
struct thread_shuttle *old,
void (*cont)(void),
struct thread_shuttle *new);
#if MACH_LDEBUG || MACH_KDB
void log_thread_action (char *, long, long, long);
#endif
/*
 * Machine-dependent memory collection hook.  There is nothing to
 * collect on this platform, so the routine is an intentional no-op.
 */
void
consider_machine_collect()
{
}
/*
 * Machine-dependent memory-pressure hook: all the work is delegated to
 * the pmap layer's mapping-pool adjustment routine.
 */
void
consider_machine_adjust()
{
	consider_mapping_adjust();
}
/*
 * machine_kernel_stack_init: initialize a thread's savearea so that the
 * first time it is switched to, it begins executing start_pos on its
 * kernel stack.
 *
 * thread    - thread to initialize; must already have both a pcb and a
 *             kernel stack (asserted below).
 * start_pos - kernel entry point; stored as both the saved LR and SRR0.
 */
void
machine_kernel_stack_init(
struct thread_shuttle *thread,
void (*start_pos)(thread_t))
{
vm_offset_t stack;
unsigned int *kss, *stck;
struct savearea *sv;
assert(thread->top_act->mact.pcb);
assert(thread->kernel_stack);
stack = thread->kernel_stack;
kss = (unsigned int *)STACK_IKS(stack);	/* kernel state area at the top of the stack */
sv = thread->top_act->mact.pcb;
/* Resume at start_pos whether entered via LR return or via rfi (SRR0). */
sv->save_lr = (uint64_t) start_pos;
sv->save_srr0 = (uint64_t) start_pos;
sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;	/* initial MSR: supervisor mode, interrupts off */
/* Carve the initial call frame just below the kernel state area. */
stck = (unsigned int *)((unsigned int)kss - KF_SIZE);
sv->save_r1 = (uint64_t)stck;	/* r1 is the PPC stack pointer */
sv->save_fpscr = 0;	/* clear floating point status */
sv->save_vrsave = 0;	/* clear vector save mask */
sv->save_vscr[3] = 0x00010000;	/* NOTE(review): presumably sets the VSCR non-Java mode bit -- confirm */
*stck = 0;	/* zero backchain word: terminates stack-frame walkback */
thread->top_act->mact.ksp = 0;	/* NOTE(review): ksp == 0 appears to mean "kernel stack in use" -- confirm */
}
/*
 * switch_context: machine-dependent context switch from "old" to "new",
 * optionally arranging for "continuation" to run in the new context.
 * Returns the thread switched away from, as reported by the low-level
 * Switch_context routine.
 */
struct thread_shuttle*
switch_context(
struct thread_shuttle *old,
void (*continuation)(void),
struct thread_shuttle *new)
{
register thread_act_t old_act = old->top_act, new_act = new->top_act;
register struct thread_shuttle* retval;
pmap_t new_pmap;
facility_context *fowner;
struct per_proc_info *ppinfo;
#if MACH_LDEBUG || MACH_KDB
log_thread_action("switch",
(long)old,
(long)new,
(long)__builtin_return_address(0));
#endif
ppinfo = getPerProc();	/* this cpu's per-processor info */
ppinfo->old_thread = (unsigned int)old;
ppinfo->cpu_flags &= ~traceBE;	/* disable branch tracing across the switch */
check_simple_locks();
/*
 * On multiprocessors, save any live floating point or vector context
 * owned by the outgoing activation so another cpu can pick it up.
 */
if(real_ncpus > 1) {
fowner = ppinfo->FPU_owner;
if(fowner) {
if(fowner->facAct == old->top_act) {
fpu_save(fowner);
}
}
fowner = ppinfo->VMX_owner;
if(fowner) {
if(fowner->facAct == old->top_act) {
vec_save(fowner);
}
}
}
/*
 * If the outgoing activation is running a virtual machine, refresh its
 * protection-key / FAM-mode bits from the per-processor copy.
 */
if(old_act->mact.specFlags & runningVM) {
old_act->mact.specFlags &= ~(userProtKey|FamVMmode);
old_act->mact.specFlags |= (ppinfo->spcFlags) & (userProtKey|FamVMmode);
}
/*
 * Switch address spaces: to the incoming VM's pmap (also publishing its
 * context in per-proc), or to the incoming task's pmap when it differs
 * from the current one (or we are leaving a VM).
 */
if(new_act->mact.specFlags & runningVM) {
pmap_switch(new_act->mact.vmmCEntry->vmmPmap);
ppinfo->VMMareaPhys = new_act->mact.vmmCEntry->vmmContextPhys;
ppinfo->VMMXAFlgs = new_act->mact.vmmCEntry->vmmXAFlgs;
ppinfo->FAMintercept = new_act->mact.vmmCEntry->vmmFAMintercept;
}
else {
new_pmap = new_act->task->map->pmap;
if ((old_act->task->map->pmap != new_pmap) || (old_act->mact.specFlags & runningVM)) {
pmap_switch(new_pmap);
}
}
/*
 * Invalidate the outgoing activation's cached copyin/out segments, if
 * any, so they are re-established on next use.
 */
if(old_act->mact.cioSpace != invalSpace) {
old_act->mact.cioSpace |= cioSwitchAway;
hw_blow_seg(copyIOaddr);
hw_blow_seg(copyIOaddr + 0x10000000ULL);
}
KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
(int)old, (int)new, old->sched_pri, new->sched_pri, 0);
retval = Switch_context(old, continuation, new);	/* the actual low-level switch */
assert(retval != (struct thread_shuttle*)NULL);
/* We may resume on a different cpu; re-fetch per-proc before using it. */
if (branch_tracing_enabled()) {
ppinfo = getPerProc();
ppinfo->cpu_flags |= traceBE;	/* re-enable branch tracing */
}
return retval;
}
/*
 * Record a system call's kernel return code in the thread's saved
 * state: it is stored in the saved r3 of the top activation's pcb
 * (presumably the register the caller reads the result from).
 */
void
thread_set_syscall_return(
	struct thread_shuttle *thread,
	kern_return_t retval)
{
	struct savearea *pcb = thread->top_act->mact.pcb;

	pcb->save_r3 = retval;
}
/*
 * thread_machine_create: machine-dependent initialization of a new
 * thread/activation pair.  Allocates and initializes the activation's
 * first (general) savearea, points the live facility context at the
 * activation's own, and sets the initial MSR and FP/vector status.
 *
 * thread    - the new thread shuttle (currently unused here).
 * thr_act   - activation to initialize; must not already have a pcb.
 * start_pos - entry point (unused here; the stack attach sets it).
 *
 * Returns KERN_SUCCESS.
 *
 * Change from the original: removed the unused locals "CIsTooLimited"
 * and "i".
 */
kern_return_t
thread_machine_create(
	struct thread_shuttle *thread,
	thread_act_t thr_act,
	void (*start_pos)(thread_t))
{
	savearea *sv;		/* the new general-context savearea */

	hw_atomic_add(&saveanchor.savetarget, 4);	/* raise the savearea pool target */

	assert(thr_act->mact.pcb == (savearea *)0);	/* must not already have a pcb */

	sv = save_alloc();	/* allocate the general context savearea */

	/* Clear everything past the common header; the header is set below. */
	bzero((char *)((unsigned int)sv + sizeof(savearea_comm)), (sizeof(savearea) - sizeof(savearea_comm)));

	sv->save_hdr.save_prev = 0;	/* only savearea on the chain */
	sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* mark as general context */
	sv->save_hdr.save_act = thr_act;	/* bind it to this activation */
	thr_act->mact.pcb = sv;

	thr_act->mact.curctx = &thr_act->mact.facctx;	/* live facility context is the activation's own */
	thr_act->mact.facctx.facAct = thr_act;
	thr_act->mact.cioSpace = invalSpace;	/* no cached copyin/out address space yet */
	thr_act->mact.preemption_count = 0;

	sv->save_srr1 = (uint64_t)MSR_EXPORT_MASK_SET;	/* initial MSR image */
	sv->save_fpscr = 0;	/* clear floating point status */
	sv->save_vrsave = 0;	/* clear vector save mask */
	sv->save_vscr[0] = 0x00000000;
	sv->save_vscr[1] = 0x00000000;
	sv->save_vscr[2] = 0x00000000;
	sv->save_vscr[3] = 0x00010000;	/* NOTE(review): presumably the VSCR non-Java mode bit -- confirm */

	return(KERN_SUCCESS);
}
/*
 * Release machine-dependent thread resources: if the thread still owns
 * a kernel stack, free it while at splsched.
 */
void
thread_machine_destroy( thread_t thread )
{
	spl_t s;

	if (!thread->kernel_stack)
		return;		/* nothing to free */

	s = splsched();
	stack_free(thread);
	splx(s);
}
/*
 * Machine-dependent per-activation flush hook; there is nothing to do
 * on this platform.
 */
void
thread_machine_flush( thread_act_t cur_act )
{
}
int switch_act_swapins = 0;
/*
 * machine_switch_act: machine-dependent work for switching the
 * activation running on "thread" from "old" to "new" on processor
 * "cpu": saves any live FP/vector state owned by "old" (MP only),
 * marks old's copyin/out space as switched away, updates the per-cpu
 * active stack and AST context, and switches address spaces when
 * needed.
 */
void
machine_switch_act(
thread_t thread,
thread_act_t old,
thread_act_t new,
int cpu)
{
pmap_t new_pmap;
facility_context *fowner;
struct per_proc_info *ppinfo;
ppinfo = getPerProc();	/* this cpu's per-processor info */
/* On MP, flush live FP/vector context owned by the old activation. */
if(real_ncpus > 1) {
fowner = ppinfo->FPU_owner;
if(fowner) {
if(fowner->facAct == old) {
fpu_save(fowner);
}
}
fowner = ppinfo->VMX_owner;
if(fowner) {
if(fowner->facAct == old) {
vec_save(fowner);
}
}
}
old->mact.cioSpace |= cioSwitchAway;	/* invalidate old's cached copyin/out space on next use */
active_stacks[cpu] = thread->kernel_stack;
ast_context(new, cpu);
/*
 * Switch address spaces: the new activation's VM pmap, or its task's
 * pmap when it differs from old's (or we are leaving a VM).
 */
if(new->mact.specFlags & runningVM) {
pmap_switch(new->mact.vmmCEntry->vmmPmap);
}
else {
new_pmap = new->task->map->pmap;
if ((old->task->map->pmap != new_pmap) || (old->mact.specFlags & runningVM)) {
pmap_switch(new_pmap);
}
}
}
/*
 * pcb_user_to_kernel: no machine-dependent work is needed here on this
 * platform, so this is a no-op.
 */
void
pcb_user_to_kernel(thread_act_t act)
{
}
/*
 * act_machine_sv_free: release every savearea an activation no longer
 * needs, leaving only its user-level context.
 *
 * For each of the vector and floating point facility chains: toss any
 * live (register-resident) state, walk down to the user-level savearea
 * (save_level == 0), then -- briefly holding the facility sync lock --
 * atomically detach everything above it and reset the facility level.
 * The detached saveareas are returned to the free pool after the lock
 * is dropped.  Finally the general (pcb) chain is trimmed back to the
 * first savearea captured from user mode (MSR_PR set in saved SRR1),
 * which becomes the new pcb.
 *
 * Change from the original: removed the unused local "i".
 */
void
act_machine_sv_free(thread_act_t act)
{
	register savearea *pcb, *userpcb;
	register savearea_vec *vsv, *vpst, *vsvt;
	register savearea_fpu *fsv, *fpst, *fsvt;
	register savearea *svp;

	if(act->mact.curctx->VMXlevel) {		/* vector state above user level? */
		toss_live_vec(act->mact.curctx);	/* discard any live vector registers */

		vsv = act->mact.curctx->VMXsave;	/* head of the vector savearea chain */
		/* Walk down to the user-level vector savearea, if any. */
		while(vsv && vsv->save_hdr.save_level) vsv = (savearea_vec *)vsv->save_hdr.save_prev;

		if(!hw_lock_to((hw_lock_t)&act->mact.curctx->VMXsync, LockTimeOut)) {
			panic("act_machine_sv_free - timeout getting VMX sync lock\n");
		}

		vsvt = act->mact.curctx->VMXsave;	/* remember the old chain head */
		act->mact.curctx->VMXsave = vsv;	/* keep only the user-level savearea */
		act->mact.curctx->VMXlevel = 0;		/* back to user level */

		hw_lock_unlock((hw_lock_t)&act->mact.curctx->VMXsync);

		/* Free the detached saveareas, stopping at the one we kept. */
		while(vsvt) {
			if (vsvt == vsv) break;
			vpst = vsvt;
			vsvt = (savearea_vec *)vsvt->save_hdr.save_prev;
			save_ret((savearea *)vpst);
		}
	}

	if(act->mact.curctx->FPUlevel) {		/* FP state above user level? */
		toss_live_fpu(act->mact.curctx);	/* discard any live FP registers */

		fsv = act->mact.curctx->FPUsave;	/* head of the FP savearea chain */
		/* Walk down to the user-level FP savearea, if any. */
		while(fsv && fsv->save_hdr.save_level) fsv = (savearea_fpu *)fsv->save_hdr.save_prev;

		if(!hw_lock_to((hw_lock_t)&act->mact.curctx->FPUsync, LockTimeOut)) {
			panic("act_machine_sv_free - timeout getting FPU sync lock\n");
		}

		fsvt = act->mact.curctx->FPUsave;	/* remember the old chain head */
		act->mact.curctx->FPUsave = fsv;	/* keep only the user-level savearea */
		act->mact.curctx->FPUlevel = 0;		/* back to user level */

		hw_lock_unlock((hw_lock_t)&act->mact.curctx->FPUsync);

		/* Free the detached saveareas, stopping at the one we kept. */
		while(fsvt) {
			if (fsvt == fsv) break;
			fpst = fsvt;
			fsvt = (savearea_fpu *)fsvt->save_hdr.save_prev;
			save_ret((savearea *)fpst);
		}
	}

	/*
	 * Trim the general chain: free saveareas until one captured from
	 * user mode (MSR_PR set) is found; that one becomes the new pcb.
	 */
	pcb = act->mact.pcb;
	userpcb = 0;

	while(pcb) {
		if (pcb->save_srr1 & MASK(MSR_PR)) {
			userpcb = pcb;
			break;
		}
		svp = pcb;
		pcb = pcb->save_hdr.save_prev;
		save_ret(svp);
	}

	act->mact.pcb = userpcb;
}
/*
 * Tear down virtual-machine-related state attached to an activation:
 * the blue box descriptor, if one is mapped, and any vmm contexts.
 */
void
act_virtual_machine_destroy(thread_act_t act)
{
	if (act->mact.bbDescAddr)
		disable_bluebox_internal(act);	/* blue box is enabled: shut it down */

	if (act->mact.vmmControl)
		vmm_tear_down_all(act);		/* vmm state exists: release it all */
}
/*
 * act_machine_destroy: release all machine-dependent resources held by
 * a dying activation -- its virtual machine state, then every vector,
 * floating point, and general savearea on its chains -- and lower the
 * savearea pool target.
 *
 * Change from the original: removed the unused locals "svp" and "i".
 */
void
act_machine_destroy(thread_act_t act)
{
	register savearea *pcb, *ppsv;
	register savearea_vec *vsv, *vpsv;
	register savearea_fpu *fsv, *fpsv;

	act_virtual_machine_destroy(act);	/* tear down blue box / vmm state, if any */

	toss_live_vec(act->mact.curctx);	/* discard any live vector registers */
	vsv = act->mact.curctx->VMXsave;
	while(vsv) {				/* release the entire vector savearea chain */
		vpsv = vsv;
		vsv = (savearea_vec *)vsv->save_hdr.save_prev;
		save_release((savearea *)vpsv);
	}
	act->mact.curctx->VMXsave = 0;

	toss_live_fpu(act->mact.curctx);	/* discard any live FP registers */
	fsv = act->mact.curctx->FPUsave;
	while(fsv) {				/* release the entire FP savearea chain */
		fpsv = fsv;
		fsv = (savearea_fpu *)fsv->save_hdr.save_prev;
		save_release((savearea *)fpsv);
	}
	act->mact.curctx->FPUsave = 0;

	pcb = act->mact.pcb;
	while(pcb) {				/* release the entire general (pcb) chain */
		ppsv = pcb;
		pcb = pcb->save_hdr.save_prev;
		save_release(ppsv);
	}

	hw_atomic_sub(&saveanchor.savetarget, 4);	/* matches the add in thread_machine_create */
}
/*
 * Machine-dependent activation creation hook.  All real initialization
 * happens in thread_machine_create, so this simply reports success.
 */
kern_return_t
act_machine_create(task_t task, thread_act_t thr_act)
{
	return KERN_SUCCESS;
}
/*
 * Startup sanity checks on the thread-state size constants: the
 * generic maximum must accommodate each of the PPC state flavors.
 */
void act_machine_init()
{
	assert( THREAD_MACHINE_STATE_MAX <= THREAD_STATE_MAX );
	assert( THREAD_STATE_MAX >= PPC_THREAD_STATE_COUNT );
	assert( THREAD_STATE_MAX >= PPC_EXCEPTION_STATE_COUNT );
	assert( THREAD_STATE_MAX >= PPC_FLOAT_STATE_COUNT );
}
/*
 * act_machine_return: machine-dependent self-termination of the
 * current activation.  thread_terminate_self() is expected not to
 * return; reaching the panic below means it did.
 */
void
act_machine_return(int code)
{
thread_act_t thr_act = current_act();
assert( code == KERN_TERMINATED );
assert( thr_act );
/* Only the top activation of a thread may terminate itself here. */
assert(thr_act->thread->top_act == thr_act);
thread_terminate_self();
/*NOTREACHED*/
panic("act_machine_return: TALKING ZOMBIE! (1)");
}
void
thread_machine_set_current(struct thread_shuttle *thread)
{
register int my_cpu = cpu_number();
set_machine_current_thread(thread);
set_machine_current_act(thread->top_act);
active_kloaded[my_cpu] = thread->top_act->kernel_loaded ? thread->top_act : THR_ACT_NULL;
}
/*
 * thread_machine_init: machine-dependent thread subsystem startup.
 * When machine stacks are in use, a kernel stack must not exceed one
 * PPC page (the check is compiled in only if the sizes conflict).
 */
void
thread_machine_init(void)
{
#ifdef MACHINE_STACK
#if KERNEL_STACK_SIZE > PPC_PGBYTES
panic("KERNEL_STACK_SIZE can't be greater than PPC_PGBYTES\n");
#endif
#endif
}
#if MACH_ASSERT
/*
 * Debug helper: print the address of a thread shuttle.
 */
void
dump_thread(thread_t th)
{
	printf(" thread @ 0x%x:\n", th);
}
/*
 * dump_act: debug dump of an activation -- its pointer and reference
 * count, its thread and task linkage (with their reference counts),
 * and its alert/suspend/active state.  Returns the activation pointer
 * cast to int, or 0 when no activation was supplied.
 */
int
dump_act(thread_act_t thr_act)
{
if (!thr_act)
return(0);
printf("thr_act(0x%x)(%d): thread=%x(%d) task=%x(%d)\n",
thr_act, thr_act->ref_count,
thr_act->thread, thr_act->thread ? thr_act->thread->ref_count:0,
thr_act->task, thr_act->task ? thr_act->task->ref_count : 0);
printf("\talerts=%x mask=%x susp=%x active=%x hi=%x lo=%x\n",
thr_act->alerts, thr_act->alert_mask,
thr_act->suspend_count, thr_act->active,
thr_act->higher, thr_act->lower);
return((int)thr_act);
}
#endif
/*
 * Return the saved SRR0 from the current activation's pcb (presumably
 * the PC at which the activation was interrupted -- verify against
 * callers).
 */
unsigned int
get_useraddr()
{
	return current_act()->mact.pcb->save_srr0;
}
/*
 * stack_detach: detach a thread's kernel stack and return it.  Any
 * kernel-level saveareas are released first (act_machine_sv_free),
 * leaving only user context; the thread is left with no kernel stack.
 */
vm_offset_t
stack_detach(thread_t thread)
{
vm_offset_t stack;
KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_DETACH),
thread, thread->priority,
thread->sched_pri, 0, 0);
if (thread->top_act)	/* release kernel-level saveareas before losing the stack */
act_machine_sv_free(thread->top_act);
stack = thread->kernel_stack;
thread->kernel_stack = 0;	/* thread no longer owns a stack */
return(stack);
}
/*
 * stack_attach: attach kernel stack "stack" to "thread" and, when the
 * thread has a top activation, push a fresh general savearea set up so
 * the thread begins executing start_pos on that stack in supervisor
 * mode with interrupts off.
 */
void
stack_attach(struct thread_shuttle *thread,
vm_offset_t stack,
void (*start_pos)(thread_t))
{
thread_act_t thr_act;
unsigned int *kss;
struct savearea *sv;
KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_ATTACH),
thread, thread->priority,
thread->sched_pri, start_pos,
0);
assert(stack);
kss = (unsigned int *)STACK_IKS(stack);	/* kernel state area at the top of the stack */
thread->kernel_stack = stack;
if ((thr_act = thread->top_act) != 0) {
sv = save_get();	/* new general savearea, pushed onto the pcb chain */
sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* mark as general context */
sv->save_hdr.save_act = thr_act;
sv->save_hdr.save_prev = thr_act->mact.pcb;
thr_act->mact.pcb = sv;
sv->save_srr0 = (unsigned int) start_pos;	/* resume execution at start_pos */
/* Initial frame just below the kernel state area; r1 is the stack pointer. */
sv->save_r1 = (vm_offset_t)((int)kss - KF_SIZE);
sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;	/* supervisor mode, interrupts off */
sv->save_fpscr = 0;	/* clear floating point status */
sv->save_vrsave = 0;	/* clear vector save mask */
sv->save_vscr[3] = 0x00010000;	/* NOTE(review): presumably the VSCR non-Java mode bit -- confirm */
*((int *)sv->save_r1) = 0;	/* zero backchain word terminates frame walkback */
thr_act->mact.ksp = 0;	/* NOTE(review): ksp == 0 appears to mean "kernel stack in use" -- confirm */
}
return;
}
/*
 * stack_handoff: hand the kernel stack of "old" directly to "new"
 * without a full context switch.  Also saves live FP/vector state
 * owned by "old" (MP only), switches address spaces when needed, makes
 * "new" the current thread, and publishes its per-thread values
 * (Uassist, blue box task environment, special flags) in per-proc.
 */
void
stack_handoff(thread_t old,
thread_t new)
{
vm_offset_t stack;
pmap_t new_pmap;
facility_context *fowner;
int my_cpu;
mapping *mp;
struct per_proc_info *ppinfo;
assert(new->top_act);
assert(old->top_act);
my_cpu = cpu_number();
stack = stack_detach(old);	/* take old's stack (also frees its kernel saveareas) */
new->kernel_stack = stack;
/* If old handed over its privileged stack, swap stack privileges. */
if (stack == old->stack_privilege) {
assert(new->stack_privilege);
old->stack_privilege = new->stack_privilege;
new->stack_privilege = stack;
}
ppinfo = getPerProc();	/* this cpu's per-processor info */
ppinfo->cpu_flags &= ~traceBE;	/* disable branch tracing across the handoff */
/* On MP, flush live FP/vector context owned by the old activation. */
if(real_ncpus > 1) {
fowner = ppinfo->FPU_owner;
if(fowner) {
if(fowner->facAct == old->top_act) {
fpu_save(fowner);
}
}
fowner = ppinfo->VMX_owner;
if(fowner) {
if(fowner->facAct == old->top_act) {
vec_save(fowner);
}
}
}
/* Refresh the old activation's protection-key / FAM bits from per-proc. */
if(old->top_act->mact.specFlags & runningVM) {
old->top_act->mact.specFlags &= ~(userProtKey|FamVMmode);
old->top_act->mact.specFlags |= (ppinfo->spcFlags) & (userProtKey|FamVMmode);
}
KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
(int)old, (int)new, old->sched_pri, new->sched_pri, 0);
/* Switch address spaces, exactly as switch_context does. */
if(new->top_act->mact.specFlags & runningVM) {
pmap_switch(new->top_act->mact.vmmCEntry->vmmPmap);
ppinfo->VMMareaPhys = new->top_act->mact.vmmCEntry->vmmContextPhys;
ppinfo->VMMXAFlgs = new->top_act->mact.vmmCEntry->vmmXAFlgs;
ppinfo->FAMintercept = new->top_act->mact.vmmCEntry->vmmFAMintercept;
}
else {
new_pmap = new->top_act->task->map->pmap;
if ((old->top_act->task->map->pmap != new_pmap) || (old->top_act->mact.specFlags & runningVM)) {
pmap_switch(new_pmap);
}
}
thread_machine_set_current(new);
active_stacks[my_cpu] = new->kernel_stack;
/* Publish the new activation's per-thread values in per-proc. */
ppinfo->Uassist = new->top_act->mact.cthread_self;
ppinfo->ppbbTaskEnv = new->top_act->mact.bbTaskEnv;
ppinfo->spcFlags = new->top_act->mact.specFlags;
old->top_act->mact.cioSpace |= cioSwitchAway;	/* invalidate old's cached copyin/out space */
mp = (mapping *)&ppinfo->ppCIOmp;
mp->mpSpace = invalSpace;	/* invalidate this cpu's copyin/out mapping */
if (branch_tracing_enabled())
ppinfo->cpu_flags |= traceBE;	/* re-enable branch tracing */
/* Emit a low-level trace entry when tracing is active. */
if(trcWork.traceMask) dbgTrace(0x12345678, (unsigned int)old->top_act, (unsigned int)new->top_act, 0);
return;
}
/*
 * call_continuation: run "continuation" on the current thread's own
 * kernel stack.  Builds a stack pointer just below the kernel state
 * area, zeroes its backchain word, and transfers control through the
 * assembly routine Call_continuation.
 * NOTE(review): Call_continuation presumably does not return here; the
 * trailing return appears to be defensive -- confirm.
 */
void
call_continuation(void (*continuation)(void) )
{
unsigned int *kss;
vm_offset_t tsp;
assert(current_thread()->kernel_stack);
kss = (unsigned int *)STACK_IKS(current_thread()->kernel_stack);	/* kernel state area at top of stack */
assert(continuation);
tsp = (vm_offset_t)((int)kss - KF_SIZE);	/* fresh frame just below the state area */
assert(tsp);
*((int *)tsp) = 0;	/* zero backchain word terminates frame walkback */
Call_continuation(continuation, tsp);
return;
}
/*
 * Allocate and initialize a general-context savearea for a thread
 * being swapped back in.  The activation must not already have a pcb.
 */
void
thread_swapin_mach_alloc(thread_t thread)
{
	struct savearea *sv;

	assert(thread->top_act->mact.pcb == 0);	/* must not already have a pcb */

	sv = save_alloc();	/* get a fresh savearea */
	assert(sv);

	sv->save_hdr.save_prev = 0;	/* only savearea on the chain */
	sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* mark as general context */
	sv->save_hdr.save_act = thread->top_act;	/* bind it to the activation */

	thread->top_act->mact.pcb = sv;
}