#include <cpus.h>
#include <debug.h>
#include <types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/thread_swap.h>
#include <mach/thread_status.h>
#include <vm/vm_kern.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <ppc/misc_protos.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>
#include <kern/spl.h>
#include <ppc/pmap.h>
#include <ppc/trap.h>
#include <ppc/mappings.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/asm.h>
#include <ppc/thread_act.h>
#include <ppc/vmachmon.h>
#include <ppc/low_trace.h>
#include <sys/kdebug.h>
extern int real_ncpus;				/* number of cpus actually configured */
extern struct Saveanchor saveanchor;		/* global anchor for the savearea free pool */

/* Size of an initial kernel stack frame: frame header + argument area + red zone */
#define KF_SIZE (FM_SIZE+ARG_SIZE+FM_REDZONE)

#if DEBUG
/* Debug counters for lazy FPU/vector context handling */
int fpu_trap_count = 0;
int fpu_switch_count = 0;
int vec_trap_count = 0;
int vec_switch_count = 0;
#endif

/* Low-level (assembly) context switch; returns the thread switched away from */
extern struct thread_shuttle *Switch_context(
struct thread_shuttle *old,
void (*cont)(void),
struct thread_shuttle *new);

#if MACH_LDEBUG || MACH_KDB
void log_thread_action (char *, long, long, long);
#endif
/*
 * consider_machine_collect:
 *
 * Machine-dependent memory collection hook; nothing to collect on PPC.
 */
void
consider_machine_collect()
{
return;
}
/*
 * consider_machine_adjust:
 *
 * Machine-dependent resource adjustment hook; delegates to the pmap
 * layer's mapping-pool adjuster.
 */
void
consider_machine_adjust()
{
consider_mapping_adjust();
}
/*
 * machine_kernel_stack_init:
 *
 * Initialize the machine-dependent part of a thread's kernel stack and
 * its pcb savearea so that the thread will resume execution at
 * start_pos, in supervisor state with interrupts disabled, on a fresh
 * stack frame at the top of the kernel stack.
 */
void
machine_kernel_stack_init(
struct thread_shuttle *thread,
void (*start_pos)(thread_t))
{
vm_offset_t stack;
unsigned int *kss;
struct savearea *sv;
/* Caller must have already given the thread a pcb and a kernel stack */
assert(thread->top_act->mact.pcb);
assert(thread->kernel_stack);
stack = thread->kernel_stack;
#if MACH_ASSERT
if (watchacts & WA_PCB)
printf("machine_kernel_stack_init(thr=%x,stk=%x,start_pos=%x)\n", thread,stack,start_pos);
#endif
/* kss = top-of-stack save area location */
kss = (unsigned int *)STACK_IKS(stack);
sv = thread->top_act->mact.pcb;
/* Resume at start_pos: set both the link register and srr0 */
sv->save_lr = (unsigned int) start_pos;
sv->save_srr0 = (unsigned int) start_pos;
/* Supervisor state, external interrupts off */
sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;
/* Initial stack pointer, leaving room for one kernel frame */
sv->save_r1 = (vm_offset_t) ((int)kss - KF_SIZE);
sv->save_fpscr = 0;	/* clear FP status/control */
sv->save_vrsave = 0;	/* no live vector registers */
sv->save_vscr[3] = 0x00010000;	/* vector status init -- presumably non-Java mode; confirm against VSCR layout */
/* Zero the backchain word so stack walks terminate here */
*((int *)sv->save_r1) = 0;
/* ksp = 0 -- appears to mark the kernel stack as in use; TODO confirm */
thread->top_act->mact.ksp = 0;
}
/*
 * switch_context:
 *
 * Machine-dependent context switch.  Saves any live floating point or
 * vector state owned by the outgoing activation (needed only on MP),
 * switches the address space when the incoming activation requires a
 * different pmap (or is entering/leaving a virtual machine), then calls
 * the assembly-level Switch_context.  Returns the thread we switched
 * away from.
 */
struct thread_shuttle*
switch_context(
struct thread_shuttle *old,
void (*continuation)(void),
struct thread_shuttle *new)
{
register thread_act_t old_act = old->top_act, new_act = new->top_act;
register struct thread_shuttle* retval;
pmap_t new_pmap;
facility_context *fowner;
int my_cpu;
#if MACH_LDEBUG || MACH_KDB
log_thread_action("switch",
(long)old,
(long)new,
(long)__builtin_return_address(0));
#endif
my_cpu = cpu_number();
per_proc_info[my_cpu].old_thread = (unsigned int)old;
/* Disable branch tracing across the switch; re-enabled below if configured */
per_proc_info[my_cpu].cpu_flags &= ~traceBE;
assert(old_act->kernel_loaded ||
active_stacks[my_cpu] == old_act->thread->kernel_stack);
check_simple_locks();
/*
 * On multiprocessors, push any live FPU/vector state belonging to the
 * outgoing activation out to its saveareas so another cpu can load it.
 */
if(real_ncpus > 1) {
fowner = per_proc_info[my_cpu].FPU_owner;
if(fowner) {
if(fowner->facAct == old->top_act) {
fpu_save(fowner);
}
}
fowner = per_proc_info[my_cpu].VMX_owner;
if(fowner) {
if(fowner->facAct == old->top_act) {
vec_save(fowner);
}
}
}
#if DEBUG
if (watchacts & WA_PCB) {
printf("switch_context(0x%08x, 0x%x, 0x%08x)\n",
old,continuation,new);
}
#endif
/*
 * If the outgoing activation was running a virtual machine, copy the
 * per-cpu protection key / FAM mode bits back into its specFlags.
 */
if(old_act->mact.specFlags & runningVM) {
old_act->mact.specFlags &= ~(userProtKey|FamVMmode);
old_act->mact.specFlags |= (per_proc_info[my_cpu].spcFlags) & (userProtKey|FamVMmode);
}
/*
 * Address-space switch: into the VM's pmap if the incoming activation
 * runs a virtual machine, otherwise into its task's pmap -- but only
 * when the pmap differs or we are leaving a virtual machine.
 */
if(new_act->mact.specFlags & runningVM) {
pmap_switch(new_act->mact.vmmCEntry->vmmPmap);
per_proc_info[my_cpu].VMMareaPhys = (vm_offset_t)new_act->mact.vmmCEntry->vmmContextPhys;
per_proc_info[my_cpu].FAMintercept = new_act->mact.vmmCEntry->vmmFAMintercept;
}
else {
new_pmap = new_act->task->map->pmap;
if ((old_act->task->map->pmap != new_pmap) || (old_act->mact.specFlags & runningVM)) {
pmap_switch(new_pmap);
}
}
KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
(int)old, (int)new, old->sched_pri, new->sched_pri, 0);
retval = Switch_context(old, continuation, new);
assert(retval != (struct thread_shuttle*)NULL);
/* Re-enable branch tracing for the incoming thread if configured */
if (branch_tracing_enabled())
per_proc_info[my_cpu].cpu_flags |= traceBE;
return retval;
}
/*
 * thread_set_syscall_return:
 *
 * Store a syscall return value into r3 of the thread's pcb savearea,
 * where user state will be restored from.
 */
void
thread_set_syscall_return(
struct thread_shuttle *thread,
kern_return_t retval)
{
#if MACH_ASSERT
if (watchacts & WA_PCB)
printf("thread_set_syscall_return(thr=%x,retval=%d)\n", thread,retval);
#endif
thread->top_act->mact.pcb->save_r3 = retval;
}
/*
 * thread_machine_create:
 *
 * Allocate and initialize the machine-dependent state for a new
 * thread: a general-context pcb savearea and the activation's own
 * facility (FPU/vector) context.  Always returns KERN_SUCCESS.
 */
kern_return_t
thread_machine_create(
	struct thread_shuttle *thread,
	thread_act_t thr_act,
	void (*start_pos)(thread_t))
{
	savearea	*sv;			/* newly allocated pcb savearea */
	unsigned int	*CIsTooLimited, i;

#if MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("thread_machine_create(thr=%x,thr_act=%x,st=%x)\n", thread, thr_act, start_pos);
#endif

	/* Raise the savearea free-pool target to cover this thread */
	hw_atomic_add(&saveanchor.savetarget, 4);
	assert(thr_act->mact.pcb == (savearea *)0);

	sv = save_alloc();			/* get a savearea for the pcb */
	assert(sv);				/* check allocation, consistent with thread_swapin_mach_alloc() */

	/* Clear everything past the common savearea header */
	bzero((char *)((unsigned int)sv + sizeof(savearea_comm)), (sizeof(savearea) - sizeof(savearea_comm)));

	sv->save_hdr.save_prev = 0;		/* no chained saveareas yet */
	sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* mark as general context */
	sv->save_hdr.save_act = thr_act;	/* savearea belongs to this activation */
	sv->save_vscr[3] = 0x00010000;		/* vector status init -- presumably non-Java mode; confirm against VSCR layout */

	thr_act->mact.pcb = sv;
	thr_act->mact.curctx = &thr_act->mact.facctx;	/* start with the activation's own facility context */
	thr_act->mact.facctx.facAct = thr_act;

#if MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("pcb_init(%x) pcb=%x\n", thr_act, sv);
#endif

	sv->save_srr1 = MSR_EXPORT_MASK_SET;	/* initial user-visible MSR */

	/* Initialize all 16 segment registers with the task's address space ID */
	CIsTooLimited = (unsigned int *)(&sv->save_sr0);
	for(i=0; i<16; i++) {
		CIsTooLimited[i] = SEG_REG_PROT | (i << 20) | thr_act->task->map->pmap->space;
	}

	return(KERN_SUCCESS);
}
/*
 * thread_machine_destroy:
 *
 * Release the kernel stack of a thread being destroyed, if it has one.
 */
void
thread_machine_destroy( thread_t thread )
{
	spl_t spl_level;

	if (!thread->kernel_stack)
		return;				/* no stack to release */

	spl_level = splsched();			/* block rescheduling while freeing */
	stack_free(thread);
	splx(spl_level);
}
/*
 * thread_machine_flush:
 *
 * Machine-dependent cache/state flush hook; nothing to do on PPC.
 */
void
thread_machine_flush( thread_act_t cur_act )
{
}
int switch_act_swapins = 0;	/* statistic; not updated in this file -- TODO confirm users */

/*
 * machine_switch_act:
 *
 * Switch the current activation within a thread: save any live
 * FPU/vector state owned by the outgoing activation (MP only),
 * repoint the active stack, set the AST context, and switch address
 * space if the new activation needs a different pmap.
 */
void
machine_switch_act(
thread_t thread,
thread_act_t old,
thread_act_t new,
int cpu)
{
pmap_t new_pmap;
facility_context *fowner;
/* On MP, push live FPU/vector state out so another cpu can load it */
if(real_ncpus > 1) {
fowner = per_proc_info[cpu_number()].FPU_owner;
if(fowner) {
if(fowner->facAct == old) {
fpu_save(fowner);
}
}
fowner = per_proc_info[cpu_number()].VMX_owner;
if(fowner) {
if(fowner->facAct == old) {
vec_save(fowner);
}
}
}
active_stacks[cpu] = thread->kernel_stack;
ast_context(new, cpu);
/*
 * Switch address space: the VM's pmap if the new activation runs a
 * virtual machine, otherwise the task pmap (only when it differs or
 * we are leaving a virtual machine).
 */
if(new->mact.specFlags & runningVM) {
pmap_switch(new->mact.vmmCEntry->vmmPmap);
}
else {
new_pmap = new->task->map->pmap;
if ((old->task->map->pmap != new_pmap) || (old->mact.specFlags & runningVM)) {
pmap_switch(new_pmap);
}
}
}
/*
 * pcb_user_to_kernel:
 *
 * Copy user state into kernel-managed pcb; nothing to do on PPC since
 * user state already lives in the pcb savearea.
 */
void
pcb_user_to_kernel(thread_act_t act)
{
return;
}
/*
 * act_machine_sv_free:
 *
 * Release all kernel-level saveareas for an activation, keeping only
 * user-level state: every vector/FPU savearea above the user context
 * is returned to the free pool, and the general (pcb) chain is trimmed
 * down to the topmost savearea captured from user mode, if any.
 */
void
act_machine_sv_free(thread_act_t act)
{
	register savearea *pcb, *userpcb;
	register savearea_vec *vsv, *vpsv;
	register savearea_fpu *fsv, *fpsv;
	register savearea *svp;

	/*
	 * Toss any live vector context first; we are about to free the
	 * saveareas it would otherwise be saved into.
	 */
	if(act->mact.curctx->VMXlevel) {
		toss_live_vec(act->mact.curctx);
		act->mact.curctx->VMXlevel = 0;		/* only user-level context from here on */
	}

	/*
	 * Walk the vector savearea chain, returning each savearea until we
	 * reach the user-level one (save_level == 0), which is kept.
	 */
	vsv = act->mact.curctx->VMXsave;
	while(vsv) {
		vpsv = vsv;
		if (!vsv->save_hdr.save_level) break;	/* keep the user context */
		vsv = (savearea_vec *)vsv->save_hdr.save_prev;
		save_ret((savearea *)vpsv);
	}
	act->mact.curctx->VMXsave = vsv;		/* chain now starts at user context (or is empty) */

	/* Same treatment for the floating point context */
	if(act->mact.curctx->FPUlevel) {
		toss_live_fpu(act->mact.curctx);
		act->mact.curctx->FPUlevel = 0;
	}

	fsv = act->mact.curctx->FPUsave;
	while(fsv) {
		fpsv = fsv;
		if (!fsv->save_hdr.save_level) break;	/* keep the user context */
		fsv = (savearea_fpu *)fsv->save_hdr.save_prev;
		save_ret((savearea *)fpsv);
	}
	act->mact.curctx->FPUsave = fsv;

	/*
	 * Trim the general savearea chain: release every savearea captured
	 * from supervisor state until one captured from user mode (MSR_PR
	 * set in srr1) is found; that one becomes the new pcb.
	 */
	pcb = act->mact.pcb;
	userpcb = 0;

	while(pcb) {
		if (pcb->save_srr1 & MASK(MSR_PR)) {	/* captured from user (problem) state? */
			userpcb = pcb;
			break;
		}
		svp = pcb;
		pcb = pcb->save_hdr.save_prev;
		save_ret(svp);
	}
	act->mact.pcb = userpcb;			/* may be 0 if no user state existed */
}
/*
 * act_virtual_machine_destroy:
 *
 * Shut down any "bluebox" emulation assist and tear down any virtual
 * machine monitor contexts attached to the activation.
 */
void
act_virtual_machine_destroy(thread_act_t act)
{
	if (act->mact.bbDescAddr)		/* bluebox assist active? */
		disable_bluebox_internal(act);

	if (act->mact.vmmControl)		/* any vmm contexts attached? */
		vmm_tear_down_all(act);
}
/*
 * act_machine_destroy:
 *
 * Release all machine-dependent state of an activation that is being
 * destroyed: virtual machine / bluebox assist, every vector and FPU
 * savearea, and the entire general (pcb) savearea chain.
 */
void
act_machine_destroy(thread_act_t act)
{
	register savearea *pcb, *ppsv;
	register savearea_vec *vsv, *vpsv;
	register savearea_fpu *fsv, *fpsv;

#if MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("act_machine_destroy(0x%x)\n", act);
#endif

	/* Tear down bluebox and vmm state first, if any */
	act_virtual_machine_destroy(act);

	/* Toss any live vector context, then release every vector savearea */
	toss_live_vec(act->mact.curctx);
	vsv = act->mact.curctx->VMXsave;
	while(vsv) {
		vpsv = vsv;
		vsv = (savearea_vec *)vsv->save_hdr.save_prev;
		save_release((savearea *)vpsv);
	}
	act->mact.curctx->VMXsave = 0;

	/* Likewise for the floating point context */
	toss_live_fpu(act->mact.curctx);
	fsv = act->mact.curctx->FPUsave;
	while(fsv) {
		fpsv = fsv;
		fsv = (savearea_fpu *)fsv->save_hdr.save_prev;
		save_release((savearea *)fpsv);
	}
	act->mact.curctx->FPUsave = 0;

	/* Release the whole general savearea (pcb) chain */
	pcb = act->mact.pcb;
	while(pcb) {
		ppsv = pcb;
		pcb = pcb->save_hdr.save_prev;
		save_release(ppsv);
	}

	/* Lower the savearea free-pool target now that this thread is gone */
	hw_atomic_sub(&saveanchor.savetarget, 4);
}
/*
 * act_machine_create:
 *
 * Machine-dependent activation creation hook; no extra work needed on
 * PPC (thread_machine_create sets up the pcb).
 */
kern_return_t
act_machine_create(task_t task, thread_act_t thr_act)
{
return KERN_SUCCESS;
}
/*
 * act_machine_init:
 *
 * One-time initialization for the activation layer; only sanity-checks
 * that the generic thread-state buffers can hold every PPC flavor.
 */
void act_machine_init()
{
#if MACH_ASSERT
if (watchacts & WA_PCB)
printf("act_machine_init()\n");
#endif
assert( THREAD_MACHINE_STATE_MAX <= THREAD_STATE_MAX );
assert( THREAD_STATE_MAX >= PPC_THREAD_STATE_COUNT );
assert( THREAD_STATE_MAX >= PPC_EXCEPTION_STATE_COUNT );
assert( THREAD_STATE_MAX >= PPC_FLOAT_STATE_COUNT );
}
/*
 * act_machine_return:
 *
 * Terminate the current activation.  thread_terminate_self() must not
 * return; if it does, panic.
 */
void
act_machine_return(int code)
{
thread_act_t thr_act = current_act();
#if MACH_ASSERT
if (watchacts & WA_EXIT)
printf("act_machine_return(0x%x) cur_act=%x(%d) thr=%x(%d)\n",
code, thr_act, thr_act->ref_count,
thr_act->thread, thr_act->thread->ref_count);
#endif
assert( code == KERN_TERMINATED );
assert( thr_act );
/* Only the top activation of a shuttle may self-terminate here */
assert(thr_act->thread->top_act == thr_act);
/* Does not return */
thread_terminate_self();
panic("act_machine_return: TALKING ZOMBIE! (1)");
}
void
thread_machine_set_current(struct thread_shuttle *thread)
{
register int my_cpu = cpu_number();
set_machine_current_thread(thread);
set_machine_current_act(thread->top_act);
active_kloaded[my_cpu] = thread->top_act->kernel_loaded ? thread->top_act : THR_ACT_NULL;
}
/*
 * thread_machine_init:
 *
 * Machine-dependent thread subsystem initialization; only a
 * compile-time sanity check that a kernel stack fits in one page.
 */
void
thread_machine_init(void)
{
#ifdef MACHINE_STACK
#if KERNEL_STACK_SIZE > PPC_PGBYTES
panic("KERNEL_STACK_SIZE can't be greater than PPC_PGBYTES\n");
#endif
#endif
}
#if MACH_ASSERT
/*
 * dump_thread:
 *
 * Debug helper: print a thread's address.
 */
void
dump_thread(thread_t th)
{
printf(" thread @ 0x%x:\n", th);
}
/*
 * dump_act:
 *
 * Debug helper: print an activation's reference counts and state.
 * Returns the activation pointer as an int (0 for NULL).
 */
int
dump_act(thread_act_t thr_act)
{
if (!thr_act)
return(0);
printf("thr_act(0x%x)(%d): thread=%x(%d) task=%x(%d)\n",
thr_act, thr_act->ref_count,
thr_act->thread, thr_act->thread ? thr_act->thread->ref_count:0,
thr_act->task, thr_act->task ? thr_act->task->ref_count : 0);
printf("\talerts=%x mask=%x susp=%x active=%x hi=%x lo=%x\n",
thr_act->alerts, thr_act->alert_mask,
thr_act->suspend_count, thr_act->active,
thr_act->higher, thr_act->lower);
return((int)thr_act);
}
#endif
/*
 * get_useraddr:
 *
 * Return the user-mode PC (srr0 from the current activation's pcb).
 */
unsigned int
get_useraddr()
{
	return current_act()->mact.pcb->save_srr0;
}
/*
 * stack_detach:
 *
 * Detach and return a thread's kernel stack, first releasing all of
 * its kernel-state saveareas (user state, if any, is preserved).
 */
vm_offset_t
stack_detach(thread_t thread)
{
vm_offset_t stack;
KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_DETACH),
thread, thread->priority,
thread->sched_pri, 0, 0);
/* Drop kernel saveareas; only the user context (if any) survives */
if (thread->top_act)
act_machine_sv_free(thread->top_act);
stack = thread->kernel_stack;
thread->kernel_stack = 0;	/* thread no longer owns a stack */
return(stack);
}
/*
 * stack_attach:
 *
 * Attach a kernel stack to a thread and, if the thread has a top
 * activation, push a fresh general savearea set up so the thread will
 * resume at start_pos in supervisor state on the new stack.
 */
void
stack_attach(struct thread_shuttle *thread,
vm_offset_t stack,
void (*start_pos)(thread_t))
{
thread_act_t thr_act;
unsigned int *kss;
struct savearea *sv;
KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_ATTACH),
thread, thread->priority,
thread->sched_pri, start_pos,
0);
assert(stack);
/* kss = top-of-stack save area location */
kss = (unsigned int *)STACK_IKS(stack);
thread->kernel_stack = stack;
if ((thr_act = thread->top_act) != 0) {
sv = save_get();	/* fresh savearea for the resume context */
sv->save_hdr.save_flags = (sv->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* mark as general context */
sv->save_hdr.save_act = thr_act;
/* Chain the new savearea on top of the existing pcb */
sv->save_hdr.save_prev = thr_act->mact.pcb;
thr_act->mact.pcb = sv;
sv->save_srr0 = (unsigned int) start_pos;	/* resume here */
/* Initial stack pointer, leaving room for one kernel frame */
sv->save_r1 = (vm_offset_t)((int)kss - KF_SIZE);
sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;	/* supervisor state, interrupts off */
sv->save_fpscr = 0;	/* clear FP status/control */
sv->save_vrsave = 0;	/* no live vector registers */
sv->save_vscr[3] = 0x00010000;	/* vector status init -- presumably non-Java mode; confirm against VSCR layout */
/* Zero the backchain word so stack walks terminate here */
*((int *)sv->save_r1) = 0;
/* ksp = 0 -- appears to mark the kernel stack as in use; TODO confirm */
thr_act->mact.ksp = 0;
}
return;
}
/*
 * stack_handoff:
 *
 * Move the old thread's kernel stack directly to the new thread
 * without a full context switch: detach/attach the stack (tracking
 * stack_privilege), save any live FPU/vector state (MP only), switch
 * address space if required, then update the per-cpu state for the
 * new thread.
 */
void
stack_handoff(thread_t old,
thread_t new)
{
vm_offset_t stack;
pmap_t new_pmap;
facility_context *fowner;
int my_cpu;
assert(new->top_act);
assert(old->top_act);
my_cpu = cpu_number();
stack = stack_detach(old);
new->kernel_stack = stack;
/* If the stack was the old thread's privileged stack, swap privileges */
if (stack == old->stack_privilege) {
assert(new->stack_privilege);
old->stack_privilege = new->stack_privilege;
new->stack_privilege = stack;
}
/* Disable branch tracing across the handoff; re-enabled below if configured */
per_proc_info[my_cpu].cpu_flags &= ~traceBE;
/* On MP, push live FPU/vector state out so another cpu can load it */
if(real_ncpus > 1) {
fowner = per_proc_info[my_cpu].FPU_owner;
if(fowner) {
if(fowner->facAct == old->top_act) {
fpu_save(fowner);
}
}
fowner = per_proc_info[my_cpu].VMX_owner;
if(fowner) {
if(fowner->facAct == old->top_act) {
vec_save(fowner);
}
}
}
/*
 * If the outgoing activation was running a virtual machine, copy the
 * per-cpu protection key / FAM mode bits back into its specFlags.
 */
if(old->top_act->mact.specFlags & runningVM) {
old->top_act->mact.specFlags &= ~(userProtKey|FamVMmode);
old->top_act->mact.specFlags |= (per_proc_info[my_cpu].spcFlags) & (userProtKey|FamVMmode);
}
KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
(int)old, (int)new, old->sched_pri, new->sched_pri, 0);
/*
 * Address-space switch: the VM's pmap if the incoming activation runs
 * a virtual machine, otherwise its task pmap (only when it differs or
 * we are leaving a virtual machine).
 */
if(new->top_act->mact.specFlags & runningVM) {
pmap_switch(new->top_act->mact.vmmCEntry->vmmPmap);
per_proc_info[my_cpu].VMMareaPhys = (vm_offset_t)new->top_act->mact.vmmCEntry->vmmContextPhys;
per_proc_info[my_cpu].FAMintercept = new->top_act->mact.vmmCEntry->vmmFAMintercept;
}
else {
new_pmap = new->top_act->task->map->pmap;
if ((old->top_act->task->map->pmap != new_pmap) || (old->top_act->mact.specFlags & runningVM)) {
pmap_switch(new_pmap);
}
}
/* Make the new thread current and refresh the per-cpu mirrors */
thread_machine_set_current(new);
active_stacks[my_cpu] = new->kernel_stack;
per_proc_info[my_cpu].Uassist = new->top_act->mact.cthread_self;
per_proc_info[my_cpu].ppbbTaskEnv = new->top_act->mact.bbTaskEnv;
per_proc_info[my_cpu].spcFlags = new->top_act->mact.specFlags;
if (branch_tracing_enabled())
per_proc_info[my_cpu].cpu_flags |= traceBE;
/* Emit a low-level trace record for the handoff, if tracing is on */
if(trcWork.traceMask) dbgTrace(0x12345678, (unsigned int)old->top_act, (unsigned int)new->top_act);
return;
}
/*
 * call_continuation:
 *
 * Reset the stack pointer to a fresh frame at the top of the current
 * kernel stack (with a zeroed backchain) and jump to the given
 * continuation; control never returns here.
 */
void
call_continuation(void (*continuation)(void) )
{
	unsigned int	*stack_top;
	vm_offset_t	new_sp;

	assert(continuation);
	assert(current_thread()->kernel_stack);

	stack_top = (unsigned int *)STACK_IKS(current_thread()->kernel_stack);
	new_sp = (vm_offset_t)((int)stack_top - KF_SIZE);
	assert(new_sp);

	*((int *)new_sp) = 0;			/* zero the backchain word */
	Call_continuation(continuation, new_sp);
}
/*
 * thread_swapin_mach_alloc:
 *
 * Allocate and initialize a fresh general-context pcb savearea for a
 * thread being swapped back in.
 */
void
thread_swapin_mach_alloc(thread_t thread)
{
	struct savearea	*pcb;
	thread_act_t	act = thread->top_act;

	assert(act->mact.pcb == 0);		/* must not already have a pcb */

	pcb = save_alloc();
	assert(pcb);
	pcb->save_hdr.save_prev = 0;		/* first (only) savearea in the chain */
	pcb->save_hdr.save_flags = (pcb->save_hdr.save_flags & ~SAVtype) | (SAVgeneral << SAVtypeshft);	/* mark as general context */
	pcb->save_hdr.save_act = act;		/* owned by this activation */
	act->mact.pcb = pcb;
}