#include <cpus.h>
#include <debug.h>
#include <types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/thread_swap.h>
#include <mach/thread_status.h>
#include <vm/vm_kern.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <ppc/misc_protos.h>
#include <ppc/fpu_protos.h>
#include <ppc/exception.h>
#include <ppc/proc_reg.h>
#include <kern/spl.h>
#include <ppc/pmap.h>
#include <ppc/trap.h>
#include <ppc/mappings.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/asm.h>
#include <ppc/thread_act.h>
#include <ppc/vmachmon.h>
#include <ppc/low_trace.h>
#include <sys/kdebug.h>
extern int real_ncpus;                          /* number of logical processors in the system */
extern struct Saveanchor saveanchor;            /* anchor for the global savearea pool */

/*
 * Size of the dummy kernel stack frame built below the top of a kernel
 * stack: a standard frame, the argument save area, and the red zone.
 */
#define KF_SIZE (FM_SIZE+ARG_SIZE+FM_REDZONE)

#if DEBUG
/* Debug statistics for FPU/vector context traps and switches. */
int fpu_trap_count = 0;
int fpu_switch_count = 0;
int vec_trap_count = 0;
int vec_switch_count = 0;
#endif

/*
 * Low-level context switch primitive (implemented in assembly);
 * returns the thread that was switched away from.
 */
extern struct thread_shuttle *Switch_context(
    struct thread_shuttle *old,
    void (*cont)(void),
    struct thread_shuttle *new);

#if MACH_LDEBUG || MACH_KDB
void log_thread_action (char *, long, long, long);
#endif
/*
 * consider_machine_collect: machine-dependent garbage-collection hook.
 * There is nothing to reclaim on this platform.
 */
void
consider_machine_collect()
{
    /* intentionally empty */
}
/*
 * consider_machine_adjust: machine-dependent memory adjustment hook.
 * Delegates to the pmap layer's mapping-adjust logic.
 */
void
consider_machine_adjust()
{
    consider_mapping_adjust();
}
/*
 * machine_kernel_stack_init
 *
 * Initialize the machine-dependent part of a thread's kernel stack so
 * the thread will start executing at start_pos: aim the saved LR/SRR0
 * at the start routine, run in supervisor mode with interrupts off,
 * and build a zeroed dummy frame at the top of the stack with the
 * saved r1 pointing at it.
 */
void
machine_kernel_stack_init(
    struct thread_shuttle *thread,
    void (*start_pos)(thread_t))
{
    vm_offset_t stack;
    unsigned int *kss;
    struct savearea *sv;

    assert(thread->top_act->mact.pcb);
    assert(thread->kernel_stack);
    stack = thread->kernel_stack;

#if MACH_ASSERT
    if (watchacts & WA_PCB)
        printf("machine_kernel_stack_init(thr=%x,stk=%x,start_pos=%x)\n", thread,stack,start_pos);
#endif

    kss = (unsigned int *)STACK_IKS(stack);         /* top of the kernel stack */
    sv=(savearea *)(thread->top_act->mact.pcb);     /* use the thread's existing pcb savearea */

    sv->save_lr = (unsigned int) start_pos;
    sv->save_srr0 = (unsigned int) start_pos;
    sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;
    sv->save_r1 = (vm_offset_t) ((int)kss - KF_SIZE);   /* leave room for the dummy frame */
    sv->save_xfpscrpad = 0;                         /* clear extended FPSCR image */
    sv->save_xfpscr = 0;
    *((int *)sv->save_r1) = 0;                      /* zero the dummy frame's back chain */
    thread->top_act->mact.ksp = 0;                  /* 0 presumably marks the kernel stack in use -- verify */
}
/*
 * switch_context
 *
 * Machine-dependent part of a context switch from thread 'old' to
 * thread 'new'.  Saves FPU/vector state when running on more than one
 * CPU, switches address spaces when needed, then drops into the
 * low-level Switch_context.  Returns the thread switched away from.
 */
struct thread_shuttle*
switch_context(
    struct thread_shuttle *old,
    void (*continuation)(void),
    struct thread_shuttle *new)
{
    register thread_act_t old_act = old->top_act, new_act = new->top_act;
    register struct thread_shuttle* retval;
    pmap_t new_pmap;

#if MACH_LDEBUG || MACH_KDB
    log_thread_action("switch",
        (long)old,
        (long)new,
        (long)__builtin_return_address(0));
#endif

    per_proc_info[cpu_number()].old_thread = old;
    per_proc_info[cpu_number()].cpu_flags &= ~traceBE;  /* turn off branch tracing while we switch */
    assert(old_act->kernel_loaded ||
        active_stacks[cpu_number()] == old_act->thread->kernel_stack);
    check_simple_locks();

    /* On MP systems the outgoing context may next run on another
       processor, so force its FPU and vector state out now. */
    if(real_ncpus > 1) {
        fpu_save(old_act);
        vec_save(old_act);
    }

#if DEBUG
    if (watchacts & WA_PCB) {
        printf("switch_context(0x%08x, 0x%x, 0x%08x)\n",
            old,continuation,new);
    }
#endif

    /* Address-space switch: a thread running a virtual machine uses the
       VM's pmap; otherwise switch only when the task pmap actually
       changes, or when we are leaving a virtual machine. */
    if(new_act->mact.specFlags & runningVM) {
        pmap_switch(new_act->mact.vmmCEntry->vmmPmap);
    }
    else {
        new_pmap = new_act->task->map->pmap;
        if ((old_act->task->map->pmap != new_pmap) || (old_act->mact.specFlags & runningVM)) {
            pmap_switch(new_pmap);
        }
    }

#if 0
    printf("************* stack=%08X; R1=%08X; LR=%08X; old=%08X; cont=%08X; new=%08X\n",
        new->kernel_stack, new_act->mact.pcb->ss.r1,
        new_act->mact.pcb->ss.lr, old, continuation, new);
    assert((new->kernel_stack < new_act->mact.pcb->ss.r1) &&
        ((unsigned int)STACK_IKS(new->kernel_stack) >
        new_act->mact.pcb->ss.r1));
    assert(new_act->mact.pcb->ss.lr < VM_MAX_KERNEL_ADDRESS);
#endif

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
        (int)old, (int)new, old->sched_pri, new->sched_pri, 0);

    retval = Switch_context(old, continuation, new);
    assert(retval != (struct thread_shuttle*)NULL);

    /* We are now running the new thread; re-enable branch tracing if requested. */
    if (branch_tracing_enabled())
        per_proc_info[cpu_number()].cpu_flags |= traceBE;

    return retval;
}
/*
 * thread_set_syscall_return
 *
 * Store a system call's return value into the thread's saved user
 * register state (PPC convention: r3 carries the return value).
 */
void
thread_set_syscall_return(
    struct thread_shuttle *thread,
    kern_return_t retval)
{
#if MACH_ASSERT
    if (watchacts & WA_PCB)
        printf("thread_set_syscall_return(thr=%x,retval=%d)\n", thread,retval);
#endif
    thread->top_act->mact.pcb->ss.r3 = retval;
}
/*
 * thread_machine_create
 *
 * Allocate and initialize the machine-dependent state for a new
 * thread: bump the global savearea demand, allocate a savearea to act
 * as the pcb, and stamp the initial MSR image plus all segment
 * register images with the task's address space ID.
 *
 * Returns KERN_SUCCESS.
 */
kern_return_t
thread_machine_create(
    struct thread_shuttle *thread,
    thread_act_t thr_act,
    void (*start_pos)(thread_t))
{
    savearea *sv;                               /* the newborn pcb savearea */
    unsigned int *CIsTooLimited, i;

#if MACH_ASSERT
    if (watchacts & WA_PCB)
        printf("thread_machine_create(thr=%x,thr_act=%x,st=%x)\n", thread, thr_act, start_pos);
#endif

    hw_atomic_add(&saveanchor.saveneed, 4);     /* bump the savearea demand count (balanced in act_machine_destroy) */
    assert(thr_act->mact.pcb == (pcb_t)0);

    sv = save_alloc();                          /* allocate a savearea to use as the pcb */
    bzero((char *) sv, sizeof(struct pcb));     /* clear only the pcb-sized prefix */
    sv->save_act = thr_act;                     /* back-pointer to the owning activation */
    sv->save_vrsave = 0;
    thr_act->mact.pcb = (pcb_t)sv;

#if MACH_ASSERT
    if (watchacts & WA_PCB)
        printf("pcb_init(%x) pcb=%x\n", thr_act, sv);
#endif

    sv->save_srr1 = MSR_EXPORT_MASK_SET;        /* initial MSR image -- presumably the user-mode export mask */

    /* Treat the 16 consecutive segment register images starting at
       save_sr0 as an array, and stamp each with the protection bits,
       its segment number, and the task pmap's address space ID. */
    CIsTooLimited = (unsigned int *)(&sv->save_sr0);
    for(i=0; i<16; i++) {
        CIsTooLimited[i] = SEG_REG_PROT | (i << 20) | thr_act->task->map->pmap->space;
    }
    sv->save_sr_copyin = SEG_REG_PROT | (SR_COPYIN_NUM<<20) | thr_act->task->map->pmap->space;

    return(KERN_SUCCESS);
}
/*
 * thread_machine_destroy
 *
 * Release machine-dependent resources held by a thread shuttle:
 * currently just returns its kernel stack (if any) to the stack pool,
 * at splsched.
 */
void
thread_machine_destroy( thread_t thread )
{
    spl_t s;

    if (!thread->kernel_stack)
        return;

    s = splsched();
    stack_free(thread);
    splx(s);
}
/*
 * thread_machine_flush: machine-dependent state flush hook.
 * Nothing to do on this platform.
 */
void
thread_machine_flush( thread_act_t cur_act )
{
}
/* Statistics counter for act-switch swap-ins (not updated in this file). */
int switch_act_swapins = 0;

/*
 * machine_switch_act
 *
 * Machine-dependent activation switch on the given CPU: saves
 * FPU/vector state when running MP, records the active stack,
 * propagates the AST context to the new activation, and switches
 * address spaces using the same policy as switch_context.
 */
void
machine_switch_act(
    thread_t thread,
    thread_act_t old,
    thread_act_t new,
    int cpu)
{
    pmap_t new_pmap;

    /* On MP the old activation's state may migrate; force it out now. */
    if(real_ncpus > 1) {
        fpu_save(old);
        vec_save(old);
    }

    active_stacks[cpu] = thread->kernel_stack;
    ast_context(new, cpu);

    /* A VM-running activation uses the VM's pmap; otherwise switch only
       when the task pmap actually changes, or when leaving a VM. */
    if(new->mact.specFlags & runningVM) {
        pmap_switch(new->mact.vmmCEntry->vmmPmap);
    }
    else {
        new_pmap = new->task->map->pmap;
        if ((old->task->map->pmap != new_pmap) || (old->mact.specFlags & runningVM)) {
            pmap_switch(new_pmap);
        }
    }
}
/*
 * pcb_user_to_kernel: no-op on this platform.
 */
void
pcb_user_to_kernel(thread_act_t act)
{
    /* intentionally empty */
}
/*
 * act_machine_sv_free
 *
 * Trim the savearea chains hanging off an activation down to what user
 * level still needs:
 *   - toss vector saveareas until one with save_level_vec == 0 is
 *     found (presumably the user-level context -- verify),
 *   - likewise for float saveareas,
 *   - walk the general pcb chain keeping only the single savearea
 *     captured from user mode (MSR PR bit set in the saved SRR1).
 * Also clears any per-CPU FPU/vector ownership still naming this act.
 */
void
act_machine_sv_free(thread_act_t act)
{
    register pcb_t pcb,userpcb,npcb;
    register savearea *svp;
    register int i;

    /* Vector chain: release every savearea above level 0. */
    pcb = act->mact.VMX_pcb;
    while(pcb) {
        svp = (savearea *)pcb;
        if (svp->save_level_vec == 0) break;    /* stop at the level-0 context */
        pcb = (pcb_t)svp->save_prev_vector;     /* grab the next link before freeing */
        svp->save_flags &= ~SAVvmxvalid;        /* vector state here no longer valid */
        if(!(svp->save_flags & SAVinuse)) {     /* free unless still otherwise in use */
            save_ret(svp);
        }
    }
    act->mact.VMX_pcb = pcb;
    if (act->mact.VMX_lvl != 0) {
        /* Drop any per-CPU claim this act holds on live vector state. */
        for(i=0; i < real_ncpus; i++) {
            (void)hw_compare_and_store((unsigned int)act, 0, &per_proc_info[i].VMX_thread);
        }
    }

    /* Float chain: same procedure as the vector chain. */
    pcb = act->mact.FPU_pcb;
    while(pcb) {
        svp = (savearea *)pcb;
        if (svp->save_level_fp == 0) break;
        pcb = (pcb_t)svp->save_prev_float;
        svp->save_flags &= ~SAVfpuvalid;
        if(!(svp->save_flags & SAVinuse)) {
            save_ret(svp);
        }
    }
    act->mact.FPU_pcb = pcb;
    if (act->mact.FPU_lvl != 0) {
        for(i=0; i < real_ncpus; i++) {
            (void)hw_compare_and_store((unsigned int)act, 0, &per_proc_info[i].FPU_thread);
        }
    }

    /* General chain: keep only the savearea captured from user mode. */
    pcb = act->mact.pcb;
    userpcb = (pcb_t)0;
    while(pcb) {
        svp = (savearea *)pcb;
        if ((svp->save_srr1 & MASK(MSR_PR))) {  /* saved from user mode (PR bit set)? */
            assert(userpcb == (pcb_t)0);        /* there should be exactly one */
            userpcb = pcb;
            svp = (savearea *)userpcb;
            npcb = (pcb_t)svp->save_prev;
            svp->save_prev = (struct savearea *)0;  /* detach it from the chain */
        } else {
            svp->save_flags &= ~SAVattach;      /* no longer attached to the act */
            npcb = (pcb_t)svp->save_prev;
            if(!(svp->save_flags & SAVinuse))
                save_ret(svp);
        }
        pcb = npcb;
    }
    act->mact.pcb = userpcb;                    /* the act now owns only the user context */
}
/*
 * act_virtual_machine_destroy
 *
 * Tear down virtual machine and blue box state attached to an
 * activation: disable the blue box if a descriptor is present, and
 * tear down all vmm contexts if a vmm control block exists.
 */
void
act_virtual_machine_destroy(thread_act_t act)
{
    if(act->mact.bbDescAddr) {          /* blue box descriptor present? */
        disable_bluebox_internal(act);
    }
    if(act->mact.vmmControl) {          /* vmm control block present? */
        vmm_tear_down_all(act);
    }
}
/*
 * act_machine_destroy
 *
 * Release all machine-dependent state of a dying activation: tear down
 * any VM / blue box state, drop per-CPU FPU and vector ownership, and
 * return every savearea on the vector, float, and general chains to
 * the pool.  Finally lower the global savearea demand count that
 * thread_machine_create raised.
 */
void
act_machine_destroy(thread_act_t act)
{
    register pcb_t pcb, opcb;
    int i;

#if MACH_ASSERT
    if (watchacts & WA_PCB)
        printf("act_machine_destroy(0x%x)\n", act);
#endif

    act_virtual_machine_destroy(act);

    /* Make sure no processor still thinks this act owns FPU/vector state. */
    for(i=0; i < real_ncpus; i++) {
        (void)hw_compare_and_store((unsigned int)act, 0, &per_proc_info[i].FPU_thread);
        (void)hw_compare_and_store((unsigned int)act, 0, &per_proc_info[i].VMX_thread);
    }

    /* Release the entire vector savearea chain. */
    pcb = act->mact.VMX_pcb;
    while(pcb) {
        opcb = pcb;
        pcb = (pcb_t)(((savearea *)pcb)->save_prev_vector);     /* next link before freeing */
        ((savearea *)opcb)->save_flags &= ~SAVvmxvalid;
        if(!(((savearea *)opcb)->save_flags & SAVinuse)) {
            save_release((savearea *)opcb);
        }
    }
    act->mact.VMX_pcb = (pcb_t)0;

    /* Release the entire float savearea chain. */
    pcb = act->mact.FPU_pcb;
    while(pcb) {
        opcb = pcb;
        pcb = (pcb_t)(((savearea *)pcb)->save_prev_float);
        ((savearea *)opcb)->save_flags &= ~SAVfpuvalid;
        if(!(((savearea *)opcb)->save_flags & SAVinuse)) {
            save_release((savearea *)opcb);
        }
    }
    act->mact.FPU_pcb = (pcb_t)0;

    /* Release the general pcb chain unconditionally, clearing flags first. */
    pcb = act->mact.pcb;
    act->mact.pcb = (pcb_t)0;
    while(pcb) {
        opcb = pcb;
        pcb = (pcb_t)(((savearea *)pcb)->save_prev);
        ((savearea *)opcb)->save_flags = 0;     /* nothing here is valid any more */
        save_release((savearea *)opcb);
    }

    hw_atomic_sub(&saveanchor.saveneed, 4);     /* balance the add in thread_machine_create */
}
/*
 * act_machine_create
 *
 * Machine-dependent activation creation hook.  The real per-activation
 * machine state (pcb savearea, segment registers) is set up in
 * thread_machine_create, so there is nothing to do here.
 *
 * Removed: four locals (pcb, i, CIsTooLimited, pmap) that were
 * declared but never used before the unconditional return.
 *
 * Returns KERN_SUCCESS.
 */
kern_return_t
act_machine_create(task_t task, thread_act_t thr_act)
{
    return KERN_SUCCESS;
}
/*
 * act_machine_init
 *
 * One-time sanity checks that the generic thread_state buffer sizes
 * can hold every flavor of PPC state this machine exports.
 */
void act_machine_init()
{
#if MACH_ASSERT
    if (watchacts & WA_PCB)
        printf("act_machine_init()\n");
#endif

    assert( THREAD_MACHINE_STATE_MAX <= THREAD_STATE_MAX );
    assert( THREAD_STATE_MAX >= PPC_THREAD_STATE_COUNT );
    assert( THREAD_STATE_MAX >= PPC_EXCEPTION_STATE_COUNT );
    assert( THREAD_STATE_MAX >= PPC_FLOAT_STATE_COUNT );
    assert( THREAD_STATE_MAX >= sizeof(struct ppc_saved_state)/sizeof(int));
}
/*
 * act_machine_return
 *
 * Final path for a terminating activation.  Under the (normally
 * unbuilt) CALLOUT_RPC_MODEL it handles orphan and server-termination
 * cases; otherwise it verifies this is the shuttle's top activation
 * and terminates the current thread.  Should never return -- both
 * exits end in panic if control comes back.
 */
void
act_machine_return(int code)
{
    thread_act_t thr_act = current_act();

#if MACH_ASSERT
    if (watchacts & WA_EXIT)
        printf("act_machine_return(0x%x) cur_act=%x(%d) thr=%x(%d)\n",
            code, thr_act, thr_act->ref_count,
            thr_act->thread, thr_act->thread->ref_count);
#endif

    assert( code == KERN_TERMINATED );
    assert( thr_act );
    act_lock_thread(thr_act);

#ifdef CALLOUT_RPC_MODEL
    /* An act that is not the top of its shuttle is an orphan -- the
       recovery path is not implemented. */
    if (thr_act->thread->top_act != thr_act) {
        act_unlock_thread(thr_act);
        panic("act_machine_return: ORPHAN CASE NOT YET IMPLEMENTED");
    }

    /* A client is blocked on this server activation: alert it, arrange
       for it to resume with an RPC error, and load its context. */
    if (thr_act->lower != THR_ACT_NULL) {
        thread_t cur_thread = current_thread();
        thread_act_t cur_act;
        struct ipc_port *iplock;

        iplock = thr_act->pool_port;            /* remembered for the unlock below */
        thr_act->lower->alerts |= SERVER_TERMINATED;
        install_special_handler(thr_act->lower);

        act_locked_act_reference(thr_act);
        act_switch_swapcheck(cur_thread, (ipc_port_t)0);
        (void) switch_act(THR_ACT_NULL);        /* switch back to the client act */
        cur_act = cur_thread->top_act;
        MACH_RPC_RET(cur_act) = KERN_RPC_SERVER_TERMINATED;

        machine_kernel_stack_init(cur_thread, mach_rpc_return_error);
        rpc_unlock(cur_thread);
        if (iplock) ip_unlock(iplock);
        act_unlock(thr_act);
        act_deallocate(thr_act);
        Load_context(cur_thread);
        /* NOTREACHED */
        panic("act_machine_return: TALKING ZOMBIE! (2)");
    }
#endif /* CALLOUT_RPC_MODEL */

    /* This must be the only activation attached to the shuttle. */
    assert(thr_act->thread->top_act == thr_act);
    act_unlock_thread(thr_act);
    thread_terminate_self();

    /* NOTREACHED */
    panic("act_machine_return: TALKING ZOMBIE! (1)");
}
void
thread_machine_set_current(struct thread_shuttle *thread)
{
register int my_cpu = cpu_number();
cpu_data[my_cpu].active_thread = thread;
active_kloaded[my_cpu] = thread->top_act->kernel_loaded ? thread->top_act : THR_ACT_NULL;
}
/*
 * thread_machine_init
 *
 * Machine-dependent thread subsystem initialization.  When
 * MACHINE_STACK is defined and the kernel stack is configured larger
 * than a page, panics at startup.
 */
void
thread_machine_init(void)
{
#ifdef MACHINE_STACK
#if KERNEL_STACK_SIZE > PPC_PGBYTES
    panic("KERNEL_STACK_SIZE can't be greater than PPC_PGBYTES\n");
#endif
#endif
}
#if MACH_ASSERT
/*
 * dump_pcb: debugging aid -- print a pcb's address and, on DEBUG
 * kernels, dump its saved register state.
 */
void
dump_pcb(pcb_t pcb)
{
    printf("pcb @ %8.8x:\n", pcb);
#if DEBUG
    regDump(&pcb->ss);
#endif
}
/*
 * dump_thread: debugging aid -- print a thread shuttle's address.
 */
void
dump_thread(thread_t th)
{
    printf(" thread @ 0x%x:\n", th);
}
/*
 * dump_act: debugging aid -- print an activation's identity, reference
 * counts, and alert/suspend state.  Returns the act pointer as an int,
 * or 0 when the act is NULL.
 */
int
dump_act(thread_act_t thr_act)
{
    if (!thr_act)
        return(0);

    printf("thr_act(0x%x)(%d): thread=%x(%d) task=%x(%d)\n",
        thr_act, thr_act->ref_count,
        thr_act->thread, thr_act->thread ? thr_act->thread->ref_count:0,
        thr_act->task, thr_act->task ? thr_act->task->ref_count : 0);

    printf("\talerts=%x mask=%x susp=%x active=%x hi=%x lo=%x\n",
        thr_act->alerts, thr_act->alert_mask,
        thr_act->suspend_count, thr_act->active,
        thr_act->higher, thr_act->lower);

    return((int)thr_act);
}
#endif
/*
 * get_useraddr
 *
 * Return the saved SRR0 (instruction address) from the current
 * activation's pcb.
 */
unsigned int
get_useraddr()
{
    return current_act()->mact.pcb->ss.srr0;
}
/*
 * stack_detach
 *
 * Detach and return the kernel stack from a thread, leaving the
 * thread with no kernel stack.  Emits a scheduler trace record first.
 */
vm_offset_t
stack_detach(thread_t thread)
{
    vm_offset_t detached;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_DETACH),
        thread, thread->priority,
        thread->sched_pri, 0,
        0);

    detached = thread->kernel_stack;
    thread->kernel_stack = 0;
    return detached;
}
/*
 * stack_attach
 *
 * Attach a kernel stack to a thread and, when the thread has a top
 * activation, push a fresh savearea onto its pcb chain set up so the
 * thread starts executing at start_pos on the new stack, in supervisor
 * mode with interrupts off.
 */
void
stack_attach(struct thread_shuttle *thread,
    vm_offset_t stack,
    void (*start_pos)(thread_t))
{
    thread_act_t thr_act;
    unsigned int *kss;
    struct savearea *sv;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_ATTACH),
        thread, thread->priority,
        thread->sched_pri, start_pos,
        0);

    assert(stack);
    kss = (unsigned int *)STACK_IKS(stack);     /* top of the new stack */
    thread->kernel_stack = stack;

    /* Push a new savearea onto the activation's pcb chain. */
    if ((thr_act = thread->top_act) != 0) {
        sv = save_get();
        sv->save_act = thr_act;
        sv->save_prev = (struct savearea *)thr_act->mact.pcb;
        thr_act->mact.pcb = (pcb_t)sv;

        sv->save_srr0 = (unsigned int) start_pos;
        sv->save_r1 = (vm_offset_t)((int)kss - KF_SIZE);    /* leave room for the dummy frame */
        sv->save_srr1 = MSR_SUPERVISOR_INT_OFF;
        sv->save_xfpscrpad = 0;                 /* clear extended FPSCR image */
        sv->save_xfpscr = 0;
        *((int *)sv->save_r1) = 0;              /* zero the dummy frame's back chain */
        thr_act->mact.ksp = 0;                  /* 0 presumably marks the kernel stack in use -- verify */
    }
    return;
}
/*
 * stack_handoff
 *
 * Hand the kernel stack of 'old' directly to 'new' and make 'new' the
 * current thread, avoiding a full context switch: saves FPU/vector
 * state on MP, switches address spaces when needed, and refreshes the
 * per-processor user-assist, blue box, and special-flags state.
 */
void
stack_handoff(thread_t old,
    thread_t new)
{
    vm_offset_t stack;
    pmap_t new_pmap;

    assert(new->top_act);
    assert(old->top_act);

    stack = stack_detach(old);
    new->kernel_stack = stack;

    per_proc_info[cpu_number()].cpu_flags &= ~traceBE;  /* stop branch tracing across the handoff */

#if NCPUS > 1
    /* On MP the old thread may next run elsewhere; push its state out. */
    if (real_ncpus > 1) {
        fpu_save(old->top_act);
        vec_save(old->top_act);
    }
#endif

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
        (int)old, (int)new, old->sched_pri, new->sched_pri, 0);

    /* Address-space switch: same policy as switch_context. */
    if(new->top_act->mact.specFlags & runningVM) {
        pmap_switch(new->top_act->mact.vmmCEntry->vmmPmap);
    }
    else {
        new_pmap = new->top_act->task->map->pmap;
        if ((old->top_act->task->map->pmap != new_pmap) || (old->top_act->mact.specFlags & runningVM)) {
            pmap_switch(new_pmap);
        }
    }

    thread_machine_set_current(new);
    active_stacks[cpu_number()] = new->kernel_stack;
    per_proc_info[cpu_number()].Uassist = new->top_act->mact.cthread_self;

#if 1
    /* Propagate blue box task environment and special flags per-CPU. */
    per_proc_info[cpu_number()].ppbbTaskEnv = new->top_act->mact.bbTaskEnv;
    per_proc_info[cpu_number()].spcFlags = new->top_act->mact.specFlags;
#endif

    if (branch_tracing_enabled())
        per_proc_info[cpu_number()].cpu_flags |= traceBE;   /* resume branch tracing if requested */

    if(trcWork.traceMask) dbgTrace(0x12345678, (unsigned int)old->top_act, (unsigned int)new->top_act);  /* low-level trace hook */

    return;
}
/*
 * call_continuation
 *
 * Reset the current thread's kernel stack to a zeroed dummy frame at
 * its top and invoke the continuation on it.
 */
void
call_continuation(void (*continuation)(void) )
{
    unsigned int *stack_top;
    vm_offset_t frame;

    assert(current_thread()->kernel_stack);
    stack_top = (unsigned int *)STACK_IKS(current_thread()->kernel_stack);
    assert(continuation);

    frame = (vm_offset_t)((int)stack_top - KF_SIZE);
    assert(frame);
    *((int *)frame) = 0;    /* zero the dummy frame's back chain */

    Call_continuation(continuation, frame);
}
/*
 * thread_swapin_mach_alloc
 *
 * Allocate a pcb savearea for a thread being swapped in; the thread's
 * top activation must not already have one.
 */
void
thread_swapin_mach_alloc(thread_t thread)
{
    thread_act_t act = thread->top_act;
    struct savearea *sv;

    assert(act->mact.pcb == 0);

    sv = save_alloc();
    assert(sv);

    sv->save_act = act;
    act->mact.pcb = (pcb_t)sv;
}