/*
* Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
* The contents of this file constitute Original Code as defined in and
* are subject to the Apple Public Source License Version 1.1 (the
* "License"). You may not use this file except in compliance with the
* License. Please obtain a copy of the License at
* http://www.apple.com/publicsource and read it before using this file.
*
* This Original Code and all software distributed under the License are
* distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
* License for the specific language governing rights and limitations
* under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
*/
/* Low level routines dealing with exception entry and exit.
* There are various types of exception:
*
 * Interrupt, trap, system call and debugger entry. Each has its own
* handler since the state save routine is different for each. The
* code is very similar (a lot of cut and paste).
*
* The code for the FPU disabled handler (lazy fpu) is in cswtch.s
*/
#include <debug.h>
#include <mach_assert.h>
#include <mach/exception_types.h>
#include <mach/ppc/vm_param.h>
#include <assym.s>
#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <ppc/trap.h>
#include <ppc/exception.h>
#include <ppc/spl.h>
#define VERIFYSAVE 0
#define FPVECDBG 0
/*
* thandler(type)
*
* ENTRY: VM switched ON
* Interrupts OFF
* R3 contains exception code
* R4 points to the saved context (virtual address)
* Everything is saved in savearea
*/
/*
* If pcb.ksp == 0 then the kernel stack is already busy,
 * we save a ppc_saved_state below the current stack pointer,
* leaving enough space for the 'red zone' in case the
* trapped thread was in the middle of saving state below
* its stack pointer.
*
* otherwise we save a ppc_saved_state in the pcb, and switch to
* the kernel stack (setting pcb.ksp to 0)
*
* on return, we do the reverse, the last state is popped from the pcb
* and pcb.ksp is set to the top of stack
*/
/* TRAP_SPACE_NEEDED is the space assumed free on the kernel stack when
* another trap is taken. We need at least enough space for a saved state
* structure plus two small backpointer frames, and we add a few
* hundred bytes for the space needed by the C (which may be less but
* may be much more). We're trying to catch kernel stack overflows :-)
*/
#define TRAP_SPACE_NEEDED FM_REDZONE+(2*FM_SIZE)+256
.text
.align 5
.globl EXT(thandler)
/*
 * NOTE(review): throughout this routine many physical lines contain several
 * instructions, labels, and even preprocessor directives run together (e.g.
 * the "deadloop" debug block and the "#endif" fused onto code lines).  This
 * looks like lost line breaks from a bad merge/extraction -- as written these
 * lines will not assemble.  Restore the original one-instruction-per-line
 * layout from the reference source before building; code left byte-identical
 * here on purpose.
 */
LEXT(thandler) /* Trap handler */
#if 0
cmplwi r25,0x298 deadloop: addi r25,r25,1 addi r25,r25,1 addi r25,r25,1 addi r25,r25,1 addi r25,r25,1 addi r25,r25,1 #endif
mfsprg r25,0 /* Get the per_proc */
lwz r1,PP_ISTACKPTR(r25) lwz r6,PP_CPU_DATA(r25) /* Get pointer to cpu-specific data */
cmpwi cr0,r1,0 beq- cr0,EXT(ihandler) lwz r26,ACT_MACT_SPF(r13) rlwinm. r26,r26,0,bbThreadbit,bbThreadbit bnel- checkassist /* Already on interrupt stack -> ihandler; else see if we should assist this */
stw r4,ACT_MACT_PCB(r13) /* Point to our savearea */
stw r8,SAVprev(r4) /* Queue the new save area in the front */
#if VERIFYSAVE
bl versave
/* NOTE(review): the matching #endif for VERIFYSAVE appears to have been lost here */
lwz r9,THREAD_KERNEL_STACK(r6) stw r13,SAVact(r4)
bne+ cr1,.L_kstackfree subi r1,r26,FM_REDZONE .L_kstackfree:
lwz r7,savesrr1(r4) /* Pick up the entry MSR */
sub r9,r1,r9 cmplwi cr2,r9,KERNEL_STACK_SIZE /* Range-check distance from kernel stack base (cr2 used at kernelStackBad) */
stw r0,ACT_MACT_KSP(r13) /* Show that we have taken the stack */
.L_state_on_kstack:
rlwinm. r6,r7,0,MSR_VEC_BIT,MSR_VEC_BIT kernelStackNotBad: lwz r3,savevrsave(r4) /* Test entry-time MSR vector bit */
tvecoff: rlwinm. r3,r7,0,MSR_FP_BIT,MSR_FP_BIT beq+ tfpoff /* Floating point was off... */
lwz r3,savexfpscr(r4)
tfpoff: stw r26,FM_BACKPTR(r1) #if DEBUG
/* If debugging, we need two frames, the first being a dummy
 * which links back to the trapped routine. The second is
 * that which the C routine below will need
 */
lwz r3,savesrr0(r4) /* Get the point of interruption */
stw r3,FM_LR_SAVE(r1) /* save old instr ptr as LR value */
stwu r1, -FM_SIZE(r1) /* and make new frame */
#endif /* DEBUG */
/* call trap handler proper, with
 * ARG0 = type (not yet, holds pcb ptr)
 * ARG1 = saved_state ptr (already there)
 * ARG2 = dsisr (already there)
 * ARG3 = dar (already there)
 */
lwz r3,saveexception(r4) /* Get the exception code */
lwz r0,ACT_MACT_SPF(r13) addi r5,r3,-T_DATA_ACCESS cmplwi cr2,r5,T_RUNMODE_TRACE-T_DATA_ACCESS lwz r5,savedsisr(r4) /* Get the saved DSISR */
crnor cr7_eq,cr0_eq,cr2_gt /* Fold the exception-range tests into one condition */
cmpi cr2,r3,T_PREEMPT crandc cr0_eq,cr7_eq,cr0_eq lwz r6,savedar(r4) /* Get the DAR */
beq- cr2, .L_call_trap /* Don't turn on interrupts for T_PREEMPT */
beq- exitFromVM /* syscall exception might warp here if there's nothing left
 * to do except generate a trap
 */
.L_call_trap:
#if 0
lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
sc /* (TEST/DEBUG) */
#endif
bl EXT(trap)
/*
 * Ok, return from C function
 *
 * This is also the point where new threads come when they are created.
 * The new thread is setup to look like a thread that took an
 * interrupt and went immediately into trap.
 *
 */
thread_return:
mfmsr r7 /* Get the MSR */
lwz r4,SAVprev(r3) /* Pick up the previous savearea */
rlwinm r7,r7,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear the interrupt enable mask */
lwz r11,SAVflags(r3) /* Get the flags of the current savearea */
mtmsr r7 /* Disable for interrupts */
mfsprg r10,0 /* Restore the per_proc info */
lwz r8,savesrr1(r3) rlwinm r11,r11,0,15,13 /* Clear the syscall flag */
lwz r1,CPU_ACTIVE_THREAD(r1) /* and the active thread */
rlwinm. r8,r8,0,MSR_PR_BIT,MSR_PR_BIT stw r11,SAVflags(r3) /* Save back the flags (with reset stack cleared) */
#if 0
lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
sc /* (TEST/DEBUG) */
#endif
stw r4,ACT_MACT_PCB(r8) /* Point to the previous savearea (or 0 if none) */
beq- chkfac lwz r5,THREAD_KERNEL_STACK(r1) /* Get the base pointer to the stack */
addi r5,r5,KERNEL_STACK_SIZE-FM_SIZE /* Reset to empty */
stw r5,ACT_MACT_KSP(r8) /* Save the empty stack pointer */
b chkfac /* Go end it all... */
kernelStackBad:
lwz r3,PP_DEBSTACK_TOP_SS(r25) sub r3,r1,r3 blt+ cr2,kernelStackNotBad lis r0,hi16(Choke) li r3,failStack /* Not on the debug stack either: choke (kernel stack overflow) */
/*
* shandler(type)
*
* ENTRY: VM switched ON
* Interrupts OFF
* R3 contains exception code
* R4 points to the saved context (virtual address)
* Everything is saved in savearea
*/
/*
* If pcb.ksp == 0 then the kernel stack is already busy,
* this is an error - jump to the debugger entry
*
* otherwise depending upon the type of
* syscall, look it up in the kernel table
* or pass it to the server.
*
* on return, we do the reverse, the state is popped from the pcb
* and pcb.ksp is set to the top of stack.
*/
/*
* NOTE:
* mach system calls are negative
* BSD system calls are low positive
* PPC-only system calls are in the range 0x6xxx
* PPC-only "fast" traps are in the range 0x7xxx
*/
/*
 * NOTE(review): as in thandler, several physical lines below have multiple
 * instructions/labels collapsed together and a VERIFYSAVE #endif appears to
 * be missing after "bl versave" -- confirm against the reference source.
 * Code left byte-identical; only comments added.
 */
.align 5
.globl EXT(shandler)
LEXT(shandler) /* System call handler */
mfsprg r25,0 /* Get the per proc area */
lwz r0,saver0(r4) /* Get the original syscall number */
lwz r17,PP_ISTACKPTR(r25) lwz r16,PP_CPU_DATA(r25) /* Assume we need this */
mr. r17,r17 beq- EXT(ihandler) lwz r16,CPU_ACTIVE_THREAD(r16) /* Get the thread pointer */
beq+ svecoff stw r6,liveVRS(r25) svecoff: rlwinm. r6,r7,0,MSR_FP_BIT,MSR_FP_BIT beq+ sfpoff stw r9,liveFPSCR(r25) sfpoff: lwz r6,ACT_MACT_SPF(r13) crmove cr6_eq,runningVMbit lwz r26,ACT_MACT_BEDA(r13)
noassist: cmplwi r15,0x7000 /* Do we have a fast path trap? */
lwz r14,ACT_MACT_PCB(r13) /* Now point to the PCB */
beql+ fastpath /* We think it's a fastpath... */
lwz r1,ACT_MACT_KSP(r13) /* Get the kernel stack pointer */
#if DEBUG
mr. r1,r1 /* Are we already on the kernel stack? */
li r3,T_SYSTEM_CALL /* Yup, pretend we had an interrupt... */
beq- EXT(ihandler) /* Bad boy, bad boy... What'cha gonna do when they come for you? */
#endif /* DEBUG */
stw r4,ACT_MACT_PCB(r13) /* Point to our savearea */
li r0,0 /* Clear this out */
stw r14,SAVprev(r4) /* Queue the new save area in the front */
stw r13,SAVact(r4) /* Point the savearea at its activation */
#if VERIFYSAVE
bl versave
/* NOTE(review): matching #endif for VERIFYSAVE seems to be missing here */
mr r30,r4 /* Save pointer to the new context savearea */
lwz r15,saver1(r4) /* Grab interrupt time stack */
stw r0,ACT_MACT_KSP(r13) /* Mark stack as busy with 0 val */
stw r15,FM_BACKPTR(r1) /* Link backwards */
#if DEBUG
/* If debugging, we need two frames, the first being a dummy
 * which links back to the trapped routine. The second is
 * that which the C routine below will need
 */
lwz r8,savesrr0(r30) /* Get the point of interruption */
stw r8,FM_LR_SAVE(r1) /* save old instr ptr as LR value */
stwu r1, -FM_SIZE(r1) /* and make new frame */
#endif /* DEBUG */
mfmsr r11 /* Get the MSR */
lwz r15,SAVflags(r4) /* Get the savearea flags */
ori r11,r11,lo16(MASK(MSR_EE)) /* Turn on interruption enabled bit */
lwz r0,saver0(r30) rlwinm r10,r0,0,0,19 cmplwi r10,0x6000 beq- cr6,exitFromVM beq- ppcscall mtmsr r11 /* Route 0x6xxx PPC-only calls / VM exit; else enable interruptions */
/* Call a function that can print out our syscall info */
/* Note that we don't care about any volatiles yet */
mr r4,r30
bl EXT(syscall_trace)
lwz r0,saver0(r30) /* Get the system call selector */
mr. r0,r0 /* What kind is it? */
blt- .L_kernel_syscall /* -ve syscall - go to kernel */
/* +ve syscall - go to server */
cmpwi cr0,r0,0x7FFA
beq- .L_notify_interrupt_syscall
#ifdef MACH_BSD
mr r3,r30 /* Get PCB/savearea */
lwz r4,saver4(r30) /* Restore r4 */
lwz r5,saver5(r30) /* Restore r5 */
lwz r6,saver6(r30) /* Restore r6 */
lwz r7,saver7(r30) /* Restore r7 */
lwz r8,saver8(r30) /* Restore r8 */
lwz r9,saver9(r30) /* Restore r9 */
lwz r10,saver10(r30) /* Restore r10 */
bl EXT(unix_syscall) /* Check out unix... */
#endif
.L_call_server_syscall_exception:
li r3,EXC_SYSCALL /* doexception(EXC_SYSCALL, num, 1) */
.L_call_server_exception:
mr r4,r0 /* Set syscall selector */
li r5,1
b EXT(doexception) /* Go away, never to return... */
/* The above, but with EXC_MACH_SYSCALL */
.L_call_server_mach_syscall:
li r3,EXC_MACH_SYSCALL
b .L_call_server_exception /* Join the common above... */
.L_notify_interrupt_syscall:
lwz r3,saver3(r30) b .L_syscall_return
ppcscall: rlwinm r11,r0,2,18,29 cmplwi r11,PPCcallmax bgt- .L_call_server_syscall_exception /* Index into PPC-only call table; range check */
mr r3,r30 mr. r11,r11 beq- .L_call_server_syscall_exception
.globl EXT(ppcscret)
LEXT(ppcscret)
mr. r3,r3 bgt+ .L_thread_syscall_ret_check_ast blt+ .L_thread_syscall_return b .L_call_server_syscall_exception /* >0: normal ret w/AST check; <0: plain ret; 0: raise exception */
/* Once here, we know that the syscall was -ve
* we should still have r1=ksp,
* r16 = pointer to current thread,
* r13 = pointer to top activation,
* r0 = syscall number
* r30 = pointer to saved state (in pcb)
*/
/*
 * Dispatch a negative (Mach) syscall number through mach_trap_table.
 * On entry (per the comment block above): r0 = syscall number, r1 = ksp,
 * r13 = top activation, r16 = thread, r30 = saved state.
 */
.L_kernel_syscall:
neg r31, r0 /* Make number +ve and put in r31*/
/* If out of range, call server with syscall exception */
addis r29, 0, HIGH_CADDR(EXT(mach_trap_count))
addi r29, r29, LOW_ADDR(EXT(mach_trap_count))
lwz r29, 0(r29)
cmp cr0, r31, r29
bge- cr0, .L_call_server_syscall_exception
addis r29, 0, HIGH_CADDR(EXT(mach_trap_table))
addi r29, r29, LOW_ADDR(EXT(mach_trap_table))
/* multiply the trap number to get offset into table */
slwi r31, r31, MACH_TRAP_OFFSET_POW2
/* r31 now holds offset into table of our trap entry,
 * add on the table base, and it then holds pointer to entry
 */
add r31, r31, r29
/* If the function is kern_invalid, prepare to send an exception.
   This is messy, but parallels the x86. We need it for task_by_pid,
   at least. */
lis r29, HIGH_CADDR(EXT(kern_invalid))
addi r29, r29, LOW_ADDR(EXT(kern_invalid))
lwz r0, MACH_TRAP_FUNCTION(r31)
cmp cr0, r0, r29
beq- .L_call_server_syscall_exception
/* get arg count. If argc > 8 then not all args were in regs,
 * so we must perform copyin.
 */
lwz r29, MACH_TRAP_ARGC(r31)
cmpwi cr0, r29, 8
ble+ .L_syscall_got_args
/* argc > 8 - perform a copyin */
/* if the syscall came from kernel space, we can just copy */
lwz r0,savesrr1(r30) /* Pick up exception time MSR */
andi. r0,r0,MASK(MSR_PR) /* Check the priv bit */
bne+ .L_syscall_arg_copyin /* We're not privileged... */
/* we came from a privileged task, just do a copy */
/* get user's stack pointer */
lwz r28,saver1(r30) /* Get the stack pointer */
subi r29,r29,8 /* Get the number of arguments to copy */
addi r28,r28,COPYIN_ARG0_OFFSET-4 /* Point to source - 4 */
addi r27,r1,FM_ARG0-4 /* Point to sink - 4 */
.L_syscall_copy_word_loop:
addic. r29,r29,-1 /* Count down the number of arguments left */
lwz r0,4(r28) /* Pick up the argument from the stack */
addi r28,r28,4 /* Point to the next source */
stw r0,4(r27) /* Store the argument */
addi r27,r27,4 /* Point to the next sink */
bne+ .L_syscall_copy_word_loop /* Move all arguments... */
b .L_syscall_got_args /* Go call it now... */
/* we came from a user task, pay the price of a real copyin */
/* set recovery point */
/*
 * Copy extra (beyond 8) syscall arguments in from the user stack via the
 * SR_COPYIN segment register.  A fault here is caught by
 * .L_syscall_copyin_recover (the recovery point set up elsewhere).
 * NOTE(review): several lines below have collapsed instructions/labels
 * (e.g. ".L_syscall_copyin_seg_loop:" fused onto the rlwinm line) --
 * restore line breaks from the reference source before assembling.
 */
.L_syscall_arg_copyin:
lwz r8,ACT_VMMAP(r13) lwz r8,VMMAP_PMAP(r8) addi r8,r8,PMAP_SEGS /* r8 -> pmap segment registers for this activation */
/* We can manipulate the COPYIN segment register quite easily
 * here, but we've also got to make sure we don't go over a
 * segment boundary - hence some mess.
 * Registers from 12-29 are free for our use.
 */
lwz r28,saver1(r30) /* Get the stack pointer */
subi r29,r29,8 /* Get the number of arguments to copy */
addi r28,r28,COPYIN_ARG0_OFFSET /* Set source in user land */
/* set up SR_COPYIN to allow us to copy, we may need to loop
 * around if we change segments. We know that this previously
 * pointed to user space, so the sid doesn't need setting.
 */
rlwinm r7,r28,6,26,29 .L_syscall_copyin_seg_loop:
lwzx r10,r8,r7 mtsr SR_COPYIN,r10 /* Load the segment register for the current source segment */
oris r26,r26,(SR_COPYIN_NUM << (28-16)) /* Make r27 point to address-4 of where we will store copied args */
addi r27,r1,FM_ARG0-4
.L_syscall_copyin_word_loop:
lwz r0,0(r26) /* MAY CAUSE PAGE FAULT! */
subi r29,r29,1 stw r0,4(r27) addi r27,r27,4
rlwinm. r0,r26,0,4,29 bne+ .L_syscall_copyin_word_loop addi r7,r7,4 /* Loop until a segment boundary; then bump segment index */
/* Don't bother restoring SR_COPYIN, we can leave it trashed */
/* clear thread recovery as we're done touching user data */
.L_syscall_copyin_done:
li r0,0
stw r0,THREAD_RECOVER(r16) /* R16 still holds thread ptr */
/*
 * All arguments are in registers / on the kernel stack: count the call,
 * restore the argument registers from the savearea, and dispatch through
 * the trap-table function pointer.
 */
.L_syscall_got_args:
lwz r8,ACT_TASK(r13) /* Get our task */
lis r10,hi16(EXT(c_syscalls_mach)) /* Get top half of counter address */
lwz r7,TASK_SYSCALLS_MACH(r8) addi r7,r7,1 stw r7,TASK_SYSCALLS_MACH(r8) lwz r9,0(r10) /* Bump per-task Mach syscall count; get global counter */
lwz r5,saver5(r30) /* Restore r5 */
lwz r6,saver6(r30) /* Restore r6 */
addi r9,r9,1 /* Add 1 */
lwz r7,saver7(r30) /* Restore r7 */
lwz r8,saver8(r30) /* Restore r8 */
stw r9,0(r10) /* Save it back */
lwz r9,saver9(r30) /* Restore r9 */
lwz r10,saver10(r30) /* Restore r10 */
lwz r0,MACH_TRAP_FUNCTION(r31)
/* calling this function, all the callee-saved registers are
 * still valid except for r30 and r31 which are in the PCB
 * r30 holds pointer to saved state (ie. pcb)
 * r31 is scrap
 */
mtctr r0
bctrl /* perform the actual syscall */
/* 'standard' syscall returns here - INTERRUPTS ARE STILL ON */
/* r3 contains value that we're going to return to the user
*/
/*
* Ok, return from C function, ARG0 = return value
*
* get the active thread's PCB pointer and thus pointer to user state
* saved state is still in R30 and the active thread is in R16 .
*/
/* Store return value into saved state structure, since
* we need to pick up the value from here later - the
* syscall may perform a thread_set_syscall_return
* followed by a thread_exception_return, ending up
* at thread_syscall_return below, with SS_R3 having
* been set up already
*/
/* When we are here, r16 should point to the current thread,
* r30 should point to the current pcb
*/
/* save off return value, we must load it
* back anyway for thread_exception_return
* TODO NMGS put in register?
*/
/*
 * Common syscall return: stash the return code in the savearea, trace the
 * syscall end, then loop on the AST check until none are pending before
 * unwinding the kernel stack and heading to chkfac/exception exit.
 */
.L_syscall_return:
mr r31,r16 /* Move the current thread pointer */
stw r3,saver3(r30) /* Stash the return code */
/* Call a function that records the end of */
/* the mach system call */
mr r4,r30
bl EXT(syscall_trace_end)
#if 0
lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
mr r4,r31 /* (TEST/DEBUG) */
oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
mr r5,r30 /* (TEST/DEBUG) */
sc /* (TEST/DEBUG) */
#endif
.L_thread_syscall_ret_check_ast:
mfmsr r12 /* Get the current MSR */
rlwinm r12,r12,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Turn off interruptions enable bit */
mtmsr r12 /* Turn interruptions off */
mfsprg r10,0 /* Get the per_processor block */
/* Check to see if there's an outstanding AST */
lwz r4,PP_NEED_AST(r10)
lwz r4,0(r4)
cmpi cr0,r4, 0
beq cr0,.L_syscall_no_ast
/* Yes there is, call ast_taken
 * pretending that the user thread took an AST exception here,
 * ast_taken will save all state and bring us back here
 */
#if DEBUG
/* debug assert - make sure that we're not returning to kernel */
lwz r3,savesrr1(r30)
andi. r3,r3,MASK(MSR_PR)
bne+ 0f /* returning to user level, check */
lis r0,hi16(Choke) li r3,failContext
0:
#endif /* DEBUG */
li r3, AST_ALL
li r4, 1
bl EXT(ast_taken)
b .L_thread_syscall_ret_check_ast
/* thread_exception_return returns to here, almost all
 * registers intact. It expects a full context restore
 * of what it hasn't restored itself (ie. what we use).
 *
 * In particular for us,
 * we still have r31 points to the current thread,
 * r30 points to the current pcb
 */
.L_syscall_no_ast:
.L_thread_syscall_return:
mr r3,r30
lwz r11,SAVflags(r30) /* Get the flags */
lwz r5,THREAD_KERNEL_STACK(r31) /* Get the base pointer to the stack */
rlwinm r11,r11,0,15,13 /* Clear the syscall flag */
lwz r4,SAVprev(r30) addi r5,r5,KERNEL_STACK_SIZE-FM_SIZE /* Reset to empty */
stw r4,ACT_MACT_PCB(r8) /* Pop this savearea off the activation's chain */
b chkfac
.L_syscall_copyin_recover:
/* This is the catcher for any data faults in the copyin
 * of arguments from the user's stack.
 * r30 still holds a pointer to the PCB
 *
 * call syscall_error(EXC_BAD_ACCESS, EXC_PPC_VM_PROT_READ, sp, ssp),
 *
 * we already had a frame so we can do this
 */
li r3,EXC_BAD_ACCESS
li r4,EXC_PPC_VM_PROT_READ
lwz r5,saver1(r30)
mr r6,r30
bl EXT(syscall_error)
b .L_syscall_return
/*
* thread_exception_return()
*
* Return to user mode directly from within a system call.
*/
/*
 * thread_exception_return / thread_bootstrap_return
 *
 * Return to user mode directly from within a system call: disable
 * interrupts, drain any pending ASTs, then join either the trap-return
 * (thread_return) or syscall-return path depending on SAVsyscall.
 * NOTE(review): the two LEXT lines below have directives/labels fused
 * onto them (lost line breaks) -- restore before assembling.
 */
.align 5
.globl EXT(thread_bootstrap_return)
LEXT(thread_bootstrap_return) .globl EXT(thread_exception_return)
LEXT(thread_exception_return) .L_thread_exc_ret_check_ast:
mfmsr r3 /* Get the MSR */
rlwinm r3,r3,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear EE */
mtmsr r3 /* Disable interrupts */
/* Check to see if there's an outstanding AST */
/* We don't bother establishing a call frame even though CHECK_AST
   can invoke ast_taken(), because it can just borrow our caller's
   frame, given that we're not going to return.
*/
mfsprg r10,0 /* Get the per_processor block */
lwz r4,PP_NEED_AST(r10)
lwz r4,0(r4)
cmpi cr0,r4, 0
beq cr0,.L_exc_ret_no_ast
/* Yes there is, call ast_taken
 * pretending that the user thread took an AST exception here,
 * ast_taken will save all state and bring us back here
 */
li r3,AST_ALL
li r4,1
bl EXT(ast_taken)
b .L_thread_exc_ret_check_ast /* check for a second AST (rare)*/
/* arriving here, interrupts should be disabled */
/* Get the active thread's PCB pointer to restore regs
 */
.L_exc_ret_no_ast:
lwz r31,PP_CPU_DATA(r10)
lwz r31,CPU_ACTIVE_THREAD(r31)
lwz r30,THREAD_TOP_ACT(r31)
lwz r30,ACT_MACT_PCB(r30)
mr. r30,r30 #if DEBUG
/*
 * debug assert - make sure that we're not returning to kernel
 * get the active thread's PCB pointer and thus pointer to user state
 */
lwz r3,savesrr1(r30)
andi. r3,r3,MASK(MSR_PR)
bne+ ret_user2 lis r0,hi16(Choke) li r3,failContext
ret_user2:
#endif /* DEBUG */
/* If the MSR_SYSCALL_MASK isn't set, then we came from a trap,
 * so warp into the return_from_trap (thread_return) routine,
 * which takes PCB pointer in R3, not in r30!
 */
lwz r0,SAVflags(r30)
mr r3,r30 /* Copy pcb pointer into r3 in case */
andis. r0,r0,SAVsyscall>>16 /* Are we returning from a syscall? */
beq- cr0,thread_return /* Nope, must be a thread return... */
b .L_thread_syscall_return
/*
 * makeDummyCtx -- build a fresh, zeroed savearea with an exportable MSR
 * and enter the trap-return path (thread_return).
 * NOTE(review): lines here are collapsed (multiple instructions per line),
 * and the loop branch for "cleardummy" appears to be missing; the trailing
 * open-comment on the last line belongs to the ihandler header that follows.
 */
makeDummyCtx:
bl EXT(save_get) addi r2,r3,savefp0
cleardummy: stw r0,0(r4) cmplw r4,r2
lis r2,hi16(MSR_EXPORT_MASK_SET) stw r2,savesrr1(r3) b thread_return /*
* ihandler(type)
*
* ENTRY: VM switched ON
* Interrupts OFF
* R3 contains exception code
* R4 points to the saved context (virtual address)
* Everything is saved in savearea
*
*/
/*
 * NOTE(review): several lines in this handler have collapsed
 * instructions/labels (lost line breaks), the VERIFYSAVE #endif after
 * "bl versave" is missing, and the branch target ".L_no_int_ast2" is not
 * defined anywhere in this file as shown -- reconcile with the reference
 * source before assembling.  Code left byte-identical; comments only.
 */
.align 5
.globl EXT(ihandler)
LEXT(ihandler) /* Interrupt handler */
/*
 * get the value of istackptr, if it's zero then we're already on the
 * interrupt stack, otherwise it points to a saved_state structure
 * at the top of the interrupt stack.
 */
lwz r10,savesrr1(r4) /* Get SRR1 */
mfsprg r25,0 /* Get the per_proc block */
li r14,0 /* Zero this for now */
rlwinm. r13,r10,0,MSR_VEC_BIT,MSR_VEC_BIT crmove cr1_eq,cr0_eq rlwinm. r10,r10,0,MSR_FP_BIT,MSR_FP_BIT lwz r16,CPU_ACTIVE_THREAD(r16) /* Get the thread pointer */
beq+ cr1,ivecoff stw r7,liveVRS(r25) ivecoff: li r0,0 /* Get a constant 0 */
cmplwi cr1,r16,0 /* Are we still booting? */
beq+ ifpoff stw r9,liveFPSCR(r25) ifpoff: mr. r1,r1 /* Is it active? */
beq- cr1,ihboot1 /* We're still coming up... */
lwz r13,THREAD_TOP_ACT(r16) /* Pick up the active thread */
lwz r14,ACT_MACT_PCB(r13) /* Now point to the PCB */
ihboot1: lwz r9,saver1(r4) /* Pick up the 'rupt time stack */
stw r14,SAVprev(r4) /* Queue the new save area in the front */
stw r13,SAVact(r4) /* Point the savearea at its activation */
beq- cr1,ihboot4 /* We're still coming up... */
stw r4,ACT_MACT_PCB(r13) /* Point to our savearea */
ihboot4: bne .L_istackfree /* Nope... */
/* We're already on the interrupt stack, get back the old
 * stack pointer and make room for a frame
 */
lwz r10,PP_INTSTACK_TOP_SS(r25) subi r1,r9,FM_REDZONE cmplwi r5,INTSTACK_SIZE-FM_SIZE /* Range-check depth on the interrupt stack */
lwz r5,PP_DEBSTACK_TOP_SS(r25) sub r5,r1,r5 blt+ ihsetback lis r0,hi16(Choke) li r3,failStack /* Not on debug stack either: choke */
.align 5
.L_istackfree:
lwz r10,SAVflags(r4)
stw r0,PP_ISTACKPTR(r25) /* Mark the stack in use */
oris r10,r10,HIGH_ADDR(SAVrststk) /* Indicate we reset stack when we return from this one */
stw r10,SAVflags(r4) /* Stick it back */
/*
 * To summarize, when we reach here, the state has been saved and
 * the stack is marked as busy. We now generate a small
 * stack frame with backpointers to follow the calling
 * conventions. We set up the backpointers to the trapped
 * routine allowing us to backtrace.
 */
ihsetback: subi r1,r1,FM_SIZE /* Make a new frame */
stw r9,FM_BACKPTR(r1) /* point back to previous stackptr */
#if VERIFYSAVE
bl versave
/* NOTE(review): matching #endif for VERIFYSAVE seems to be missing here */
#if DEBUG
/* If debugging, we need two frames, the first being a dummy
 * which links back to the trapped routine. The second is
 * that which the C routine below will need
 */
lwz r5,savesrr0(r4) /* Get interrupt address */
stw r5,FM_LR_SAVE(r1) /* save old instr ptr as LR value */
stwu r1,-FM_SIZE(r1) /* Make another new frame for C routine */
#endif /* DEBUG */
lwz r5,savedsisr(r4) /* Get the DSISR */
lwz r6,savedar(r4) /* Get the DAR */
bl EXT(interrupt)
/* interrupt() returns a pointer to the saved state in r3
 *
 * Ok, back from C. Disable interrupts while we restore things
 */
.globl EXT(ihandler_ret)
LEXT(ihandler_ret) /* Marks our return point from debugger entry */
mfmsr r0 /* Get our MSR */
rlwinm r0,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Flip off the interrupt enabled bit */
mtmsr r0 /* Make sure interrupts are disabled */
mfsprg r10,0 /* Get the per_proc block */
lwz r8,PP_CPU_DATA(r10) /* Get the CPU data area */
lwz r7,SAVflags(r3) /* Pick up the flags */
lwz r8,CPU_ACTIVE_THREAD(r8) /* and the active thread */
lwz r9,SAVprev(r3) /* Get previous save area */
cmplwi cr1,r8,0 /* Are we still initializing? */
lwz r12,savesrr1(r3) /* Get the MSR we will load on return */
beq- cr1,ihboot2 /* Skip if we are still in init... */
lwz r8,THREAD_TOP_ACT(r8) /* Pick up the active thread */
ihboot2: andis. r11,r7,HIGH_ADDR(SAVrststk) /* Is this the first on the stack? */
beq- cr1,ihboot3 /* Skip if we are still in init... */
stw r9,ACT_MACT_PCB(r8) /* Point to previous context savearea */
ihboot3: mr r4,r3 /* Move the savearea pointer */
beq .L_no_int_ast2 /* Get going if not the top o' stack... */
/* We're the last frame on the stack. Restore istackptr to empty state.
 *
 * Check for ASTs if one of the below is true:
 * returning to user mode
 * returning to a kloaded server
 */
lwz r9,PP_INTSTACK_TOP_SS(r10) /* Get the empty stack value */
lwz r5,PP_CPU_DATA(r10) /* Get cpu_data ptr */
andc r7,r7,r11 /* Remove the stack reset bit in case we pass this one */
stw r9,PP_ISTACKPTR(r10) /* Save that saved state ptr */
lwz r3,CPU_PREEMPTION_LEVEL(r5) /* Get preemption level */
stw r7,SAVflags(r4) /* Save the flags */
cmplwi r3, 0 /* Check for preemption */
bne .L_no_int_ast /* Don't preempt if level is not zero */
andi. r6,r12,MASK(MSR_PR) /* privilege mode */
lwz r11,PP_NEED_AST(r10) /* Get the AST request address */
lwz r11,0(r11) /* Get the request */
beq- .L_kernel_int_ast /* In kernel space, AST_URGENT check */
li r3,T_AST /* Assume the worst */
mr. r11,r11 /* Are there any pending? */
beq .L_no_int_ast /* Nope... */
b .L_call_thandler
.L_kernel_int_ast:
andi. r11,r11,AST_URGENT /* AST_URGENT */
li r3,T_PREEMPT /* Assume the worst */
beq .L_no_int_ast /* Nope... */
.L_call_thandler:
/*
 * There is a pending AST. Massage things to make it look like
 * we took a trap and jump into the trap handler. To do this
 * we essentially pretend to return from the interrupt but
 * at the last minute jump into the trap handler with an AST
 * trap instead of performing an rfi.
 */
stw r3,saveexception(r4) /* Set the exception code to T_AST/T_PREEMPT */
b EXT(thandler) /* hyperspace into AST trap */
/*
 * chkfac -- common exit path: reconcile the floating-point and vector
 * (AltiVec) facility ownership/levels against the savearea being returned,
 * adjust the return MSR's FP/VEC bits, and do a few exit-time checks
 * before switching the savearea pointer to its physical address.
 *
 * NOTE(review): this is by far the worst-mangled section of the file.
 * Nearly every line has many instructions/labels collapsed together, the
 * "#if FPVECDBG"/"#endif" pairs are unbalanced (stray #endif fused onto
 * code lines at the fptoss/fptoplvl area), and instruction sequences
 * appear to be missing outright.  Do NOT attempt to assemble or modify
 * this section without diffing against the reference source; it is left
 * byte-identical here.
 */
.L_no_int_ast:
mr r3,r4 rlwinm r7,r7,0,15,13 /* Clear the syscall bit */
li r4,0 beq- cr1,chkfac
chkfac: mr. r8,r8
lwz r20,ACT_MACT_FPUlvl(r8) cmplw cr1,r20,r3 rlwinm r12,r12,0,MSR_FP_BIT+1,MSR_FP_BIT-1 lhz r26,PP_CPU_NUMBER(r10) bne- cr0,chkvecnr #if FPVECDBG
lis r0,HIGH_ADDR(CutTrace) oris r0,r0,LOW_ADDR(CutTrace) #endif
li r22,ACT_MACT_FPUcpu cfSpin2: lwarx r27,r22,r8 oris r0,r27,hi16(fvChk) stwcx. r0,r22,r8
isync cmplw r4,r20 cmplw cr1,r27,r26 beq+ cr1,chkfpnlvl stw r0,PP_FPU_THREAD(r10)
chkfpnlvl: bne- chkvec ori r12,r12,lo16(MASK(MSR_FP)) li r0,1 lwz r25,SAVlvlfp(r24) bne+ chkvec b chkvec chkfpfree: li r0,0
bne- cr2,chkfpnfr chkfpnfr:
#if FPVECDBG
lis r0,HIGH_ADDR(CutTrace) oris r0,r0,LOW_ADDR(CutTrace) #endif
mr. r24,r24
#if FPVECDBG
rlwinm. r0,r24,0,0,15 BREAKPOINT_TRAP lis r0,HIGH_ADDR(CutTrace) oris r0,r0,LOW_ADDR(CutTrace) #endif
lwz r25,SAVlvlfp(r24) cmplw cr1,r25,r3 bne cr1,chkvec fptoss: lwz r25,SAVprefp(r24) lis r0,HIGH_ADDR(CutTrace) oris r0,r0,LOW_ADDR(CutTrace) sc mr. r25,r25 beq fptoplvl rlwinm. r0,r25,0,0,15 BREAKPOINT_TRAP #endif
lwz r25,SAVlvlfp(r25) fptoplvl: lwz r19,SAVflags(r24) rlwinm. r0,r19,0,1,1 BREAKPOINT_TRAP #endif
#if FPVECDBG
lis r0,HIGH_ADDR(CutTrace) oris r0,r0,LOW_ADDR(CutTrace) #endif
rlwinm r22,r24,0,0,19 stw r25,ACT_MACT_FPUlvl(r8) stw r19,SAVflags(r24) #if FPVECDBG
lis r0,HIGH_ADDR(CutTrace) oris r0,r0,LOW_ADDR(CutTrace) #endif
#if FPVECDBG
rlwinm. r0,r24,0,0,15 BREAKPOINT_TRAP #endif
lwz r23,SACvrswap(r22) xor r23,r24,r23 stw r23,PP_QUICKFRET(r10) invlivefp: lis r20,hi16(EXT(real_ncpus)) ori r20,r20,lo16(EXT(real_ncpus)) lwz r20,0(r20) li r2,0 invlivefl: cmplw r23,r10 beq invlivefn invlivefa: lwarx r0,r25,r23 bne invlivefn bne- invlivefa invlivefn: mr. r20,r20 bgt invlivefl
stw r27,ACT_MACT_FPUcpu(r8) chkvecnr: lwz r20,ACT_MACT_VMXlvl(r8) cmplw cr1,r20,r3 rlwinm r12,r12,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 bne- cr0,setenanr #if FPVECDBG
lis r0,HIGH_ADDR(CutTrace) oris r0,r0,LOW_ADDR(CutTrace) #endif
li r22,ACT_MACT_VMXcpu cvSpin2: lwarx r27,r22,r8 oris r0,r27,hi16(fvChk) stwcx. r0,r22,r8
isync cmplw r4,r20 cmplw cr1,r27,r26 beq+ cr1,chkvecnlvl stw r0,PP_VMX_THREAD(r10)
chkvecnlvl: bne- setena oris r12,r12,hi16(MASK(MSR_VEC)) li r0,1 lwz r25,SAVlvlvec(r24) bne+ setena b setena chkvecfree: li r0,0
bne- cr2,chkvecnfr chkvecnfr:
#if FPVECDBG
lis r0,HIGH_ADDR(CutTrace) oris r0,r0,LOW_ADDR(CutTrace) #endif
mr. r24,r24
#if FPVECDBG
lis r0,HIGH_ADDR(CutTrace) oris r0,r0,LOW_ADDR(CutTrace) #endif
lwz r25,SAVlvlvec(r24) cmplw cr1,r25,r3 bne cr1,setena vectoss: lwz r25,SAVprevec(r24) lis r0,HIGH_ADDR(CutTrace) oris r0,r0,LOW_ADDR(CutTrace) sc mr. r25,r25 beq vectoplvl
vectoplvl: lwz r19,SAVflags(r24) #if FPVECDBG
lis r0,HIGH_ADDR(CutTrace) oris r0,r0,LOW_ADDR(CutTrace) #endif
rlwinm r22,r24,0,0,19 stw r25,ACT_MACT_VMXlvl(r8) stw r19,SAVflags(r24) #if FPVECDBG
lis r0,HIGH_ADDR(CutTrace) oris r0,r0,LOW_ADDR(CutTrace) #endif
lwz r23,SACvrswap(r22) xor r23,r24,r23 stw r23,PP_QUICKFRET(r10) invliveve: lis r20,hi16(EXT(real_ncpus)) ori r20,r20,lo16(EXT(real_ncpus)) lwz r20,0(r20) li r2,0 invlivevl: cmplw r23,r10 beq invlivevn invliveva: lwarx r0,r25,r23 bne invlivevn bne- invliveva invlivevn: mr. r20,r20 bgt invlivevl setena: sync
setenanr: rlwinm r20,r12,(((31-vectorCngbit)+(MSR_VEC_BIT+1))&31),vectorCngbit,vectorCngbit beq setenaa lwz r5,ACT_MACT_SPF(r8) or r5,r5,r20 stw r5,ACT_MACT_SPF(r8)
setenaa: stw r12,savesrr1(r3) mfdec r24 mr. r24,r24 ble- chkenax segtb: mftbu r20 mftbu r19 cmplw cr1,r20,r19 bne- cr1,segtb subfc r6,r21,r23 subfe r5,r20,r22 andc. r12,r5,r0 bne chkenax bge+ chkenax mtdec r13 chkenax: lwz r6,SAVflags(r3)
#if DEBUG
lwz r20,SAVact(r3) lwz r21,CPU_ACTIVE_THREAD(r21) beq- yeswereok cmplw r21,r20
lis r0,hi16(Choke) mr r21,r3 sc yeswereok:
#endif
rlwinm r5,r3,0,0,19 lwz r5,SACvrswap(r5) xor r3,r3,r5 /* Convert savearea pointer to its physical address for exception exit */
/*
* Here's where we handle the fastpath stuff
* We'll do what we can here because registers are already
* loaded and it will be less confusing that moving them around.
 * If we need to though, we'll branch off somewhere else.
*
* Registers when we get here:
*
* r0 = syscall number
* r4 = savearea/pcb
* r13 = activation
* r14 = previous savearea (if any)
* r16 = thread
* r25 = per_proc
*/
/*
 * NOTE(review): only the cr3 compare of the fastpath dispatcher survives
 * here -- the branches that dispatch on the fast-trap number (and any other
 * fast-trap entries) appear to be missing, so control would fall straight
 * through into CthreadSetSelfNumber.  Reconcile with the reference source.
 */
.align 5
fastpath: cmplwi cr3,r0,0x7FF1
/*
 * void cthread_set_self(cproc_t p)
 *
 * sets thread state "user_value"
 *
 * This op is invoked as follows:
 * li r0, CthreadSetSelfNumber // load the fast-trap number
 * sc // invoke fast-trap
 * blr
 *
 */
CthreadSetSelfNumber:
lwz r5,saver3(r4) /* Retrieve the self number */
stw r5,CTHREAD_SELF(r13) /* Remember it */
stw r5,UAW(r25) /* Prime the per_proc_info with it */
.globl EXT(fastexit)
EXT(fastexit):
lwz r8,SAVflags(r4) /* Pick up the flags */
rlwinm r9,r4,0,0,19 /* Round down to the base savearea block */
rlwinm r8,r8,0,1,31 /* Clear the attached bit */
lwz r9,SACvrswap(r9) /* Get the conversion from virtual to real */
stw r8,SAVflags(r4) /* Set the flags */
xor r3,r4,r9 /* Switch savearea to physical addressing */
b EXT(exception_exit) /* Go back to the caller... */
/*
* Here's where we check for a hit on the Blue Box Assist
* Most registers are non-volatile, so be careful here. If we don't
* recognize the trap instruction we go back for regular processing.
* Otherwise we transfer to the assist code.
*/
/*
 * checkassist -- Blue Box assist hit test; exitFromVM / retFromVM -- entry
 * and return glue for the virtual machine monitor.
 * NOTE(review): all three routines below have many instructions collapsed
 * onto single lines and appear truncated (e.g. no terminating branch after
 * retFromVM's stores) -- restore from the reference source before use.
 */
.align 5
checkassist:
lwz r0,saveexception(r4) lwz r26,ACT_MACT_BEDA(r13) lwz r24,ACT_MACT_BTS(r13) lwz r27,savesrr0(r4) sub r24,r27,r24 cmplwi cr1,r24,BB_MAX_TRAP btlr- cr0_eq /* Compute offset of trap PC from table start; return if not assistable */
.align 5
exitFromVM: mr r30,r4 /* Save the savearea pointer for the VM exit path */
b EXT(vmm_exit) .align 5
.globl EXT(retFromVM)
LEXT(retFromVM)
mfsprg r10,0 lwz r4,SAVprev(r30) lwz r11,SAVflags(r30) lwz r1,ACT_THREAD(r8)
stw r4,ACT_MACT_PCB(r8) lwz r5,THREAD_KERNEL_STACK(r1) stw r5,ACT_MACT_KSP(r8)
/*
 * chandler -- "choke" (fatal kernel error) handler: switch to the per-cpu
 * debug stack (spinning forever if we choke a second time) and call the C
 * routine SysChoked.
 * NOTE(review): lines are heavily collapsed here and sequences appear
 * truncated (e.g. the cmplwi result is never consumed in what is visible);
 * restore from the reference source before use.
 */
.align 5
.globl EXT(chandler)
LEXT(chandler) /* Choke handler */
lis r25,hi16(EXT(trcWork)) ori r25,r25,lo16(EXT(trcWork))
mfsprg r25,0 lwz r1,PP_DEBSTACKPTR(r25) bne chokefirst chokespin: addi r31,r31,1 addi r31,r31,1 addi r31,r31,1 b chokespin chokefirst: li r0,-1 lwz r10,saver1(r4) bne chokestart lwz r2,PP_DEBSTACK_TOP_SS(r25)
cmplwi r11,KERNEL_STACK_SIZE-FM_SIZE-TRAP_SPACE_NEEDED
subi r1,r10,FM_REDZONE chokestart: li r0,0
bl EXT(SysChoked)
#if VERIFYSAVE
/*
 * versave -- savearea consistency checker used only when VERIFYSAVE is
 * enabled.  Walks the default_pset thread list checking the FPU/vector
 * savearea chains, trapping to the debugger (BREAKPOINT_TRAP) on an
 * inconsistency.  Only the "#if 1" variant is active; the "#if 0" blocks
 * are disabled alternates.
 * NOTE(review): like the rest of this file, the lines are collapsed
 * (multiple instructions/labels per line) and loop bodies appear
 * truncated; this is diagnostic-only code, but restore the original
 * layout before enabling VERIFYSAVE.
 */
versave:
#if 0
lis r28,hi16(EXT(default_pset)) ori r28,r28,lo16(EXT(default_pset)) li r20,0 lwz r27,psthreadcnt(r28) lwz r28,psthreads(r28)
fcknxtth: mr. r27,r27
lwz r26,THREAD_TOP_ACT(r28) fckact: mr. r26,r26
lwz r28,THREAD_PSTHRN(r28) b fcknxtth fckact2: lwz r20,ACT_MACT_FPU(r26) beq+ fcknact fckact3: lwz r20,SAVprefp(r20) beq+ fcknact lwz r29,SAVlvlfp(r20) cmplwi r29,1
lis r27,hi16(EXT(DebugWork)) stw r27,0(r27)
fcknact: lwz r26,ACT_LOWER(r26) #endif
#if 1
lis r28,hi16(EXT(default_pset)) ori r28,r28,lo16(EXT(default_pset)) li r20,0 lwz r27,psthreadcnt(r28) lwz r28,psthreads(r28)
fcknxtth: mr. r27,r27
lwz r26,THREAD_TOP_ACT(r28) fckact: mr. r26,r26
lwz r28,THREAD_PSTHRN(r28) b fcknxtth fckact2: lwz r20,ACT_MACT_FPU(r26) li r22,0 fckact3: mr. r20,r20
addi r22,r22,1 lwz r21,SAVflags(r20) bne+ fckact3a ori r27,r27,lo16(EXT(DebugWork)) BREAKPOINT_TRAP fckact3a: cmplwi r22,1 lwz r21,SAVlvlfp(r20) bne+ fckact3b ori r27,r27,lo16(EXT(DebugWork)) BREAKPOINT_TRAP fckact3b: lwz r21,SAVact(r20) beq+ fckact3c ori r27,r27,lo16(EXT(DebugWork)) BREAKPOINT_TRAP fckact3c: mr. r21,r21
lis r27,hi16(EXT(DebugWork)) stw r27,0(r27)
fckact4: stb r29,SAVflags+3(r20) b fckact3 fckact5: lwz r20,ACT_MACT_FPU(r26)
fckact6: mr. r20,r20
stb r29,SAVflags+3(r20) b fckact6 fcknact: lwz r26,ACT_LOWER(r26) #endif
#if 0
lis r28,hi16(EXT(default_pset)) ori r28,r28,lo16(EXT(default_pset)) li r20,0 lwz r27,psthreadcnt(r28) lwz r28,psthreads(r28)
cknxtth: mr. r27,r27
lwz r26,THREAD_TOP_ACT(r28) ckact: mr. r26,r26
lwz r28,THREAD_PSTHRN(r28) b cknxtth ckact2: lwz r29,ACT_MACT_PCB(r26) cknorm: mr. r29,r29
addi r20,r20,1 lwz r29,SAVprev(r29)
cknormd: lwz r29,ACT_MACT_FPU(r26) ckfpu: mr. r29,r29
lwz r21,SAVflags(r29) bne- cknfpu addi r20,r20,1 cknfpu: lwz r29,SAVprefp(r29)
ckfpud: lwz r29,ACT_MACT_VMX(r26) ckvmx: mr. r29,r29
lwz r21,SAVflags(r29) bne- cknvmx addi r20,r20,1 cknvmx: lwz r29,SAVprevec(r29)
ckvmxd: lwz r26,ACT_LOWER(r26)
cktotal: lis r28,hi16(EXT(saveanchor)) ori r28,r28,lo16(EXT(saveanchor))
lwz r21,SVinuse(r28) sub. r29,r21,r20 sub r26,r29,r27 bltlr+ badsave: lis r27,hi16(EXT(DebugWork)) stw r27,0(r27) #endif
#endif