/*
* Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
* The contents of this file constitute Original Code as defined in and
* are subject to the Apple Public Source License Version 1.1 (the
* "License"). You may not use this file except in compliance with the
* License. Please obtain a copy of the License at
* http://www.apple.com/publicsource and read it before using this file.
*
* This Original Code and all software distributed under the License are
* distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
* License for the specific language governing rights and limitations
* under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
/* void _longjmp(jmp_buf env, int val) */

/* Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
*
* File: sys/ppc/_longjmp.s
*
* Implements _longjmp()
*
* History:
* 8 September 1998 Matt Watson (mwatson@apple.com)
* Created. Derived from longjmp.s
*/
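/*
 * Illustrative usage sketch (not part of this file): unlike setjmp/longjmp,
 * the underscore variants do not save or restore the signal mask.  As the
 * tail of this routine shows, a "val" of 0 is rewritten to 1 so that _setjmp
 * never appears to return 0 twice:
 *
 *	#include <setjmp.h>
 *
 *	static jmp_buf env;
 *
 *	static void handler(void) {
 *		_longjmp(env, 0);	// arrives at _setjmp as 1, never 0
 *	}
 *
 *	int main(void) {
 *		if (_setjmp(env) == 0)	// first return: direct, value 0
 *			handler();
 *		return 0;		// second return: via _longjmp
 *	}
 */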
#include <architecture/ppc/asm_help.h>
#include "_setjmp.h"
#define VRSave 256
/* special flag bit definitions copied from /osfmk/ppc/thread_act.h */
#define floatUsedbit 1
#define vectorUsedbit 2
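/*
 * The rlwinm instructions below isolate these single bits using IBM bit
 * numbering (bit 0 is the most significant bit of the word).  A rough C
 * equivalent, for illustration only:
 *
 *	unsigned vrs_used  = flags & (1u << (31 - vectorUsedbit));
 *	unsigned fprs_used = flags & (1u << (31 - floatUsedbit));
 */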
#if defined(__DYNAMIC__)
.data
.non_lazy_symbol_pointer
.align 2
L_memmove$non_lazy_ptr:
.indirect_symbol _memmove
.long 0
.non_lazy_symbol_pointer
.align 2
L__cpu_has_altivec$non_lazy_ptr:
.indirect_symbol __cpu_has_altivec
.long 0
.text
#endif
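/* In dynamic code, _memmove and __cpu_has_altivec are reached through the
 * non-lazy symbol pointers above; the "bcl 20,31,1f" / "mflr" sequences
 * below compute a pic-base so those pointers can be addressed relative to
 * the current instruction. */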
LEAF(__longjmp)
lwz r5,JMP_flags(r3)
lwz r6,JMP_addr_at_setjmp(r3)
rlwinm r7,r5,0,vectorUsedbit,vectorUsedbit
rlwinm r8,r5,0,floatUsedbit,floatUsedbit
cmpw cr1,r3,r6			/* jmp_buf still at same address as at setjmp? */
cmpwi cr3,r7,0			/* set cr3 iff VRs were in use */
cmpwi cr4,r8,0			/* set cr4 iff FPRs were in use */
beq+ cr1,LRestoreVRs

/* jmp_buf was moved since setjmp (or is uninitialized.)
 * Move the VRs and FPRs so they are quadword aligned at the present address. */

stw r3,JMP_addr_at_setjmp(r3)	/* update, in case we longjmp to this buf again */
mr r31,r4			/* save "val" arg across memmove */
mr r30,r3			/* and jmp_buf ptr across memmove */
addi r3,r3,JMP_vr_base_addr
addi r4,r6,JMP_vr_base_addr
rlwinm r3,r3,0,0,27		/* r3 <- QW-aligned addr where they should be */
rlwinm r4,r4,0,0,27		/* r4 <- QW-aligned addr where they were at setjmp */
sub r7,r4,r6			/* r7 <- offset of VRs/FPRs within jmp_buf */
add r4,r30,r7			/* r4 <- where they are now */
li r5,(JMP_buf_end - JMP_vr_base_addr)
#if defined(__DYNAMIC__)
bcl 20,31,1f			/* get pic-base into LR */
1: mflr r12
addis r12, r12, ha16(L_memmove$non_lazy_ptr - 1b)
lwz r12, lo16(L_memmove$non_lazy_ptr - 1b)(r12)
mtctr r12			/* address of _memmove left by dyld */
bctrl
#else
bl _memmove
#endif
mr r3,r30			/* restore jmp_buf ptr after memmove */
mr r4,r31			/* and the "val" arg */

/* Restore VRs if any were in use at setjmp:
 *	cr3 - bne iff VRs in use
 *	cr4 - bne iff FPRs in use */
LRestoreVRs:
beq+ cr3,LZeroVRSave		/* no VRs in use at setjmp */
lwz r0,JMP_vrsave(r3)
addi r6,r3,JMP_vr_base_addr
cmpwi r0,0			/* any live VRs? */
mtspr VRSave,r0
beq+ LRestoreFPRs
lvx v20,0,r6
li r7,16*1
lvx v21,r7,r6
li r7,16*2
lvx v22,r7,r6
li r7,16*3
lvx v23,r7,r6
li r7,16*4
lvx v24,r7,r6
li r7,16*5
lvx v25,r7,r6
li r7,16*6
lvx v26,r7,r6
li r7,16*7
lvx v27,r7,r6
li r7,16*8
lvx v28,r7,r6
li r7,16*9
lvx v29,r7,r6
li r7,16*10
lvx v30,r7,r6
li r7,16*11
lvx v31,r7,r6
b LRestoreFPRs			/* skip zeroing VRSave */

/* Zero VRSave iff Altivec is supported but VRs were not in use at setjmp
 * time.  This covers the case where VRs are first used after the setjmp
 * but before the longjmp, leaving VRSave nonzero at the longjmp. */

LZeroVRSave:
#if defined(__DYNAMIC__)
bcl 20,31,1f
1: mflr r9			/* get our address */
addis r6,r9,ha16(L__cpu_has_altivec$non_lazy_ptr - 1b)
lwz r7,lo16(L__cpu_has_altivec$non_lazy_ptr - 1b)(r6)
lwz r7,0(r7)			/* load the Altivec flag */
#else
lis r7, ha16(__cpu_has_altivec)
lwz r7, lo16(__cpu_has_altivec)(r7)
#endif
cmpwi r7,0
li r8,0
beq LRestoreFPRs		/* no Altivec, so skip */
mtspr VRSave,r8			/* zero the sticky VRSave */
LRestoreFPRs:
beq cr4,LRestoreGPRs		/* FPRs not in use at setjmp */
addi r6,r3,JMP_fp_base_addr
rlwinm r6,r6,0,0,27		/* mask off low 4 bits to quadword align */
lfd f14,0*8(r6)
lfd f15,1*8(r6)
lfd f16,2*8(r6)
lfd f17,3*8(r6)
lfd f18,4*8(r6)
lfd f19,5*8(r6)
lfd f20,6*8(r6)
lfd f21,7*8(r6)
lfd f22,8*8(r6)
lfd f23,9*8(r6)
lfd f24,10*8(r6)
lfd f25,11*8(r6)
lfd f26,12*8(r6)
lfd f27,13*8(r6)
lfd f28,14*8(r6)
lfd f29,15*8(r6)
lfd f30,16*8(r6)
lfd f31,17*8(r6)
LRestoreGPRs:
lwz r31, JMP_r31(r3)
/* r1, r2, r13-r30 */
lwz r1, JMP_r1 (r3)
lwz r2, JMP_r2 (r3)
lwz r13, JMP_r13(r3)
lwz r14, JMP_r14(r3)
lwz r15, JMP_r15(r3)
lwz r16, JMP_r16(r3)
lwz r17, JMP_r17(r3)
lwz r18, JMP_r18(r3)
lwz r19, JMP_r19(r3)
lwz r20, JMP_r20(r3)
lwz r21, JMP_r21(r3)
lwz r22, JMP_r22(r3)
lwz r23, JMP_r23(r3)
lwz r24, JMP_r24(r3)
lwz r25, JMP_r25(r3)
lwz r26, JMP_r26(r3)
lwz r27, JMP_r27(r3)
lwz r28, JMP_r28(r3)
lwz r29, JMP_r29(r3)
lwz r30, JMP_r30(r3)
lwz r0, JMP_cr(r3)
mtcrf 0xff,r0
lwz r0, JMP_lr(r3)
mtlr r0
lwz r0, JMP_ctr(r3)
mtctr r0
lwz r0, JMP_xer(r3)
mtxer r0
mr. r3, r4			/* set return value and test it for zero */
bnelr				/* return "val" if it was nonzero */
li r3, 1			/* never let setjmp return 0 a second time */
blr
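/*
 * Why the memmove path at entry exists (illustrative sketch, not part of
 * this file): a jmp_buf may be copied to a different address between
 * _setjmp and _longjmp, which changes the quadword alignment of the saved
 * VR/FPR area.  JMP_addr_at_setjmp records the original address so the
 * entry code can detect the move and realign the save area:
 *
 *	#include <setjmp.h>
 *	#include <string.h>
 *
 *	static jmp_buf a, b;
 *
 *	int main(void) {
 *		if (_setjmp(a) == 0) {
 *			memcpy(b, a, sizeof(jmp_buf));	// buf now at new address
 *			_longjmp(b, 5);			// takes the memmove path
 *		}
 *		return 0;	// reached with _setjmp having returned 5
 *	}
 */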