/*
* Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. The rights granted to you under the License
* may not be used to create, or enable the creation or redistribution of,
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
*
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
#include <mach_assert.h>
#include <mach_ldebug.h>
#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <assym.s>
/* Lock-word flag bits (low-order bits of the interlock/rw data word). */
#define STRING ascii
#define ILK_LOCKED 0x01
#define WAIT_FLAG 0x02
#define WANT_UPGRADE 0x04
#define WANT_EXCL 0x08
/* Thread flag: this thread owns the funnel (tested in funnel routines). */
#define TH_FN_OWNED 0x01
# volatile CR bits
/* CR bit numbers used as per-call flags across these routines:
   hwtimeout = "panic on lock timeout"; mlckmiss = "mutex miss already counted". */
#define hwtimeout 20
#define mlckmiss 21
/* Offset of the data word within a rw-lock structure. */
#define RW_DATA 0
/*
 * PROLOG(space): push a stack frame, saving CR (r2), LR (r0), and the
 * incoming r3/r11 in the argument save area so slow/panic paths can
 * still report the lock address after register reuse.
 */
#define PROLOG(space) \
stwu r1,-(FM_ALIGN(space)+FM_SIZE)(r1) __ASMNL__ \
mfcr r2 __ASMNL__ \
mflr r0 __ASMNL__ \
stw r3,FM_ARG0(r1) __ASMNL__ \
stw r11,FM_ARG0+0x04(r1) __ASMNL__ \
stw r2,(FM_ALIGN(space)+FM_SIZE+FM_CR_SAVE)(r1) __ASMNL__ \
stw r0,(FM_ALIGN(space)+FM_SIZE+FM_LR_SAVE)(r1) __ASMNL__
/*
 * EPILOG: pop the frame and restore LR.  CR is restored separately at
 * the call sites that need it.
 */
#define EPILOG \
lwz r1,0(r1) __ASMNL__ \
lwz r0,FM_LR_SAVE(r1) __ASMNL__ \
mtlr r0 __ASMNL__
/*
* void hw_lock_init(hw_lock_t)
*
* Initialize a hardware lock.
*/
.align 5
.globl EXT(hw_lock_init)
LEXT(hw_lock_init)
/* NOTE(review): this line appears truncated by extraction -- a store of
   r0 into the lock word (stw r0,0(r3)) is presumably missing between the
   li and the blr; verify against the original source. */
li r0, 0 blr
/*
* unsigned int hw_lock_bit(hw_lock_t, unsigned int bit, unsigned int timeout)
*
* Try to acquire spin-lock. The second parameter is the bit mask to test and set.
* multiple bits may be set. Return success (1) or failure (0).
* Attempt will fail after timeout ticks of the timebase.
*/
.align 5
.globl EXT(hw_lock_bit)
LEXT(hw_lock_bit)
/* hwtimeout set => panic rather than fail on timeout.
   NOTE(review): instructions merged onto one line; fall-through path lost. */
crset hwtimeout mr r4,r5
/*
* void hw_lock_lock(hw_lock_t)
*
* Acquire lock, spinning until it becomes available.
* Return with preemption disabled.
* We will just set a default timeout and jump into the NORMAL timeout lock.
*/
.align 5
.globl EXT(hw_lock_lock)
LEXT(hw_lock_lock)
crclr hwtimeout li r12,ILK_LOCKED
/* lockDisa: common interlock entry used by mutex slow paths (bl lockDisa). */
lockDisa:
crset hwtimeout li r12,ILK_LOCKED
/*
* unsigned int hw_lock_to(hw_lock_t, unsigned int timeout)
*
* Try to acquire spin-lock. Return success (1) or failure (0).
* Attempt will fail after timeout ticks of the timebase.
* We try fairly hard to get this lock. We disable for interruptions, but
* reenable after a "short" timeout (128 ticks, we may want to change this).
* After checking to see if the large timeout value (passed in) has expired and a
* sufficient number of cycles have gone by (to insure pending 'rupts are taken),
* we return either in abject failure, or disable and go back to the lock sniff routine.
* If the sniffer finds the lock free, it jumps right up and tries to grab it.
*/
.align 5
.globl EXT(hw_lock_to)
LEXT(hw_lock_to)
crset hwtimeout lckcomm:
mfsprg r6,1 addi r5,r5,1 mr r5,r3
/* lcktry: lwarx/stwcx. reservation loop to set ILK_LOCKED atomically.
   LEXT(hwllckPatch_isync) looks like a runtime-patchable isync site -- confirm. */
lcktry: lwarx r6,0,r5 or r6,r6,r12 stwcx. r6,0,r5 li r3,1 LEXT(hwllckPatch_isync)
isync
/* lckspin: store to lgKillResv cancels any outstanding reservation. */
lckspin: li r6,lgKillResv
mr. r4,r4 lis r4,hi16(EXT(LockTimeOut)) lwz r4,0(r4) mr. r8,r8 lis r0,hi16(MASK(MSR_VEC)) ori r0,r0,lo16(MASK(MSR_FP)) andc r9,r9,r0 mtmsr r7 mftb r8
lockspin1: mtmsr r7
/* lcksniff: poll the lock word with plain loads; only retry the lwarx
   when it looks free.  Reenables interrupts every 128 timebase ticks. */
lcksniff: lwz r3,0(r5) beq++ lckretry mftb r10 cmplwi r10,128
mtmsr r9 subi r4,r4,128 mr. r4,r4 or r4,r4,r4 ble-- lckfail lckretry:
mtmsr r9 b lcktry
/* lckfail: timed out.  With hwtimeout set we panic instead of returning 0. */
lckfail: li r3,0 lckpanic:
mr r4,r5
mr r5,r3
lis r3,hi16(lckpanic_str) bl EXT(panic)
BREAKPOINT_TRAP lckpanic_str:
STRINGD "timeout on attempt to acquire lock (0x%08X), value = 0x%08X\n\000"
.text
/*
* void hw_lock_unlock(hw_lock_t)
*
* Unconditionally release lock.
* Release preemption level.
*/
.align 5
.globl EXT(hw_lock_unlock)
LEXT(hw_lock_unlock)
/* The hwulckPatch_* labels are exported, apparently so the sync
   instruction choice (isync vs eieio) can be patched at boot -- confirm. */
.globl EXT(hwulckPatch_isync)
LEXT(hwulckPatch_isync)
isync
.globl EXT(hwulckPatch_eieio)
LEXT(hwulckPatch_eieio)
eieio
/* NOTE(review): the store of r0 into the lock word appears to be missing
   here; tail-branches to epStart to reenable preemption. */
li r0, 0
b epStart /*
* unsigned int hw_unlock_bit(hw_lock_t, unsigned int bit)
*
* Release bit based spin-lock. The second parameter is the bit mask to clear.
* Multiple bits may be cleared.
*
*/
.align 5
.globl EXT(hw_unlock_bit)
LEXT(hw_unlock_bit)
.globl EXT(hwulckbPatch_isync)
LEXT(hwulckbPatch_isync)
isync
.globl EXT(hwulckbPatch_eieio)
LEXT(hwulckbPatch_eieio)
eieio
/* NOTE(review): the andc that clears the bit mask (r4) from r0 appears
   to have been lost from this merged line. */
ubittry: lwarx r0,0,r3 stwcx. r0,0,r3
b epStart /*
* unsigned int hw_lock_mbits(hw_lock_t, unsigned int bits, unsigned int value,
* unsigned int newb, unsigned int timeout)
*
* Try to acquire spin-lock. The second parameter is the bit mask to check.
* The third is the value of those bits and the 4th is what to set them to.
* Return success (1) or failure (0).
* Attempt will fail after timeout ticks of the timebase.
* We try fairly hard to get this lock. We disable for interruptions, but
* reenable after a "short" timeout (128 ticks, we may want to shorten this).
* After checking to see if the large timeout value (passed in) has expired and a
* sufficient number of cycles have gone by (to insure pending 'rupts are taken),
* we return either in abject failure, or disable and go back to the lock sniff routine.
* If the sniffer finds the lock free, it jumps right up and tries to grab it.
*/
.align 5
.globl EXT(hw_lock_mbits)
LEXT(hw_lock_mbits)
li r10,0
/* NOTE(review): merged line; the compare/branch that decides between
   mbitgot and mbitspin appears incomplete. */
mbittry: lwarx r12,0,r3 andc r12,r12,r4 cmplw r0,r5 stwcx. r12,0,r3 b mbittry .align 5
/* mbitspin: kill the reservation, then sniff with plain loads. */
mbitspin: li r11,lgKillResv
mr. r10,r10 lis r0,hi16(MASK(MSR_VEC)) ori r0,r0,lo16(MASK(MSR_FP)) andc r9,r9,r0 mtmsr r8 mftb r10 mbitspin0:
mtmsr r8 mbitsniff:
lwz r12,0(r3) cmplw r0,r5
mftb r11 cmplwi r11,128
/* Reenable interrupts each 128 ticks and charge them against the timeout. */
mtmsr r9 subi r7,r7,128 mr. r7,r7
ble-- mbitfail mbitretry:
mtmsr r9 b mbittry
.align 5
mbitgot:
li r3,1 LEXT(hwlmlckPatch_isync)
isync
/* Return 0 on timeout. */
mbitfail: li r3,0
/*
* unsigned int hw_cpu_sync(unsigned int *, unsigned int timeout)
*
* Spin until word hits 0 or timeout.
* Return success (1) or failure (0).
* Attempt will fail after timeout ticks of the timebase.
*
* The theory is that a processor will bump a counter as it signals
* other processors. Then it will spin until the counter hits 0 (or
* times out). The other processors, as they receive the signal, will
* decrement the counter.
*
* The other processors use interlocked update to decrement, this one
* does not need to interlock.
*/
.align 5
.globl EXT(hw_cpu_sync)
LEXT(hw_cpu_sync)
/* Plain-load poll loop; beqlr- returns success when the word hits 0.
   NOTE(review): register setup (r9/r12) appears lost in line merging. */
mftb r10 li r3,1 csynctry: lwz r11,0(r9) beqlr-
sub r12,r12,r10 bge+ csynctry li r3,0
/*
* unsigned int hw_cpu_wcng(unsigned int *, unsigned int, unsigned int timeout)
*
* Spin until word changes or timeout.
* Return success (1) or failure (0).
* Attempt will fail after timeout ticks of the timebase.
*
* This is used to insure that a processor passes a certain point.
* An example of use is to monitor the last interrupt time in the
* per_proc block. This can be used to insure that the other processor
* has seen at least one interrupt since a specific time.
*/
.align 5
.globl EXT(hw_cpu_wcng)
LEXT(hw_cpu_wcng)
/* Same shape as hw_cpu_sync, but bnelr- returns when the word CHANGES. */
mftb r10 li r3,1 wcngtry: lwz r11,0(r9) bnelr-
sub r12,r12,r10 bge+ wcngtry li r3,0
/*
* unsigned int hw_lock_try(hw_lock_t)
*
* Try to acquire spin-lock. Return success (1) or failure (0)
* Returns with preemption disabled on success.
*
*/
.align 5
.globl EXT(hw_lock_try)
LEXT(hw_lock_try)
/* Strip VEC/FP enables from the saved MSR, then run with interrupts off. */
lis r0,hi16(MASK(MSR_VEC)) ori r0,r0,lo16(MASK(MSR_FP)) andc r9,r9,r0
mtmsr r7 lwz r5,0(r3) bne-- .L_lock_try_failed .L_lock_try_loop:
lwarx r5,0,r3 andi. r6,r5,ILK_LOCKED bne-- .L_lock_try_failedX stwcx. r5,0,r3
.globl EXT(hwltlckPatch_isync)
LEXT(hwltlckPatch_isync)
isync
/* Success: bump the preemption count in the current activation (sprg1). */
mfsprg r6,1 addi r5,r5,1
mtmsr r9 blr
.L_lock_try_failedX:
/* Kill the dangling reservation before returning failure. */
li r6,lgKillResv
.L_lock_try_failed:
mtmsr r9 blr
/*
* unsigned int hw_lock_held(hw_lock_t)
*
* Return 1 if lock is held
* Doesn't change preemption state.
* N.B. Racy, of course.
*/
.align 5
.globl EXT(hw_lock_held)
LEXT(hw_lock_held)
/* NOTE(review): the load of the lock word appears lost; the andi. tests
   r3 (the pointer) rather than the lock value as written. */
isync andi. r6,r3,ILK_LOCKED
/*
* uint32_t hw_compare_and_store(uint32_t oldval, uint32_t newval, uint32_t *dest)
*
* Compare old to area if equal, store new, and return true
* else return false and no store
* This is an atomic operation
*/
.align 5
.globl EXT(hw_compare_and_store)
LEXT(hw_compare_and_store)
/* Classic CAS via lwarx/cmplw/stwcx.; merged onto one line here. */
mr r6,r3 cstry: lwarx r9,0,r5 cmplw cr0,r9,r6 stwcx. r4,0,r5 .globl EXT(hwcsatomicPatch_isync)
LEXT(hwcsatomicPatch_isync)
isync
/* Failure path: cancel reservation, return 0.
   NOTE(review): first li should target lgKillResv via a different reg --
   as written it overwrites the return value; verify against original. */
csfail: li r3,lgKillResv
li r3,0
/*
* uint32_t hw_atomic_add(uint32_t *dest, uint32_t delt)
*
* Atomically add the second parameter to the first.
* Returns the result.
*
*/
.align 5
.globl EXT(hw_atomic_add)
LEXT(hw_atomic_add)
/* NOTE(review): the add and the stwcx. retry branch appear lost from this line. */
mr r6,r3 addtry: lwarx r3,0,r6 stwcx. r3,0,r6 blr
/*
* uint32_t hw_atomic_sub(uint32_t *dest, uint32_t delt)
*
* Atomically subtract the second parameter from the first.
* Returns the result.
*
*/
.align 5
.globl EXT(hw_atomic_sub)
LEXT(hw_atomic_sub)
/* NOTE(review): as in hw_atomic_add, the arithmetic op and retry branch
   appear to have been lost when lines were merged. */
mr r6,r3 subtry: lwarx r3,0,r6 stwcx. r3,0,r6 blr
/*
* uint32_t hw_atomic_or(uint32_t *dest, uint32_t mask)
*
* Atomically ORs the second parameter into the first.
* Returns the result.
*/
.align 5
.globl EXT(hw_atomic_or)
LEXT(hw_atomic_or)
mr r6,r3 ortry: lwarx r3,0,r6 stwcx. r3,0,r6 blr
/*
* uint32_t hw_atomic_and(uint32_t *dest, uint32_t mask)
*
* Atomically ANDs the second parameter with the first.
* Returns the result.
*
*/
.align 5
.globl EXT(hw_atomic_and)
LEXT(hw_atomic_and)
mr r6,r3 andtry: lwarx r3,0,r6 stwcx. r3,0,r6 blr
/*
* void hw_queue_atomic(unsigned int * anchor, unsigned int * elem, unsigned int disp)
*
* Atomically inserts the element at the head of the list
* anchor is the pointer to the first element
* element is the pointer to the element to insert
* disp is the displacement into the element to the chain pointer
*
* NOTE: OSEnqueueAtomic() is aliased to this, see xnu/libkern/Makefile
*/
.align 5
.globl EXT(hw_queue_atomic)
LEXT(hw_queue_atomic)
/* Single-element enqueue: first == last == elem, then shared tail. */
mr r7,r4 b hw_queue_comm /*
* void hw_queue_atomic_list(unsigned int * anchor, unsigned int * first, unsigned int * last, unsigned int disp)
*
* Atomically inserts the list of elements at the head of the list
* anchor is the pointer to the first element
* first is the pointer to the first element to insert
* last is the pointer to the last element to insert
* disp is the displacement into the element to the chain pointer
*/
.align 5
.globl EXT(hw_queue_atomic_list)
LEXT(hw_queue_atomic_list)
mr r7,r5
/* Common push loop: link old head behind last, CAS new head in.
   NOTE(review): the stores/stwcx. appear lost from the merged line below. */
hw_queue_comm:
lwarx r9,0,r3 eieio bne-- hw_queue_comm blr /*
* unsigned int *hw_dequeue_atomic(unsigned int *anchor, unsigned int disp)
*
* Atomically removes the first element in a list and returns it.
* anchor is the pointer to the first element
* disp is the displacement into the element to the chain pointer
* Returns element if found, 0 if empty.
*
* NOTE: OSDequeueAtomic() is aliased to this, see xnu/libkern/Makefile
*/
.align 5
.globl EXT(hw_dequeue_atomic)
LEXT(hw_dequeue_atomic)
mr r5,r3 hw_dequeue_comm:
/* Pop loop: empty list returns 0 after killing the reservation. */
lwarx r3,0,r5 beq-- hdcFail stwcx. r9,0,r5 b hw_dequeue_comm hdcFail: li r4,lgKillResv blr
/*
* Routines for mutex lock debugging.
*/
/*
* Gets lock check flags in CR6: CR bits 24-27
*/
#define CHECK_SETUP(rg) \
lbz rg,lglcksWork(0) __ASMNL__ \
mtcrf 2,rg __ASMNL__
/*
* Checks for expected lock type.
* Panics (with the mutex address and bad type word) when MUTEX_ATTR_DEBUG
* is set, type checking is not disabled, and the tag does not match.
*/
#define CHECK_MUTEX_TYPE() \
bf MUTEX_ATTR_DEBUGb,1f __ASMNL__ \
bt 24+disLktypeb,1f __ASMNL__ \
lwz r10,MUTEX_TYPE(r3) __ASMNL__ \
cmpwi r10,MUTEX_TAG __ASMNL__ \
beq++ 1f __ASMNL__ \
PROLOG(0) __ASMNL__ \
mr r4,r11 __ASMNL__ \
mr r5,r10 __ASMNL__ \
lis r3,hi16(not_a_mutex) __ASMNL__ \
ori r3,r3,lo16(not_a_mutex) __ASMNL__ \
bl EXT(panic) __ASMNL__ \
BREAKPOINT_TRAP __ASMNL__ \
1:
.data
not_a_mutex:
STRINGD "mutex (0x%08X) not a mutex type (0x%08X)\n\000"
.text
/*
* Verifies return to the correct thread in "unlock" situations.
* Panics "not held" when the owner field is clear, or "wrong thread"
* when the owner is not the current activation (sprg1).
*/
#define CHECK_THREAD(thread_offset) \
bf MUTEX_ATTR_DEBUGb,3f __ASMNL__ \
bt 24+disLkThreadb,3f __ASMNL__ \
mfsprg r10,1 __ASMNL__ \
lwz r5,MUTEX_DATA(r3) __ASMNL__ \
rlwinm. r9,r5,0,0,29 __ASMNL__ \
bne++ 1f __ASMNL__ \
lis r3,hi16(not_held) __ASMNL__ \
ori r3,r3,lo16(not_held) __ASMNL__ \
b 2f __ASMNL__ \
1: __ASMNL__ \
cmpw r9,r10 __ASMNL__ \
beq++ 3f __ASMNL__ \
mr r5,r10 __ASMNL__ \
mr r6,r9 __ASMNL__ \
lis r3,hi16(wrong_thread) __ASMNL__ \
ori r3,r3,lo16(wrong_thread) __ASMNL__ \
2: __ASMNL__ \
mr r4,r11 __ASMNL__ \
PROLOG(0) __ASMNL__ \
bl EXT(panic) __ASMNL__ \
BREAKPOINT_TRAP __ASMNL__ \
3:
.data
not_held:
STRINGD "mutex (0x%08X) not held\n\000"
wrong_thread:
STRINGD "mutex (0x%08X) unlocked by non-owner(0x%08X), current owner(0x%08X)\n\000"
.text
/*
* Panics on a recursive lock attempt: owner field == current activation.
*/
#define CHECK_MYLOCK() \
bf MUTEX_ATTR_DEBUGb,1f __ASMNL__ \
bt 24+disLkMyLckb,1f __ASMNL__ \
mfsprg r10,1 __ASMNL__ \
lwz r9,MUTEX_DATA(r3) __ASMNL__ \
rlwinm r9,r9,0,0,29 __ASMNL__ \
cmpw r9,r10 __ASMNL__ \
bne++ 1f __ASMNL__ \
mr r4,r11 __ASMNL__ \
lis r3, hi16(mylock_attempt) __ASMNL__ \
ori r3,r3,lo16(mylock_attempt) __ASMNL__ \
bl EXT(panic) __ASMNL__ \
BREAKPOINT_TRAP __ASMNL__ \
1:
.data
mylock_attempt:
STRINGD "mutex (0x%08X) recursive lock attempt\n\000"
.text
/*
* LCK_STACK: capture up to MUTEX_FRAMES-1 return addresses by walking the
* caller's stack chain into the mutex's debug stack area.  A frame jump
* larger than 8K ends the walk and the remaining slots are zero-filled.
*/
#define LCK_STACK(lck, stack, lck_stack, frame_cnt, lr_save, tmp) \
bf 24+enaLkExtStckb,3f __ASMNL__ \
addi lck_stack,lck,MUTEX_STACK __ASMNL__ \
li frame_cnt,MUTEX_FRAMES-1 __ASMNL__ \
1: __ASMNL__ \
mr tmp,stack __ASMNL__ \
lwz stack,0(stack) __ASMNL__ \
xor tmp,stack,tmp __ASMNL__ \
cmplwi tmp,8192 __ASMNL__ \
bge-- 2f __ASMNL__ \
lwz lr_save,FM_LR_SAVE(stack) __ASMNL__ \
stwu lr_save,4(lck_stack) __ASMNL__ \
subi frame_cnt,frame_cnt,1 __ASMNL__ \
cmpi cr0,frame_cnt,0 __ASMNL__ \
bne 1b __ASMNL__ \
b 3f __ASMNL__ \
2: __ASMNL__ \
li tmp,0 __ASMNL__ \
stwu tmp,4(lck_stack) __ASMNL__ \
subi frame_cnt,frame_cnt,1 __ASMNL__ \
cmpi cr0,frame_cnt,0 __ASMNL__ \
bne 2b __ASMNL__ \
3:
/*
* void mutex_init(mutex_t* l, etap_event_t etap)
*
* Clears the data word and promoted priority; under MACH_LDEBUG also
* tags the mutex and zero-fills the debug stack area.
*/
.align 5
.globl EXT(mutex_init)
LEXT(mutex_init)
PROLOG(0)
li r10,0
stw r10,MUTEX_DATA(r3) sth r10,MUTEX_PROMOTED_PRI(r3)
#if MACH_LDEBUG
li r11,MUTEX_ATTR_DEBUG
stw r10,MUTEX_STACK(r3) li r9, MUTEX_TAG
stw r9, MUTEX_TYPE(r3) addi r8,r3,MUTEX_STACK-4
li r9,MUTEX_FRAMES
/* Zero-fill loop for the debug stack slots.
   NOTE(review): the decrement of r9 appears lost from the merged line,
   which as written would loop forever; verify against original. */
mlistck:
stwu r10,4(r8) cmpi cr0,r9,0
bne mlistck
#endif /* MACH_LDEBUG */
EPILOG
blr
/*
* void lck_mtx_lock_ext(lck_mtx_ext_t*)
*
* Full (debug/statistics) mutex lock path.  Under MACH_LDEBUG this is
* also the mutex_lock/_mutex_lock entry.
*/
.align 5
.globl EXT(lck_mtx_lock_ext)
LEXT(lck_mtx_lock_ext)
#if MACH_LDEBUG
.globl EXT(mutex_lock)
LEXT(mutex_lock)
.globl EXT(_mutex_lock)
LEXT(_mutex_lock)
#endif
/* r11 keeps the original lock pointer for panic reporting; low attr
   bits are moved into CR for the bf/bt tests below. */
mr r11,r3 lwz r0,MUTEX_ATTR(r3)
mtcrf 1,r0 CHECK_MUTEX_TYPE()
bf MUTEX_ATTR_DEBUGb,L_mutex_lock_assert_wait_2
PROLOG(0)
/* Debug check: panic if called where a blocking wait is not possible. */
bl EXT(assert_wait_possible)
mr. r3,r3
bne L_mutex_lock_assert_wait_1
lis r3,hi16(L_mutex_lock_assert_wait_panic_str)
ori r3,r3,lo16(L_mutex_lock_assert_wait_panic_str)
bl EXT(panic)
BREAKPOINT_TRAP .data
L_mutex_lock_assert_wait_panic_str:
STRINGD "mutex lock attempt with assert_wait_possible false\n\000"
.text
L_mutex_lock_assert_wait_1:
lwz r3,FM_ARG0(r1)
lwz r11,FM_ARG0+0x04(r1)
lwz r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
mtcr r2
EPILOG
L_mutex_lock_assert_wait_2:
/* Statistics bump of GRP_MTX_STAT_UTIL, then the interlock attempt.
   NOTE(review): heavy line merging here; several loads/branches lost. */
mfsprg r6,1 lwz r5,MUTEX_GRP(r3) mlckestatloop:
lwarx r8,r7,r5 stwcx. r8,r7,r5 mr. r8,r8 lwz r8,GRP_MTX_STAT_UTIL(r5) stw r8,GRP_MTX_STAT_UTIL(r5) lwz r5,MUTEX_DATA(r3) li r8,0
lis r0,hi16(MASK(MSR_VEC)) ori r0,r0,lo16(MASK(MSR_FP)) andc r9,r9,r0 mtmsr r7 mr. r5,r5
mlcketry:
lwarx r5,MUTEX_DATA,r3 bne-- mlckespin0 bne-- mlcketry LEXT(mlckePatch_isync)
isync bf MUTEX_ATTR_DEBUGb,mlckedebskip
/* Debug: record the owning thread and capture the caller stack. */
mr r8,r6 stw r8,MUTEX_THREAD(r3) LCK_STACK(r3,r5,r6,r7,r8,r10)
mlckedebskip:
mtmsr r9
mlckespin0:
li r5,lgKillResv mlckespin01:
mflr r12
mtmsr r9 mtmsr r7 b mlcketry
/*
* void lck_mtx_lock(lck_mtx_t*)
*
* Fast-path mutex lock.  Also mutex_lock/_mutex_lock when !MACH_LDEBUG.
* Fast case grabs the word with lwarx/stwcx.; contention falls through
* to the adaptive spin (mlckspin*) and finally the blocking slow path.
*/
.align 5
.globl EXT(lck_mtx_lock)
LEXT(lck_mtx_lock)
#if !MACH_LDEBUG
.globl EXT(mutex_lock)
LEXT(mutex_lock)
.globl EXT(_mutex_lock)
LEXT(_mutex_lock)
#endif
mfsprg r6,1 mr r11,r3 li r8,0
li r9,0
mr. r5,r5
mlcktry:
/* NOTE(review): merged line; the stwcx. and the compare feeding these
   branches appear to have been lost. */
lwarx r5,MUTEX_DATA,r3 bne-- mlckspin01 bne-- mlcktry LEXT(mlckPatch_isync)
isync
mlckspin00:
/* MUTEX_IND: this lck_mtx is an indirect pointer to an ext lock. */
cmpli cr0,r5,MUTEX_IND lwz r3,MUTEX_PTR(r3) mlckspin01:
li r5,lgKillResv mlckspin02:
mflr r12
li r0,0
mtcrf 1,r0 mtlr r12
b mlcktry
/* Adaptive spin: bounded by MutexSpin; mlckmiss CR bit ensures the
   per-group miss counter is bumped at most once per acquisition. */
mlckspin1:
mr. r4,r4 lis r4,hi16(EXT(MutexSpin)) lwz r4,0(r4) bne++ mlckspin2 b mlckslow1 mlckspin2: mr. r8,r8 crclr mlckmiss bne++ mlckspin3 mfmsr r9 ori r7,r0,lo16(MASK(MSR_EE)) andc r7,r9,r7 isync b mlcksniff
mlckspin3: mtmsr r7
mlcksniff: lwz r5,MUTEX_DATA(r3) beq++ mlckretry beq++ mlckslow0 andi. r5,r5,ILK_LOCKED bt mlckmiss,mlStatSkip lwz r5,MUTEX_GRP(r3) mlStatLoop:
lwarx r6,0,r5 stwcx. r6,0,r5 mfsprg r6,1 lwz r2,ACT_MACT_SPF(r10) beq mlckslow0 ori r2,r2,lo16(TH_OPT_DELAYIDLE) and. r10,r10,r2
mftb r10 cmplwi r10,128
mtmsr r9 subi r4,r4,128 mr. r4,r4
ble-- mlckslow1 mlckretry:
mtmsr r9 blr
mlckslow0:
mlckslow1:
mtlr r12
PROLOG(0)
.L_ml_retry:
/* Take the interlock; panic (mutex_failed1) if that itself times out. */
bl lockDisa lwz r3,FM_ARG0(r1) mr r4,r11 ori r3,r3,lo16(mutex_failed1) BREAKPOINT_TRAP .data
mutex_failed1:
STRINGD "attempt to interlock mutex (0x%08X) failed on mutex lock\n\000"
.text
mlGotInt:
/* Got the interlock; if the mutex is free take it, else record waiter. */
lwz r4,MUTEX_DATA(r3) bne- mlInUse bf++ MUTEX_ATTR_DEBUGb,mlDebSkip
CHECK_SETUP(r5)
mfsprg r9,1 lwz r6,FM_LR_SAVE(r5) stw r6,MUTEX_STACK(r3) LCK_STACK(r3,r5,r6,r7,r8,r10)
mlDebSkip:
mr r3,r11 lwz r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
mfsprg r5,1
mtcr r2
mr. r4,r3
lwz r3,FM_ARG0(r1) beq mlUnlock
ori r5,r5,WAIT_FLAG
mlUnlock: eieio
stw r5,MUTEX_DATA(r3) EPILOG
/* Contended: bump wait stats, set WAIT_FLAG, block, then retry. */
mlInUse:
CHECK_SETUP(r12)
CHECK_MYLOCK()
bf MUTEX_ATTR_STATb,mlStatSkip2 bt mlckmiss,mlStatSkip1 li r9,GRP_MTX_STAT_MISS+4 lwarx r8,r9,r5 stwcx. r8,r9,r5 mlStatSkip1:
lwz r9,GRP_MTX_STAT_WAIT+4(r5) stw r9,GRP_MTX_STAT_WAIT+4(r5) ori r4,r4,WAIT_FLAG rlwinm r4,r4,0,0,29 stw r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
mr r3,r11
lwz r3,FM_ARG0(r1) lwz r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
mtcr r2
b .L_ml_retry
/*
* void lck_mtx_try_lock(_extlck_mtx_ext_t*)
*
* Debug/statistics try-lock; returns without blocking.
*/
.align 5
.globl EXT(lck_mtx_try_lock_ext)
LEXT(lck_mtx_try_lock_ext)
#if MACH_LDEBUG
.globl EXT(mutex_try)
LEXT(mutex_try)
.globl EXT(_mutex_try)
LEXT(_mutex_try)
#endif
mr r11,r3 lwz r0,MUTEX_ATTR(r3)
mtcrf 1,r0 CHECK_MUTEX_TYPE()
/* Optional utilization count, then interlock attempt with ints off.
   NOTE(review): many instructions merged onto this line. */
bf MUTEX_ATTR_STATb,mlteStatSkip li r7,GRP_MTX_STAT_UTIL+4 lwarx r8,r7,r5 stwcx. r8,r7,r5 mr. r8,r8 lwz r8,GRP_MTX_STAT_UTIL(r5) stw r8,GRP_MTX_STAT_UTIL(r5) mfsprg r6,1 mr. r5,r5 mfmsr r9 ori r0,r0,lo16(MASK(MSR_FP)) andc r9,r9,r0 mtmsr r7
mlteLoopTry:
lwarx r5,MUTEX_DATA,r3 bne-- mlteSlowX bne-- mlteLoopTry LEXT(mltelckPatch_isync)
isync bf MUTEX_ATTR_DEBUGb,mlteDebSkip
mr r8,r6 stw r8,MUTEX_THREAD(r3) LCK_STACK(r3,r5,r6,r7,r8,r10)
mlteDebSkip:
/* Success: return TRUE. */
li r3, 1
mtmsr r9 mlteSlowX:
li r5,lgKillResv mtmsr r9
/*
* void lck_mtx_try_lock(lck_mtx_t*)
*
* Fast-path try-lock.  Also mutex_try/_mutex_try when !MACH_LDEBUG.
*/
.align 5
.globl EXT(lck_mtx_try_lock)
LEXT(lck_mtx_try_lock)
#if !MACH_LDEBUG
.globl EXT(mutex_try)
LEXT(mutex_try)
.globl EXT(_mutex_try)
LEXT(_mutex_try)
#endif
mfsprg r6,1 mr r11,r3 bne-- mltSlow00 mltLoopTry:
lwarx r5,MUTEX_DATA,r3 bne-- mltSlow01 bne-- mltLoopTry LEXT(mltlckPatch_isync)
isync blr
mltSlow00:
/* Indirect lock: chase MUTEX_PTR and re-enter. */
cmpli cr0,r5,MUTEX_IND lwz r3,MUTEX_PTR(r3) mltSlow01:
li r5,lgKillResv
mltSlow02:
li r0,0
mtcrf 1,r0 L_mutex_try_slow:
PROLOG(0)
/* Interlock; panic (mutex_failed2) on interlock timeout. */
lwz r6,MUTEX_DATA(r3) bne- mtFail bl lockDisa lwz r3,FM_ARG0(r1) mr r4,r11 ori r3,r3,lo16(mutex_failed2) BREAKPOINT_TRAP .data
mutex_failed2:
STRINGD "attempt to interlock mutex (0x%08X) failed on mutex lock try\n\000"
.text
mtGotInt:
lwz r4,MUTEX_DATA(r3) bne- mtInUse bf++ MUTEX_ATTR_DEBUGb,mtDebSkip
CHECK_SETUP(r5)
mfsprg r9,1 lwz r6,FM_LR_SAVE(r5) stw r6,MUTEX_STACK(r3) LCK_STACK(r3,r5,r6,r7,r8,r10)
mtDebSkip:
mr r3,r11 mfsprg r5,1
mr. r4,r3
lwz r3,FM_ARG0(r1) beq mtUnlock
ori r5,r5,WAIT_FLAG
mtUnlock: eieio
stw r5,MUTEX_DATA(r3) bl epStart li r3, 1
EPILOG
/* Busy: optionally count the miss, drop the interlock, return FALSE. */
mtInUse:
bf++ MUTEX_ATTR_STATb,mtStatSkip li r9,GRP_MTX_STAT_MISS+4 lwarx r8,r9,r5 stwcx. r8,r9,r5 mtStatSkip:
rlwinm r4,r4,0,0,30 bl epStart mtFail: li r3,0 blr
/*
* void mutex_unlock(mutex_t* l)
*
* NOTE(review): the #else below has no visible matching #if -- a
* conditional block (likely #if MACH_LDEBUG) appears to have been lost
* in extraction; verify against the original source.
*/
.align 5
.globl EXT(mutex_unlock)
LEXT(mutex_unlock)
sync
mr r11,r3 b mlueEnter1
#else
b mluEnter1
#endif
/*
* void lck_mtx_ext_unlock(lck_mtx_ext_t* l)
*
* Debug/statistics mutex unlock path.
*/
.align 5
.globl EXT(lck_mtx_ext_unlock)
LEXT(lck_mtx_ext_unlock)
#if MACH_LDEBUG
.globl EXT(mutex_unlock_rwcmb)
LEXT(mutex_unlock_rwcmb)
#endif
mlueEnter:
.globl EXT(mulckePatch_isync)
LEXT(mulckePatch_isync)
isync
.globl EXT(mulckePatch_eieio)
LEXT(mulckePatch_eieio)
eieio
mr r11,r3 lwz r0,MUTEX_ATTR(r3)
mtcrf 1,r0 CHECK_MUTEX_TYPE()
/* Verify the caller owns the mutex before releasing it. */
CHECK_THREAD(MUTEX_THREAD)
lwz r5,MUTEX_DATA(r3) bne-- L_mutex_unlock_slow lis r0,hi16(MASK(MSR_VEC)) ori r7,r0,lo16(MASK(MSR_EE)) andc r7,r9,r7 isync mlueLoop:
lwarx r5,MUTEX_DATA,r3
/* Low two bits (ILK_LOCKED | WAIT_FLAG) force the slow path. */
rlwinm. r4,r5,0,30,31 bne-- mlueSlowX
stwcx. r5,MUTEX_DATA,r3
bne-- mlueLoop
mtmsr r9
mlueSlowX:
li r5,lgKillResv mtmsr r9
/*
* void lck_mtx_unlock(lck_mtx_t* l)
*
* Fast-path mutex unlock.  Also mutex_unlock_rwcmb when !MACH_LDEBUG.
* Falls to L_mutex_unlock_slow when waiters/interlock bits are set.
*/
.align 5
.globl EXT(lck_mtx_unlock)
LEXT(lck_mtx_unlock)
#if !MACH_LDEBUG
.globl EXT(mutex_unlock_rwcmb)
LEXT(mutex_unlock_rwcmb)
#endif
mluEnter:
.globl EXT(mulckPatch_isync)
LEXT(mulckPatch_isync)
isync
.globl EXT(mulckPatch_eieio)
LEXT(mulckPatch_eieio)
eieio
mr r11,r3 lwz r5,MUTEX_DATA(r3) bne-- mluSlow0 mluLoop:
lwarx r5,MUTEX_DATA,r3
/* Any of ILK_LOCKED/WAIT_FLAG set => slow path.
   NOTE(review): the clear of the owner value before the stwcx. appears
   lost from the merged lines. */
rlwinm. r4,r5,0,30,31 bne-- mluSlowX
stwcx. r5,MUTEX_DATA,r3
bne-- mluLoop
blr
mluSlow0:
/* Indirect lock: chase MUTEX_PTR. */
cmpli cr0,r5,MUTEX_IND lwz r3,MUTEX_PTR(r3) mluSlowX:
li r5,lgKillResv
L_mutex_unlock_slow:
PROLOG(0)
/* Interlock; panic (mutex_failed3) on interlock timeout. */
bl lockDisa lwz r3,FM_ARG0(r1) mr r4,r11 ori r3,r3,lo16(mutex_failed3) BREAKPOINT_TRAP .data
mutex_failed3:
STRINGD "attempt to interlock mutex (0x%08X) failed on mutex unlock\n\000"
.text
muGotInt:
lwz r4,MUTEX_DATA(r3)
/* Wake waiters (WAIT_FLAG) before clearing the data word. */
andi. r5,r4,WAIT_FLAG beq+ muUnlock mr r3,r11 lwz r3,FM_ARG0(r1) lwz r5,MUTEX_DATA(r3) muUnlock:
andi. r5,r5,WAIT_FLAG stw r5,MUTEX_DATA(r3) EPILOG
/*
* void lck_mtx_assert(lck_mtx_t* l, unsigned int)
*
* Panics if the ownership assertion (OWNED / NOTOWNED) fails.
*/
.align 5
.globl EXT(lck_mtx_assert)
LEXT(lck_mtx_assert)
.globl EXT(_mutex_assert)
LEXT(_mutex_assert)
mr r11,r3
maEnter:
lwz r5,MUTEX_DATA(r3)
/* Indirect lock: chase MUTEX_PTR, then compare owner with sprg1. */
cmpli cr0,r5,MUTEX_IND lwz r3,MUTEX_PTR(r3) maCheck:
mfsprg r6,1 cmpwi r4,MUTEX_ASSERT_OWNED
cmplw cr1,r6,r5 bne-- maNext
mr r4,r11
lis r3,hi16(mutex_assert1) b maPanic cmpwi r4,MUTEX_ASSERT_NOTOWNED bnelr++
maPanic:
PROLOG(0)
mr r4,r11
/* NOTE(review): the ori of the low half of the panic string address and
   the maNext label appear lost in line merging above. */
lis r3,hi16(mutex_assert2) bl EXT(panic)
.data
mutex_assert1:
STRINGD "mutex (0x%08X) not owned\n\000"
mutex_assert2:
STRINGD "mutex (0x%08X) owned\n\000"
.text
/*
* void lck_mtx_ilk_unlock(lck_mtx *lock)
*
* Clears ILK_LOCKED (bit 31 of the rlwinm mask range 0..30 kept) and
* reenables preemption via epStart.
*/
.globl EXT(lck_mtx_ilk_unlock)
LEXT(lck_mtx_ilk_unlock)
lwz r10,MUTEX_DATA(r3)
rlwinm r10,r10,0,0,30
eieio
stw r10,MUTEX_DATA(r3)
b epStart /*
* void _enable_preemption_no_check(void)
*
* This version does not check if we get preempted or not
*/
.align 4
.globl EXT(_enable_preemption_no_check)
LEXT(_enable_preemption_no_check)
/* Force cr1_eq set so epCommn skips the preemption check. */
cmplw cr1,r1,r1
/*
* void _enable_preemption(void)
*
* This version checks if we get preempted or not
*/
.align 5
.globl EXT(_enable_preemption)
LEXT(_enable_preemption)
epStart:
/* Clear cr1_eq: the check variant.  Falls into the common path. */
cmplwi cr1,r1,0 epCommn:
/* Decrement ACT_PREEMPT_CNT; going negative is a panic (epTooFar).
   NOTE(review): the decrement/store appear lost in line merging. */
mfsprg r3,1 lwz r5,ACT_PREEMPT_CNT(r3) blt- epTooFar crandc cr0_eq,cr0_eq,cr1_eq
beq+ epCheckPreempt epTooFar:
mr r4,r5
lis r3,hi16(epTooFarStr) PROLOG(0)
bl EXT(panic)
BREAKPOINT_TRAP .data
epTooFarStr:
STRINGD "enable_preemption: preemption_level %d\n\000"
.text
.align 5
epCheckPreempt:
/* With interrupts disabled, take AST_URGENT via ast_taken if pending. */
lis r0,hi16(MASK(MSR_VEC)) ori r0,r0,lo16(MASK(MSR_FP)) beq+ epCPno andc r9,r9,r0 mtmsr r7 lwz r3,ACT_PER_PROC(r3) li r5,AST_URGENT and. r7,r7,r5 mtmsr r9 beqlr+ blr /*
* void disable_preemption(void)
*
* Here is where we disable preemption.
*/
.align 5
.globl EXT(_disable_preemption)
LEXT(_disable_preemption)
/* Bump ACT_PREEMPT_CNT in the current activation (sprg1).
   NOTE(review): the lwz/stw around the addi appear lost. */
mfsprg r6,1 addi r5,r5,1 blr /*
* int get_preemption_level(void)
*
* Return the current preemption level
*/
.align 5
.globl EXT(get_preemption_level)
LEXT(get_preemption_level)
mfsprg r6,1 blr /*
* void ppc_usimple_lock_init(simple_lock_t, etap_event_t)
*
* Initialize a simple lock.
*/
.align 5
.globl EXT(ppc_usimple_lock_init)
LEXT(ppc_usimple_lock_init)
/* NOTE(review): store of r0 to the lock word appears missing. */
li r0, 0 blr
/*
* void lck_spin_lock(lck_spin_t *)
* void ppc_usimple_lock(simple_lock_t *)
*
* Spin until acquired; same sniff/timeout shape as hw_lock_to, with a
* deadlock-detection panic on timeout.
*/
.align 5
.globl EXT(lck_spin_lock)
LEXT(lck_spin_lock)
.globl EXT(ppc_usimple_lock)
LEXT(ppc_usimple_lock)
mfsprg r6,1 addi r5,r5,1 mr r5,r3 li r4,0 slcktry: lwarx r11,SLOCK_ILK,r5 ori r11,r6,ILK_LOCKED stwcx. r11,SLOCK_ILK,r5 .globl EXT(slckPatch_isync)
LEXT(slckPatch_isync)
isync
/* Contended: kill the reservation and spin with plain loads. */
slckspin: li r11,lgKillResv
mr. r4,r4 lis r4,hi16(EXT(LockTimeOut)) lwz r4,0(r4) slockspin0: mr. r8,r8 lis r0,hi16(MASK(MSR_VEC)) ori r0,r0,lo16(MASK(MSR_FP)) andc r9,r9,r0 mtmsr r7 mftb r8
slockspin1: mtmsr r7
slcksniff: lwz r3,SLOCK_ILK(r5) beq++ slckretry mftb r10 cmplwi r10,128
/* Reenable interrupts every 128 ticks; charge against LockTimeOut. */
mtmsr r9 subi r4,r4,128 mr. r4,r4 or r4,r4,r4 ble-- slckfail slckretry:
mtmsr r9 b slcktry
/* Timeout: panic with lock address and caller PC. */
slckfail: ori r3,r3,lo16(slckpanic_str)
mr r4,r5
mflr r5
PROLOG(0)
bl EXT(panic)
BREAKPOINT_TRAP .data
slckpanic_str:
STRINGD "simple lock (0x%08X) deadlock detection, pc=0x%08X\n\000"
.text
/*
* boolean_t lck_spin_try_lock(lck_spin_t *)
* unsigned int ppc_usimple_lock_try(simple_lock_t *)
*
*/
.align 5
.globl EXT(lck_spin_try_lock)
LEXT(lck_spin_try_lock)
.globl EXT(ppc_usimple_lock_try)
LEXT(ppc_usimple_lock_try)
/* Interrupts off (VEC/FP stripped from saved MSR) for the attempt. */
lis r0,hi16(MASK(MSR_VEC)) ori r0,r0,lo16(MASK(MSR_FP)) andc r9,r9,r0 mtmsr r7
lwz r11,SLOCK_ILK(r3) bne-- slcktryfail slcktryloop:
/* NOTE(review): stwcx. stores r5 (the andi. scratch) rather than an
   ILK_LOCKED-or'ed value -- instructions appear lost here. */
lwarx r11,SLOCK_ILK,r3 andi. r5,r11,ILK_LOCKED bne-- slcktryfailX stwcx. r5,SLOCK_ILK,r3
.globl EXT(stlckPatch_isync)
LEXT(stlckPatch_isync)
isync
/* Success: bump preemption count; return with it disabled. */
lwz r5,ACT_PREEMPT_CNT(r6) stw r5,ACT_PREEMPT_CNT(r6) mtmsr r9 blr
slcktryfailX:
li r5,lgKillResv
slcktryfail:
mtmsr r9 blr
/*
* void lck_spin_unlock(lck_spin_t *)
* void ppc_usimple_unlock_rwcmb(simple_lock_t *)
*
* Release with patchable isync/eieio barrier, then reenable preemption.
*/
.align 5
.globl EXT(lck_spin_unlock)
LEXT(lck_spin_unlock)
.globl EXT(ppc_usimple_unlock_rwcmb)
LEXT(ppc_usimple_unlock_rwcmb)
li r0,0
.globl EXT(sulckPatch_isync)
LEXT(sulckPatch_isync)
isync
.globl EXT(sulckPatch_eieio)
LEXT(sulckPatch_eieio)
eieio
stw r0, SLOCK_ILK(r3)
b epStart /*
* void ppc_usimple_unlock_rwmb(simple_lock_t *)
*
* Variant using a full sync instead of the patchable barrier pair.
*/
.align 5
.globl EXT(ppc_usimple_unlock_rwmb)
LEXT(ppc_usimple_unlock_rwmb)
li r0,0
sync
stw r0, SLOCK_ILK(r3)
b epStart /*
* void enter_funnel_section(funnel_t *)
*
* Fast path takes the funnel's hw lock directly when kdebug tracing is
* off and split funnels are enabled; otherwise goes through
* thread_funnel_set(TRUE).
*/
.align 5
.globl EXT(enter_funnel_section)
LEXT(enter_funnel_section)
#if !MACH_LDEBUG
lis r10,hi16(EXT(kdebug_enable))
ori r10,r10,lo16(EXT(kdebug_enable))
lwz r10,0(r10)
lis r11,hi16(EXT(split_funnel_off))
ori r11,r11,lo16(EXT(split_funnel_off))
lwz r11,0(r11)
or. r10,r11,r10 mfsprg r6,1
/* NOTE(review): r7 (funnel hw-lock address) is used before any visible
   setup -- loads appear lost in line merging. */
lwz r5,0(r7) bne-- L_enter_funnel_section_slow L_enter_funnel_section_loop:
lwarx r5,0,r7 bne-- L_enter_funnel_section_slowX bne-- L_enter_funnel_section_loop LEXT(entfsectPatch_isync)
isync stw r3,THREAD_FUNNEL_LOCK(r6) blr
L_enter_funnel_section_slowX:
li r4,lgKillResv
L_enter_funnel_section_slow:
#endif
li r4,TRUE
b EXT(thread_funnel_set)
/*
* void exit_funnel_section(void)
*
* Fast path drops the funnel hw lock and clears the thread's funnel
* state; kdebug tracing forces the thread_funnel_set(FALSE) slow path.
*/
.align 5
.globl EXT(exit_funnel_section)
LEXT(exit_funnel_section)
/* NOTE(review): the preprocessor #if is fused onto a code line here. */
mfsprg r6,1 mr. r3,r3 #if !MACH_LDEBUG
lis r10,hi16(EXT(kdebug_enable))
ori r10,r10,lo16(EXT(kdebug_enable))
lwz r10,0(r10)
mr. r10,r10
bne- L_exit_funnel_section_slow .globl EXT(retfsectPatch_isync)
LEXT(retfsectPatch_isync)
isync
.globl EXT(retfsectPatch_eieio)
LEXT(retfsectPatch_eieio)
eieio
lwz r5,0(r7) bne-- L_exit_funnel_section_slow L_exit_funnel_section_loop:
lwarx r5,0,r7
rlwinm. r4,r5,0,30,31 bne-- L_exit_funnel_section_slowX
stwcx. r5,0,r7 li r7,0
stw r7,THREAD_FUNNEL_STATE(r6) blr L_exit_funnel_section_slowX:
li r4,lgKillResv
L_exit_funnel_section_slow:
#endif
li r4,FALSE
b EXT(thread_funnel_set)
L_exit_funnel_section_ret:
blr
/*
* void lck_rw_lock_exclusive(lck_rw_t*)
*
* Fast path sets WANT_EXCL when no conflicting bits are set; RW_IND
* (indirect) and contention divert to the ext/gen C paths.
*/
.align 5
.globl EXT(lck_rw_lock_exclusive)
LEXT(lck_rw_lock_exclusive)
#if !MACH_LDEBUG
.globl EXT(lock_write)
LEXT(lock_write)
#endif
/* NOTE(review): the test for reader count / conflicting bits and the
   stwcx. retry branch appear lost in the merged line below. */
rwleloop: lwarx r5,RW_DATA,r3 ori r6,r5,WANT_EXCL stwcx. r6,RW_DATA,r3 .globl EXT(rwlePatch_isync)
LEXT(rwlePatch_isync)
isync
blr
rwlespin:
li r4,lgKillResv cmpli cr0,r5,RW_IND mr r4,r3 b EXT(lck_rw_lock_exclusive_ext)
rwlespin1:
b EXT(lck_rw_lock_exclusive_gen)
/*
* void lck_rw_lock_shared(lck_rw_t*)
*
* Fast path bumps the reader count (upper halfword via addis).
*/
.align 5
.globl EXT(lck_rw_lock_shared)
LEXT(lck_rw_lock_shared)
#if !MACH_LDEBUG
.globl EXT(lock_read)
LEXT(lock_read)
#endif
rwlsloop: lwarx r5,RW_DATA,r3 addis r6,r5,1 stwcx. r6,RW_DATA,r3 .globl EXT(rwlsPatch_isync)
LEXT(rwlsPatch_isync)
isync
blr
rwlsspin:
li r4,lgKillResv cmpli cr0,r5,RW_IND mr r4,r3 b EXT(lck_rw_lock_shared_ext)
rwlsspin1:
b EXT(lck_rw_lock_shared_gen)
/*
* boolean_t lck_rw_lock_shared_to_exclusive(lck_rw_t*)
*
* Fast path succeeds only when this is the sole reader (upper halfword
* reader count == 1) and no one else wants the upgrade.
*/
.align 5
.globl EXT(lck_rw_lock_shared_to_exclusive)
LEXT(lck_rw_lock_shared_to_exclusive)
#if !MACH_LDEBUG
.globl EXT(lock_read_to_write)
LEXT(lock_read_to_write)
#endif
rwlseloop: lwarx r5,RW_DATA,r3 lis r8,0xFFFF and. r7,r6,r8 bne-- rwlsespin bne-- rwlseloop
.globl EXT(rwlsePatch_isync)
LEXT(rwlsePatch_isync)
isync
li r3,0 rwlsespin:
li r4,lgKillResv cmpli cr0,r5,RW_IND mr r4,r3 b EXT(lck_rw_lock_shared_to_exclusive_ext)
rwlsespin1:
b EXT(lck_rw_lock_shared_to_exclusive_gen)
/*
* void lck_rw_lock_exclusive_to_shared(lck_rw_t*)
*
* Downgrade: clear exclusive bits, add one reader, wake waiters.
*/
.align 5
.globl EXT(lck_rw_lock_exclusive_to_shared)
LEXT(lck_rw_lock_exclusive_to_shared)
#if !MACH_LDEBUG
.globl EXT(lock_write_to_read)
LEXT(lock_write_to_read)
#endif
.globl EXT(rwlesPatch_isync)
LEXT(rwlesPatch_isync)
isync
.globl EXT(rwlesPatch_eieio)
LEXT(rwlesPatch_eieio)
eieio
/* NOTE(review): branch targets collapse (bne rwlesexcl1 falls into
   rwlesexcl1) and the thread_wakeup call appears lost -- merged lines. */
rwlesloop: lwarx r5,RW_DATA,r3 bne-- rwlesspin andi. r10,r5,WANT_UPGRADE bne rwlesexcl1 rwlesexcl1:
andc r7,r5,r9 stwcx. r6,RW_DATA,r3 andi. r7,r5,WAIT_FLAG addi r3,r3,RW_EVENT rwlesspin:
li r4,lgKillResv cmpli cr0,r5,RW_IND mr r4,r3 b EXT(lck_rw_lock_exclusive_to_shared_ext)
rwlesspin1:
b EXT(lck_rw_lock_exclusive_to_shared_gen)
/*
* boolean_t lck_rw_try_lock_exclusive(lck_rw_t*)
*
* Fast path: set WANT_EXCL if nothing conflicts; returns 1/0.
*/
.align 5
.globl EXT(lck_rw_try_lock_exclusive)
LEXT(lck_rw_try_lock_exclusive)
lis r10,0xFFFF rwtleloop: lwarx r5,RW_DATA,r3 bne-- rwtlespin ori r6,r5,WANT_EXCL stwcx. r6,RW_DATA,r3 .globl EXT(rwtlePatch_isync)
LEXT(rwtlePatch_isync)
isync
li r3,1 rwtlefail:
li r4,lgKillResv li r3,0 rwtlespin:
li r4,lgKillResv cmpli cr0,r5,RW_IND mr r4,r3 b EXT(lck_rw_try_lock_exclusive_ext)
rwtlespin1:
b EXT(lck_rw_try_lock_exclusive_gen)
/*
* boolean_t lck_rw_try_lock_shared(lck_rw_t*)
*
* Fast path: bump the reader count if no exclusive holder; returns 1/0.
*/
.align 5
.globl EXT(lck_rw_try_lock_shared)
LEXT(lck_rw_try_lock_shared)
rwtlsloop: lwarx r5,RW_DATA,r3 bne-- rwtlsspin addis r6,r5,1 stwcx. r6,RW_DATA,r3 .globl EXT(rwtlsPatch_isync)
LEXT(rwtlsPatch_isync)
isync
li r3,1 rwtlsfail:
li r3,0 rwtlsspin:
li r4,lgKillResv cmpli cr0,r5,RW_IND mr r4,r3 b EXT(lck_rw_try_lock_shared_ext)
rwtlsspin1:
b EXT(lck_rw_try_lock_shared_gen)
/*
* lck_rw_type_t lck_rw_done(lck_rw_t*)
*
* Release a rw lock held either shared or exclusive; returns which kind
* was released (RW_SHARED / RW_EXCL).  Wakes waiters when WAIT_FLAG set.
*/
.align 5
.globl EXT(lck_rw_done)
LEXT(lck_rw_done)
#if !MACH_LDEBUG
.globl EXT(lock_done)
LEXT(lock_done)
#endif
.globl EXT(rwldPatch_isync)
LEXT(rwldPatch_isync)
isync
.globl EXT(rwldPatch_eieio)
LEXT(rwldPatch_eieio)
eieio
/* Shared release: drop one reader (addis -1 in upper halfword).
   NOTE(review): extensive line merging; the lwarx loop head and several
   branches appear lost. */
li r10,WAIT_FLAG mr r12,r3 andi. r8,r5,ILK_LOCKED and. r8,r5,r7 beq cr1,rwldexcl addis r6,r5,0xFFFF li r8,0 and r8,r6,r10 rwldshared1:
b rwldstore
/* Exclusive release: clear WANT_EXCL/WANT_UPGRADE. */
rwldexcl:
li r11,RW_EXCL and. r6,r5,r9 bne rwldexcl1 rwldexcl1:
andc r6,r5,r9 rwldstore:
stwcx. r6,RW_DATA,r3 mr. r8,r8 beqlr++
/* Waiters present: wake on the lock's event address. */
mr r3,r12 addi r3,r3,RW_EVENT lwz r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
mtcr r2
EPILOG
li r3,RW_SHARED li r3,RW_EXCL blr
rwldspin:
li r4,lgKillResv cmpli cr0,r5,RW_IND mr r4,r3 b EXT(lck_rw_done_ext)
rwldspin1:
b EXT(lck_rw_done_gen)
/*
* void lck_rw_ilk_lock(lck_rw_t *lock)
*
* Take the rw interlock; crclr hwtimeout = fail (don't panic) on timeout.
* NOTE(review): the branch into the common hw-lock path appears lost.
*/
.globl EXT(lck_rw_ilk_lock)
LEXT(lck_rw_ilk_lock)
crclr hwtimeout li r12,ILK_LOCKED
/*
* void lck_rw_ilk_unlock(lck_rw_t *lock)
*
* Release the rw interlock by clearing bit mask 1 via hw_unlock_bit.
*/
.globl EXT(lck_rw_ilk_unlock)
LEXT(lck_rw_ilk_unlock)
li r4,1
b EXT(hw_unlock_bit)