/*
* Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
* The contents of this file constitute Original Code as defined in and
* are subject to the Apple Public Source License Version 1.1 (the
* "License"). You may not use this file except in compliance with the
* License. Please obtain a copy of the License at
* http://www.apple.com/publicsource and read it before using this file.
*
* This Original Code and all software distributed under the License are
* distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
* License for the specific language governing rights and limitations
* under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
#include <cpus.h>
#include <mach_assert.h>
#include <mach_ldebug.h>
#include <mach_rt.h>
#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <assym.s>
#define STRING ascii
#define SWT_HI 0+FM_SIZE
#define SWT_LO 4+FM_SIZE
#define MISSED 8+FM_SIZE
#define ILK_LOCKED 0x01
#define WAIT_FLAG 0x02
#define TH_FN_OWNED 0x01
#define CHECKNMI 0
#define CHECKLOCKS 1
#define PROLOG(space) \
stwu r1,-(FM_ALIGN(space)+FM_SIZE)(r1) __ASMNL__ \
mflr r0 __ASMNL__ \
stw r3,FM_ARG0(r1) __ASMNL__ \
stw r0,(FM_ALIGN(space)+FM_SIZE+FM_LR_SAVE)(r1) __ASMNL__
#define EPILOG \
lwz r1,0(r1) __ASMNL__ \
lwz r0,FM_LR_SAVE(r1) __ASMNL__ \
mtlr r0 __ASMNL__
#if MACH_LDEBUG && CHECKLOCKS
/*
* Routines for general lock debugging.
*/
/*
* Gets lock check flags in CR6: CR bits 24-27
*/
#define CHECK_SETUP(rg) \
lbz rg,dgFlags(0) __ASMNL__ \
mtcrf 2,rg __ASMNL__
/*
* Checks for expected lock types and calls "panic" on
* mismatch. Detects calls to Mutex functions with
* type simplelock and vice versa.
*/
#define CHECK_MUTEX_TYPE() \
bt 24+disLktypeb,1f __ASMNL__ \
lwz r10,MUTEX_TYPE(r3) __ASMNL__ \
cmpwi r10,MUTEX_TAG __ASMNL__ \
beq+ 1f __ASMNL__ \
lis r3,hi16(not_a_mutex) __ASMNL__ \
ori r3,r3,lo16(not_a_mutex) __ASMNL__ \
bl EXT(panic) __ASMNL__ \
lwz r3,FM_ARG0(r1) __ASMNL__ \
1:
.data
not_a_mutex:
STRINGD "not a mutex!\n\000"
.text
#define CHECK_SIMPLE_LOCK_TYPE() \
bt 24+disLktypeb,1f __ASMNL__ \
lhz r10,SLOCK_TYPE(r3) __ASMNL__ \
cmpwi r10,USLOCK_TAG __ASMNL__ \
beq+ 1f __ASMNL__ \
lis r3,hi16(not_a_slock) __ASMNL__ \
ori r3,r3,lo16(not_a_slock) __ASMNL__ \
bl EXT(panic) __ASMNL__ \
lwz r3,FM_ARG0(r1) __ASMNL__ \
1:
.data
not_a_slock:
STRINGD "not a simple lock!\n\000"
.text
#define CHECK_NO_SIMPLELOCKS() \
bt 24+disLkNmSimpb,2f __ASMNL__ \
lis r10,hi16(MASK(MSR_VEC)) __ASMNL__ \
ori r10,r10,lo16(MASK(MSR_FP)) __ASMNL__ \
mfmsr r11 __ASMNL__ \
andc r11,r11,r10 __ASMNL__ \
ori r10,r10,lo16(MASK(MSR_EE)) __ASMNL__ \
andc r10,r11,r10 __ASMNL__ \
mtmsr r10 __ASMNL__ \
isync __ASMNL__ \
mfsprg r10,0 __ASMNL__ \
lwz r10,PP_SIMPLE_LOCK_CNT(r10) __ASMNL__ \
cmpwi r10,0 __ASMNL__ \
beq+ 1f __ASMNL__ \
lis r3,hi16(simple_locks_held) __ASMNL__ \
ori r3,r3,lo16(simple_locks_held) __ASMNL__ \
bl EXT(panic) __ASMNL__ \
lwz r3,FM_ARG0(r1) __ASMNL__ \
1: __ASMNL__ \
mtmsr r11 __ASMNL__ \
2:
.data
simple_locks_held:
STRINGD "simple locks held!\n\000"
.text
/*
* Verifies return to the correct thread in "unlock" situations.
*/
#define CHECK_THREAD(thread_offset) \
bt 24+disLkThreadb,2f __ASMNL__ \
lis r10,hi16(MASK(MSR_VEC)) __ASMNL__ \
ori r10,r10,lo16(MASK(MSR_FP)) __ASMNL__ \
mfmsr r11 __ASMNL__ \
andc r11,r11,r10 __ASMNL__ \
ori r10,r10,lo16(MASK(MSR_EE)) __ASMNL__ \
andc r10,r11,r10 __ASMNL__ \
mtmsr r10 __ASMNL__ \
isync __ASMNL__ \
mfsprg r10,1 __ASMNL__ \
lwz r10,ACT_THREAD(r10) __ASMNL__ \
cmpwi r10,0 __ASMNL__ \
beq- 1f __ASMNL__ \
lwz r9,thread_offset(r3) __ASMNL__ \
cmpw r9,r10 __ASMNL__ \
beq+ 1f __ASMNL__ \
lis r3,hi16(wrong_thread) __ASMNL__ \
ori r3,r3,lo16(wrong_thread) __ASMNL__ \
bl EXT(panic) __ASMNL__ \
lwz r3,FM_ARG0(r1) __ASMNL__ \
1: __ASMNL__ \
mtmsr r11 __ASMNL__ \
2:
.data
wrong_thread:
STRINGD "wrong thread!\n\000"
.text
#define CHECK_MYLOCK(thread_offset) \
bt 24+disLkMyLckb,2f __ASMNL__ \
lis r10,hi16(MASK(MSR_VEC)) __ASMNL__ \
ori r10,r10,lo16(MASK(MSR_FP)) __ASMNL__ \
mfmsr r11 __ASMNL__ \
andc r11,r11,r10 __ASMNL__ \
ori r10,r10,lo16(MASK(MSR_EE)) __ASMNL__ \
andc r10,r11,r10 __ASMNL__ \
mtmsr r10 __ASMNL__ \
isync __ASMNL__ \
mfsprg r10,1 __ASMNL__ \
lwz r10,ACT_THREAD(r10) __ASMNL__ \
cmpwi r10,0 __ASMNL__ \
beq- 1f __ASMNL__ \
lwz r9, thread_offset(r3) __ASMNL__ \
cmpw r9,r10 __ASMNL__ \
bne+ 1f __ASMNL__ \
lis r3, hi16(mylock_attempt) __ASMNL__ \
ori r3,r3,lo16(mylock_attempt) __ASMNL__ \
bl EXT(panic) __ASMNL__ \
lwz r3,FM_ARG0(r1) __ASMNL__ \
1: __ASMNL__ \
mtmsr r11 __ASMNL__ \
2:
.data
mylock_attempt:
STRINGD "mylock attempt!\n\000"
.text
#else /* MACH_LDEBUG */
#define CHECK_SETUP(rg)
#define CHECK_MUTEX_TYPE()
#define CHECK_SIMPLE_LOCK_TYPE()
#define CHECK_THREAD(thread_offset)
#define CHECK_NO_SIMPLELOCKS()
#define CHECK_MYLOCK(thread_offset)
#endif /* MACH_LDEBUG */
/*
* void hw_lock_init(hw_lock_t)
*
* Initialize a hardware lock.
*/
.align 5
.globl EXT(hw_lock_init)
LEXT(hw_lock_init)
			li		r0,0							; set lock word to free, i.e. 0
			stw		r0,0(r3)						; initialize the lock
			blr
/*
* void hw_lock_unlock(hw_lock_t)
*
* Unconditionally release lock.
* Release preemption level.
*/
.align 5
.globl EXT(hw_lock_unlock)
LEXT(hw_lock_unlock)
.globl EXT(hwulckPatch_isync)
LEXT(hwulckPatch_isync)
isync
.globl EXT(hwulckPatch_eieio)
LEXT(hwulckPatch_eieio)
eieio
			li		r0,0							; set lock word to free
			stw		r0,0(r3)						; release the lock

			b		epStart							; go enable preemption...

/*
* void hw_lock_lock(hw_lock_t)
*
* Acquire lock, spinning until it becomes available.
* Return with preemption disabled.
* We will just set a default timeout and jump into the NORMAL timeout lock.
*/
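/*
 * A minimal caller-side sketch in C, assuming the prototypes documented
 * above and the usual hw_lock_t typedefs; the lock instance and the
 * critical section body are illustrative, not from this file:
 *
 *	extern void hw_lock_init(hw_lock_t);
 *	extern void hw_lock_lock(hw_lock_t);
 *	extern void hw_lock_unlock(hw_lock_t);
 *
 *	static hw_lock_data_t my_lock;			// hypothetical lock instance
 *
 *	void
 *	example(void)
 *	{
 *		hw_lock_init(&my_lock);
 *		hw_lock_lock(&my_lock);			// spins until acquired
 *		// ... critical section, runs with preemption disabled ...
 *		hw_lock_unlock(&my_lock);		// release and reenable preemption
 *	}
 */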
.align 5
.globl EXT(hw_lock_lock)
LEXT(hw_lock_lock)
lockDisa:
			li		r4,0							; no timeout value
			b		lckcomm							; join the common timeout code below
/*
* unsigned int hw_lock_to(hw_lock_t, unsigned int timeout)
*
* Try to acquire spin-lock. Return success (1) or failure (0).
* Attempt will fail after timeout ticks of the timebase.
* We try fairly hard to get this lock. We disable for interruptions, but
* reenable after a "short" timeout (128 ticks, we may want to change this).
* After checking to see if the large timeout value (passed in) has expired and a
 * sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
* we return either in abject failure, or disable and go back to the lock sniff routine.
* If the sniffer finds the lock free, it jumps right up and tries to grab it.
*/
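/*
 * Caller-side sketch in C (the timeout constant and the panic message are
 * illustrative, not from this file):
 *
 *	extern unsigned int hw_lock_to(hw_lock_t, unsigned int timeout);
 *	extern void panic(const char *str, ...);
 *
 *	void
 *	example(hw_lock_t lock)
 *	{
 *		// Wait at most ~10000 timebase ticks for the lock, then give up.
 *		if (!hw_lock_to(lock, 10000))
 *			panic("example: spin lock timeout");
 *	}
 */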
.align 5
.globl EXT(hw_lock_to)
LEXT(hw_lock_to)
#if CHECKNMI
			mflr	r12								; (TEST/DEBUG)
			bl		EXT(ml_sense_nmi)				; (TEST/DEBUG)
			mtlr	r12								; (TEST/DEBUG)
#endif

lckcomm:
			mfsprg	r6,1							; get the current activation
			lwz		r5,ACT_PREEMPT_CNT(r6)			; get the preemption level
			addi	r5,r5,1							; bring up the disable count
			stw		r5,ACT_PREEMPT_CNT(r6)			; save it back
			mr		r5,r3							; get the address of the lock
			li		r8,0							; show that the MSR is not saved yet

lcktry:		lwarx	r6,0,r5							; grab the lock value
			andi.	r3,r6,ILK_LOCKED				; is it locked?
			ori		r6,r6,ILK_LOCKED				; set the interlock bit
			bne--	lckspin							; yes, wait for it to clear...
			stwcx.	r6,0,r5							; try to seize that there durn lock
			bne--	lcktry							; lost the reservation, try again...
			li		r3,1							; return true
			isync									; make sure we don't use a speculatively loaded value
			blr										; go on home...

lckspin:	li		r6,lgKillResv					; get killing field
			stwcx.	r6,0,r6							; kill reservation

			mr.		r4,r4							; test timeout value
			bne++	lockspin0						; use the one passed in...
			lis		r4,hi16(EXT(LockTimeOut))		; get the high part
			ori		r4,r4,lo16(EXT(LockTimeOut))	; and the low part
			lwz		r4,0(r4)						; get the default timeout value

lockspin0:	mr.		r8,r8							; first spin attempt?
			bne++	lockspin1						; no, MSR already saved...
			lis		r0,hi16(MASK(MSR_VEC))			; get vector enable
			mfmsr	r9								; get the MSR value
			ori		r0,r0,lo16(MASK(MSR_FP))		; get FP enable
			ori		r7,r0,lo16(MASK(MSR_EE))		; get EE bit on too
			andc	r9,r9,r0						; clear FP and VEC
			andc	r7,r9,r7						; clear EE as well
			mtmsr	r7								; turn off interruptions
			isync									; may have turned off vec and fp here
			mftb	r8								; get timestamp on entry
			b		lcksniff

lockspin1:	mtmsr	r7								; turn off interruptions
			mftb	r8								; get timestamp on entry

lcksniff:	lwz		r3,0(r5)						; get that lock in here
			andi.	r3,r3,ILK_LOCKED				; is it free yet?
			beq++	lckretry						; yes, try for it again...

			mftb	r10								; time stamp us now
			sub		r10,r10,r8						; get the elapsed time
			cmplwi	r10,128							; have we been spinning for 128 tb ticks?
			blt++	lcksniff						; not yet...

			mtmsr	r9								; say, any interrupts pending?
			subi	r4,r4,128						; back off elapsed time from timeout value
			or		r4,r4,r4						; do nothing here but force a single cycle delay
			mr.		r4,r4							; have we used the whole timeout?
			li		r3,0							; assume a timeout return code
			ble--	lckfail							; we failed...
			b		lockspin1						; the enable window is open, keep trying...

lckretry:	mtmsr	r9								; restore interrupt state
			li		r8,1							; insure that r8 is not 0
			b		lcktry

lckfail:	blr										; return in abject failure...
/*
* unsigned int hw_lock_bit(hw_lock_t, unsigned int bit, unsigned int timeout)
*
* Try to acquire spin-lock. The second parameter is the bit mask to test and set.
 * Multiple bits may be set. Return success (1) or failure (0).
* Attempt will fail after timeout ticks of the timebase.
* We try fairly hard to get this lock. We disable for interruptions, but
* reenable after a "short" timeout (128 ticks, we may want to shorten this).
* After checking to see if the large timeout value (passed in) has expired and a
 * sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
* we return either in abject failure, or disable and go back to the lock sniff routine.
* If the sniffer finds the lock free, it jumps right up and tries to grab it.
*/
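/*
 * Caller-side sketch in C. The bit assignment here is illustrative only;
 * the routine simply test-and-sets whatever mask it is handed:
 *
 *	extern unsigned int hw_lock_bit(hw_lock_t, unsigned int bit, unsigned int timeout);
 *	extern void hw_unlock_bit(hw_lock_t, unsigned int bit);
 *
 *	#define MY_ILK	0x00000001			// hypothetical interlock bit
 *
 *	void
 *	example(hw_lock_t word)
 *	{
 *		if (hw_lock_bit(word, MY_ILK, 10000)) {
 *			// ... bit held, word is interlocked ...
 *			hw_unlock_bit(word, MY_ILK);
 *		}
 *	}
 */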
.align 5
.globl EXT(hw_lock_bit)
LEXT(hw_lock_bit)
			li		r10,0							; show that the MSR is not saved yet

bittry:		lwarx	r6,0,r3							; grab the lock value
			and.	r0,r6,r4						; see if any of the lock bits are on
			or		r6,r6,r4						; turn on the lock bits
			bne--	bitspin							; yes, wait for them to clear...
			stwcx.	r6,0,r3							; try to seize that there durn lock
			bne--	bittry							; just start up again if the store failed...

			li		r3,1							; set good return code
			isync									; make sure we don't use a speculatively loaded value
			blr

			.align	5

bitspin:	li		r11,lgKillResv					; get killing field
			stwcx.	r11,0,r11						; kill reservation

			mr.		r10,r10							; is this the first pass?
			beq--	bit1sttime						; yes, go save the MSR...

bitspin0:	mtmsr	r7								; turn off interruptions
			mftb	r8								; get the low part of the time base

bitsniff:	lwz		r6,0(r3)						; get that lock in here
			and.	r0,r6,r4						; see if any of the lock bits are on
			beq++	bitretry						; all clear, try for it again...

			mftb	r6								; time stamp us now
			sub		r6,r6,r8						; get the elapsed time
			cmplwi	r6,128							; have we been spinning for 128 tb ticks?
			blt++	bitsniff						; not yet...

			mtmsr	r9								; say, any interrupts pending?
			subi	r5,r5,128						; back off elapsed time from timeout value
			or		r5,r5,r5						; do nothing here but force a single cycle delay
			mr.		r5,r5							; have we used the whole timeout?
			or		r5,r5,r5						; do nothing here but force a single cycle delay

			bgt++	bitspin0						; the enable window is open, keep trying...

			li		r3,0							; set failure return code
			blr										; return, head hanging low...

bitretry:	mtmsr	r9								; enable for interruptions
			b		bittry

bit1sttime:	lis		r0,hi16(MASK(MSR_VEC))			; get vector enable
			mfmsr	r9								; get the MSR value
			ori		r0,r0,lo16(MASK(MSR_FP))		; get FP enable
			ori		r7,r0,lo16(MASK(MSR_EE))		; get EE bit on too
			andc	r9,r9,r0						; clear FP and VEC
			andc	r7,r9,r7						; clear EE as well
			mtmsr	r7								; turn off interruptions
			isync									; may have turned off vec and fp here
			mftb	r8								; get the low part of the time base
			li		r10,1							; show we have been through once
			b		bitsniff
/*
* unsigned int hw_unlock_bit(hw_lock_t, unsigned int bit)
*
* Release bit based spin-lock. The second parameter is the bit mask to clear.
* Multiple bits may be cleared.
*
*/
.align 5
.globl EXT(hw_unlock_bit)
LEXT(hw_unlock_bit)
.globl EXT(hwulckbPatch_isync)
LEXT(hwulckbPatch_isync)
isync
.globl EXT(hwulckbPatch_eieio)
LEXT(hwulckbPatch_eieio)
eieio
ubittry:	lwarx	r0,0,r3							; grab the lock value
			andc	r0,r0,r4						; clear the lock bits
			stwcx.	r0,0,r3							; try to clear that there durn lock
			bne-	ubittry							; store failed, try again...

			blr										; leave...

/*
* unsigned int hw_lock_mbits(hw_lock_t, unsigned int bits, unsigned int value,
* unsigned int newb, unsigned int timeout)
*
* Try to acquire spin-lock. The second parameter is the bit mask to check.
 * The third is the value of those bits and the fourth is what to set them to.
* Return success (1) or failure (0).
* Attempt will fail after timeout ticks of the timebase.
* We try fairly hard to get this lock. We disable for interruptions, but
* reenable after a "short" timeout (128 ticks, we may want to shorten this).
* After checking to see if the large timeout value (passed in) has expired and a
 * sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
* we return either in abject failure, or disable and go back to the lock sniff routine.
* If the sniffer finds the lock free, it jumps right up and tries to grab it.
*/
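/*
 * Caller-side sketch in C of a guarded state transition (the state field
 * and values are illustrative, not from this file): wait until the masked
 * field equals "value", then atomically replace it with "newb".
 *
 *	extern unsigned int hw_lock_mbits(hw_lock_t, unsigned int bits,
 *		unsigned int value, unsigned int newb, unsigned int timeout);
 *	extern void panic(const char *str, ...);
 *
 *	#define ST_MASK	0x00000003			// hypothetical 2-bit state field
 *	#define ST_IDLE	0x00000000
 *	#define ST_BUSY	0x00000001
 *
 *	void
 *	example(hw_lock_t word)
 *	{
 *		// IDLE -> BUSY, but only when the field is currently IDLE:
 *		if (!hw_lock_mbits(word, ST_MASK, ST_IDLE, ST_BUSY, 10000))
 *			panic("example: state transition timed out");
 *	}
 */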
.align 5
.globl EXT(hw_lock_mbits)
LEXT(hw_lock_mbits)
			li		r10,0							; show that the MSR is not saved yet

mbittry:	lwarx	r12,0,r3						; grab the lock value
			and		r0,r12,r4						; isolate the bits we check
			andc	r12,r12,r4						; clear all bits in the bit mask
			or		r12,r12,r6						; turn on the new bits
			cmplw	r0,r5							; are these the right bits?
			bne--	mbitspin						; nope, wait for them to change...
			stwcx.	r12,0,r3						; try to seize that there durn lock
			beq++	mbitgot							; we got it, yahoo...
			b		mbittry							; just start up again if the store failed...

			.align	5

mbitspin:	li		r11,lgKillResv					; point to killing field
			stwcx.	r11,0,r11						; kill it

			mr.		r10,r10							; is this the first pass?
			bne--	mbitspin0						; no, MSR already saved...
			lis		r0,hi16(MASK(MSR_VEC))			; get vector enable
			mfmsr	r9								; get the MSR value
			ori		r0,r0,lo16(MASK(MSR_FP))		; get FP enable
			ori		r8,r0,lo16(MASK(MSR_EE))		; get EE bit on too
			andc	r9,r9,r0						; clear FP and VEC
			andc	r8,r9,r8						; clear EE as well
			mtmsr	r8								; turn off interruptions
			isync									; may have turned off vectors or float here
			mftb	r10								; get the low part of the time base
			b		mbitsniff

mbitspin0:	mtmsr	r8								; turn off interruptions
			mftb	r10								; get the low part of the time base

mbitsniff:	lwz		r12,0(r3)						; get that lock in here
			and		r0,r12,r4						; isolate the bits we check
			cmplw	r0,r5							; are these the right bits?
			beq++	mbitretry						; yes, try for it again...

			mftb	r11								; time stamp us now
			sub		r11,r11,r10						; get the elapsed time
			cmplwi	r11,128							; have we been spinning for 128 tb ticks?
			blt++	mbitsniff						; not yet...

			mtmsr	r9								; say, any interrupts pending?
			subi	r7,r7,128						; back off elapsed time from timeout value
			or		r7,r7,r7						; do nothing here but force a single cycle delay
			mr.		r7,r7							; have we used the whole timeout?
			or		r7,r7,r7						; do nothing here but force a single cycle delay

			ble--	mbitfail						; we failed...
			b		mbitspin0						; the enable window is open, keep trying...

mbitretry:	mtmsr	r9								; restore interrupt state
			b		mbittry

			.align	5

mbitgot:	li		r3,1							; set good return code
			isync									; make sure we don't use a speculatively loaded value
			blr

mbitfail:	li		r3,0							; set failure return code
			blr										; return, head hanging low...
/*
* unsigned int hw_cpu_sync(unsigned int *, unsigned int timeout)
*
* Spin until word hits 0 or timeout.
* Return success (1) or failure (0).
* Attempt will fail after timeout ticks of the timebase.
*
* The theory is that a processor will bump a counter as it signals
 * other processors. Then it will spin until the counter hits 0 (or
 * times out). Each of the other processors, as it receives the
 * signal, will decrement the counter.
*
 * The other processors use an interlocked update to decrement; this
 * one does not need to interlock.
*/
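/*
 * A sketch in C of the rendezvous described above. The IPI helper, the
 * counter, and the function names are illustrative, not from this file:
 *
 *	extern unsigned int hw_cpu_sync(unsigned int *, unsigned int timeout);
 *	extern uint32_t hw_atomic_sub(uint32_t *dest, uint32_t delt);
 *	extern void cause_signal(int cpu);		// hypothetical IPI helper
 *	extern void panic(const char *str, ...);
 *
 *	volatile uint32_t sync_count;
 *
 *	void
 *	rendezvous(int ncpus, int my_cpu, unsigned int timeout)
 *	{
 *		int i;
 *
 *		sync_count = ncpus - 1;			// one check-in per target
 *		for (i = 0; i < ncpus; i++)
 *			if (i != my_cpu)
 *				cause_signal(i);
 *		if (!hw_cpu_sync((unsigned int *)&sync_count, timeout))
 *			panic("rendezvous: cpus did not check in");
 *	}
 *
 *	// Each target, from its signal handler, checks in with an
 *	// interlocked decrement:
 *	//	hw_atomic_sub((uint32_t *)&sync_count, 1);
 */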
.align 5
.globl EXT(hw_cpu_sync)
LEXT(hw_cpu_sync)
			mftb	r10								; get the low part of the time base
			mr		r9,r3							; save the sync word address
			li		r3,1							; assume we work

csynctry:	lwz		r11,0(r9)						; grab the sync value
			mr.		r11,r11							; counter hit 0?
			beqlr-									; yes, all done...
			mftb	r12								; time stamp us now
			sub		r12,r12,r10						; get the elapsed time
			cmplw	r4,r12							; have we gone too long?
			bge+	csynctry						; not yet...

			li		r3,0							; set failure...
			blr										; return, head hanging low...
/*
* unsigned int hw_cpu_wcng(unsigned int *, unsigned int, unsigned int timeout)
*
* Spin until word changes or timeout.
* Return success (1) or failure (0).
* Attempt will fail after timeout ticks of the timebase.
*
 * This is used to ensure that a processor passes a certain point.
 * An example of use is to monitor the last interrupt time in the
 * per_proc block. This can be used to ensure that the other processor
 * has seen at least one interrupt since a specific time.
*/
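/*
 * Caller-side sketch in C of the use described above (the per_proc field
 * name is illustrative): capture the other processor's interrupt
 * timestamp, then wait for it to change.
 *
 *	extern unsigned int hw_cpu_wcng(unsigned int *, unsigned int, unsigned int timeout);
 *	extern void panic(const char *str, ...);
 *
 *	void
 *	example(volatile unsigned int *intr_time, unsigned int timeout)
 *	{
 *		unsigned int last = *intr_time;		// hypothetical last-interrupt field
 *
 *		if (!hw_cpu_wcng((unsigned int *)intr_time, last, timeout))
 *			panic("example: no interrupt seen on other cpu");
 *	}
 */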
.align 5
.globl EXT(hw_cpu_wcng)
LEXT(hw_cpu_wcng)
			mftb	r10								; get the low part of the time base
			mr		r9,r3							; save the sync word address
			li		r3,1							; assume we work

wcngtry:	lwz		r11,0(r9)						; grab the value
			cmplw	r11,r4							; do they still match?
			bnelr-									; nope, it changed...
			mftb	r12								; time stamp us now
			sub		r12,r12,r10						; get the elapsed time
			cmplw	r5,r12							; have we gone too long?
			bge+	wcngtry							; not yet...

			li		r3,0							; set failure...
			blr										; return, head hanging low...
/*
* unsigned int hw_lock_try(hw_lock_t)
*
* Try to acquire spin-lock. Return success (1) or failure (0)
* Returns with preemption disabled on success.
*
*/
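/*
 * Caller-side sketch in C of the usual try/back-off pattern (the fallback
 * work is illustrative):
 *
 *	extern unsigned int hw_lock_try(hw_lock_t);
 *	extern void hw_lock_unlock(hw_lock_t);
 *
 *	void
 *	example(hw_lock_t lock)
 *	{
 *		if (hw_lock_try(lock)) {
 *			// ... got it; preemption stays disabled until we unlock ...
 *			hw_lock_unlock(lock);
 *		} else {
 *			// ... lock busy; do something else rather than spin ...
 *		}
 *	}
 */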
.align 5
.globl EXT(hw_lock_try)
LEXT(hw_lock_try)
			lis		r0,hi16(MASK(MSR_VEC))			; get vector enable
			mfmsr	r9								; get the MSR value
			ori		r0,r0,lo16(MASK(MSR_FP))		; get FP enable
			ori		r7,r0,lo16(MASK(MSR_EE))		; get EE bit on too
			andc	r9,r9,r0						; clear FP and VEC
			andc	r7,r9,r7						; clear EE as well

			mtmsr	r7								; disable interruptions and thus, preemption

			lwz		r5,0(r3)						; quick load
			andi.	r6,r5,ILK_LOCKED				; test...
			bne--	.L_lock_try_failed				; no go...

.L_lock_try_loop:
			lwarx	r5,0,r3							; load from the lock address and reserve
			andi.	r6,r5,ILK_LOCKED				; test...
			ori		r5,r5,ILK_LOCKED
			bne--	.L_lock_try_failedX				; branch if taken, predict free
			stwcx.	r5,0,r3							; and set (if still reserved)
			bne--	.L_lock_try_loop				; if the store failed, loop back...

			isync

			mfsprg	r6,1							; get current activation
			lwz		r5,ACT_PREEMPT_CNT(r6)			; get the preemption level
			addi	r5,r5,1							; bring up the disable count
			stw		r5,ACT_PREEMPT_CNT(r6)			; save it back

			mtmsr	r9								; allow interruptions now
			li		r3,1							; set that the lock was free
			blr

.L_lock_try_failedX:
			li		r6,lgKillResv					; killing field
			stwcx.	r6,0,r6							; kill reservation

.L_lock_try_failed:
			mtmsr	r9								; allow interruptions now
			li		r3,0							; FAILURE - lock was taken
			blr
/*
* unsigned int hw_lock_held(hw_lock_t)
*
* Return 1 if lock is held
* Doesn't change preemption state.
* N.B. Racy, of course.
*/
.align 5
.globl EXT(hw_lock_held)
LEXT(hw_lock_held)
			isync									; make sure we don't use a speculatively fetched lock
			lwz		r3,0(r3)						; get lock value
			andi.	r6,r3,ILK_LOCKED				; extract the ILK_LOCKED bit
			blr
/*
* uint32_t hw_compare_and_store(uint32_t oldval, uint32_t newval, uint32_t *dest)
*
 * Compares oldval to the contents of dest; if they are equal, stores
 * newval and returns true, else returns false without storing.
 * This is an atomic operation.
*/
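/*
 * A minimal caller-side sketch in C of the usual compare-and-swap retry
 * loop built on this primitive (the function and flag names are
 * illustrative, not from this file):
 *
 *	extern uint32_t hw_compare_and_store(uint32_t oldval, uint32_t newval, uint32_t *dest);
 *
 *	void
 *	atomic_set_flag(uint32_t *word, uint32_t flag)
 *	{
 *		uint32_t old;
 *
 *		do {
 *			old = *word;			// sample the current value
 *		} while (!hw_compare_and_store(old, old | flag, word));
 *	}
 */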
.align 5
.globl EXT(hw_compare_and_store)
LEXT(hw_compare_and_store)
			mr		r6,r3							; save the old value

cstry:		lwarx	r9,0,r5							; grab the area value
			li		r3,1							; assume it works
			cmplw	cr0,r9,r6						; does it match the old value?
			bne--	csfail							; no, it must have changed...
			stwcx.	r4,0,r5							; try to save the new value
			bne--	cstry							; didn't get it, try again...
			isync									; just hold up prefetch
			blr										; return...

csfail:		li		r3,lgKillResv					; killing field
			stwcx.	r3,0,r3							; blow reservation

			li		r3,0							; set failure
			blr										; better luck next time...
/*
* uint32_t hw_atomic_add(uint32_t *dest, uint32_t delt)
*
* Atomically add the second parameter to the first.
* Returns the result.
*
*/
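/*
 * Caller-side sketch in C (the counter is illustrative). The same pattern
 * applies to hw_atomic_sub/or/and below:
 *
 *	extern uint32_t hw_atomic_add(uint32_t *dest, uint32_t delt);
 *
 *	static uint32_t refcount;			// hypothetical shared counter
 *
 *	uint32_t
 *	take_ref(void)
 *	{
 *		return hw_atomic_add(&refcount, 1);	// returns the new value
 *	}
 */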
.align 5
.globl EXT(hw_atomic_add)
LEXT(hw_atomic_add)
			mr		r6,r3							; save the area

addtry:		lwarx	r3,0,r6							; grab the area value
			add		r3,r3,r4						; add the value
			stwcx.	r3,0,r6							; try to save the new value
			bne--	addtry							; didn't get it, try again...
			blr										; return...
/*
* uint32_t hw_atomic_sub(uint32_t *dest, uint32_t delt)
*
* Atomically subtract the second parameter from the first.
* Returns the result.
*
*/
.align 5
.globl EXT(hw_atomic_sub)
LEXT(hw_atomic_sub)
			mr		r6,r3							; save the area

subtry:		lwarx	r3,0,r6							; grab the area value
			sub		r3,r3,r4						; subtract the value
			stwcx.	r3,0,r6							; try to save the new value
			bne--	subtry							; didn't get it, try again...
			blr										; return...
/*
* uint32_t hw_atomic_or(uint32_t *dest, uint32_t mask)
*
* Atomically ORs the second parameter into the first.
* Returns the result.
*/
.align 5
.globl EXT(hw_atomic_or)
LEXT(hw_atomic_or)
			mr		r6,r3							; save the area

ortry:		lwarx	r3,0,r6							; grab the area value
			or		r3,r3,r4						; OR the value
			stwcx.	r3,0,r6							; try to save the new value
			bne--	ortry							; didn't get it, try again...
			blr										; return...
/*
* uint32_t hw_atomic_and(uint32_t *dest, uint32_t mask)
*
* Atomically ANDs the second parameter with the first.
* Returns the result.
*
*/
.align 5
.globl EXT(hw_atomic_and)
LEXT(hw_atomic_and)
			mr		r6,r3							; save the area

andtry:		lwarx	r3,0,r6							; grab the area value
			and		r3,r3,r4						; AND the value
			stwcx.	r3,0,r6							; try to save the new value
			bne--	andtry							; didn't get it, try again...
			blr										; return...
/*
* void hw_queue_atomic(unsigned int * anchor, unsigned int * elem, unsigned int disp)
*
* Atomically inserts the element at the head of the list
* anchor is the pointer to the first element
* element is the pointer to the element to insert
* disp is the displacement into the element to the chain pointer
*
*/
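/*
 * Caller-side sketch in C (the element layout is illustrative). The chain
 * pointer lives "disp" bytes into the element, so the routine works for
 * any structure that carries its own link field (offsetof is from
 * <stddef.h>):
 *
 *	extern void hw_queue_atomic(unsigned int *anchor, unsigned int *elem, unsigned int disp);
 *
 *	struct my_elem {
 *		struct my_elem	*next;			// hypothetical chain pointer
 *		int		payload;
 *	};
 *
 *	void
 *	push(unsigned int *anchor, struct my_elem *e)
 *	{
 *		hw_queue_atomic(anchor, (unsigned int *)e,
 *			(unsigned int)offsetof(struct my_elem, next));
 *	}
 */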
.align 5
.globl EXT(hw_queue_atomic)
LEXT(hw_queue_atomic)
			mr		r7,r4							; make the end point the same as the start
			mr		r8,r5							; copy the displacement also
			b		hw_queue_comm					; join common code...

/*
* void hw_queue_atomic_list(unsigned int * anchor, unsigned int * first, unsigned int * last, unsigned int disp)
*
* Atomically inserts the list of elements at the head of the list
* anchor is the pointer to the first element
* first is the pointer to the first element to insert
* last is the pointer to the last element to insert
* disp is the displacement into the element to the chain pointer
*/
.align 5
.globl EXT(hw_queue_atomic_list)
LEXT(hw_queue_atomic_list)
			mr		r7,r5							; make the end point the last element
			mr		r8,r6							; copy the displacement also

hw_queue_comm:
			lwarx	r9,0,r3							; pick up the anchor
			stwx	r9,r8,r7						; chain the old head to the end of the new stuff
			eieio									; make sure the chain store is done before the anchor update
			stwcx.	r4,0,r3							; try to chain onto the front
			bne--	hw_queue_comm					; didn't make it, try again...
			blr										; return...

/*
* unsigned int *hw_dequeue_atomic(unsigned int *anchor, unsigned int disp)
*
* Atomically removes the first element in a list and returns it.
* anchor is the pointer to the first element
* disp is the displacement into the element to the chain pointer
* Returns element if found, 0 if empty.
*/
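/*
 * Caller-side sketch in C, matching the push example above (element
 * layout is illustrative):
 *
 *	extern unsigned int *hw_dequeue_atomic(unsigned int *anchor, unsigned int disp);
 *
 *	struct my_elem *
 *	pop(unsigned int *anchor)
 *	{
 *		return (struct my_elem *)hw_dequeue_atomic(anchor,
 *			(unsigned int)offsetof(struct my_elem, next));	// 0 if empty
 *	}
 */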
.align 5
.globl EXT(hw_dequeue_atomic)
LEXT(hw_dequeue_atomic)
			mr		r5,r3							; save the anchor

hw_dequeue_comm:
			lwarx	r3,0,r5							; pick up the anchor
			mr.		r3,r3							; is the list empty?
			beq--	hdcFail							; leave it list empty...
			lwzx	r9,r4,r3						; get the next in line
			stwcx.	r9,0,r5							; try to chain into the front
			beqlr++									; got the thing, go away with it...
			b		hw_dequeue_comm					; didn't make it, try again...

hdcFail:	li		r4,lgKillResv					; killing field
			stwcx.	r4,0,r4							; dump reservation
			blr										; leave...
/*
* void mutex_init(mutex_t* l, etap_event_t etap)
*
*/
.align 5
.globl EXT(mutex_init)
LEXT(mutex_init)
PROLOG(0)
li r10, 0
			stw		r10,LOCK_DATA(r3)				; clear the lock word
			sth		r10,MUTEX_WAITERS(r3)			; init waiter count
			sth		r10,MUTEX_PROMOTED_PRI(r3)
#if	MACH_LDEBUG
			stw		r10,MUTEX_PC(r3)				; init caller pc
			stw		r10,MUTEX_THREAD(r3)			; and owning thread
			li		r10,MUTEX_TAG
			stw		r10,MUTEX_TYPE(r3)				; set lock type
#endif	/* MACH_LDEBUG */
			EPILOG
			blr
/*
* void mutex_lock(mutex_t*)
*
*/
.align 5
.globl EXT(mutex_lock)
LEXT(mutex_lock)
.globl EXT(_mutex_lock)
LEXT(_mutex_lock)
#if !MACH_LDEBUG
			mfsprg	r6,1							; load the current thread
			lwz		r5,0(r3)						; get the lock quickly
			li		r4,0
			li		r8,0
			mr.		r5,r5							; quick check
			bne--	mlckspin1						; can not get it right now...

mlcktry:
			lwarx	r5,0,r3							; load the mutex lock
			mr.		r5,r5
			bne--	mlckspin0						; can not get it right now...
			stwcx.	r6,0,r3							; grab the lock
			bne--	mlcktry							; loop back if failed
			isync									; stop prefetching
			mflr	r8
			stw		r8,4(r3)
			blr

mlckspin0:
			li		r5,lgKillResv					; killing field
			stwcx.	r5,0,r5							; kill reservation

mlckspin1:
			mr.		r4,r4							; test timeout value
			bne++	mlckspin2
			lis		r4,hi16(EXT(MutexSpin))			; get the high part
			ori		r4,r4,lo16(EXT(MutexSpin))		; and the low part
			lwz		r4,0(r4)						; get the spin timeout value
			mr.		r4,r4							; test spin timeout value
			beq		mlckslow1						; is the spin timeout set to zero?

mlckspin2:	mr.		r8,r8							; first spin attempt?
			bne++	mlckspin3						; no, MSR already saved...
			lis		r0,hi16(MASK(MSR_VEC))			; get vector enable
			mfmsr	r9								; get the MSR value
			ori		r0,r0,lo16(MASK(MSR_FP))		; get FP enable
			ori		r7,r0,lo16(MASK(MSR_EE))		; get EE bit on too
			andc	r9,r9,r0						; clear FP and VEC
			andc	r7,r9,r7						; clear EE as well
			mtmsr	r7								; turn off interruptions
			isync									; may have turned off vec and fp here
			mftb	r8								; get timestamp on entry
			b		mlcksniff

mlckspin3:	mtmsr	r7								; turn off interruptions
			mftb	r8								; get timestamp on entry

mlcksniff:	lwz		r5,0(r3)						; get that lock in here
			mr.		r5,r5							; is the lock held?
			beq++	mlckretry						; no, try for it again...
			rlwinm	r5,r5,0,0,29					; extract the lock owner
			mr.		r5,r5							; quick check
			beq++	mlckslow0						; InUse is set, no lock owner...
			lwz		r10,ACT_MACT_SPF(r5)			; get the special flags
			rlwinm.	r10,r10,0,OnProcbit,OnProcbit	; is the owner running?
			beq		mlckslow0						; lock owner isn't running...

			mftb	r10								; time stamp us now
			sub		r10,r10,r8						; get the elapsed time
			cmplwi	r10,128							; have we been spinning for 128 tb ticks?
			blt++	mlcksniff						; not yet...

			mtmsr	r9								; say, any interrupts pending?
			subi	r4,r4,128						; back off elapsed time from timeout value
			or		r4,r4,r4						; do nothing here but force a single cycle delay
			mr.		r4,r4							; have we used the whole timeout?
			or		r4,r4,r4						; do nothing here but force a single cycle delay
			ble--	mlckslow1						; we failed...
			b		mlckspin3						; the enable window is open, keep trying...

mlckretry:
			mtmsr	r9								; restore interrupt state
			li		r8,1							; show already through once
			b		mlcktry

mlckslow0:											; we couldn't get the lock
			mtmsr	r9								; restore interrupt state

mlckslow1:
#endif
#if CHECKNMI
			mflr	r12								; (TEST/DEBUG)
			bl		EXT(ml_sense_nmi)				; (TEST/DEBUG)
			mtlr	r12								; (TEST/DEBUG)
#endif

			PROLOG(12)
#if MACH_LDEBUG
bl EXT(assert_wait_possible)
mr. r3,r3
bne L_mutex_lock_assert_wait_1
lis r3,hi16(L_mutex_lock_assert_wait_panic_str)
ori r3,r3,lo16(L_mutex_lock_assert_wait_panic_str)
			bl		EXT(panic)
			BREAKPOINT_TRAP							; we die here anyway

			.data
L_mutex_lock_assert_wait_panic_str:
STRINGD "mutex_lock: assert_wait_possible false\n\000"
.text
L_mutex_lock_assert_wait_1:
lwz r3,FM_ARG0(r1)
#endif
CHECK_SETUP(r12)
CHECK_MUTEX_TYPE()
CHECK_NO_SIMPLELOCKS()
.L_ml_retry:
			bl		lockDisa						; go get a lock on the mutex's interlock lock
			mr.		r4,r3							; did we get it?
			lwz		r3,FM_ARG0(r1)					; restore the lock address
			bne+	mlGotInt						; we got it just fine...

			lis		r3,hi16(mutex_failed1)			; get the failed mutex message
			ori		r3,r3,lo16(mutex_failed1)		; and the low half
			bl		EXT(panic)						; call panic
			BREAKPOINT_TRAP							; we die here anyway, can not get the lock
.data
mutex_failed1:
STRINGD "We can't get a mutex interlock lock on mutex_lock\n\000"
.text
mlGotInt:
			lwz		r4,LOCK_DATA(r3)				; get the mutex's lock field
			rlwinm.	r9,r4,30,2,31					; so, can we have it?
			bne-	mlInUse							; nope, somebody's playing already...

#if	MACH_LDEBUG
			li		r5,lo16(MASK(MSR_EE))			; get the EE bit
			mfmsr	r11								; note: no need to deal with fp or vec here
			andc	r5,r11,r5
			mtmsr	r5

			mfsprg	r9,1							; get the current activation
			lwz		r5,0(r1)						; get the previous save frame
			lwz		r5,FM_LR_SAVE(r5)				; get our caller's address
			lwz		r8,ACT_THREAD(r9)				; get the active thread
			stw		r5,MUTEX_PC(r3)					; save our caller
			mr.		r8,r8							; is there any thread?
			stw		r8,MUTEX_THREAD(r3)				; set the mutex's holding thread
			beq-	.L_ml_no_active_thread			; no owning thread...
			lwz		r9,THREAD_MUTEX_COUNT(r8)		; get the mutex count
			addi	r9,r9,1							; bump it up
			stw		r9,THREAD_MUTEX_COUNT(r8)		; stash it back
.L_ml_no_active_thread:
			mtmsr	r11
#endif /* MACH_LDEBUG */
bl EXT(mutex_lock_acquire)
mfsprg r5,1
mr. r4,r3
lwz r3,FM_ARG0(r1)
beq mlUnlock
ori r5,r5,WAIT_FLAG
mlUnlock: eieio
			stw		r5,LOCK_DATA(r3)				; grab the mutex lock and free the interlock

			EPILOG									; restore all saved registers
			b		epStart							; go enable preemption...
mlInUse:
CHECK_SETUP(r12)
CHECK_MYLOCK(MUTEX_THREAD)
			ori		r4,r4,WAIT_FLAG					; set the wait flag
			stw		r4,LOCK_DATA(r3)
			rlwinm	r4,r4,0,0,29					; extract the lock owner
			bl		EXT(mutex_lock_wait)			; wait for our turn at the lock

			lwz		r3,FM_ARG0(r1)					; restore r3 (saved in prolog)
			b		.L_ml_retry						; and try again...
/*
* void _mutex_try(mutex_t*)
*
*/
.align 5
.globl EXT(mutex_try)
LEXT(mutex_try)
.globl EXT(_mutex_try)
LEXT(_mutex_try)
#if !MACH_LDEBUG
			mfsprg	r6,1							; load the current thread
			lwz		r5,0(r3)						; get the lock value
			mr.		r5,r5							; quick check
			bne--	L_mutex_try_slow				; can not get it now...

L_mutex_try_loop:
			lwarx	r5,0,r3							; load the lock value
			mr.		r5,r5
			bne--	L_mutex_try_slowX				; branch to the slow path...
			stwcx.	r6,0,r3							; grab the lock
			bne--	L_mutex_try_loop				; retry if the store failed...
			isync									; stop prefetching
			li		r3,1
			blr

L_mutex_try_slowX:
			li		r5,lgKillResv					; killing field
			stwcx.	r5,0,r5							; kill reservation

L_mutex_try_slow:
#endif
			PROLOG(8)								; reserve space for SWT_HI and SWT_LO

			CHECK_SETUP(r12)
CHECK_MUTEX_TYPE()
CHECK_NO_SIMPLELOCKS()
			lwz		r6,LOCK_DATA(r3)				; quick check
			rlwinm.	r6,r6,30,2,31					; to see if someone has this lock already
			bne-	mtFail							; someone's got it already...

			bl		lockDisa						; go get a lock on the mutex's interlock lock
			mr.		r4,r3							; did we get it?
			lwz		r3,FM_ARG0(r1)					; restore the lock address
			bne+	mtGotInt						; we got it just fine...

			lis		r3,hi16(mutex_failed2)			; get the failed mutex message
			ori		r3,r3,lo16(mutex_failed2)		; and the low half
			bl		EXT(panic)						; call panic
			BREAKPOINT_TRAP							; we die here anyway, can not get the lock
.data
mutex_failed2:
STRINGD "We can't get a mutex interlock lock on mutex_try\n\000"
.text
mtGotInt:
			lwz		r4,LOCK_DATA(r3)				; get the mutex's lock field
			rlwinm.	r9,r4,30,2,31					; so, can we have it?
			bne-	mtInUse							; nope, somebody's playing already...

#if	MACH_LDEBUG
			lis		r9,hi16(MASK(MSR_VEC))			; get vector enable
			mfmsr	r11								; get the MSR value
			ori		r9,r9,lo16(MASK(MSR_FP))		; get FP enable
			ori		r5,r9,lo16(MASK(MSR_EE))		; get EE bit on too
			andc	r11,r11,r9						; clear FP and VEC
			andc	r5,r11,r5						; clear EE as well
			mtmsr	r5

			mfsprg	r9,1							; get the current activation
			lwz		r5,0(r1)						; get the previous save frame
			lwz		r5,FM_LR_SAVE(r5)				; get our caller's address
			lwz		r8,ACT_THREAD(r9)				; get the active thread
			stw		r5,MUTEX_PC(r3)					; save our caller
			mr.		r8,r8							; is there any thread?
			stw		r8,MUTEX_THREAD(r3)				; set the mutex's holding thread
			beq-	.L_mt_no_active_thread			; no owning thread...
			lwz		r9,THREAD_MUTEX_COUNT(r8)		; get the mutex count
			addi	r9,r9,1							; bump it up
			stw		r9,THREAD_MUTEX_COUNT(r8)		; stash it back
.L_mt_no_active_thread:
			mtmsr	r11
#endif /* MACH_LDEBUG */
bl EXT(mutex_lock_acquire)
mfsprg r5,1
mr. r4,r3
lwz r3,FM_ARG0(r1)
beq mtUnlock
ori r5,r5,WAIT_FLAG
mtUnlock: eieio
			stw		r5,LOCK_DATA(r3)				; grab the mutex lock and free the interlock

			bl		epStart							; go enable preemption...

			li		r3,1
			EPILOG									; restore all saved registers
			blr										; return...

mtInUse:
			rlwinm	r4,r4,0,0,30					; get the unlock value
			stw		r4,LOCK_DATA(r3)				; free the interlock
			bl		epStart							; go enable preemption...

mtFail:		li		r3,0							; set failure code
			EPILOG									; restore all saved registers
			blr										; return...
/*
* void mutex_unlock_rwcmb(mutex_t* l)
*
*/
.align 5
.globl EXT(mutex_unlock_rwcmb)
LEXT(mutex_unlock_rwcmb)
.globl EXT(mulckPatch_isync)
LEXT(mulckPatch_isync)
isync
.globl EXT(mulckPatch_eieio)
LEXT(mulckPatch_eieio)
eieio
			lwz		r5,0(r3)						; get the lock
			rlwinm.	r4,r5,0,30,31					; quick check for waiter or interlock
			bne--	L_mutex_unlock_slow				; can not get it now...

L_mutex_unlock_rwcmb_loop:
			lwarx	r5,0,r3
			rlwinm.	r4,r5,0,30,31					; bail if pending waiter or interlock set
			li		r5,0							; clear the mutex lock
			bne--	L_mutex_unlock_rwcmb_slowX
			stwcx.	r5,0,r3
			bne--	L_mutex_unlock_rwcmb_loop
			blr

L_mutex_unlock_rwcmb_slowX:
			li		r5,lgKillResv					; killing field
			stwcx.	r5,0,r5							; dump reservation
			b		L_mutex_unlock_slow				; join the slow path...

/*
* void mutex_unlock(mutex_t* l)
*
*/
.align 5
.globl EXT(mutex_unlock)
LEXT(mutex_unlock)
#if !MACH_LDEBUG
sync
			lwz		r5,0(r3)						; get the lock
			rlwinm.	r4,r5,0,30,31					; quick check for waiter or interlock
			bne--	L_mutex_unlock_slow				; can not get it now...

L_mutex_unlock_loop:
			lwarx	r5,0,r3
			rlwinm.	r4,r5,0,30,31					; bail if pending waiter or interlock set
			li		r5,0							; clear the mutex lock
			bne--	L_mutex_unlock_slowX
			stwcx.	r5,0,r3
			bne--	L_mutex_unlock_loop
			blr

L_mutex_unlock_slowX:
			li		r5,lgKillResv					; killing field
			stwcx.	r5,0,r5							; dump reservation
#endif
L_mutex_unlock_slow:
PROLOG(0)
CHECK_SETUP(r12)
CHECK_MUTEX_TYPE()
CHECK_THREAD(MUTEX_THREAD)
			bl		lockDisa						; go get a lock on the mutex's interlock lock
			mr.		r4,r3							; did we get it?
			lwz		r3,FM_ARG0(r1)					; restore the lock address
			bne+	muGotInt						; we got it just fine...

			lis		r3,hi16(mutex_failed3)			; get the failed mutex message
			ori		r3,r3,lo16(mutex_failed3)		; and the low half
			bl		EXT(panic)						; call panic
			BREAKPOINT_TRAP							; we die here anyway, can not get the lock
.data
mutex_failed3:
STRINGD "We can't get a mutex interlock lock on mutex_unlock\n\000"
.text
muGotInt:
lwz r4,LOCK_DATA(r3)
			andi.	r5,r4,WAIT_FLAG					; are there any waiters?
			rlwinm	r4,r4,0,0,29
			beq+	muUnlock						; nope, we're done...

			bl		EXT(mutex_unlock_wakeup)		; yes, wake a thread
			lwz		r3,FM_ARG0(r1)					; restore r3 (saved in prolog)
			lwz		r5,LOCK_DATA(r3)				; load the lock

muUnlock:
#if MACH_LDEBUG
			lis		r8,hi16(MASK(MSR_VEC))			; get vector enable
			mfmsr	r11								; get the MSR value
			ori		r8,r8,lo16(MASK(MSR_FP))		; get FP enable
			ori		r9,r8,lo16(MASK(MSR_EE))		; get EE bit on too
			andc	r11,r11,r8						; clear FP and VEC
			andc	r9,r11,r9						; clear EE as well
			mtmsr	r9
mfsprg r9,1
lwz r9,ACT_THREAD(r9)
			stw		r9,MUTEX_THREAD(r3)				; disown the thread
			cmpwi	r9,0							; is there a current thread?
			beq-	.L_mu_no_active_thread
lwz r8,THREAD_MUTEX_COUNT(r9)
subi r8,r8,1
stw r8,THREAD_MUTEX_COUNT(r9)
.L_mu_no_active_thread:
mtmsr r11
#endif /* MACH_LDEBUG */
			andi.	r5,r5,WAIT_FLAG					; get the unlock value
			eieio
			stw		r5,LOCK_DATA(r3)				; unlock the interlock and lock

			EPILOG									; restore the stack frame
			b		epStart							; go enable preemption...
/*
* boolean_t mutex_preblock(mutex_t*, thread_t)
*/
.align 5
.globl EXT(mutex_preblock)
LEXT(mutex_preblock)
mr r6,r3
lwz r5,LOCK_DATA(r3)
mr. r3,r5
beqlr+
mr r3,r6
PROLOG(0)
stw r4,(FM_ARG0-4)(r1)
bl EXT(hw_lock_try)
mr. r4,r3
lwz r3,FM_ARG0(r1)
bne+ mpbGotInt
li r3,0
EPILOG
blr
mpbGotInt:
lwz r6,LOCK_DATA(r3)
rlwinm. r5,r6,0,0,30
bne+ mpbInUse
stw r5,LOCK_DATA(r3)
bl epStart
li r3,0
EPILOG
blr
mpbInUse:
lwz r4,(FM_ARG0-4)(r1)
rlwinm r5,r6,0,0,29
bl EXT(mutex_preblock_wait)
lwz r4,FM_ARG0(r1)
mr. r3,r3
lwz r5,LOCK_DATA(r4)
rlwinm r5,r5,0,0,30
beq- mpbUnlock0
ori r5,r5,WAIT_FLAG
eieio
stw r5,LOCK_DATA(r4)
bl epStart
li r3,1
EPILOG
blr
mpbUnlock0:
eieio
stw r5,LOCK_DATA(r4)
bl epStart
li r3,0
EPILOG
blr
/*
* void interlock_unlock(hw_lock_t lock)
*/
.align 5
.globl EXT(interlock_unlock)
LEXT(interlock_unlock)
lwz r10,LOCK_DATA(r3)
rlwinm r10,r10,0,0,30
eieio
stw r10,LOCK_DATA(r3)
			b		epStart							; go enable preemption...

/*
* void _enable_preemption_no_check(void)
*
* This version does not check if we get preempted or not
*/
.align 4
.globl EXT(_enable_preemption_no_check)
LEXT(_enable_preemption_no_check)
			cmplw	cr1,r1,r1						; force zero cr so we know not to check if preempted
			b		epCommn							; join up with the other enable code...
/*
* void _enable_preemption(void)
*
* This version checks if we get preempted or not
*/
.align 5
.globl EXT(_enable_preemption)
LEXT(_enable_preemption)
epStart:
			cmplwi	cr1,r1,0						; force non-zero cr so we know to check if preempted

;
;			Common enable preemption code.
;

epCommn:
			mfsprg	r3,1							; get current activation
			lwz		r5,ACT_PREEMPT_CNT(r3)			; get the preemption level
			subi	r5,r5,1							; bring down the disable count
			mr.		r5,r5							; did we underflow?
			stw		r5,ACT_PREEMPT_CNT(r3)			; save it back
			blt-	epTooFar						; yeah, we did...
			crandc	cr0_eq,cr0_eq,cr1_eq			; do not check for preemption if not requested
			beq+	epCheckPreempt					; go check if we need to be preempted...
			blr										; leave...

epTooFar:
			mr		r4,r5
			lis		r3,hi16(epTooFarStr)			; first half of panic string
			ori		r3,r3,lo16(epTooFarStr)			; second half of panic string
			PROLOG(0)
			bl		EXT(panic)
			BREAKPOINT_TRAP							; we die here anyway

			.data
epTooFarStr:
STRINGD "_enable_preemption: preemption_level %d\n\000"
.text
			.align	5

epCheckPreempt:
			lis		r0,hi16(MASK(MSR_VEC))			; get vector enable
			mfmsr	r9								; get the MSR value
			ori		r0,r0,lo16(MASK(MSR_FP))		; get FP enable
			ori		r7,r0,lo16(MASK(MSR_EE))		; get EE bit on too
			andc	r9,r9,r0						; clear FP and VEC
			andc	r7,r9,r7						; clear EE as well
			mtmsr	r7								; turn off interruptions
			isync									; may have turned off vec and fp here
			mfsprg	r3,0							; get per_proc
			lwz		r7,PP_NEED_AST(r3)				; get the AST request address
			li		r5,AST_URGENT					; get the requests we do honor
			lwz		r7,0(r7)						; get the actual, real deal, up to date ASTs
			lis		r0,hi16(DoPreemptCall)			; just in case, get the top of firmware call
			and.	r7,r7,r5						; should we preempt?
			ori		r0,r0,lo16(DoPreemptCall)		; merge in bottom part
			mtmsr	r9								; allow interrupts if we can
epCPno:
			beqlr+									; we probably will not preempt...
			sc										; do the preemption
			blr										; now, go away now...

/*
* void disable_preemption(void)
*
* Here is where we disable preemption. Since preemption is on a
* per processor basis (a thread runs on one CPU at a time) we don't
* need any cross-processor synchronization. We do, however, need to
* be interrupt safe, so we don't preempt while in the process of
* disabling it. We could use SPLs, but since we always want complete
* disablement, and this is platform specific code, we'll just kick the
* MSR. We'll save a couple of orders of magnitude over using SPLs.
*/
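/*
 * Caller-side sketch in C of the usual bracket (the per-processor access
 * is illustrative); disables must balance enables:
 *
 *	extern void _disable_preemption(void);
 *	extern void _enable_preemption(void);
 *
 *	void
 *	example(void)
 *	{
 *		_disable_preemption();
 *		// ... touch per-processor data without fear of migration ...
 *		_enable_preemption();		// may preempt right here if an AST is pending
 *	}
 */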
.align 5
.globl EXT(_disable_preemption)
LEXT(_disable_preemption)
			mfsprg	r6,1							; get the current activation
			lwz		r5,ACT_PREEMPT_CNT(r6)			; get the preemption level
			addi	r5,r5,1							; bring up the disable count
			stw		r5,ACT_PREEMPT_CNT(r6)			; save it back
			blr										; return...

/*
* int get_preemption_level(void)
*
* Return the current preemption level
*/
.align 5
.globl EXT(get_preemption_level)
LEXT(get_preemption_level)
			mfsprg	r6,1							; get current activation
			lwz		r3,ACT_PREEMPT_CNT(r6)			; get the preemption level
			blr										; return...

/*
* int get_simple_lock_count(void)
*
* Return the simple lock count
*
*/
.align 5
.globl EXT(get_simple_lock_count)
LEXT(get_simple_lock_count)
#if	MACH_LDEBUG
			lis		r3,hi16(MASK(MSR_VEC))			; get vector enable
			mfmsr	r9								; get the MSR value
			ori		r3,r3,lo16(MASK(MSR_FP))		; get FP enable
			ori		r8,r3,lo16(MASK(MSR_EE))		; get EE bit on too
			andc	r9,r9,r3						; clear FP and VEC
			andc	r8,r9,r8						; clear EE as well
			mtmsr	r8								; interrupts off
			isync									; may have messed with vec/fp
			mfsprg	r6,0							; get the per_proc
			lwz		r3,PP_SIMPLE_LOCK_CNT(r6)		; get the simple lock count
			mtmsr	r9								; restore interruptions to entry
#else
			li		r3,0							; simple lock count not updated
#endif
			blr										; return...

/*
* void ppc_usimple_lock_init(simple_lock_t, etap_event_t)
*
* Initialize a simple lock.
*/
.align 5
.globl EXT(ppc_usimple_lock_init)
LEXT(ppc_usimple_lock_init)
			li		r0,0							; set lock word to free, i.e. 0
			stw		r0,0(r3)						; initialize the lock
			blr
/*
* void ppc_usimple_lock(simple_lock_t)
*
*/
.align 5
.globl EXT(ppc_usimple_lock)
LEXT(ppc_usimple_lock)
#if CHECKNMI
			mflr	r12								; (TEST/DEBUG)
			bl		EXT(ml_sense_nmi)				; (TEST/DEBUG)
			mtlr	r12								; (TEST/DEBUG)
#endif

			mfsprg	r6,1							; get the current activation
			lwz		r5,ACT_PREEMPT_CNT(r6)			; get the preemption level
			addi	r5,r5,1							; bring up the disable count
			stw		r5,ACT_PREEMPT_CNT(r6)			; save it back
			mr		r5,r3							; get the address of the lock
			li		r8,0							; show that the MSR is not saved yet
			li		r4,0							; use the default timeout

slcktry:	lwarx	r11,0,r5						; grab the lock value
			andi.	r3,r11,ILK_LOCKED				; is it locked?
			ori		r11,r6,ILK_LOCKED				; set interlock
			bne--	slckspin						; yes, wait for it to clear...
			stwcx.	r11,0,r5						; try to seize that there durn lock
			bne--	slcktry							; couldn't get it...
			isync									; make sure we don't use a speculatively loaded value
			blr										; go on home...

slckspin:	li		r11,lgKillResv					; killing field
			stwcx.	r11,0,r11						; kill reservation

			mr.		r4,r4							; test timeout value
			bne++	slockspin0
			lis		r4,hi16(EXT(LockTimeOut))		; get the high part
			ori		r4,r4,lo16(EXT(LockTimeOut))	; and the low part
			lwz		r4,0(r4)						; get the timeout value

slockspin0:	mr.		r8,r8							; first spin attempt?
			bne++	slockspin1						; no, MSR already saved...
			lis		r0,hi16(MASK(MSR_VEC))			; get vector enable
			mfmsr	r9								; get the MSR value
			ori		r0,r0,lo16(MASK(MSR_FP))		; get FP enable
			ori		r7,r0,lo16(MASK(MSR_EE))		; get EE bit on too
			andc	r9,r9,r0						; clear FP and VEC
			andc	r7,r9,r7						; clear EE as well
			mtmsr	r7								; turn off interruptions
			isync									; may have turned off vec and fp here
			mftb	r8								; get timestamp on entry
			b		slcksniff

slockspin1:	mtmsr	r7								; turn off interruptions
			mftb	r8								; get timestamp on entry

slcksniff:	lwz		r3,0(r5)						; get that lock in here
			andi.	r3,r3,ILK_LOCKED				; is it free yet?
			beq++	slckretry						; yes, try for it again...

			mftb	r10								; time stamp us now
			sub		r10,r10,r8						; get the elapsed time
			cmplwi	r10,128							; have we been spinning for 128 tb ticks?
			blt++	slcksniff						; not yet...

			mtmsr	r9								; say, any interrupts pending?
			subi	r4,r4,128						; back off elapsed time from timeout value
			or		r4,r4,r4						; do nothing here but force a single cycle delay
			mr.		r4,r4							; have we used the whole timeout?
			or		r4,r4,r4						; do nothing here but force a single cycle delay

			ble--	slckfail						; we failed...
			b		slockspin1						; the enable window is open, keep trying...

slckretry:	mtmsr	r9								; restore interrupt state
			li		r8,1							; show already through once
			b		slcktry

slckfail:	lis		r3,hi16(slckpanic_str)			; get the failed lock message
			ori		r3,r3,lo16(slckpanic_str)		; and the low half
mr r4,r5
mflr r5
PROLOG(0)
bl EXT(panic)
			BREAKPOINT_TRAP							; we die here anyway

			.data
slckpanic_str:
STRINGD "ppc_usimple_lock: simple lock deadlock detection l=0x%08X, pc=0x%08X\n\000"
.text
/*
* unsigned int ppc_usimple_lock_try(simple_lock_t)
*
*/
.align 5
.globl EXT(ppc_usimple_lock_try)
LEXT(ppc_usimple_lock_try)
#if CHECKNMI
			mflr	r12								; (TEST/DEBUG)
			bl		EXT(ml_sense_nmi)				; (TEST/DEBUG)
			mtlr	r12								; (TEST/DEBUG)
#endif
			lis		r0,hi16(MASK(MSR_VEC))			; get vector enable
			mfmsr	r9								; get the MSR value
			ori		r0,r0,lo16(MASK(MSR_FP))		; get FP enable
			ori		r7,r0,lo16(MASK(MSR_EE))		; get EE bit on too
			andc	r9,r9,r0						; clear FP and VEC
			andc	r7,r9,r7						; clear EE as well
			mtmsr	r7								; disable interruptions and thus, preemption
			mfsprg	r6,1							; get current activation

			lwz		r11,0(r3)						; get the lock
			andi.	r5,r11,ILK_LOCKED				; check it...
			bne--	slcktryfail						; quickly fail...

slcktryloop:
			lwarx	r11,0,r3						; load from the lock address and reserve
			andi.	r5,r11,ILK_LOCKED				; test...
			ori		r5,r6,ILK_LOCKED
			bne--	slcktryfailX					; branch if taken, predict free
			stwcx.	r5,0,r3							; and set (if still reserved)
			bne--	slcktryloop						; if the store failed, loop back...

			isync

			lwz		r5,ACT_PREEMPT_CNT(r6)			; get the preemption level
			addi	r5,r5,1							; bring up the disable count
			stw		r5,ACT_PREEMPT_CNT(r6)			; save it back

			mtmsr	r9								; allow interruptions now
			li		r3,1							; set that the lock was free
			blr

slcktryfailX:
			li		r5,lgKillResv					; killing field
			stwcx.	r5,0,r5							; kill reservation

slcktryfail:
			mtmsr	r9								; allow interruptions now
			li		r3,0							; FAILURE - lock was taken
			blr
/*
* void ppc_usimple_unlock_rwcmb(simple_lock_t)
*
*/
.align 5
.globl EXT(ppc_usimple_unlock_rwcmb)
LEXT(ppc_usimple_unlock_rwcmb)
#if CHECKNMI
			mflr	r12								; (TEST/DEBUG)
			bl		EXT(ml_sense_nmi)				; (TEST/DEBUG)
			mtlr	r12								; (TEST/DEBUG)
#endif
			li		r0,0							; set lock word to free
.globl EXT(sulckPatch_isync)
LEXT(sulckPatch_isync)
isync
.globl EXT(sulckPatch_eieio)
LEXT(sulckPatch_eieio)
eieio
stw r0, LOCK_DATA(r3)
b epStart /*
* void ppc_usimple_unlock_rwmb(simple_lock_t)
*
*/
.align 5
.globl EXT(ppc_usimple_unlock_rwmb)
LEXT(ppc_usimple_unlock_rwmb)
#if CHECKNMI
			mflr	r12								; (TEST/DEBUG)
			bl		EXT(ml_sense_nmi)				; (TEST/DEBUG)
			mtlr	r12								; (TEST/DEBUG)
#endif
			li		r0,0							; set lock word to free
sync
stw r0, LOCK_DATA(r3)
b epStart /*
* void enter_funnel_section(funnel_t *)
*
*/
.align 5
.globl EXT(enter_funnel_section)
LEXT(enter_funnel_section)
#if !MACH_LDEBUG
lis r10,hi16(EXT(kdebug_enable))
ori r10,r10,lo16(EXT(kdebug_enable))
lwz r10,0(r10)
lis r11,hi16(EXT(split_funnel_off))
ori r11,r11,lo16(EXT(split_funnel_off))
lwz r11,0(r11)
			or.		r10,r11,r10						; kdebug_enable or split_funnel_off set?
			bne-	L_enter_funnel_section_slow		; if set, call the slow path...
			mfsprg	r6,1							; get the current activation
			lwz		r7,LOCK_FNL_MUTEX(r3)			; get the funnel mutex lock

			lwz		r5,0(r7)						; get the lock quickly
			mr.		r5,r5							; locked?
			bne--	L_enter_funnel_section_slow		; yup...

L_enter_funnel_section_loop:
			lwarx	r5,0,r7							; load the mutex lock
			mr.		r5,r5
			bne--	L_enter_funnel_section_slowX	; go to the slow path...
			stwcx.	r6,0,r7							; grab the lock
			bne--	L_enter_funnel_section_loop		; loop back if the store failed...
			isync									; stop prefetching
			lwz		r6,ACT_THREAD(r6)				; get the current thread
			li		r7,TH_FN_OWNED
			stw		r3,THREAD_FUNNEL_LOCK(r6)		; set the funnel lock reference
			stw		r7,THREAD_FUNNEL_STATE(r6)		; set the funnel state
			blr
L_enter_funnel_section_slowX:
li r4,lgKillResv
L_enter_funnel_section_slow:
#endif
li r4,TRUE
b EXT(thread_funnel_set)
/*
* void exit_funnel_section(void)
*
*/
.align 5
.globl EXT(exit_funnel_section)
LEXT(exit_funnel_section)
			mfsprg	r6,1							; get the current activation
			lwz		r6,ACT_THREAD(r6)				; get the current thread
			lwz		r3,THREAD_FUNNEL_LOCK(r6)		; get the funnel lock
			mr.		r3,r3							; do we hold a funnel?
			beq-	L_exit_funnel_section_ret		; nope...
#if	!MACH_LDEBUG
			lis		r10,hi16(EXT(kdebug_enable))
ori r10,r10,lo16(EXT(kdebug_enable))
lwz r10,0(r10)
mr. r10,r10
			bne-	L_exit_funnel_section_slow		; if set, call the slow path...
			lwz		r7,LOCK_FNL_MUTEX(r3)			; get the funnel mutex lock
			.globl	EXT(retfsectPatch_isync)
LEXT(retfsectPatch_isync)
isync
.globl EXT(retfsectPatch_eieio)
LEXT(retfsectPatch_eieio)
eieio
			lwz		r5,0(r7)						; get the lock
			rlwinm.	r4,r5,0,30,31					; quick check for waiter or interlock
			bne--	L_exit_funnel_section_slow		; can not get it now...

L_exit_funnel_section_loop:
			lwarx	r5,0,r7
			rlwinm.	r4,r5,0,30,31					; bail if pending waiter or interlock set
			li		r5,0							; clear the mutex lock
			bne--	L_exit_funnel_section_slowX
			stwcx.	r5,0,r7
			bne--	L_exit_funnel_section_loop
			isync
			li		r7,0
			stw		r7,THREAD_FUNNEL_STATE(r6)		; clear the funnel state
			stw		r7,THREAD_FUNNEL_LOCK(r6)		; clear the funnel lock reference
			blr										; return...

L_exit_funnel_section_slowX:
			li		r4,lgKillResv					; killing field
			stwcx.	r4,0,r4							; kill it
li r4,lgKillResv
L_exit_funnel_section_slow:
#endif
li r4,FALSE
b EXT(thread_funnel_set)
L_exit_funnel_section_ret:
blr
.globl EXT(condStop)
LEXT(condStop)
XcondStop:	cmplw	r3,r4							; check if these are equal
			beq--	XcondStop						; loop here until they are different
			blr										; return...