#include "pthread_internals.h"
#include <sys/time.h>
#include <stdio.h>
#ifdef PLOCKSTAT
#include "plockstat.h"
#else
#define PLOCKSTAT_MUTEX_RELEASE(x, y)
#endif
extern int __semwait_signal(int, int, int, int, int64_t, int32_t);
extern int _pthread_cond_init(pthread_cond_t *, const pthread_condattr_t *, int);
extern int __unix_conforming;
extern int usenew_mtximpl;
#ifdef PR_5243343
extern int PR_5243343_flag;
#endif
__private_extern__ int _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime, int isRelative, int isconforming);
#ifndef BUILDING_VARIANT
static void cond_cleanup(void *arg);
static void cond_dropwait(npthread_cond_t * cond, int error, uint32_t updateval);
static void __pthread_cond_set_signature(npthread_cond_t * cond);
static int _pthread_cond_destroy_locked(pthread_cond_t *cond);
#endif
#if defined(__LP64__)
/*
 * COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt)
 *
 * Yields pointers to the three 32-bit sequence words stored in
 * cond->c_seq[]: L ("lockseq", advanced as waiters register),
 * U ("userseq", advanced as wakeups are requested) and S ("signalseq",
 * advanced as wakeups complete).  The (L,S) pair is read and updated
 * with a single 64-bit compare-and-swap on c_lseqcnt, so that pair
 * must start on an 8-byte boundary: when the condvar object itself is
 * misaligned (cond->misalign, decided at init time), the roles rotate
 * within c_seq[] so the CAS target is realigned.
 *
 * NOTE(review): the LP64 and 32-bit variants are textually identical
 * today; they appear to be kept separate in case the layouts diverge.
 */
#define COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt) \
{ \
if (cond->misalign != 0) { \
c_lseqcnt = &cond->c_seq[1]; \
c_sseqcnt = &cond->c_seq[2]; \
c_useqcnt = &cond->c_seq[0]; \
} else { \
\
c_lseqcnt = &cond->c_seq[0]; \
c_sseqcnt = &cond->c_seq[1]; \
c_useqcnt = &cond->c_seq[2]; \
} \
}
#else
/* 32-bit variant — currently identical to the LP64 case above. */
#define COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt) \
{ \
if (cond->misalign != 0) { \
c_lseqcnt = &cond->c_seq[1]; \
c_sseqcnt = &cond->c_seq[2]; \
c_useqcnt = &cond->c_seq[0]; \
} else { \
\
c_lseqcnt = &cond->c_seq[0]; \
c_sseqcnt = &cond->c_seq[1]; \
c_useqcnt = &cond->c_seq[2]; \
} \
}
#endif
#define _KSYN_TRACE_ 0
#if _KSYN_TRACE_
#define DBG_FUNC_START 1
#define DBG_FUNC_END 2
#define DBG_FUNC_NONE 0
int __kdebug_trace(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t);
#define _KSYN_TRACE_UM_LOCK 0x9000060
#define _KSYN_TRACE_UM_UNLOCK 0x9000064
#define _KSYN_TRACE_UM_MHOLD 0x9000068
#define _KSYN_TRACE_UM_MDROP 0x900006c
#define _KSYN_TRACE_UM_CVWAIT 0x9000070
#define _KSYN_TRACE_UM_CVSIG 0x9000074
#define _KSYN_TRACE_UM_CVBRD 0x9000078
#define _KSYN_TRACE_UM_CDROPWT 0x90000a0
#define _KSYN_TRACE_UM_CVCLRPRE 0x90000a4
#endif
#ifndef BUILDING_VARIANT
/*
 * Initialize a condition-variable attribute object with the default
 * process-sharing mode and mark it live via its signature.
 */
int
pthread_condattr_init(pthread_condattr_t *attr)
{
	attr->sig = _PTHREAD_COND_ATTR_SIG;
	attr->pshared = _PTHREAD_DEFAULT_PSHARED;
	return 0;
}
/*
 * Invalidate a condition-variable attribute object.  There are no
 * resources to release; only the signature is cleared.
 */
int
pthread_condattr_destroy(pthread_condattr_t *attr)
{
	attr->sig = _PTHREAD_NO_SIG;
	return 0;
}
/*
 * Report the process-sharing mode of an attribute object.
 * Returns EINVAL if the object was never initialized.
 */
int
pthread_condattr_getpshared(const pthread_condattr_t *attr,
	int *pshared)
{
	if (attr->sig != _PTHREAD_COND_ATTR_SIG)
		return EINVAL;

	*pshared = (int)attr->pshared;
	return 0;
}
/*
 * Set the process-sharing mode of an attribute object.
 * PTHREAD_PROCESS_SHARED is accepted only in UNIX03-conforming
 * builds; otherwise only PTHREAD_PROCESS_PRIVATE is valid.
 * Returns EINVAL for an uninitialized object or an invalid mode.
 */
int
pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
{
	if (attr->sig != _PTHREAD_COND_ATTR_SIG)
		return EINVAL;

#if __DARWIN_UNIX03
	if (pshared != PTHREAD_PROCESS_PRIVATE && pshared != PTHREAD_PROCESS_SHARED)
		return EINVAL;
#else
	if (pshared != PTHREAD_PROCESS_PRIVATE)
		return EINVAL;
#endif

	attr->pshared = pshared;
	return 0;
}
/*
 * Internal condvar initializer shared by pthread_cond_init and the
 * lazy-initialization paths.  Resets all sequence state, records
 * whether the object is 8-byte aligned (which determines the c_seq[]
 * slot layout — see COND_GETSEQ_ADDR), seeds the signal-sequence word
 * with the "clear" bit, and publishes the signature last.
 */
__private_extern__ int
_pthread_cond_init(pthread_cond_t *ocond,
	const pthread_condattr_t *attr,
	int conforming)
{
	npthread_cond_t *cond = (npthread_cond_t *)ocond;

	/* Fresh state: no associated mutex, all sequence words zero. */
	cond->busy = (npthread_mutex_t *)NULL;
	cond->c_seq[0] = 0;
	cond->c_seq[1] = 0;
	cond->c_seq[2] = 0;
	cond->rfu = 0;

	/* Pick the slot layout so the 64-bit (L,S) CAS target is
	 * 8-byte aligned, and seed S with the clear bit. */
	if (((uintptr_t)cond & 0x07) != 0) {
		cond->misalign = 1;
		cond->c_seq[2] = PTH_RWS_CV_CBIT;
	} else {
		cond->misalign = 0;
		cond->c_seq[1] = PTH_RWS_CV_CBIT;
	}

	/* Only the conforming path honors a caller-supplied pshared. */
	if (conforming && attr != NULL)
		cond->pshared = attr->pshared;
	else
		cond->pshared = _PTHREAD_DEFAULT_PSHARED;

	/* Signature goes in last, after the state above is in place. */
	__pthread_cond_set_signature(cond);
	return 0;
}
/*
 * Destroy a condition variable.  Performs a fast unlocked signature
 * check, then does the real work under the condvar's lock.
 * Returns 0, EBUSY (waiters outstanding) or EINVAL.
 */
int
pthread_cond_destroy(pthread_cond_t *ocond)
{
	npthread_cond_t *cond = (npthread_cond_t *)ocond;
	int rc;

	if ((cond->sig != _PTHREAD_COND_SIG) && (cond->sig != _PTHREAD_COND_SIG_init))
		return EINVAL;

	LOCK(cond->lock);
	rc = _pthread_cond_destroy_locked(ocond);
	UNLOCK(cond->lock);

	return rc;
}
/*
 * Destroy a condvar whose external lock is already held by the caller.
 * Succeeds only when no waiters are outstanding (the L and S sequence
 * counters match); returns EBUSY otherwise, and EINVAL for a bad
 * signature.  A statically-initialized (never-used) condvar is simply
 * de-signed.  The same-value 64-bit CAS is used purely as a
 * consistency check: it verifies the (L,S) snapshot was coherent
 * before the signature is cleared.
 */
static int
_pthread_cond_destroy_locked(pthread_cond_t * ocond)
{
	npthread_cond_t *cond = (npthread_cond_t *)ocond;
	int ret;
	volatile uint32_t * c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint32_t lcntval , ucntval, scntval;
	uint64_t oldval64, newval64;

retry:
	if (cond->sig == _PTHREAD_COND_SIG)
	{
		COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt);
		lcntval = *c_lseqcnt;
		ucntval = *c_useqcnt;
		scntval = *c_sseqcnt;
		/* All waiters signalled: safe to tear down. */
		if ((lcntval & PTHRW_COUNT_MASK) == (scntval & PTHRW_COUNT_MASK)) {
			oldval64 = (((uint64_t)scntval) << 32);
			oldval64 |= lcntval;
			newval64 = oldval64;
			/* Same-value CAS: confirms the snapshot; retry if the
			 * counters moved underneath us. */
			if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
				goto retry;
			cond->sig = _PTHREAD_NO_SIG;
			ret = 0;
		} else
			ret = EBUSY;
	} else if (cond->sig == _PTHREAD_COND_SIG_init) {
		/* Never used past static initialization: nothing to drain. */
		cond->sig = _PTHREAD_NO_SIG;
		ret = 0;
	} else
		ret = EINVAL;
	return (ret);
}
/*
 * pthread_cond_broadcast: wake every thread waiting on the condvar.
 *
 * State lives in three lock-free 32-bit sequence words (see
 * COND_GETSEQ_ADDR): L advances as waiters register, U as wakeups are
 * requested, S as wakeups complete.  The (S,L) pair is read/updated
 * with a 64-bit CAS.  The number of generations to wake is handed to
 * the kernel via __psynch_cvbroad(), and the kernel's reply is folded
 * back into S.
 *
 * Returns 0 (including when there is nothing to wake), EINVAL for an
 * invalid condvar, or EAGAIN if a consistent counter snapshot could
 * not be obtained after many retries.
 *
 * Fix: the trace point that logs diffgen now runs *after* diffgen is
 * computed; previously it read the variable uninitialized (dormant
 * while _KSYN_TRACE_ is 0).
 */
int
pthread_cond_broadcast(pthread_cond_t *ocond)
{
	npthread_cond_t * cond = (npthread_cond_t *)ocond;
	int sig = cond->sig;
	uint32_t flags, updateval;
	uint32_t lcntval , ucntval, scntval;
	uint64_t oldval64, newval64, mugen, cvlsgen, cvudgen, mtid=0;
	int diffgen, error = 0;
	volatile uint32_t * c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint32_t * pmtx = NULL;
	uint32_t nlval, ulval;
	int needclearpre = 0, retry_count = 0;

	if((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
		return(EINVAL);

	/* Statically-initialized condvar: finish initialization under the
	 * lock.  It can have had no waiters yet, so broadcast is a no-op. */
	if (sig != _PTHREAD_COND_SIG)
	{
		LOCK(cond->lock);
		if (cond->sig == _PTHREAD_COND_SIG_init)
		{
			_pthread_cond_init(ocond, NULL, 0);
			UNLOCK(cond->lock);
			return (0);
		} else if (cond->sig != _PTHREAD_COND_SIG) {
			UNLOCK(cond->lock);
			return (EINVAL);
		}
		UNLOCK(cond->lock);
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_START, (uint32_t)cond, 0, 0, 0, 0);
#endif

	COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt);

retry:
	lcntval = *c_lseqcnt;
	ucntval = *c_useqcnt;
	scntval = *c_sseqcnt;
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, (uint32_t)cond, lcntval, ucntval, scntval, 0);
#endif

	/* Nothing to wake: every waiter already signalled (L == S) or
	 * every wakeup already requested (L == U).  The same-value CAS
	 * only validates that the snapshot was coherent. */
	if (((lcntval & PTHRW_COUNT_MASK) == (scntval & PTHRW_COUNT_MASK)) ||
		((lcntval & PTHRW_COUNT_MASK) == (ucntval & PTHRW_COUNT_MASK))) {
		oldval64 = (((uint64_t)scntval) << 32);
		oldval64 |= lcntval;
		newval64 = oldval64;
		if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
			goto retry;
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, (uint32_t)cond, lcntval, ucntval, 0xf1f1f1f1, 0);
		(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_END, (uint32_t)cond, scntval, 0, 0xf1f1f1f1, 0);
#endif
		return(0);
	}

	/* U or S observed ahead of L: another thread is mid-update.
	 * Yield and retry, giving up with EAGAIN after 8192 spins. */
	if (is_seqhigher((ucntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK)) || is_seqhigher((scntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK))) {
		retry_count++;
		if (retry_count > 8192) {
			return(EAGAIN);
		} else {
			sched_yield();
			goto retry;
		}
	}

	/* Base of the not-yet-woken range: whichever of U and S is
	 * further along. */
	if (is_seqlower(ucntval & PTHRW_COUNT_MASK, scntval & PTHRW_COUNT_MASK) != 0) {
		ulval = (scntval & PTHRW_COUNT_MASK);
	} else {
		ulval = (ucntval & PTHRW_COUNT_MASK);
	}
	diffgen = diff_genseq((lcntval & PTHRW_COUNT_MASK), (ulval & PTHRW_COUNT_MASK));

	/* BUGFIX: moved below the diff_genseq() call — this trace used to
	 * log diffgen before it was assigned. */
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, lcntval, ucntval, scntval, diffgen, 0);
#endif

	/* Claim all outstanding waiters by advancing U up to L. */
	ulval = (lcntval & PTHRW_COUNT_MASK);
	if (OSAtomicCompareAndSwap32(ucntval, ulval, (volatile int32_t *)c_useqcnt) != TRUE) {
		goto retry;
	}

	flags = 0;
	if (cond->pshared == PTHREAD_PROCESS_SHARED)
		flags |= _PTHREAD_MTX_OPT_PSHARED;
	pmtx = NULL;
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, (uint32_t)cond, 3, diffgen, flags, 0);
#endif
	nlval = lcntval;
	mugen = 0;

	/* Hand the (S,L) snapshot and (U, wake-count) pair to the kernel. */
	cvlsgen = ((uint64_t)scntval << 32) | nlval;
	cvudgen = ((uint64_t)ucntval << 32) | diffgen;
	updateval = __psynch_cvbroad(ocond, cvlsgen, cvudgen, flags, (pthread_mutex_t *)pmtx, mugen, mtid);

	if (updateval != (uint32_t)-1) {
		if (updateval != 0) {
			/* Fold the kernel's S-advance (count plus flag bits)
			 * back into our S word. */
retry2:
			needclearpre = 0;
			lcntval = *c_lseqcnt;
			ucntval = *c_useqcnt;
			scntval = *c_sseqcnt;
			nlval = (scntval & PTHRW_COUNT_MASK) + (updateval & PTHRW_COUNT_MASK);
			nlval |= ((scntval & PTH_RWS_CV_BITSALL) | (updateval & PTH_RWS_CV_BITSALL));
#if _KSYN_TRACE_
			(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, 0x25, lcntval, scntval, updateval, 0);
#endif
			/* Condvar drains (S reaches L) with both C and P bits set:
			 * drop the P bit here and clear the kernel prepost below. */
			if (((nlval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK))
				&& ((nlval & PTH_RWS_CV_BITSALL) == PTH_RWS_CV_BITSALL)) {
				nlval &= PTH_RWS_CV_RESET_PBIT;
				needclearpre = 1;
			}

			oldval64 = (((uint64_t)scntval) << 32);
			oldval64 |= lcntval;
			newval64 = (((uint64_t)nlval) << 32);
			newval64 |= lcntval;
#if _KSYN_TRACE_
			(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, 0x25, nlval, scntval, updateval, 0);
#endif
			if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
				goto retry2;

			/* Fully drained: drop the mutex association. */
			if ((nlval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK)) {
				cond->busy = (npthread_mutex_t *)NULL;
			}
			if (needclearpre != 0) {
				(void)__psynch_cvclrprepost(ocond, lcntval, ucntval, nlval, 0, lcntval, flags);
			}
		}
	}

	error = 0;
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_END, (uint32_t)cond, 0, error, 0, 0);
#endif
	return(error);
}
/*
 * pthread_cond_signal_thread_np: wake one waiter on the condvar.
 * When `thread` is non-NULL the kernel is asked to direct the wakeup
 * at that specific thread; when NULL, the next waiter in sequence is
 * woken and the U (wakeups-requested) counter is advanced here.
 *
 * Returns 0 (including when there is nothing to wake), EINVAL for an
 * invalid condvar, or EAGAIN after too many snapshot retries.
 *
 * Fix: the trace point that logs nlval now runs *after* nlval is
 * assigned; previously it read the variable uninitialized (dormant
 * while _KSYN_TRACE_ is 0).
 */
int
pthread_cond_signal_thread_np(pthread_cond_t *ocond, pthread_t thread)
{
	npthread_cond_t * cond = (npthread_cond_t *)ocond;
	int sig = cond->sig;
	uint32_t flags, updateval;
	uint32_t lcntval , ucntval, scntval;
	uint32_t nlval, ulval=0;
	volatile uint32_t * c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint64_t oldval64, newval64, mugen, cvlsgen, mtid = 0;
	int needclearpre = 0, retry_count = 0;
	int error;

	if((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
		return(EINVAL);

	/* Statically-initialized condvar: finish initialization under the
	 * lock.  It can have had no waiters yet, so the signal is a no-op. */
	if (cond->sig != _PTHREAD_COND_SIG) {
		LOCK(cond->lock);
		if (cond->sig != _PTHREAD_COND_SIG) {
			if (cond->sig == _PTHREAD_COND_SIG_init) {
				_pthread_cond_init(ocond, NULL, 0);
				UNLOCK(cond->lock);
				return(0);
			} else {
				UNLOCK(cond->lock);
				return(EINVAL);
			}
		}
		UNLOCK(cond->lock);
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_START, (uint32_t)cond, 0, 0, 0, 0);
#endif

	COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt);

retry:
	lcntval = *c_lseqcnt;
	ucntval = *c_useqcnt;
	scntval = *c_sseqcnt;
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, (uint32_t)cond, lcntval, ucntval, scntval, 0);
#endif

	/* Nothing to wake: every waiter signalled (L == S), or — for an
	 * undirected signal — every wakeup already requested (L == U).
	 * The same-value CAS only validates the snapshot. */
	if (((lcntval & PTHRW_COUNT_MASK) == (scntval & PTHRW_COUNT_MASK)) ||
		((thread == 0) && ((lcntval & PTHRW_COUNT_MASK) == (ucntval & PTHRW_COUNT_MASK)))) {
		oldval64 = (((uint64_t)scntval) << 32);
		oldval64 |= lcntval;
		newval64 = oldval64;
		if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
			goto retry;
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, (uint32_t)cond, lcntval, ucntval, 0xf1f1f1f1, 0);
		(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_END, (uint32_t)cond, scntval, 0, 0xf1f1f1f1, 0);
#endif
		return(0);
	}

	/* Counters observed ahead of L: another thread is mid-update.
	 * Yield and retry, giving up with EAGAIN after 8192 spins. */
	if (((thread == 0) && (is_seqhigher((ucntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK)))) || is_seqhigher((scntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK))) {
		retry_count++;
		if (retry_count > 8192) {
			return(EAGAIN);
		} else {
			sched_yield();
			goto retry;
		}
	}

	if (thread == 0) {
		/* Undirected signal: advance U one generation past the
		 * further of U and S to claim one waiter. */
		if (is_seqlower(ucntval & PTHRW_COUNT_MASK, scntval & PTHRW_COUNT_MASK) != 0) {
			ulval = (scntval & PTHRW_COUNT_MASK) + PTHRW_INC;
		} else {
			ulval = (ucntval & PTHRW_COUNT_MASK) + PTHRW_INC;
		}
		if (OSAtomicCompareAndSwap32(ucntval, ulval, (volatile int32_t *)c_useqcnt) != TRUE) {
			goto retry;
		}
	}

	flags = 0;
	if (cond->pshared == PTHREAD_PROCESS_SHARED)
		flags |= _PTHREAD_MTX_OPT_PSHARED;

	nlval = lcntval;
	mugen = 0;

	/* BUGFIX: moved below the nlval assignment — this trace used to
	 * log nlval before it was set. */
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, (uint32_t)cond, 3, nlval, ulval, 0);
#endif

	/* Ask the kernel to perform the wakeup (optionally targeted). */
	cvlsgen = ((uint64_t)scntval << 32) | nlval;
	updateval = __psynch_cvsignal(ocond, cvlsgen, ucntval, pthread_mach_thread_np(thread), (pthread_mutex_t *)0, mugen, mtid, flags);

	if (updateval != (uint32_t)-1) {
		if (updateval != 0) {
			/* Fold the kernel's S-advance (count plus flag bits)
			 * back into our S word. */
retry2:
			lcntval = *c_lseqcnt;
			ucntval = *c_useqcnt;
			scntval = *c_sseqcnt;
			nlval = (scntval & PTHRW_COUNT_MASK) + (updateval & PTHRW_COUNT_MASK);
			nlval |= ((scntval & PTH_RWS_CV_BITSALL) | (updateval & PTH_RWS_CV_BITSALL));
#if _KSYN_TRACE_
			(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, 0x25, 0, 0, updateval, 0);
#endif
			/* Condvar drains with both C and P bits set: drop the
			 * P bit here and clear the kernel prepost below. */
			if (((nlval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK))
				&& ((nlval & PTH_RWS_CV_BITSALL) == PTH_RWS_CV_BITSALL)) {
				nlval &= PTH_RWS_CV_RESET_PBIT;
				needclearpre = 1;
			} else
				needclearpre = 0;

			oldval64 = (((uint64_t)scntval) << 32);
			oldval64 |= lcntval;
			newval64 = (((uint64_t)nlval) << 32);
			newval64 |= lcntval;
#if _KSYN_TRACE_
			(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, 0x25, nlval, ulval, updateval, 0);
#endif
			if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
				goto retry2;

			/* Fully drained: drop the mutex association. */
			if ((nlval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK)) {
				cond->busy = (npthread_mutex_t *)NULL;
			}
			if (needclearpre != 0) {
				(void)__psynch_cvclrprepost(ocond, lcntval, ucntval, nlval, 0, lcntval, flags);
			}
		}
	}

	error = 0;
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_END, (uint32_t)cond, 0, 0, 0, 0);
#endif
	return (error);
}
/*
 * Wake a single waiter: an undirected signal (no target thread).
 */
int
pthread_cond_signal(pthread_cond_t *cond)
{
	return pthread_cond_signal_thread_np(cond, (pthread_t)NULL);
}
/*
 * _pthread_cond_wait: shared implementation behind pthread_cond_wait,
 * pthread_cond_timedwait and the relative-timeout / non-conforming
 * variants.
 *
 * ocond / omutex: the condvar and the mutex the caller holds.
 * abstime:       timeout (NULL = wait forever).
 * isRelative:    nonzero when abstime is a relative interval rather
 *                than an absolute deadline.
 * isconforming:  UNIX03-conforming behavior (mutex validation and
 *                cancellation); > 0 additionally tests for a pending
 *                cancel before blocking.
 *
 * Returns 0, ETIMEDOUT, or EINVAL.  The mutex is reacquired before
 * returning, per POSIX.
 */
__private_extern__ int
_pthread_cond_wait(pthread_cond_t *ocond,
	pthread_mutex_t *omutex,
	const struct timespec *abstime,
	int isRelative,
	int isconforming)
{
	int retval;
	npthread_cond_t * cond = (npthread_cond_t *)ocond;
	npthread_mutex_t * mutex = (npthread_mutex_t * )omutex;
	mach_timespec_t then = {0,0};	/* relative timeout handed to the kernel */
	struct timespec cthen = {0,0};	/* conforming copy of the absolute deadline */
	int sig = cond->sig;
	int msig = mutex->sig;
	npthread_mutex_t * pmtx;
	uint32_t mtxgen, mtxugen, flags=0, updateval;
	uint32_t lcntval , ucntval, scntval;
	uint32_t nlval, ulval, savebits;
	volatile uint32_t * c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint64_t oldval64, newval64, mugen, cvlsgen;
	uint32_t * npmtx = NULL;
	int error, local_error;
	extern void _pthread_testcancel(pthread_t thread, int isconforming);

	if((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
		return(EINVAL);

	if (isconforming) {
		/* Conforming mode also validates the mutex signature. */
		if((msig != _PTHREAD_MUTEX_SIG) && ((msig & _PTHREAD_MUTEX_SIG_init_MASK) != _PTHREAD_MUTEX_SIG_CMP))
			return(EINVAL);
		if (isconforming > 0)
			_pthread_testcancel(pthread_self(), 1);
	}

	/* Lazily finish a statically-initialized condvar under its lock. */
	if (cond->sig != _PTHREAD_COND_SIG)
	{
		LOCK(cond->lock);
		if (cond->sig != _PTHREAD_COND_SIG) {
			if (cond->sig == _PTHREAD_COND_SIG_init) {
				_pthread_cond_init(ocond, NULL, 0);
			} else {
				UNLOCK(cond->lock);
				return(EINVAL);
			}
		}
		UNLOCK(cond->lock);
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_START, (uint32_t)cond, isRelative, 0, (uint32_t)abstime, 0);
#endif
	COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt);

	if (abstime) {
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_START, 0x11111111, abstime->tv_nsec, abstime->tv_sec, 0, 0);
#endif
		if (isRelative == 0) {
			/* Convert the absolute deadline into an interval from now. */
			struct timespec now;
			struct timeval tv;
			gettimeofday(&tv, NULL);
			TIMEVAL_TO_TIMESPEC(&tv, &now);
			then.tv_nsec = abstime->tv_nsec - now.tv_nsec;
			then.tv_sec = abstime->tv_sec - now.tv_sec;
			/* Borrow from seconds on nanosecond underflow. */
			if (then.tv_nsec < 0)
			{
				then.tv_nsec += NSEC_PER_SEC;
				then.tv_sec--;
			}
			/* Deadline already passed.  NOTE(review): the (int) cast
			 * suggests mach_timespec_t's tv_sec is unsigned here —
			 * confirm against the header. */
			if (((int)then.tv_sec < 0) ||
				((then.tv_sec == 0) && (then.tv_nsec == 0)))
			{
				return ETIMEDOUT;
			}
			if (isconforming != 0) {
				/* Conforming mode validates the original absolute
				 * timestamp as well. */
				cthen.tv_sec = abstime->tv_sec;
				cthen.tv_nsec = abstime->tv_nsec;
				if ((cthen.tv_sec < 0) || (cthen.tv_nsec < 0)) {
					return EINVAL;
				}
				if (cthen.tv_nsec >= NSEC_PER_SEC) {
					return EINVAL;
				}
			}
		} else {
			/* Timeout is already a relative interval. */
			then.tv_sec = abstime->tv_sec;
			then.tv_nsec = abstime->tv_nsec;
			if ((then.tv_sec == 0) && (then.tv_nsec == 0)) {
				return ETIMEDOUT;
			}
		}
		if(isconforming && ((then.tv_sec < 0) || (then.tv_nsec < 0))) {
			return EINVAL;
		}
		if (then.tv_nsec >= NSEC_PER_SEC) {
			return EINVAL;
		}
	}

	/* A condvar may be bound to at most one mutex at a time. */
	if ((cond->busy != (npthread_mutex_t *)NULL) && (cond->busy != mutex))
		return (EINVAL);

	pmtx = mutex;
retry:
	lcntval = *c_lseqcnt;
	ucntval = *c_useqcnt;
	scntval = *c_sseqcnt;

	oldval64 = (((uint64_t)scntval) << 32);
	oldval64 |= lcntval;

	/* Register as a waiter: advance L by one generation; preserve S's
	 * counter but strip its flag bits (they are re-sent to the kernel
	 * via savebits). */
	savebits = scntval & PTH_RWS_CV_BITSALL;
	ulval = (scntval & PTHRW_COUNT_MASK);
	nlval = lcntval + PTHRW_INC;
	newval64 = (((uint64_t)ulval) << 32);
	newval64 |= nlval;
	if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
		goto retry;

	cond->busy = mutex;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, lcntval, ucntval, scntval, 0);
#endif

	/* Release the mutex; if a waiting owner must be notified, its
	 * generation info is forwarded to the kernel with the wait.
	 * NOTE(review): on failure we return while still registered as a
	 * waiter — confirm whether the L advance should be undone. */
	retval = __mtx_droplock(pmtx, PTHRW_INC, &flags, &npmtx, &mtxgen, &mtxugen);
	if (retval != 0)
		return(EINVAL);
	if ((flags & _PTHREAD_MTX_OPT_NOTIFY) == 0) {
		npmtx = NULL;
		mugen = 0;
	} else
		mugen = ((uint64_t)mtxugen << 32) | mtxgen;
	flags &= ~_PTHREAD_MTX_OPT_MUTEX;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, 3, (uint32_t)mutex, flags, 0);
#endif

	/* (S-with-saved-bits, new L) pair sent to the kernel.
	 * NOTE(review): cthen is validated above but `then` (the relative
	 * form) is what is passed in both branches — confirm this matches
	 * the __psynch_cvwait ABI. */
	cvlsgen = ((uint64_t)(ulval | savebits)<< 32) | nlval;

	if (isconforming) {
		/* Conforming waits are cancellation points: the cleanup
		 * handler retires our registration if we are cancelled. */
		pthread_cleanup_push(cond_cleanup, (void *)cond);
		updateval = __psynch_cvwait(ocond, cvlsgen, ucntval, (pthread_mutex_t *)npmtx, mugen, flags, (int64_t)then.tv_sec, (int32_t)then.tv_nsec);
		pthread_cleanup_pop(0);
	} else {
		updateval = __psynch_cvwait(ocond, cvlsgen, ucntval, (pthread_mutex_t *)npmtx, mugen, flags, (int64_t)then.tv_sec, (int32_t)then.tv_nsec);
	}

	retval = 0;

	if (updateval == (uint32_t)-1) {
		/* Kernel error: low byte is the errno; higher bits may carry
		 * ECVCERORR/ECVPERORR flags consumed by cond_dropwait. */
		local_error = errno;
		error = local_error & 0xff;
		if (error == ETIMEDOUT) {
			retval = ETIMEDOUT;
		} else if (error == EINTR) {
			/* Treated as a spurious wakeup: return success. */
			retval = 0;
		} else
			retval = EINVAL;

		/* Retire our waiter registration by hand. */
		cond_dropwait(cond, local_error, 0);
	} else {
		/* Woken normally; fold any kernel-supplied S update in. */
		if (updateval != 0) {
			cond_dropwait(cond, 0, updateval);
		}
		retval = 0;
	}
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, 4, retval, 0, 0);
#endif
	/* Reacquire the caller's mutex before returning, per POSIX. */
	pthread_mutex_lock(omutex);

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_END, (uint32_t)cond, 0, 0, retval, 0);
#endif
	return(retval);
}
/*
 * Publish the condvar as fully initialized by storing the live
 * signature.  Kept as a separate helper so the initialization order
 * (state first, signature last) is explicit at the call site in
 * _pthread_cond_init.
 */
static void
__pthread_cond_set_signature(npthread_cond_t * cond)
{
	cond->sig = _PTHREAD_COND_SIG;
}
/*
 * Cancellation cleanup handler installed around __psynch_cvwait by
 * _pthread_cond_wait.  If this thread was actually cancelled while
 * waiting, retire its waiter registration and reacquire the mutex the
 * condvar was bound to, so cancellation handlers run with it held.
 */
static void
cond_cleanup(void *arg)
{
	npthread_cond_t *cond = (npthread_cond_t *)arg;
	pthread_t self = pthread_self();
	pthread_mutex_t *held;
	int was_canceled;

	LOCK(self->lock);
	was_canceled = (self->detached & _PTHREAD_WASCANCEL);
	UNLOCK(self->lock);

	/* Cleanup may also run on normal unwind paths; only act on a
	 * genuine cancellation. */
	if (!was_canceled)
		return;

	/* Snapshot the bound mutex before dropwait may clear cond->busy. */
	held = (pthread_mutex_t *)cond->busy;
	cond_dropwait(cond, self->cancel_error, 0);
	if (held != NULL)
		(void)pthread_mutex_lock(held);
}
/*
 * Flag bits OR'd into the error value passed to cond_dropwait: they
 * request that the C (clear) / P (prepost) status bits be set on the
 * signal-sequence word while this waiter is retired.
 */
#define ECVCERORR 256
#define ECVPERORR 512

/*
 * cond_dropwait: retire this thread's waiter registration after
 * __psynch_cvwait returned with an error or a cancellation, or fold a
 * kernel-supplied update value into the S word.  When the condvar
 * drains with both C and P bits set, the kernel-side prepost is
 * cleared via __psynch_cvclrprepost.
 *
 * Fix: `static` added to match the forward declaration at the top of
 * the file; the definition previously relied on C's linkage
 * inheritance from the earlier static prototype.
 */
static void
cond_dropwait(npthread_cond_t * cond, int error, uint32_t updateval)
{
	int sig = cond->sig;
	pthread_cond_t * ocond = (pthread_cond_t *)cond;
	int needclearpre = 0;
	uint32_t diffgen, nlval, ulval, flags;
	uint32_t lcntval , ucntval, scntval, lval;
	volatile uint32_t * c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint64_t oldval64, newval64;

	/* Only fully-initialized condvars carry sequence state. */
	if (sig != _PTHREAD_COND_SIG)
		return;

	COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt);

	if (error != 0) {
		/* Count this waiter out (one generation) and propagate any
		 * requested C/P bits from the error value. */
		lval = PTHRW_INC;
		if ((error & ECVCERORR) != 0)
			lval |= PTH_RWS_CV_CBIT;
		if ((error & ECVPERORR) != 0)
			lval |= PTH_RWS_CV_PBIT;
	} else {
		/* Kernel already described the S advance to apply. */
		lval = updateval;
	}
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_START, (uint32_t)cond, error, updateval, 0xee, 0);
#endif
retry:
	lcntval = *c_lseqcnt;
	ucntval = *c_useqcnt;
	scntval = *c_sseqcnt;

	diffgen = diff_genseq((lcntval & PTHRW_COUNT_MASK), (scntval & PTHRW_COUNT_MASK));
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_NONE, (uint32_t)cond, lcntval, scntval, diffgen, 0);
#endif
	/* NOTE(review): diffgen is unsigned, so "<= 0" behaves as "== 0"
	 * here — confirm diff_genseq cannot meaningfully go negative. */
	if (diffgen <= 0) {
		/* Nothing outstanding; the same-value CAS just validates the
		 * (L,S) snapshot was coherent. */
		oldval64 = (((uint64_t)scntval) << 32);
		oldval64 |= lcntval;
		newval64 = oldval64;
		if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
			goto retry;
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_END, (uint32_t)cond, 0, 0, 0, 0);
#endif
		return;
	}

	oldval64 = (((uint64_t)scntval) << 32);
	oldval64 |= lcntval;

	/* Advance S by lval's generation count and merge its flag bits. */
	ulval = (scntval & PTHRW_COUNT_MASK) + (lval & PTHRW_COUNT_MASK);
	ulval |= ((scntval & PTH_RWS_CV_BITSALL) | (lval & PTH_RWS_CV_BITSALL));
	nlval = lcntval;

	/* Condvar drains (S reaches L) with both C and P bits set: drop
	 * the P bit and clear the kernel-side prepost afterwards. */
	needclearpre = 0;
	if ((nlval & PTHRW_COUNT_MASK) == (ulval & PTHRW_COUNT_MASK)) {
		if ((ulval & PTH_RWS_CV_BITSALL) == PTH_RWS_CV_BITSALL) {
			needclearpre = 1;
			ulval &= PTH_RWS_CV_RESET_PBIT;
		}
	}

	newval64 = (((uint64_t)ulval) << 32);
	newval64 |= nlval;
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_NONE, (uint32_t)cond, 0xffff, nlval, ulval, 0);
#endif
	if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
		goto retry;
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_NONE, (uint32_t)cond, 2, 0, 0xee, 0);
#endif
	/* Fully drained: drop the mutex association. */
	if ((nlval & PTHRW_COUNT_MASK) == (ulval & PTHRW_COUNT_MASK)) {
		cond->busy = NULL;
	}
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_NONE, nlval, ucntval, ulval, PTHRW_INC, 0);
#endif
	if (needclearpre != 0) {
		flags = 0;
		if (cond->pshared == PTHREAD_PROCESS_SHARED)
			flags |= _PTHREAD_MTX_OPT_PSHARED;
		(void)__psynch_cvclrprepost(ocond, nlval, ucntval, ulval, 0, nlval, flags);
	}
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_END, nlval, ucntval, ulval, PTHRW_INC, 0);
#endif
	return;
}
/*
 * Non-portable variant: wait with a relative timeout, using the
 * non-conforming (legacy) wait semantics.
 */
int
pthread_cond_timedwait_relative_np(pthread_cond_t *cond,
	pthread_mutex_t *mutex,
	const struct timespec *abstime)
{
	/* isRelative = 1, isconforming = 0 */
	return _pthread_cond_wait(cond, mutex, abstime, 1, 0);
}
#else
extern int _pthread_cond_wait(pthread_cond_t *cond,
pthread_mutex_t *mutex,
const struct timespec *abstime,
int isRelative,
int isconforming);
#endif
/*
 * Public condvar initializer.  Initializes the external lock and
 * defers to the internal initializer; conforming behavior is chosen
 * at compile time from __DARWIN_UNIX03.
 */
int
pthread_cond_init(pthread_cond_t *cond,
	const pthread_condattr_t *attr)
{
#if __DARWIN_UNIX03
	const int conforming = 1;
#else
	const int conforming = 0;
#endif
	LOCK_INIT(cond->lock);
	return _pthread_cond_init(cond, attr, conforming);
}