#ifndef K5_THREAD_H
#define K5_THREAD_H

#include "autoconf.h"
/* KRB5_CALLCONV is only meaningful on platforms with calling-convention
   annotations (Windows); default both forms to empty elsewhere. */
#ifndef KRB5_CALLCONV
# define KRB5_CALLCONV
#endif
#ifndef KRB5_CALLCONV_C
# define KRB5_CALLCONV_C
#endif

/* Debugging knobs.  All four must be OFF in committed code; flip one to
   #define locally while debugging only.
   DEBUG_THREADS       adds state fields and assertions to the mutex types
                       (changes the size/layout of k5_mutex_t, so everything
                       using it must be rebuilt together).
   DEBUG_THREADS_LOC   records __FILE__/__LINE__ of the last operation in
                       every mutex.
   DEBUG_THREADS_SLOW  inserts sched_yield() calls to widen race windows.
   DEBUG_THREADS_STATS collects lock-wait/lock-held timing statistics.
   NOTE(review): the first two were left #define'd here while the other two
   follow the #undef convention — restored to the all-off default. */
#undef DEBUG_THREADS
#undef DEBUG_THREADS_LOC
#undef DEBUG_THREADS_SLOW
#undef DEBUG_THREADS_STATS

#include <assert.h>
/* Source-location tracking.  When DEBUG_THREADS_LOC is defined, a
   k5_debug_loc holds the file and line of the most recent operation on a
   mutex; otherwise it collapses to a one-byte placeholder so the struct
   slots still exist but cost (almost) nothing. */
#ifdef DEBUG_THREADS_LOC
typedef struct {
const char *filename;
int lineno;
} k5_debug_loc;
#define K5_DEBUG_LOC_INIT { __FILE__, __LINE__ }
#if __GNUC__ >= 2
/* GCC: build the struct value in place with a compound literal. */
#define K5_DEBUG_LOC (__extension__ (k5_debug_loc)K5_DEBUG_LOC_INIT)
#else
/* Portable fallback: construct the location value via a helper. */
static inline k5_debug_loc k5_debug_make_loc(const char *file, int line)
{
k5_debug_loc l;
l.filename = file;
l.lineno = line;
return l;
}
#define K5_DEBUG_LOC (k5_debug_make_loc(__FILE__,__LINE__))
#endif
#else
/* Tracking disabled: keep the type but make it trivial. */
typedef char k5_debug_loc;
#define K5_DEBUG_LOC_INIT 0
#define K5_DEBUG_LOC 0
#endif
/* Record the current file/line into location slot L. */
#define k5_debug_update_loc(L) ((L) = K5_DEBUG_LOC)
#ifdef DEBUG_THREADS_STATS
#if HAVE_TIME_H && (!defined(HAVE_SYS_TIME_H) || defined(TIME_WITH_SYS_TIME))
# include <time.h>
#endif
#if HAVE_SYS_TIME_H
# include <sys/time.h>
#endif
#ifdef HAVE_STDINT_H
# include <stdint.h>
#endif
#include <string.h>
#include <inttypes.h>
typedef uint64_t k5_debug_timediff_t;
typedef struct timeval k5_debug_time_t;
static inline k5_debug_timediff_t
timediff(k5_debug_time_t t2, k5_debug_time_t t1)
{
return (t2.tv_sec - t1.tv_sec) * 1000000 + (t2.tv_usec - t1.tv_usec);
}
static inline k5_debug_time_t get_current_time(void)
{
struct timeval tv;
if (gettimeofday(&tv,0) < 0) { tv.tv_sec = tv.tv_usec = 0; }
return tv;
}
/* Accumulators for one measured quantity: min, max, sum, sum of squares
   (enough to derive mean and variance offline). */
struct k5_timediff_stats {
k5_debug_timediff_t valmin, valmax, valsum, valsqsum;
};
/* Per-mutex statistics kept when DEBUG_THREADS_STATS is defined. */
typedef struct {
int count;
k5_debug_time_t time_acquired, time_created;
struct k5_timediff_stats lockwait, lockheld;
} k5_debug_mutex_stats;
/* Zero the stats and stamp the creation time; the trailing 0 makes the
   whole expression "succeed" like the other init operations. */
#define k5_mutex_init_stats(S) \
(memset((S), 0, sizeof(k5_debug_mutex_stats)), \
(S)->time_created = get_current_time(), \
0)
#define k5_mutex_finish_init_stats(S) (0)
#define K5_MUTEX_STATS_INIT { 0, {0}, {0}, {0}, {0} }
/* A timestamp captured just before attempting a lock, later folded into
   the lockwait statistics. */
typedef k5_debug_time_t k5_mutex_stats_tmp;
#define k5_mutex_stats_start() get_current_time()
void KRB5_CALLCONV krb5int_mutex_lock_update_stats(k5_debug_mutex_stats *m,
k5_mutex_stats_tmp start);
void KRB5_CALLCONV krb5int_mutex_unlock_update_stats(k5_debug_mutex_stats *m);
#define k5_mutex_lock_update_stats krb5int_mutex_lock_update_stats
#define k5_mutex_unlock_update_stats krb5int_mutex_unlock_update_stats
void KRB5_CALLCONV krb5int_mutex_report_stats();
#else
/* Statistics disabled: shrink the stats member to one byte and make the
   bookkeeping operations (nearly) free.  The 's' marker gives the byte a
   recognizable value in a debugger. */
typedef char k5_debug_mutex_stats;
#define k5_mutex_init_stats(S) (*(S) = 's', 0)
#define k5_mutex_finish_init_stats(S) (0)
#define K5_MUTEX_STATS_INIT 's'
typedef int k5_mutex_stats_tmp;
#define k5_mutex_stats_start() (0)
#ifdef __GNUC__
/* Use an empty inline function under gcc so the unused "start" value
   doesn't provoke warnings at call sites. */
static inline void
k5_mutex_lock_update_stats(k5_debug_mutex_stats *m, k5_mutex_stats_tmp t)
{
}
#else
# define k5_mutex_lock_update_stats(M,S) (S)
#endif
#define k5_mutex_unlock_update_stats(M) (*(M) = 's')
/* Declarations kept for type checking; the report-stats macro below
   replaces any call so no function need exist when stats are off. */
void KRB5_CALLCONV krb5int_mutex_lock_update_stats(k5_debug_mutex_stats *m,
k5_mutex_stats_tmp start);
void KRB5_CALLCONV krb5int_mutex_unlock_update_stats(k5_debug_mutex_stats *m);
void KRB5_CALLCONV krb5int_mutex_report_stats();
#define krb5int_mutex_report_stats(M) ((M)->stats = 'd')
#endif
#ifdef DEBUG_THREADS
enum k5_mutex_init_states {
K5_MUTEX_DEBUG_PARTLY_INITIALIZED = 0x12,
K5_MUTEX_DEBUG_INITIALIZED,
K5_MUTEX_DEBUG_DESTROYED
};
enum k5_mutex_flag_states {
K5_MUTEX_DEBUG_UNLOCKED = 0x23,
K5_MUTEX_DEBUG_LOCKED
};
typedef struct {
enum k5_mutex_init_states initialized;
enum k5_mutex_flag_states locked;
} k5_os_nothread_mutex;
# define K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER \
{ K5_MUTEX_DEBUG_PARTLY_INITIALIZED, K5_MUTEX_DEBUG_UNLOCKED }
# define k5_os_nothread_mutex_finish_init(M) \
(assert((M)->initialized != K5_MUTEX_DEBUG_INITIALIZED), \
assert((M)->initialized == K5_MUTEX_DEBUG_PARTLY_INITIALIZED), \
assert((M)->locked == K5_MUTEX_DEBUG_UNLOCKED), \
(M)->initialized = K5_MUTEX_DEBUG_INITIALIZED, 0)
# define k5_os_nothread_mutex_init(M) \
((M)->initialized = K5_MUTEX_DEBUG_INITIALIZED, \
(M)->locked = K5_MUTEX_DEBUG_UNLOCKED, 0)
# define k5_os_nothread_mutex_destroy(M) \
(assert((M)->initialized == K5_MUTEX_DEBUG_INITIALIZED), \
(M)->initialized = K5_MUTEX_DEBUG_DESTROYED, 0)
# define k5_os_nothread_mutex_lock(M) \
(k5_os_nothread_mutex_assert_unlocked(M), \
(M)->locked = K5_MUTEX_DEBUG_LOCKED, 0)
# define k5_os_nothread_mutex_unlock(M) \
(k5_os_nothread_mutex_assert_locked(M), \
(M)->locked = K5_MUTEX_DEBUG_UNLOCKED, 0)
# define k5_os_nothread_mutex_assert_locked(M) \
(assert((M)->initialized == K5_MUTEX_DEBUG_INITIALIZED), \
assert((M)->locked != K5_MUTEX_DEBUG_UNLOCKED), \
assert((M)->locked == K5_MUTEX_DEBUG_LOCKED))
# define k5_os_nothread_mutex_assert_unlocked(M) \
(assert((M)->initialized == K5_MUTEX_DEBUG_INITIALIZED), \
assert((M)->locked != K5_MUTEX_DEBUG_LOCKED), \
assert((M)->locked == K5_MUTEX_DEBUG_UNLOCKED))
#else
typedef char k5_os_nothread_mutex;
# define K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER 0
static inline int k5_os_nothread_mutex_finish_init(k5_os_nothread_mutex *m) {
return 0;
}
static inline int k5_os_nothread_mutex_init(k5_os_nothread_mutex *m) {
return 0;
}
static inline int k5_os_nothread_mutex_destroy(k5_os_nothread_mutex *m) {
return 0;
}
static inline int k5_os_nothread_mutex_lock(k5_os_nothread_mutex *m) {
return 0;
}
static inline int k5_os_nothread_mutex_unlock(k5_os_nothread_mutex *m) {
return 0;
}
# define k5_os_nothread_mutex_assert_locked(M) ((void)0)
# define k5_os_nothread_mutex_assert_unlocked(M) ((void)0)
#endif
/* One-time initialization for the no-threads case.  Flag values:
   2 = not yet run, 4 = currently running (re-entry is an error),
   3 = completed.  The final assert arm catches corrupted flags. */
typedef unsigned char k5_os_nothread_once_t;
# define K5_OS_NOTHREAD_ONCE_INIT 2
# define k5_os_nothread_once(O,F) \
(*(O) == 3 ? 0 \
: *(O) == 2 ? (*(O) = 4, (F)(), *(O) = 3, 0) \
: (assert(*(O) != 4), assert(*(O) == 2 || *(O) == 3), 0))
#ifndef ENABLE_THREADS
/* Threads disabled at configure time: map every OS-mutex operation
   straight onto the no-threads versions above. */
typedef k5_os_nothread_mutex k5_os_mutex;
# define K5_OS_MUTEX_PARTIAL_INITIALIZER \
K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER
# define k5_os_mutex_finish_init k5_os_nothread_mutex_finish_init
# define k5_os_mutex_init k5_os_nothread_mutex_init
# define k5_os_mutex_destroy k5_os_nothread_mutex_destroy
# define k5_os_mutex_lock k5_os_nothread_mutex_lock
# define k5_os_mutex_unlock k5_os_nothread_mutex_unlock
# define k5_os_mutex_assert_locked k5_os_nothread_mutex_assert_locked
# define k5_os_mutex_assert_unlocked k5_os_nothread_mutex_assert_unlocked
# define k5_once_t k5_os_nothread_once_t
# define K5_ONCE_INIT K5_OS_NOTHREAD_ONCE_INIT
# define k5_once k5_os_nothread_once
#elif HAVE_PTHREAD
# include <pthread.h>
/* With weak references, the library can run whether or not libpthread is
   actually loaded: K5_PTHREADS_LOADED tests at run time and falls back to
   the no-threads implementations when pthreads is absent. */
#ifdef HAVE_PRAGMA_WEAK_REF
# pragma weak pthread_once
# pragma weak pthread_mutex_lock
# pragma weak pthread_mutex_unlock
# pragma weak pthread_mutex_destroy
# pragma weak pthread_mutex_init
# pragma weak pthread_self
# pragma weak pthread_equal
extern int krb5int_pthread_loaded(void);
# define K5_PTHREADS_LOADED (krb5int_pthread_loaded())
#else
# define K5_PTHREADS_LOADED (1)
#endif
/* IRIX/SGI: this port is known to require weak-reference support;
   fail the build loudly rather than produce a broken library. */
#if defined(__mips) && defined(__sgi) && (defined(_SYSTYPE_SVR4) || defined(__SYSTYPE_SVR4__))
# ifndef HAVE_PRAGMA_WEAK_REF
# if defined(__GNUC__) && __GNUC__ < 3
# error "Please update to a newer gcc with weak symbol support, or switch to native cc, reconfigure and recompile."
# else
# error "Weak reference support is required"
# endif
# endif
#endif
#ifdef HAVE_PRAGMA_WEAK_REF
# define USE_PTHREAD_LOCK_ONLY_IF_LOADED
#endif
#ifdef HAVE_PRAGMA_WEAK_REF
/* Carry both once-flags so either implementation can be used at run time
   depending on whether libpthread is loaded. */
typedef struct {
pthread_once_t o;
k5_os_nothread_once_t n;
} k5_once_t;
# define K5_ONCE_INIT { PTHREAD_ONCE_INIT, K5_OS_NOTHREAD_ONCE_INIT }
# define k5_once(O,F) (K5_PTHREADS_LOADED \
? pthread_once(&(O)->o,F) \
: k5_os_nothread_once(&(O)->n,F))
#else
typedef pthread_once_t k5_once_t;
# define K5_ONCE_INIT PTHREAD_ONCE_INIT
# define k5_once pthread_once
#endif
/* Pthread-backed OS mutex.  The extra fields exist only when the
   corresponding debug options are compiled in: "owner" records the
   locking thread for assertion checks, and "n" is the no-threads
   fallback used when libpthread may not be loaded. */
typedef struct {
pthread_mutex_t p;
#ifdef DEBUG_THREADS
pthread_t owner;
#endif
#ifdef USE_PTHREAD_LOCK_ONLY_IF_LOADED
k5_os_nothread_mutex n;
#endif
} k5_os_mutex;
#ifdef DEBUG_THREADS
# ifdef __GNUC__
/* Lock and, on success, record the owning thread.  The statement
   expression evaluates M exactly once. */
# define k5_pthread_mutex_lock(M) \
({ \
k5_os_mutex *_m2 = (M); \
int _r2 = pthread_mutex_lock(&_m2->p); \
if (_r2 == 0) _m2->owner = pthread_self(); \
_r2; \
})
# else
/* Portable version of the same: lock, then record the owner. */
static inline int
k5_pthread_mutex_lock(k5_os_mutex *m)
{
int r = pthread_mutex_lock(&m->p);
if (r)
return r;
m->owner = pthread_self();
return 0;
}
# endif
/* Assert the calling thread holds M; skipped when pthreads isn't
   actually loaded (owner would be meaningless). */
# define k5_pthread_assert_locked(M) \
(K5_PTHREADS_LOADED \
? assert(pthread_equal((M)->owner, pthread_self())) \
: (void)0)
/* Check ownership, clear it, then release the underlying mutex. */
# define k5_pthread_mutex_unlock(M) \
(k5_pthread_assert_locked(M), \
(M)->owner = (pthread_t) 0, \
pthread_mutex_unlock(&(M)->p))
#else
# define k5_pthread_mutex_lock(M) pthread_mutex_lock(&(M)->p)
/* No owner tracking without DEBUG_THREADS, so nothing to assert. */
static inline void k5_pthread_assert_locked(k5_os_mutex *m) { }
# define k5_pthread_mutex_unlock(M) pthread_mutex_unlock(&(M)->p)
#endif
/* Pthreads gives us no way to test "not locked by anyone", so this
   assertion is intentionally empty. */
static inline void k5_pthread_assert_unlocked(pthread_mutex_t *m) { }
/* DEBUG_THREADS_SLOW: sprinkle sched_yield() around lock operations to
   widen race windows during testing.  Falls back to a no-op when the
   option or sched_yield itself is unavailable. */
#if defined(DEBUG_THREADS_SLOW) && HAVE_SCHED_H && (HAVE_SCHED_YIELD || HAVE_PRAGMA_WEAK_REF)
# include <sched.h>
# if !HAVE_SCHED_YIELD
# pragma weak sched_yield
# define MAYBE_SCHED_YIELD() ((void)((&sched_yield != NULL) ? sched_yield() : 0))
# else
# define MAYBE_SCHED_YIELD() ((void)sched_yield())
# endif
#else
# define MAYBE_SCHED_YIELD() ((void)0)
#endif
/* Evaluate R, optionally yield, then produce R's value — lets the lock
   macros yield after acquiring without losing the return code. */
#ifdef __GNUC__
#define return_after_yield(R) \
__extension__ ({ \
int _r = (R); \
MAYBE_SCHED_YIELD(); \
_r; \
})
#else
static inline int return_after_yield(int r)
{
MAYBE_SCHED_YIELD();
return r;
}
#endif
#ifdef USE_PTHREAD_LOCK_ONLY_IF_LOADED
/* libpthread may be absent at run time: every operation maintains the
   no-threads fallback mutex "n", and uses the real pthread mutex only
   when K5_PTHREADS_LOADED says the symbols exist.  The initializer shape
   must match the k5_os_mutex field list for the active debug options. */
# if defined(PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP) && defined(DEBUG_THREADS)
# define K5_OS_MUTEX_PARTIAL_INITIALIZER \
{ PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, (pthread_t) 0, \
K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER }
# elif defined(DEBUG_THREADS)
# define K5_OS_MUTEX_PARTIAL_INITIALIZER \
{ PTHREAD_MUTEX_INITIALIZER, (pthread_t) 0, \
K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER }
# else
# define K5_OS_MUTEX_PARTIAL_INITIALIZER \
{ PTHREAD_MUTEX_INITIALIZER, K5_OS_NOTHREAD_MUTEX_PARTIAL_INITIALIZER }
# endif
# define k5_os_mutex_finish_init(M) \
k5_os_nothread_mutex_finish_init(&(M)->n)
# define k5_os_mutex_init(M) \
(k5_os_nothread_mutex_init(&(M)->n), \
(K5_PTHREADS_LOADED \
? pthread_mutex_init(&(M)->p, 0) \
: 0))
# define k5_os_mutex_destroy(M) \
(k5_os_nothread_mutex_destroy(&(M)->n), \
(K5_PTHREADS_LOADED \
? pthread_mutex_destroy(&(M)->p) \
: 0))
# define k5_os_mutex_lock(M) \
return_after_yield(K5_PTHREADS_LOADED \
? k5_pthread_mutex_lock(M) \
: k5_os_nothread_mutex_lock(&(M)->n))
# define k5_os_mutex_unlock(M) \
(MAYBE_SCHED_YIELD(), \
(K5_PTHREADS_LOADED \
? k5_pthread_mutex_unlock(M) \
: k5_os_nothread_mutex_unlock(&(M)->n)))
# define k5_os_mutex_assert_unlocked(M) \
(K5_PTHREADS_LOADED \
? k5_pthread_assert_unlocked(&(M)->p) \
: k5_os_nothread_mutex_assert_unlocked(&(M)->n))
# define k5_os_mutex_assert_locked(M) \
(K5_PTHREADS_LOADED \
? k5_pthread_assert_locked(M) \
: k5_os_nothread_mutex_assert_locked(&(M)->n))
#else
/* libpthread is always present: go straight to the pthread calls. */
# ifdef DEBUG_THREADS
# ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
# define K5_OS_MUTEX_PARTIAL_INITIALIZER \
{ PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, (pthread_t) 0 }
# else
# define K5_OS_MUTEX_PARTIAL_INITIALIZER \
{ PTHREAD_MUTEX_INITIALIZER, (pthread_t) 0 }
# endif
# else
# define K5_OS_MUTEX_PARTIAL_INITIALIZER \
{ PTHREAD_MUTEX_INITIALIZER }
# endif
/* Static initialization is already complete; nothing to finish. */
static inline int k5_os_mutex_finish_init(k5_os_mutex *m) { return 0; }
# define k5_os_mutex_init(M) pthread_mutex_init(&(M)->p, 0)
# define k5_os_mutex_destroy(M) pthread_mutex_destroy(&(M)->p)
# define k5_os_mutex_lock(M) return_after_yield(k5_pthread_mutex_lock(M))
# define k5_os_mutex_unlock(M) (MAYBE_SCHED_YIELD(),k5_pthread_mutex_unlock(M))
# define k5_os_mutex_assert_unlocked(M) k5_pthread_assert_unlocked(&(M)->p)
# define k5_os_mutex_assert_locked(M) k5_pthread_assert_locked(M)
#endif
#elif defined _WIN32
/* Windows implementation, backed by a kernel mutex HANDLE.  is_locked is
   plain bookkeeping for the sanity asserts (a Windows mutex is recursive,
   so the handle alone can't tell us). */
typedef struct {
HANDLE h;
int is_locked;
} k5_os_mutex;
# define K5_OS_MUTEX_PARTIAL_INITIALIZER { INVALID_HANDLE_VALUE, 0 }
/* finish_init creates the real kernel object; the assert catches a
   double finish or a mutex that was never statically initialized. */
# define k5_os_mutex_finish_init(M) \
(assert((M)->h == INVALID_HANDLE_VALUE), \
((M)->h = CreateMutex(NULL, FALSE, NULL)) ? 0 : GetLastError())
# define k5_os_mutex_init(M) \
((M)->is_locked = 0, \
((M)->h = CreateMutex(NULL, FALSE, NULL)) ? 0 : GetLastError())
# define k5_os_mutex_destroy(M) \
(CloseHandle((M)->h) ? ((M)->h = 0, 0) : GetLastError())
/* Block until the mutex is acquired; returns 0 or a Windows error code.
   The asserts rule out every WaitForSingleObject outcome except a clean
   acquisition, and catch recursive locking via is_locked. */
static inline int k5_os_mutex_lock(k5_os_mutex *m)
{
DWORD res;
res = WaitForSingleObject(m->h, INFINITE);
if (res == WAIT_FAILED)
return GetLastError();
assert(res != WAIT_TIMEOUT);
assert(res != WAIT_ABANDONED);
assert(res == WAIT_OBJECT_0);
assert(m->is_locked == 0);
m->is_locked = 1;
return 0;
}
# define k5_os_mutex_unlock(M) \
(assert((M)->is_locked == 1), \
(M)->is_locked = 0, \
ReleaseMutex((M)->h) ? 0 : GetLastError())
/* No cheap way to check these on Windows; deliberately no-ops. */
# define k5_os_mutex_assert_unlocked(M) ((void)0)
# define k5_os_mutex_assert_locked(M) ((void)0)
#else
# error "Thread support enabled, but thread system unknown"
#endif
/* Public mutex type: the OS mutex plus debug location tracking (where the
   mutex was created / last touched) and optional timing statistics.  The
   extra members collapse to single bytes when the debug options are off. */
typedef struct {
k5_debug_loc loc_last, loc_created;
k5_os_mutex os;
k5_debug_mutex_stats stats;
} k5_mutex_t;
/* Static initializer; k5_mutex_finish_init must still be called. */
#define K5_MUTEX_PARTIAL_INITIALIZER \
{ K5_DEBUG_LOC_INIT, K5_DEBUG_LOC_INIT, \
K5_OS_MUTEX_PARTIAL_INITIALIZER, K5_MUTEX_STATS_INIT }
/* Initialize mutex M, recording location L as both its creation site and
   its most recent operation.  Returns 0 on success, or the OS error code
   from the underlying mutex-init call. */
static inline int k5_mutex_init_1(k5_mutex_t *m, k5_debug_loc l)
{
    int err;

    err = k5_os_mutex_init(&m->os);
    if (err != 0)
        return err;
    m->loc_last = l;
    m->loc_created = l;
    /* Statistics initialization always reports success. */
    err = k5_mutex_init_stats(&m->stats);
    assert(err == 0);
    return 0;
}
#define k5_mutex_init(M) k5_mutex_init_1((M), K5_DEBUG_LOC)
/* Complete initialization of a statically-initialized mutex M, recording
   location L as its creation site and most recent operation.  Returns 0
   on success, or the OS error code from the underlying finish-init. */
static inline int k5_mutex_finish_init_1(k5_mutex_t *m, k5_debug_loc l)
{
    int err;

    err = k5_os_mutex_finish_init(&m->os);
    if (err != 0)
        return err;
    m->loc_last = l;
    m->loc_created = l;
    /* Statistics finish-init always reports success. */
    err = k5_mutex_finish_init_stats(&m->stats);
    assert(err == 0);
    return 0;
}
#define k5_mutex_finish_init(M) k5_mutex_finish_init_1((M), K5_DEBUG_LOC)
/* Destroy mutex M: verify it isn't held, report its stats (a no-op byte
   write unless DEBUG_THREADS_STATS), and briefly lock/unlock it so the
   destruction site is recorded in loc_last before the OS mutex goes away. */
#define k5_mutex_destroy(M) \
(k5_os_mutex_assert_unlocked(&(M)->os), \
krb5int_mutex_report_stats(M), \
k5_mutex_lock(M), (M)->loc_last = K5_DEBUG_LOC, k5_mutex_unlock(M), \
k5_os_mutex_destroy(&(M)->os))
/* Lock M, and on success record the caller's file/line and update the
   wait-time statistics.  Returns the error code from the OS lock. */
#ifdef __GNUC__
/* Statement-expression version: evaluates M exactly once and captures
   the pre-lock timestamp for the stats. */
#define k5_mutex_lock(M) \
__extension__ ({ \
int _err = 0; \
k5_mutex_stats_tmp _stats = k5_mutex_stats_start(); \
k5_mutex_t *_m = (M); \
_err = k5_os_mutex_lock(&_m->os); \
if (_err == 0) _m->loc_last = K5_DEBUG_LOC; \
if (_err == 0) k5_mutex_lock_update_stats(&_m->stats, _stats); \
_err; \
})
#else
/* Portable version: the macro below passes the caller's location in. */
static inline int k5_mutex_lock_1(k5_mutex_t *m, k5_debug_loc l)
{
int err = 0;
k5_mutex_stats_tmp stats = k5_mutex_stats_start();
err = k5_os_mutex_lock(&m->os);
if (err)
return err;
m->loc_last = l;
k5_mutex_lock_update_stats(&m->stats, stats);
return err;
}
#define k5_mutex_lock(M) k5_mutex_lock_1(M, K5_DEBUG_LOC)
#endif
/* Unlock M: assert it is held, fold held-time into the stats, record the
   unlock site, then release the OS mutex (whose result is the value). */
#define k5_mutex_unlock(M) \
(k5_mutex_assert_locked(M), \
k5_mutex_unlock_update_stats(&(M)->stats), \
(M)->loc_last = K5_DEBUG_LOC, \
k5_os_mutex_unlock(&(M)->os))
/* Assertions forward to the OS layer (no-ops unless DEBUG_THREADS). */
#define k5_mutex_assert_locked(M) k5_os_mutex_assert_locked(&(M)->os)
#define k5_mutex_assert_unlocked(M) k5_os_mutex_assert_unlocked(&(M)->os)
/* Short aliases kept for existing callers. */
#define k5_assert_locked k5_mutex_assert_locked
#define k5_assert_unlocked k5_mutex_assert_unlocked
/* Thread-local storage: a fixed set of keys known at compile time, so the
   implementation can use a simple array.  Add new keys before K5_KEY_MAX. */
typedef enum {
K5_KEY_COM_ERR,
K5_KEY_GSS_KRB5_SET_CCACHE_OLD_NAME,
K5_KEY_GSS_KRB5_CCACHE_NAME,
K5_KEY_MAX
} k5_key_t;
/* Internal names are prefixed krb5int_; the short k5_ forms are macros. */
#define k5_key_register krb5int_key_register
#define k5_getspecific krb5int_getspecific
#define k5_setspecific krb5int_setspecific
#define k5_key_delete krb5int_key_delete
extern int k5_key_register(k5_key_t, void (*)(void *));
extern void *k5_getspecific(k5_key_t);
extern int k5_setspecific(k5_key_t, void *);
extern int k5_key_delete(k5_key_t);
/* Exported mutex wrappers for callers (e.g. plugins) that cannot use the
   inline/macro implementations above. */
extern int KRB5_CALLCONV krb5int_mutex_alloc (k5_mutex_t **);
extern void KRB5_CALLCONV krb5int_mutex_free (k5_mutex_t *);
extern int KRB5_CALLCONV krb5int_mutex_lock (k5_mutex_t *);
extern int KRB5_CALLCONV krb5int_mutex_unlock (k5_mutex_t *);
#ifdef PLUGIN
/* Plugins must go through the exported functions so they don't depend on
   the library's internal mutex layout. */
#undef k5_mutex_lock
#define k5_mutex_lock krb5int_mutex_lock
#undef k5_mutex_unlock
#define k5_mutex_unlock krb5int_mutex_unlock
#endif
#endif