#ifndef _I386_LOCK_H_
#define _I386_LOCK_H_
#include <sys/appleapiopts.h>
#ifdef __APPLE_API_PRIVATE
#ifdef MACH_KERNEL_PRIVATE
#include <kern/macro_help.h>
#include <kern/assert.h>
#include <i386/hw_lock_types.h>
#include <mach_rt.h>
#include <mach_ldebug.h>
#include <cpus.h>
#if defined(__GNUC__)
/*
 * bit_lock(bit, l): spin until bit `bit` of the int at address `l` is
 * atomically acquired (set).  Control flow: jump straight to the locked
 * btsl attempt at 1:; if the bit was already set (CF=1) fall back to the
 * 0: loop, which spin-reads with a plain (unlocked) btl — avoiding bus
 * locking while waiting — until the bit reads clear, then retries the
 * locked btsl.  The "memory" clobber makes this a compiler barrier on
 * lock acquisition.
 */
#define bit_lock(bit,l) \
__asm__ volatile(" jmp 1f \n \
0: btl %0, %1 \n \
jb 0b \n \
1: lock \n \
btsl %0,%1 \n \
jb 0b" : \
: \
"r" (bit), "m" (*(volatile int *)(l)) : \
"memory");
/*
 * bit_unlock(bit, l): atomically clear bit `bit` of the int at address
 * `l`, releasing a lock taken with bit_lock().
 *
 * Fix: add the "memory" clobber (bit_lock already has one) so the asm is
 * a compiler barrier — without it the compiler may sink stores performed
 * inside the critical section past the unlock, making them visible to a
 * new lock holder too late.
 */
#define bit_unlock(bit,l) \
__asm__ volatile(" lock \n \
btrl %0,%1" : \
: \
"r" (bit), "m" (*(volatile int *)(l)) : \
"memory");
/*
 * i_bit_set(bit, l): atomically set bit `bit` in the int at address `l`
 * (lock btsl).  No "memory" clobber, so this is not a compiler barrier.
 */
#define i_bit_set(bit,l) \
__asm__ volatile(" lock \n \
btsl %0,%1" : \
: \
"r" (bit), "m" (*(volatile int *)(l)));
/*
 * i_bit_clear(bit, l): atomically clear bit `bit` in the int at address
 * `l` (lock btrl).  No "memory" clobber, so this is not a compiler barrier.
 */
#define i_bit_clear(bit,l) \
__asm__ volatile(" lock \n \
btrl %0,%1" : \
: \
"r" (bit), "m" (*(volatile int *)(l)));
/*
 * i_bit_isset(testbit, word): non-atomically test bit `testbit` of the
 * word at address `word`.  btl sets CF to the tested bit; `sbbl %0,%0`
 * materializes that as 0 (clear) or -1 (set), so the result is non-zero
 * iff the bit is set.
 *
 * Fix: the memory operand must be the pointed-to word — "m" (*word).
 * The original passed "m" (word), making the operand the pointer variable
 * itself, so btl examined bits of the pointer value rather than the lock
 * word it addresses.
 */
extern __inline__ unsigned long i_bit_isset(unsigned int testbit, volatile unsigned long *word)
{
	int bit;

	__asm__ volatile("btl %2,%1\n\tsbbl %0,%0" : "=r" (bit)
		: "m" (*word), "ir" (testbit));
	return bit;
}
/*
 * Forward declarations of the inline atomic helpers defined below in this
 * header: byte exchange plus add/sub/load/store at long, short, and byte
 * widths.  All are defined as extern __inline__ later in this file.
 */
extern __inline__ char xchgb(volatile char * cp, char new);
extern __inline__ void atomic_incl(long * p, long delta);
extern __inline__ void atomic_incs(short * p, short delta);
extern __inline__ void atomic_incb(char * p, char delta);
extern __inline__ void atomic_decl(long * p, long delta);
extern __inline__ void atomic_decs(short * p, short delta);
extern __inline__ void atomic_decb(char * p, char delta);
extern __inline__ long atomic_getl(long * p);
extern __inline__ short atomic_gets(short * p);
extern __inline__ char atomic_getb(char * p);
extern __inline__ void atomic_setl(long * p, long value);
extern __inline__ void atomic_sets(short * p, short value);
extern __inline__ void atomic_setb(char * p, char value);
/*
 * xchgb(cp, new): atomically exchange the byte at *cp with `new` and
 * return the previous byte.  xchg with a memory operand is implicitly
 * locked on x86; the "memory" clobber also makes this a compiler barrier.
 */
extern __inline__ char xchgb(volatile char * cp, char new)
{
register char old = new;
__asm__ volatile (" xchgb %0,%2" :
"=q" (old) :
"0" (new), "m" (*(volatile char *)cp) : "memory");
return (old);
}
/*
 * atomic_incl(p, delta): add delta to the long at *p.
 * When NEED_ATOMIC, uses a bus-locked addl; otherwise a plain in-memory
 * add suffices (single CPU / no contention assumed).
 */
extern __inline__ void atomic_incl(long * p, long delta)
{
#if NEED_ATOMIC
__asm__ volatile (" lock \n \
addl %0,%1" : \
: \
"r" (delta), "m" (*(volatile long *)p));
#else
*p += delta;
#endif
}
/*
 * atomic_incs(p, delta): add delta to the short at *p.
 * Locked addw when NEED_ATOMIC, plain add otherwise.
 */
extern __inline__ void atomic_incs(short * p, short delta)
{
#if NEED_ATOMIC
__asm__ volatile (" lock \n \
addw %0,%1" : \
: \
"q" (delta), "m" (*(volatile short *)p));
#else
*p += delta;
#endif
}
/*
 * atomic_incb(p, delta): add delta to the byte at *p.
 * Locked addb when NEED_ATOMIC, plain add otherwise.
 */
extern __inline__ void atomic_incb(char * p, char delta)
{
#if NEED_ATOMIC
__asm__ volatile (" lock \n \
addb %0,%1" : \
: \
"q" (delta), "m" (*(volatile char *)p));
#else
*p += delta;
#endif
}
/*
 * atomic_decl(p, delta): subtract delta from the long at *p.
 * Locked subl when NEED_ATOMIC, plain subtract otherwise.
 *
 * Fix: the locked path was guarded by `#if NCPUS > 1` while every other
 * atomic_* helper in this header uses `#if NEED_ATOMIC`; if the two
 * conditions disagree this one helper silently loses (or gains)
 * atomicity relative to its siblings.  Use NEED_ATOMIC for consistency.
 */
extern __inline__ void atomic_decl(long * p, long delta)
{
#if NEED_ATOMIC
__asm__ volatile (" lock \n \
subl %0,%1" : \
: \
"r" (delta), "m" (*(volatile long *)p));
#else
*p -= delta;
#endif
}
/*
 * atomic_decs(p, delta): subtract delta from the short at *p.
 * Locked subw when NEED_ATOMIC, plain subtract otherwise.
 */
extern __inline__ void atomic_decs(short * p, short delta)
{
#if NEED_ATOMIC
__asm__ volatile (" lock \n \
subw %0,%1" : \
: \
"q" (delta), "m" (*(volatile short *)p));
#else
*p -= delta;
#endif
}
/*
 * atomic_decb(p, delta): subtract delta from the byte at *p.
 * Locked subb when NEED_ATOMIC, plain subtract otherwise.
 */
extern __inline__ void atomic_decb(char * p, char delta)
{
#if NEED_ATOMIC
__asm__ volatile (" lock \n \
subb %0,%1" : \
: \
"q" (delta), "m" (*(volatile char *)p));
#else
*p -= delta;
#endif
}
/*
 * atomic_getl(p): read the long at *p.  An aligned long load is a single
 * instruction on i386, so no lock prefix is needed.
 */
extern __inline__ long atomic_getl(long * p)
{
	long value = *p;
	return value;
}
/*
 * atomic_gets(p): read the short at *p.  An aligned short load is a
 * single instruction on i386, so no lock prefix is needed.
 */
extern __inline__ short atomic_gets(short * p)
{
	short value = *p;
	return value;
}
/*
 * atomic_getb(p): read the byte at *p.  A byte load is a single
 * instruction on i386, so no lock prefix is needed.
 */
extern __inline__ char atomic_getb(char * p)
{
	char value = *p;
	return value;
}
/*
 * atomic_setl(p, value): store value into the long at *p.  An aligned
 * long store is a single instruction on i386, so no lock prefix is needed.
 */
extern __inline__ void atomic_setl(long * p, long value)
{
	p[0] = value;
}
/*
 * atomic_sets(p, value): store value into the short at *p.  An aligned
 * short store is a single instruction on i386, so no lock prefix is
 * needed.
 */
extern __inline__ void atomic_sets(short * p, short value)
{
	p[0] = value;
}
/*
 * atomic_setb(p, value): store value into the byte at *p.  A byte store
 * is a single instruction on i386, so no lock prefix is needed.
 */
extern __inline__ void atomic_setb(char * p, char value)
{
	p[0] = value;
}
#else
/*
 * Non-GNUC build: the bit operations are declared as real functions,
 * implemented elsewhere, instead of the inline-asm macros above.
 */
extern void i_bit_set(
int index,
void *addr);
extern void i_bit_clear(
int index,
void *addr);
extern void bit_lock(
int index,
void *addr);
extern void bit_unlock(
int index,
void *addr);
#endif
/* NOTE(review): implemented elsewhere; presumably checks for and takes a
 * pending kernel preemption — confirm against the platform layer. */
extern void kernel_preempt_check (void);
#endif
#endif
#endif