#include <cpus.h>
#include <platforms.h>
#include <mach_kdb.h>
#include <mach/mach_types.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <kern/clock.h>
#include <kern/host_notify.h>
#include <kern/macro_help.h>
#include <kern/misc_protos.h>
#include <kern/spl.h>
#include <machine/mach_param.h>
#include <mach/vm_prot.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <i386/ipl.h>
#include <i386/pit.h>
#include <i386/pio.h>
#include <i386/misc_protos.h>
#include <i386/rtclock_entries.h>
#include <i386/hardclock_entries.h>
#include <i386/proc_reg.h>
#include <i386/machine_cpu.h>
#include <pexpert/pexpert.h>
/* Debug trace helpers: stringize the tag (and value) and printf it. */
#define DISPLAYENTER(x) printf("[RTCLOCK] entering " #x "\n");
#define DISPLAYEXIT(x) printf("[RTCLOCK] leaving " #x "\n");
#define DISPLAYVALUE(x,y) printf("[RTCLOCK] " #x ":" #y " = 0x%08x \n",y);
/* Forward declarations for the system (real-time) clock device. */
int sysclk_config(void);
int sysclk_init(void);
kern_return_t sysclk_gettime(
mach_timespec_t *cur_time);
kern_return_t sysclk_getattr(
clock_flavor_t flavor,
clock_attr_t attr,
mach_msg_type_number_t *count);
kern_return_t sysclk_setattr(
clock_flavor_t flavor,
clock_attr_t attr,
mach_msg_type_number_t count);
void sysclk_setalarm(
mach_timespec_t *alarm_time);
extern void (*IOKitRegisterInterruptHook)(void *, int irq, int isclock);
/*
 * Routine vector for the system clock, exported to the machine-
 * independent clock layer.
 * NOTE(review): the 0 entry is an unimplemented slot -- confirm its
 * meaning against the struct clock_ops declaration.
 */
struct clock_ops sysclk_ops = {
sysclk_config, sysclk_init,
sysclk_gettime, 0,
sysclk_getattr, sysclk_setattr,
sysclk_setalarm,
};
/* Forward declarations for the calendar (battery-backed) clock device. */
int calend_config(void);
int calend_init(void);
kern_return_t calend_gettime(
mach_timespec_t *cur_time);
kern_return_t calend_getattr(
clock_flavor_t flavor,
clock_attr_t attr,
mach_msg_type_number_t *count);
/*
 * Routine vector for the calendar clock.  The calendar cannot be
 * set through this vector and supports no alarms, hence the 0 slots.
 */
struct clock_ops calend_ops = {
calend_config, calend_init,
calend_gettime, 0,
calend_getattr, 0,
0,
};
/* Points at rtclock.time once sysclk_init() has run; 0 until then. */
mach_timespec_t *RtcTime = (mach_timespec_t *)0;
/* Pending alarm time (points at rtclock.alarm_time), or 0 if none. */
mach_timespec_t *RtcAlrm;
/* Half an interrupt period in ns; slop used when delivering alarms. */
clock_res_t RtcDelt;
/* All mutable clock state, guarded by rtclock.lock (see LOCK_RTC). */
struct {
uint64_t abstime; /* absolute time at the last clock interrupt */
mach_timespec_t time; /* current time, updated by rtclock_intr() */
mach_timespec_t alarm_time; /* time of next alarm */
mach_timespec_t calend_offset; /* calendar minus system-clock offset */
boolean_t calend_is_set; /* TRUE once the calendar is initialized */
int64_t calend_adjtotal; /* ns of adjtime() skew still outstanding */
int32_t calend_adjdelta; /* ns of skew applied per tick */
uint64_t timer_deadline; /* one-shot timer expiry (abstime, ns) */
boolean_t timer_is_set; /* TRUE while the one-shot timer is armed */
clock_timer_func_t timer_expire; /* timer callback; set once */
clock_res_t new_ires; /* staged resolution change; 0 if none */
clock_res_t intr_nsec; /* actual interrupt interval, ns */
mach_timebase_info_data_t timebase_const; /* TSC -> ns conversion */
decl_simple_lock_data(,lock) /* protects everything above */
} rtclock;
unsigned int clknum; /* PIT input clocks per second */
unsigned int new_clknum; /* clknum staged with a resolution change */
unsigned int time_per_clk; /* scaled time per PIT clock (rtc_setvals) */
unsigned int clks_per_int; /* PIT clocks per interrupt */
unsigned int clks_per_int_99; /* 99% of clks_per_int */
int rtc_intr_count; /* interrupts remaining until next hertz_tick */
int rtc_intr_hertz; /* interrupts per scheduler tick (HZ) */
int rtc_intr_freq; /* clock interrupts per second */
int rtc_print_lost_tick; /* debug flag; unused in this file -- TODO confirm */
uint32_t rtc_cyc_per_sec; /* measured TSC cycles per second */
uint32_t rtc_quant_scale; /* 32.32 fixed-point TSC->ns multiplier */
/* Acquire the clock lock at splclock(); s receives the previous spl. */
#define LOCK_RTC(s) \
MACRO_BEGIN \
(s) = splclock(); \
simple_lock(&rtclock.lock); \
MACRO_END
/* Release the clock lock and restore the spl saved by LOCK_RTC. */
#define UNLOCK_RTC(s) \
MACRO_BEGIN \
simple_unlock(&rtclock.lock); \
splx(s); \
MACRO_END
/* Default (coarsest) and finest supported interrupt periods, in ns. */
#define RTC_MINRES (NSEC_PER_SEC / HZ)
#define RTC_MAXRES (RTC_MINRES / 20)
/* Nanoseconds per second, and its value expressed per scheduler tick. */
#define ZANO (1000000000)
#define ZHZ (ZANO / (NSEC_PER_SEC / HZ))
/* Latch PIT counter 0 and read its current 16-bit value into val. */
#define READ_8254(val) { \
outb(PITCTL_PORT, PIT_C0); \
(val) = inb(PITCTR0_PORT); \
(val) |= inb(PITCTR0_PORT) << 8 ; }
/* Reported CPU frequency is rounded up to a multiple of 10 MHz. */
#define UI_CPUFREQ_ROUNDING_FACTOR 10000000
void rtc_setvals( unsigned int, clock_res_t );
static void rtc_set_cyc_per_sec();
/*
 * rdtsc_64: read the CPU time-stamp counter as a 64-bit value.
 * The "=A" constraint binds the EDX:EAX pair written by RDTSC to
 * result (i386-only idiom).
 */
inline static uint64_t
rdtsc_64(void)
{
uint64_t result;
asm volatile("rdtsc": "=A" (result));
return result;
}
/*
 * Build the 32.32 fixed-point multiplier that converts TSC ticks to
 * nanoseconds, given `quant` = TSC ticks per 1/20 second (so the
 * true ratio is (1e9/20) / quant, scaled by 2^32).
 */
inline static uint32_t
create_mul_quant_GHZ(uint32_t quant)
{
	uint64_t scaled_twentieth = 50000000ULL << 32;

	return (uint32_t)(scaled_twentieth / quant);
}
/*
 * fast_get_nano_from_abs: return the high 64 bits of the 96-bit
 * product value * scale, where scale is a 32.32 fixed-point factor
 * (see create_mul_quant_GHZ) -- i.e. convert TSC ticks to
 * nanoseconds using only multiplies.
 * NOTE(review): `scale` is declared int but is consumed by mull as
 * an unsigned 32-bit multiplier; callers pass rtc_quant_scale --
 * confirm no caller passes a genuinely signed value.
 */
inline static uint64_t
fast_get_nano_from_abs(uint64_t value, int scale)
{
asm (" movl %%edx,%%esi \n\t"
" mull %%ecx \n\t"
" movl %%edx,%%edi \n\t"
" movl %%esi,%%eax \n\t"
" mull %%ecx \n\t"
" xorl %%ecx,%%ecx \n\t"
" addl %%edi,%%eax \n\t"
" adcl %%ecx,%%edx "
: "+A" (value)
: "c" (scale)
: "%esi", "%edi");
return value;
}
/*
 * nanos_to_timespec: split a nanosecond count into seconds plus
 * residual nanoseconds.  divl divides EDX:EAX (the two halves of
 * nanos) by NSEC_PER_SEC, leaving the quotient (seconds) in EAX and
 * remainder (nanoseconds) in EDX; the union reinterprets that pair
 * as a mach_timespec_t.
 * NOTE(review): assumes nanos / NSEC_PER_SEC fits in 32 bits (divl
 * faults on quotient overflow) and that mach_timespec_t's field
 * order matches the EAX/EDX layout -- i386-specific; confirm.
 */
inline static mach_timespec_t
nanos_to_timespec(uint64_t nanos)
{
union {
mach_timespec_t ts;
uint64_t u64;
} ret;
ret.u64 = nanos;
asm volatile("divl %1" : "+A" (ret.u64) : "r" (NSEC_PER_SEC));
return ret.ts;
}
/*
 * longmul: compute the 96-bit product (*abstime) * multiplicand and
 * store its three 32-bit words through `result`, least-significant
 * word first (the layout consumed by longdiv below).
 * NOTE(review): operand %2 ("m"(result)) is the memory slot holding
 * the result pointer; the asm loads that pointer into ECX before
 * storing -- confirm "m" (rather than "r") is intentional for the
 * toolchain in use.
 */
inline static void
longmul(uint64_t *abstime, uint32_t multiplicand, uint32_t *result)
{
asm volatile(
" pushl %%ebx \n\t"
" movl %%eax,%%ebx \n\t"
" movl (%%eax),%%eax \n\t"
" mull %%ecx \n\t"
" xchg %%eax,%%ebx \n\t"
" pushl %%edx \n\t"
" movl 4(%%eax),%%eax \n\t"
" mull %%ecx \n\t"
" movl %2,%%ecx \n\t"
" movl %%ebx,(%%ecx) \n\t"
" popl %%ebx \n\t"
" addl %%ebx,%%eax \n\t"
" popl %%ebx \n\t"
" movl %%eax,4(%%ecx) \n\t"
" adcl $0,%%edx \n\t"
" movl %%edx,8(%%ecx) // and save it"
: : "a"(abstime), "c"(multiplicand), "m"(result));
}
/*
 * longdiv: divide the 96-bit value at numer[0..2] (least-significant
 * word first, as written by longmul) by a 32-bit denominator,
 * returning the 64-bit quotient.
 * NOTE(review): the output "=A"(result) overlaps the input
 * "a"(numer) (both claim EAX); this relies on historical GCC
 * constraint handling -- verify it still assembles as intended with
 * the toolchain in use.
 */
inline static uint64_t
longdiv(uint32_t *numer, uint32_t denom)
{
uint64_t result;
asm volatile(
" pushl %%ebx \n\t"
" movl %%eax,%%ebx \n\t"
" movl 8(%%eax),%%edx \n\t"
" movl 4(%%eax),%%eax \n\t"
" divl %%ecx \n\t"
" xchg %%ebx,%%eax \n\t"
" movl (%%eax),%%eax \n\t"
" divl %%ecx \n\t"
" xchg %%ebx,%%edx \n\t"
" popl %%ebx \n\t"
: "=A"(result) : "a"(numer),"c"(denom));
return result;
}
#define PIT_Mode4 0x08 /* 8254 mode 4 (software-triggered strobe); unused in this file -- TODO confirm */
/*
 * enable_PIT2: via the NMI status/control port (0x61), clear the
 * speaker-data bit (and $253 == ~0x02) and set the timer-2 gate bit
 * (or $1) so PIT counter 2 counts without driving the speaker.
 */
inline static void
enable_PIT2()
{
asm volatile(
" inb $97,%%al \n\t"
" and $253,%%al \n\t"
" or $1,%%al \n\t"
" outb %%al,$97 \n\t"
: : : "%al" );
}
/*
 * disable_PIT2: clear the speaker-data bit in port 0x61.
 * NOTE(review): the timer-2 gate bit (bit 0) set by enable_PIT2()
 * is left as-is here -- confirm this is the intended "disable"
 * (and $253 clears bit 1 only).
 */
inline static void
disable_PIT2()
{
asm volatile(
" inb $97,%%al \n\t"
" and $253,%%al \n\t"
" outb %%al,$97 \n\t"
: : : "%al" );
}
/*
 * set_PIT2: load PIT counter 2 with `value`.  The control word 184
 * (0xB8) written to port 0x43 selects counter 2, lobyte/hibyte
 * access; the low then high bytes of value go to port 0x42.  The
 * trailing loop re-reads the counter until its high byte matches
 * the value written, i.e. until the load has taken effect.
 */
inline static void
set_PIT2(int value)
{
asm volatile(
" movb $184,%%al \n\t"
" outb %%al,$67 \n\t"
" movb %%dl,%%al \n\t"
" outb %%al,$66 \n\t"
" movb %%dh,%%al \n\t"
" outb %%al,$66 \n"
"1: inb $66,%%al \n\t"
" inb $66,%%al \n\t"
" cmp %%al,%%dh \n\t"
" jne 1b"
: : "d"(value) : "%al");
}
/*
 * get_PIT2: latch PIT counter 2 (0x80 -> port 0x43), sample the TSC,
 * then read the latched low/high count bytes from port 0x42 into
 * *value.  Returns the TSC sample taken between the latch and the
 * reads, so the timestamp corresponds closely to *value.
 */
inline static uint64_t
get_PIT2(unsigned int *value)
{
register uint64_t result;
asm volatile(
" xorl %%ecx,%%ecx \n\t"
" movb $128,%%al \n\t"
" outb %%al,$67 \n\t"
" rdtsc \n\t"
" pushl %%eax \n\t"
" inb $66,%%al \n\t"
" movb %%al,%%cl \n\t"
" inb $66,%%al \n\t"
" movb %%al,%%ch \n\t"
" popl %%eax "
: "=A"(result), "=c"(*value));
return result;
}
/*
 * timeRDTSC: calibrate the TSC against PIT counter 2.  Loads the PIT
 * with 59658 ticks (~1/20 s at 1.19318 MHz) and counts the TSC
 * cycles that elapse while it drains.  Returns TSC cycles per 1/20
 * second.  Runs with interrupts disabled.
 */
static uint32_t
timeRDTSC(void)
{
	uint64_t	latchTime;
	uint64_t	saveTime, intermediate;
	unsigned int	timerValue, x;
	boolean_t	int_enabled;
	/*
	 * Correction factors (x 1e9) indexed by the 0..5 residual PIT
	 * counts left over when the drain loop below exits.
	 */
	uint64_t	fact[6] = { 2000011734ll,
				    2000045259ll,
				    2000078785ll,
				    2000112312ll,
				    2000145841ll,
				    2000179371ll };

	int_enabled = ml_set_interrupts_enabled(FALSE);

	/* Measure the TSC cost of one PIT read, to subtract it out. */
	enable_PIT2();
	set_PIT2(0);
	latchTime = rdtsc_64();
	latchTime = get_PIT2(&timerValue) - latchTime;

	/* Load ~1/20 s and wait for the counter to start moving. */
	set_PIT2(59658);
	saveTime = rdtsc_64();
	get_PIT2(&x);
	do {
		get_PIT2(&timerValue);
		x = timerValue;
	} while (timerValue > x);

	/* Spin until the counter is nearly exhausted (<= 5 left). */
	do {
		intermediate = get_PIT2(&timerValue);
		if (timerValue > x)
			printf("Hey we are going backwards! %d, %d\n",
			       timerValue, x);
		x = timerValue;
	} while ((timerValue != 0) && (timerValue > 5));

	printf("Timer value:%d\n", timerValue);
	/*
	 * BUG FIX: the two "%08x" conversions were passed a single
	 * uint64_t argument (undefined varargs behavior); split each
	 * value into explicit hi:lo 32-bit halves.
	 */
	printf("intermediate 0x%08x:0x%08x\n",
	       (uint32_t)(intermediate >> 32), (uint32_t)intermediate);
	printf("saveTime 0x%08x:0x%08x\n",
	       (uint32_t)(saveTime >> 32), (uint32_t)saveTime);

	/* Scale by the residual-count correction, add the read latency. */
	intermediate = intermediate - saveTime;
	intermediate = intermediate * fact[timerValue];
	intermediate = intermediate / 2000000000ll;
	intermediate = intermediate + latchTime;

	set_PIT2(0);
	disable_PIT2();		/* BUG FIX: was disable_PIT2(0) -- it takes no arguments */
	ml_set_interrupts_enabled(int_enabled);
	return intermediate;
}
/*
 * rdtsctime_to_nanoseconds: current TSC converted to nanoseconds
 * using rtclock.timebase_const.  A denom of 0xFFFFFFFF flags the
 * fast path where numer is a 32.32 fixed-point scale (installed by
 * rtc_set_cyc_per_sec for CPUs at or above 1 GHz); otherwise the
 * full 96-bit multiply/divide by numer/denom is used.
 */
static uint64_t
rdtsctime_to_nanoseconds( void )
{
uint32_t numer;
uint32_t denom;
uint64_t abstime;
uint32_t intermediate[3];
numer = rtclock.timebase_const.numer;
denom = rtclock.timebase_const.denom;
abstime = rdtsc_64();
if (denom == 0xFFFFFFFF) {
abstime = fast_get_nano_from_abs(abstime, numer);
} else {
longmul(&abstime, numer, intermediate);
abstime = longdiv(intermediate, denom);
}
return abstime;
}
/* Current TSC-derived time, expressed as a mach_timespec_t. */
inline static mach_timespec_t
rdtsc_to_timespec(void)
{
	return nanos_to_timespec(rdtsctime_to_nanoseconds());
}
/*
 * rtc_setvals: derive PIT programming and bookkeeping values for a
 * clock with new_clknum input clocks per second interrupting every
 * new_ires nanoseconds.  Sets clknum, rtc_intr_freq/hertz,
 * clks_per_int(_99), time_per_clk, rtclock.intr_nsec and RtcDelt.
 * NOTE(review): div_scale/mul_scale are assumed to return fixed-
 * point results with their binary scale in scale0/scale1 -- inferred
 * from usage; confirm against their definitions.
 */
void
rtc_setvals(
unsigned int new_clknum,
clock_res_t new_ires
)
{
unsigned int timeperclk;
unsigned int scale0;
unsigned int scale1;
unsigned int res;
clknum = new_clknum;
rtc_intr_freq = (NSEC_PER_SEC / new_ires);
rtc_intr_hertz = rtc_intr_freq / HZ;
/* Round to the nearest whole number of clocks per interrupt. */
clks_per_int = (clknum + (rtc_intr_freq / 2)) / rtc_intr_freq;
clks_per_int_99 = clks_per_int - clks_per_int/100;
timeperclk = div_scale(ZANO, clknum, &scale0);
time_per_clk = mul_scale(ZHZ, timeperclk, &scale1);
if (scale0 > scale1)
time_per_clk >>= (scale0 - scale1);
else if (scale0 < scale1)
panic("rtc_clock: time_per_clk overflow\n");
/* Actual nanoseconds per interrupt after the rounding above. */
res = mul_scale(clks_per_int, timeperclk, &scale1);
if (scale0 > scale1)
rtclock.intr_nsec = res >> (scale0 - scale1);
else
panic("rtc_clock: rtclock.intr_nsec overflow\n");
rtc_intr_count = 1;
/* Alarm-delivery slop: half an interrupt period. */
RtcDelt = rtclock.intr_nsec/2;
}
/*
 * sysclk_config: configure the real-time clock device.  Only the
 * master CPU initializes the lock; other CPUs return immediately.
 * Always returns 1 (configured).
 */
int
sysclk_config(void)
{
#if	NCPUS > 1
	mp_disable_preemption();
	if (cpu_number() != master_cpu) {
		mp_enable_preemption();
		return (1);
	}
	mp_enable_preemption();
#endif
	/* (unused locals `pic` and `RtcFlag` removed) */
	printf("realtime clock configured\n");
	simple_lock_init(&rtclock.lock, ETAP_NO_TRACE);
	return (1);
}
/*
 * sysclk_init: initialize the real-time clock (master CPU only):
 * publish RtcTime, program the default resolution, calibrate the
 * TSC, and initialize the shared timebase.  Returns 1.
 */
int
sysclk_init(void)
{
#if	NCPUS > 1
	mp_disable_preemption();
	if (cpu_number() != master_cpu) {
		mp_enable_preemption();
		return (1);
	}
	mp_enable_preemption();
#endif
	/* (unused local `vp` removed) */
	RtcTime = &rtclock.time;
	rtc_setvals(CLKNUM, RTC_MINRES);	/* compute constants */
	rtc_set_cyc_per_sec();			/* calibrate TSC vs. PIT */
	clock_timebase_init();
	return (1);
}
/* Reset to 0 by rtclock_intr() on every clock interrupt. */
static volatile unsigned int last_ival = 0;

/*
 * sysclk_gettime: return the current system-clock time, or 0/0 if
 * the clock has not been initialized yet.  Always KERN_SUCCESS.
 */
kern_return_t
sysclk_gettime(
	mach_timespec_t	*cur_time)
{
	if (RtcTime == 0) {
		cur_time->tv_sec = 0;
		cur_time->tv_nsec = 0;
	}
	else
		*cur_time = rdtsc_to_timespec();
	return (KERN_SUCCESS);
}
/*
 * sysclk_gettime_internal: same contract as sysclk_gettime();
 * separate entry point used by callers that already hold the RTC
 * lock (e.g. calend_gettime).  Always KERN_SUCCESS.
 */
kern_return_t
sysclk_gettime_internal(
	mach_timespec_t	*cur_time)
{
	if (RtcTime != 0) {
		*cur_time = rdtsc_to_timespec();
		return (KERN_SUCCESS);
	}
	/* Clock not initialized: report the epoch. */
	cur_time->tv_sec = 0;
	cur_time->tv_nsec = 0;
	return (KERN_SUCCESS);
}
/*
 * sysclk_gettime_interrupts_disabled: read the current time for
 * callers running with interrupts off; 0/0 before initialization.
 */
void
sysclk_gettime_interrupts_disabled(
	mach_timespec_t	*cur_time)
{
	if (RtcTime == 0) {
		cur_time->tv_sec = 0;
		cur_time->tv_nsec = 0;
		return;
	}
	*cur_time = rdtsc_to_timespec();
}
/*
 * rtc_set_cyc_per_sec: calibrate the TSC against the PIT
 * (timeRDTSC() returns cycles per 1/20 second) and install the
 * TSC -> nanoseconds conversion in rtclock.timebase_const.  Also
 * publishes a 10 MHz-rounded CPU frequency to gPEClockFrequencyInfo.
 */
static void
rtc_set_cyc_per_sec()
{
uint32_t twen_cycles;
uint32_t cycles;
twen_cycles = timeRDTSC();
if (twen_cycles> (1000000000/20)) {
/* >= 1 GHz: 32.32 fixed-point multiply path (denom sentinel). */
rtc_quant_scale = create_mul_quant_GHZ(twen_cycles);
rtclock.timebase_const.numer = rtc_quant_scale; rtclock.timebase_const.denom = 0xffffffff;
} else {
/* < 1 GHz: exact numer/denom ratio via the 96-bit mul/div path. */
rtclock.timebase_const.numer = 1000000000/20; rtclock.timebase_const.denom = twen_cycles; }
cycles = twen_cycles; rtc_cyc_per_sec = cycles*20;
/* Round up to a multiple of 10 MHz for user-visible reporting. */
cycles = ((rtc_cyc_per_sec + UI_CPUFREQ_ROUNDING_FACTOR - 1) / UI_CPUFREQ_ROUNDING_FACTOR) * UI_CPUFREQ_ROUNDING_FACTOR;
gPEClockFrequencyInfo.cpu_clock_rate_hz = cycles;
DISPLAYVALUE(rtc_set_cyc_per_sec,rtc_cyc_per_sec);
DISPLAYEXIT(rtc_set_cyc_per_sec);
}
/* Current system clock time, split into seconds and microseconds. */
void
clock_get_system_microtime(
	uint32_t	*secs,
	uint32_t	*microsecs)
{
	mach_timespec_t	ts;

	sysclk_gettime(&ts);
	*secs = ts.tv_sec;
	*microsecs = ts.tv_nsec / NSEC_PER_USEC;
}
/* Current system clock time, split into seconds and nanoseconds. */
void
clock_get_system_nanotime(
	uint32_t	*secs,
	uint32_t	*nanosecs)
{
	mach_timespec_t	ts;

	sysclk_gettime(&ts);
	*secs = ts.tv_sec;
	*nanosecs = ts.tv_nsec;
}
/*
 * sysclk_getattr: return a system-clock attribute.  *count must be 1.
 * On uniprocessor builds CLOCK_GET_TIME_RES reports a fixed 1 us;
 * on MP builds that case deliberately falls through and reports the
 * current interrupt resolution, same as CLOCK_ALARM_CURRES.
 */
kern_return_t
sysclk_getattr(
clock_flavor_t flavor,
clock_attr_t attr,
mach_msg_type_number_t *count)
{
spl_t s;
if (*count != 1)
return (KERN_FAILURE);
switch (flavor) {
case CLOCK_GET_TIME_RES:
#if (NCPUS == 1)
LOCK_RTC(s);
*(clock_res_t *) attr = 1000;
UNLOCK_RTC(s);
break;
#endif
/* fallthrough (when NCPUS > 1) */
case CLOCK_ALARM_CURRES:
LOCK_RTC(s);
*(clock_res_t *) attr = rtclock.intr_nsec;
UNLOCK_RTC(s);
break;
case CLOCK_ALARM_MAXRES:
*(clock_res_t *) attr = RTC_MAXRES;
break;
case CLOCK_ALARM_MINRES:
*(clock_res_t *) attr = RTC_MINRES;
break;
default:
return (KERN_INVALID_VALUE);
}
return (KERN_SUCCESS);
}
/*
 * sysclk_setattr: set a system-clock attribute; only
 * CLOCK_ALARM_CURRES is writable.  The requested resolution must
 * lie in [RTC_MAXRES, RTC_MINRES] and the PIT rounding error at
 * that resolution (`adj`) must be under 0.1%.  The change is staged
 * in rtclock.new_ires and applied by rtclock_intr() at the next
 * interrupt boundary.
 */
kern_return_t
sysclk_setattr(
clock_flavor_t flavor,
clock_attr_t attr,
mach_msg_type_number_t count)
{
spl_t s;
int freq;
int adj;
clock_res_t new_ires;
if (count != 1)
return (KERN_FAILURE);
switch (flavor) {
case CLOCK_GET_TIME_RES:
case CLOCK_ALARM_MAXRES:
case CLOCK_ALARM_MINRES:
/* read-only attributes */
return (KERN_FAILURE);
case CLOCK_ALARM_CURRES:
new_ires = *(clock_res_t *) attr;
/* Resolution must be within supported bounds. */
if (new_ires < RTC_MAXRES || new_ires > RTC_MINRES)
return (KERN_INVALID_VALUE);
freq = (NSEC_PER_SEC / new_ires);
/* Rounding error per interrupt at this frequency, in ns. */
adj = (((clknum % freq) * new_ires) / clknum);
if (adj > (new_ires / 1000))
return (KERN_INVALID_VALUE);
LOCK_RTC(s);
if ( freq != rtc_intr_freq ) {
/* Stage the change; rtclock_intr() applies it. */
rtclock.new_ires = new_ires;
new_clknum = clknum;
}
UNLOCK_RTC(s);
return (KERN_SUCCESS);
default:
return (KERN_INVALID_VALUE);
}
}
/*
 * sysclk_setalarm: record the next alarm time.  rtclock_intr()
 * posts clock_alarm_intr() once the clock passes it.
 */
void
sysclk_setalarm(
	mach_timespec_t	*alarm_time)
{
	spl_t	ipl;

	LOCK_RTC(ipl);
	rtclock.alarm_time = *alarm_time;
	RtcAlrm = &rtclock.alarm_time;
	UNLOCK_RTC(ipl);
}
/* Configure the calendar clock by probing the battery-backed clock. */
int
calend_config(void)
{
	int configured = bbc_config();

	return configured;
}
/* The calendar clock needs no runtime initialization; report success. */
int
calend_init(void)
{
	return 1;
}
/*
 * calend_gettime: current calendar time = system clock time plus
 * rtclock.calend_offset.  Fails until the calendar has been set
 * (clock_initialize_calendar or clock_set_calendar_microtime).
 */
kern_return_t
calend_gettime(
mach_timespec_t *cur_time)
{
spl_t s;
LOCK_RTC(s);
if (!rtclock.calend_is_set) {
UNLOCK_RTC(s);
return (KERN_FAILURE);
}
/* Internal variant: safe to call while holding the RTC lock. */
(void) sysclk_gettime_internal(cur_time);
ADD_MACH_TIMESPEC(cur_time, &rtclock.calend_offset);
UNLOCK_RTC(s);
return (KERN_SUCCESS);
}
/* Current calendar time, split into seconds and microseconds. */
void
clock_get_calendar_microtime(
	uint32_t	*secs,
	uint32_t	*microsecs)
{
	mach_timespec_t	ts;

	calend_gettime(&ts);
	*secs = ts.tv_sec;
	*microsecs = ts.tv_nsec / NSEC_PER_USEC;
}
/* Current calendar time, split into seconds and nanoseconds. */
void
clock_get_calendar_nanotime(
	uint32_t	*secs,
	uint32_t	*nanosecs)
{
	mach_timespec_t	ts;

	calend_gettime(&ts);
	*secs = ts.tv_sec;
	*nanosecs = ts.tv_nsec;
}
/*
 * clock_set_calendar_microtime: set the calendar to secs/microsecs.
 * Recomputes calend_offset against the current system clock, then
 * (outside the lock) updates the battery-backed clock and notifies
 * interested parties of the calendar change.
 */
void
clock_set_calendar_microtime(
uint32_t secs,
uint32_t microsecs)
{
mach_timespec_t new_time, curr_time;
spl_t s;
LOCK_RTC(s);
(void) sysclk_gettime_internal(&curr_time);
/* new_time keeps the requested wall time for bbc_settime below. */
rtclock.calend_offset.tv_sec = new_time.tv_sec = secs;
rtclock.calend_offset.tv_nsec = new_time.tv_nsec = microsecs * NSEC_PER_USEC;
SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
rtclock.calend_is_set = TRUE;
UNLOCK_RTC(s);
(void) bbc_settime(&new_time);
host_notify_calendar_change();
}
/*
 * calend_getattr: calendar clock attributes.  *count must be 1.
 * Resolution mirrors the system clock (fixed 1 us on uniprocessor
 * builds, otherwise the current interrupt resolution); the calendar
 * supports no alarms, so all alarm resolutions report 0.
 */
kern_return_t
calend_getattr(
clock_flavor_t flavor,
clock_attr_t attr,
mach_msg_type_number_t *count)
{
spl_t s;
if (*count != 1)
return (KERN_FAILURE);
switch (flavor) {
case CLOCK_GET_TIME_RES:
#if (NCPUS == 1)
LOCK_RTC(s);
*(clock_res_t *) attr = 1000;
UNLOCK_RTC(s);
break;
#else
LOCK_RTC(s);
*(clock_res_t *) attr = rtclock.intr_nsec;
UNLOCK_RTC(s);
break;
#endif
case CLOCK_ALARM_CURRES:
case CLOCK_ALARM_MINRES:
case CLOCK_ALARM_MAXRES:
*(clock_res_t *) attr = 0;
break;
default:
return (KERN_INVALID_VALUE);
}
return (KERN_SUCCESS);
}
#define tickadj	(40*NSEC_PER_USEC)	/* skew applied per tick, ns */
#define bigadj	(NSEC_PER_SEC)		/* above this total, use 10x skew */
/*
 * clock_set_calendar_adjtime: schedule a gradual calendar skew of
 * *secs/*microsecs (adjtime(2) semantics).  Returns the interval
 * (ns) at which clock_adjust_calendar() should be driven, or 0 when
 * no adjustment remains.  On return *secs/*microsecs hold whatever
 * adjustment was previously outstanding.
 */
uint32_t
clock_set_calendar_adjtime(
	int32_t		*secs,
	int32_t		*microsecs)
{
	int64_t		total, ototal;
	uint32_t	interval = 0;
	spl_t		s;

	total = (int64_t)*secs * NSEC_PER_SEC + *microsecs * NSEC_PER_USEC;

	LOCK_RTC(s);
	ototal = rtclock.calend_adjtotal;

	if (total != 0) {
		int32_t delta = tickadj;

		if (total > 0) {
			if (total > bigadj)
				delta *= 10;
			if (delta > total)
				delta = total;	/* final partial step */
		}
		else {
			if (total < -bigadj)
				delta *= 10;
			delta = -delta;
			if (delta < total)
				delta = total;	/* final partial step */
		}

		rtclock.calend_adjtotal = total;
		rtclock.calend_adjdelta = delta;

		interval = (NSEC_PER_SEC / HZ);
	}
	else
		rtclock.calend_adjdelta = rtclock.calend_adjtotal = 0;
	UNLOCK_RTC(s);

	if (ototal == 0)
		*secs = *microsecs = 0;
	else {
		*secs = ototal / NSEC_PER_SEC;
		/*
		 * BUG FIX: the remainder is in nanoseconds; convert to
		 * microseconds so the out-parameter's units match the
		 * inverse of the `total` computation above (previously
		 * returned raw nanoseconds).
		 */
		*microsecs = (ototal % NSEC_PER_SEC) / NSEC_PER_USEC;
	}

	return (interval);
}
/*
 * clock_adjust_calendar: apply one tick's worth of the outstanding
 * adjtime() skew to the calendar offset and shrink the remaining
 * total; the final partial step clamps calend_adjdelta to whatever
 * is left.  Returns the interval for the next call, or 0 when the
 * adjustment is complete.
 */
uint32_t
clock_adjust_calendar(void)
{
uint32_t interval = 0;
int32_t delta;
spl_t s;
LOCK_RTC(s);
delta = rtclock.calend_adjdelta;
ADD_MACH_TIMESPEC_NSEC(&rtclock.calend_offset, delta);
rtclock.calend_adjtotal -= delta;
/* Clamp the next step once less than a full delta remains. */
if (delta > 0) {
if (delta > rtclock.calend_adjtotal)
rtclock.calend_adjdelta = rtclock.calend_adjtotal;
}
else
if (delta < 0) {
if (delta < rtclock.calend_adjtotal)
rtclock.calend_adjdelta = rtclock.calend_adjtotal;
}
if (rtclock.calend_adjdelta != 0)
interval = (NSEC_PER_SEC / HZ);
UNLOCK_RTC(s);
return (interval);
}
/*
 * clock_initialize_calendar: seed the calendar from the battery-
 * backed clock at boot.  If the calendar was already set, only the
 * change notification is (re)issued.
 */
void
clock_initialize_calendar(void)
{
mach_timespec_t bbc_time, curr_time;
spl_t s;
if (bbc_gettime(&bbc_time) != KERN_SUCCESS)
return;
LOCK_RTC(s);
if (!rtclock.calend_is_set) {
(void) sysclk_gettime_internal(&curr_time);
/* offset = battery-backed time - current system clock */
rtclock.calend_offset = bbc_time;
SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
rtclock.calend_is_set = TRUE;
}
UNLOCK_RTC(s);
host_notify_calendar_change();
}
/*
 * clock_timebase_info: report the abstime -> nanoseconds ratio.
 * abstime is already nanoseconds on this platform, so numer always
 * equals denom (ratio 1); the fast-path sentinel just picks which
 * equal pair is reported.
 */
void
clock_timebase_info(
	mach_timebase_info_t	info)
{
	spl_t		s;
	uint32_t	ratio;

	LOCK_RTC(s);
	ratio = (rtclock.timebase_const.denom == 0xFFFFFFFF)
			? rtc_quant_scale : 1;
	info->numer = ratio;
	info->denom = ratio;
	UNLOCK_RTC(s);
}
/* Arm the one-shot timer; rtclock_intr() expires it. */
void
clock_set_timer_deadline(
	uint64_t	deadline)
{
	spl_t	ipl;

	LOCK_RTC(ipl);
	rtclock.timer_deadline = deadline;
	rtclock.timer_is_set = TRUE;
	UNLOCK_RTC(ipl);
}
/*
 * clock_set_timer_func: register the timer-expiration callback.
 * Only the first registration takes effect.
 */
void
clock_set_timer_func(
	clock_timer_func_t	func)
{
	spl_t	ipl;

	LOCK_RTC(ipl);
	if (rtclock.timer_expire == NULL)
		rtclock.timer_expire = func;
	UNLOCK_RTC(ipl);
}
#define RTCLOCK_RESET() { \
outb(PITCTL_PORT, PIT_C0|PIT_NDIVMODE|PIT_READMODE); \
outb(PITCTR0_PORT, (clks_per_int & 0xff)); \
outb(PITCTR0_PORT, (clks_per_int >> 8)); \
}
void
rtclock_reset(void)
{
int s;
#if NCPUS > 1
mp_disable_preemption();
if (cpu_number() != master_cpu) {
mp_enable_preemption();
return;
}
mp_enable_preemption();
#endif
LOCK_RTC(s);
RTCLOCK_RESET();
UNLOCK_RTC(s);
}
/*
 * rtclock_intr: real-time clock interrupt handler.  Advances the
 * published time from the TSC, delivers scheduler ticks at HZ,
 * expires the armed one-shot timer, and posts any due alarm.  The
 * RTC lock is dropped around each upcall.  Returns the decremented
 * interrupt countdown.
 */
int
rtclock_intr(struct i386_interrupt_state *regs)
{
uint64_t abstime;
mach_timespec_t clock_time;
int i;
spl_t s;
boolean_t usermode;
LOCK_RTC(s);
/* Update the published time from the TSC. */
abstime = rdtsctime_to_nanoseconds(); clock_time = nanos_to_timespec(abstime); rtclock.time.tv_nsec = clock_time.tv_nsec;
rtclock.time.tv_sec = clock_time.tv_sec;
rtclock.abstime = abstime;
last_ival = 0;
/* Every rtc_intr_hertz interrupts, deliver a scheduler tick. */
if ((i = --rtc_intr_count) == 0) {
/* Apply any resolution change staged by sysclk_setattr(). */
if (rtclock.new_ires) {
rtc_setvals(new_clknum, rtclock.new_ires);
RTCLOCK_RESET();
rtclock.new_ires = 0;
}
rtc_intr_count = rtc_intr_hertz;
UNLOCK_RTC(s);
/* V86 mode or nonzero CPL means user code was interrupted. */
usermode = (regs->efl & EFL_VM) || ((regs->cs & 0x03) != 0);
hertz_tick(usermode, regs->eip);
LOCK_RTC(s);
}
/* Expire the armed one-shot timer (callback runs unlocked). */
if ( rtclock.timer_is_set &&
rtclock.timer_deadline <= abstime ) {
rtclock.timer_is_set = FALSE;
UNLOCK_RTC(s);
(*rtclock.timer_expire)(abstime);
LOCK_RTC(s);
}
/* Alarm due (within the RtcDelt half-interval slop)?  Post it. */
if (RtcAlrm && (RtcAlrm->tv_sec < RtcTime->tv_sec ||
(RtcAlrm->tv_sec == RtcTime->tv_sec &&
RtcDelt >= RtcAlrm->tv_nsec - RtcTime->tv_nsec))) {
clock_time.tv_sec = 0;
clock_time.tv_nsec = RtcDelt;
ADD_MACH_TIMESPEC (&clock_time, RtcTime);
RtcAlrm = 0;
UNLOCK_RTC(s);
clock_alarm_intr(SYSTEM_CLOCK, &clock_time);
LOCK_RTC(s);
}
UNLOCK_RTC(s);
return (i);
}
/* Store the current uptime (nanoseconds, from the TSC) in *result. */
void
clock_get_uptime(
	uint64_t	*result)
{
	uint64_t now = rdtsctime_to_nanoseconds();

	*result = now;
}
/* Current absolute time; same value clock_get_uptime() stores. */
uint64_t
mach_absolute_time(void)
{
	uint64_t now = rdtsctime_to_nanoseconds();

	return now;
}
/* Deadline = now + interval expressed in scale_factor ns units. */
void
clock_interval_to_deadline(
	uint32_t	interval,
	uint32_t	scale_factor,
	uint64_t	*result)
{
	uint64_t span;

	clock_get_uptime(result);
	clock_interval_to_absolutetime_interval(interval, scale_factor, &span);
	*result += span;
}
/*
 * Convert interval (in units of scale_factor nanoseconds) to an
 * absolute-time span; abstime is nanoseconds on this platform.
 */
void
clock_interval_to_absolutetime_interval(
	uint32_t	interval,
	uint32_t	scale_factor,
	uint64_t	*result)
{
	uint64_t wide_interval = interval;

	*result = wide_interval * scale_factor;
}
/* Deadline = current uptime + abstime interval. */
void
clock_absolutetime_interval_to_deadline(
	uint64_t	abstime,
	uint64_t	*result)
{
	uint64_t now;

	clock_get_uptime(&now);
	*result = now + abstime;
}
/* Absolute time is already in nanoseconds here: identity mapping. */
void
absolutetime_to_nanoseconds(
	uint64_t	abstime,
	uint64_t	*result)
{
	*result = abstime;
}
/* Nanoseconds are already absolute-time units here: identity mapping. */
void
nanoseconds_to_absolutetime(
	uint64_t	nanoseconds,
	uint64_t	*result)
{
	*result = nanoseconds;
}
/*
 * Busy-wait (with cpu_pause) until `interval`, expressed in units
 * of scale_factor nanoseconds, has elapsed.
 */
void
delay_for_interval(
	uint32_t	interval,
	uint32_t	scale_factor)
{
	uint64_t deadline, t;

	clock_interval_to_deadline(interval, scale_factor, &deadline);
	do {
		cpu_pause();
		t = mach_absolute_time();
	} while (t < deadline);
}
/* Busy-wait (with cpu_pause) until the absolute deadline passes. */
void
clock_delay_until(
	uint64_t	deadline)
{
	uint64_t t;

	do {
		cpu_pause();
		t = mach_absolute_time();
	} while (t < deadline);
}
/* BSD-style delay: busy-wait for |usec| microseconds. */
void
delay(
	int	usec)
{
	int magnitude = (usec < 0) ? -usec : usec;

	delay_for_interval(magnitude, NSEC_PER_USEC);
}