#include <platforms.h>
#include <mach_kdb.h>
#include <mach/mach_types.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/clock.h>
#include <kern/host_notify.h>
#include <kern/macro_help.h>
#include <kern/misc_protos.h>
#include <kern/spl.h>
#include <kern/assert.h>
#include <mach/vm_prot.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <i386/ipl.h>
#include <i386/pit.h>
#include <i386/pio.h>
#include <i386/misc_protos.h>
#include <i386/proc_reg.h>
#include <i386/machine_cpu.h>
#include <i386/mp.h>
#include <i386/cpuid.h>
#include <i386/cpu_data.h>
#include <i386/cpu_threads.h>
#include <i386/perfmon.h>
#include <i386/machine_routines.h>
#include <i386/AT386/bbclock_entries.h>
#include <pexpert/pexpert.h>
#include <machine/limits.h>
#include <machine/commpage.h>
#include <sys/kdebug.h>
/*
 * Min/max helper macros.  NOTE: each argument may be evaluated twice,
 * so only pass side-effect-free expressions.
 */
#define MAX(a,b) (((a)>(b))?(a):(b))
#define MIN(a,b) (((a)>(b))?(b):(a))
/* Nanoseconds per clock tick at the fixed 100 Hz tick rate. */
#define NSEC_PER_HZ (NSEC_PER_SEC / 100)
/* CPU frequency reported to user space is rounded to this (10 MHz). */
#define UI_CPUFREQ_ROUNDING_FACTOR	10000000

/* Forward declarations for the system clock (sysclk) operations. */
int		sysclk_config(void);
int		sysclk_init(void);
kern_return_t	sysclk_gettime(
	mach_timespec_t		*cur_time);
kern_return_t	sysclk_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,
	mach_msg_type_number_t	*count);
void		sysclk_setalarm(
	mach_timespec_t		*alarm_time);
/*
 * Operations vector for the system clock.  The zero slots are
 * unimplemented entry points in this driver.
 */
struct clock_ops sysclk_ops = {
	sysclk_config,	sysclk_init,
	sysclk_gettime,	0,
	sysclk_getattr,	0,
	sysclk_setalarm,
};
/* Forward declarations for the calendar (battery-backed) clock. */
int		calend_config(void);
int		calend_init(void);
kern_return_t	calend_gettime(
	mach_timespec_t		*cur_time);
kern_return_t	calend_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,
	mach_msg_type_number_t	*count);

/*
 * Operations vector for the calendar clock; unimplemented entries
 * are zero (including setalarm).
 */
struct clock_ops calend_ops = {
	calend_config,	calend_init,
	calend_gettime,	0,
	calend_getattr,	0,
	0,
};
/* Timer-expiration callback registered once via clock_set_timer_func(). */
static clock_timer_func_t	rtclock_timer_expire;

/* Timer call used to deliver clock alarms (armed in sysclk_setalarm()). */
static timer_call_data_t	rtclock_alarm_timer;
static void	rtclock_alarm_expire(
	timer_call_param_t	p0,
	timer_call_param_t	p1);

/*
 * Shared calendar-clock state, protected by .lock which is always
 * taken with clock interrupts blocked (RTC_LOCK/RTC_UNLOCK below).
 */
struct {
	mach_timespec_t		calend_offset;		/* calendar minus system clock */
	boolean_t		calend_is_set;
	int64_t			calend_adjtotal;	/* outstanding adjtime total (ns) */
	int32_t			calend_adjdelta;	/* per-tick adjustment step (ns) */
	uint32_t		boottime;		/* boot time in seconds */
	mach_timebase_info_data_t	timebase_const;	/* TSC -> ns conversion factors */
	decl_simple_lock_data(,lock)
} rtclock;

boolean_t	rtc_initialized = FALSE;
clock_res_t	rtc_intr_nsec = NSEC_PER_HZ;	/* tick interval, nanoseconds */
uint64_t	rtc_cycle_count;		/* TSC cycles per 1/20 second (see timeRDTSC) */
uint64_t	rtc_cyc_per_sec;		/* TSC cycles per second */
uint32_t	rtc_boot_frequency;		/* frequency recorded at first speed-step */
uint32_t	rtc_quant_scale;		/* fixed-point TSC -> ns multiplier */
uint32_t	rtc_quant_shift;		/* fixed-point shift: 32 or 26 */
uint64_t	rtc_decrementer_min;		/* smallest decrement we will program */

/* LAPIC timer ticks per nanosecond ratio (rtc_lapic_timer_calibrate()). */
static mach_timebase_info_data_t	rtc_lapic_scale;

/* Block/unblock clock interrupts around rtclock.lock. */
#define RTC_INTRS_OFF(s) \
	(s) = splclock()
#define RTC_INTRS_ON(s) \
	splx(s)
#define RTC_LOCK(s) \
MACRO_BEGIN \
	RTC_INTRS_OFF(s); \
	simple_lock(&rtclock.lock); \
MACRO_END
#define RTC_UNLOCK(s) \
MACRO_BEGIN \
	simple_unlock(&rtclock.lock); \
	RTC_INTRS_ON(s); \
MACRO_END

static uint64_t	rtc_set_cyc_per_sec(uint64_t cycles);
uint64_t	rtc_nanotime_read(void);

/* Sentinel denominator selecting the fast 32.32 fixed-point path. */
#define RTC_FAST_DENOM	0xFFFFFFFF
/*
 * create_mul_quant_GHZ:
 *
 * Build the fixed-point multiplier ((NSEC_PER_SEC/20) << shift) / quant
 * used to convert TSC deltas to nanoseconds; quant is the calibrated
 * cycle count per 1/20 second.
 */
inline static uint32_t
create_mul_quant_GHZ(int shift, uint32_t quant)
{
	uint64_t	numerator = ((uint64_t)NSEC_PER_SEC / 20) << shift;

	return (uint32_t)(numerator / quant);
}
/*
 * fast_get_nano_from_abs:
 *
 * 64x32 fixed-point multiply: computes the high 64 bits of
 * (value * scale), i.e. (value * scale) >> 32, using two 32x32
 * mull instructions.  Used when timebase_const.denom is the
 * RTC_FAST_DENOM sentinel.
 */
inline static uint64_t
fast_get_nano_from_abs(uint64_t value, int scale)
{
	asm (" movl %%edx,%%esi \n\t"	/* stash high half of value */
	     " mull %%ecx \n\t"		/* low(value) * scale */
	     " movl %%edx,%%edi \n\t"	/* keep its high word as carry-in */
	     " movl %%esi,%%eax \n\t"
	     " mull %%ecx \n\t"		/* high(value) * scale */
	     " xorl %%ecx,%%ecx \n\t"
	     " addl %%edi,%%eax \n\t"	/* fold in carry from low product */
	     " adcl %%ecx,%%edx "
	     : "+A" (value)
	     : "c" (scale)
	     : "%esi", "%edi");
	return value;
}
/*
 * nanos_to_timespec:
 *
 * Split a nanosecond count into a mach_timespec_t with one 32-bit
 * divide: quotient (seconds) and remainder (nanoseconds) land in the
 * union's two words.  NOTE(review): divl faults if the second count
 * exceeds 32 bits — assumes nanos < 2^32 seconds worth; confirm.
 */
inline static mach_timespec_t
nanos_to_timespec(uint64_t nanos)
{
	union {
		mach_timespec_t	ts;
		uint64_t	u64;
	} ret;

	ret.u64 = nanos;
	asm volatile("divl %1" : "+A" (ret.u64) : "r" (NSEC_PER_SEC));
	return ret.ts;
}
/*
 * longmul:
 *
 * 64x32 -> 96-bit multiply: multiplies *abstime by multiplicand and
 * stores the three 32-bit words of the product into result[0..2]
 * (low word first).  Consumed by longdiv() below.
 */
inline static void
longmul(uint64_t *abstime, uint32_t multiplicand, uint32_t *result)
{
	asm volatile(
	" pushl %%ebx \n\t"
	" movl %%eax,%%ebx \n\t"	/* ebx = abstime pointer */
	" movl (%%eax),%%eax \n\t"	/* low word */
	" mull %%ecx \n\t"
	" xchg %%eax,%%ebx \n\t"	/* save low-word product, restore ptr */
	" pushl %%edx \n\t"
	" movl 4(%%eax),%%eax \n\t"	/* high word */
	" mull %%ecx \n\t"
	" movl %2,%%ecx \n\t"		/* ecx = result pointer */
	" movl %%ebx,(%%ecx) \n\t"
	" popl %%ebx \n\t"
	" addl %%ebx,%%eax \n\t"	/* add carry from low product */
	" popl %%ebx \n\t"
	" movl %%eax,4(%%ecx) \n\t"
	" adcl $0,%%edx \n\t"
	" movl %%edx,8(%%ecx) // and save it"
	: : "a"(abstime), "c"(multiplicand), "m"(result));
}
/*
 * longdiv:
 *
 * 96-bit / 32-bit divide: divides the three-word value in numer[0..2]
 * (low word first, as produced by longmul()) by denom and returns the
 * 64-bit quotient.  NOTE(review): assumes the quotient fits in 64 bits;
 * divl faults on overflow.
 */
inline static uint64_t
longdiv(uint32_t *numer, uint32_t denom)
{
	uint64_t	result;

	asm volatile(
	" pushl %%ebx \n\t"
	" movl %%eax,%%ebx \n\t"	/* ebx = numer pointer */
	" movl 8(%%eax),%%edx \n\t"	/* divide the two upper words first */
	" movl 4(%%eax),%%eax \n\t"
	" divl %%ecx \n\t"
	" xchg %%ebx,%%eax \n\t"	/* save high quotient, restore ptr */
	" movl (%%eax),%%eax \n\t"	/* then the low word with remainder */
	" divl %%ecx \n\t"
	" xchg %%ebx,%%edx \n\t"	/* edx:eax = 64-bit quotient */
	" popl %%ebx \n\t"
	: "=A"(result) : "a"(numer),"c"(denom));
	return result;
}
/*
 * enable_PIT2:
 *
 * Enable PIT channel 2 via the NMI status/control port 0x61:
 * clear the speaker-data bit and set the timer-2 gate bit.
 */
inline static void
enable_PIT2(void)
{
	asm volatile(
	" inb   $0x61,%%al \n\t"
	" and   $0xFC,%%al  \n\t"	/* clear bits 0-1 */
	" or    $1,%%al     \n\t"	/* set the gate (bit 0) */
	" outb  %%al,$0x61  \n\t"
	: : : "%al" );
}
/*
 * disable_PIT2:
 *
 * Disable PIT channel 2 by clearing its gate bit (and the speaker
 * bit) in port 0x61.
 */
inline static void
disable_PIT2(void)
{
	asm volatile(
	" inb   $0x61,%%al \n\t"
	" and   $0xFC,%%al  \n\t"	/* clear gate and speaker bits */
	" outb  %%al,$0x61  \n\t"
	: : : "%al" );
}
/*
 * set_PIT2:
 *
 * Program PIT channel 2 with a 16-bit initial count: command 0xB8 to
 * port 0x43 selects channel 2, lobyte/hibyte access, then the two
 * count bytes go to port 0x42.  Spins reading the counter back until
 * the high byte matches, i.e. the new count has been loaded.
 */
inline static void
set_PIT2(int value)
{
	asm volatile(
	" movb  $0xB8,%%al \n\t"
	" outb	%%al,$0x43 \n\t"	/* control word for channel 2 */
	" movb	%%dl,%%al  \n\t"
	" outb	%%al,$0x42 \n\t"	/* low byte of count */
	" movb	%%dh,%%al  \n\t"
	" outb	%%al,$0x42 \n"		/* high byte of count */
"1:	  inb	$0x42,%%al \n\t"
	" inb	$0x42,%%al \n\t"
	" cmp	%%al,%%dh  \n\t"	/* wait until high byte readback matches */
	" jne	1b"
	: : "d"(value) : "%al");
}
/*
 * get_PIT2:
 *
 * Latch and read PIT channel 2: writes the counter-latch command
 * (0x80) to port 0x43, samples the TSC, then reads the two latched
 * count bytes from port 0x42 into *value.  Returns the TSC value
 * taken at the moment of the latch.
 */
inline static uint64_t
get_PIT2(unsigned int *value)
{
	register uint64_t	result;

	asm volatile(
	" xorl	%%ecx,%%ecx \n\t"
	" movb	$0x80,%%al  \n\t"
	" outb	%%al,$0x43  \n\t"	/* latch channel 2 */
	" rdtsc		    \n\t"	/* timestamp the latch */
	" pushl	%%eax	    \n\t"
	" inb	$0x42,%%al  \n\t"	/* low byte of latched count */
	" movb	%%al,%%cl   \n\t"
	" inb	$0x42,%%al  \n\t"	/* high byte */
	" movb	%%al,%%ch   \n\t"
	" popl	%%eax	    "
	: "=A"(result), "=c"(*value));
	return result;
}
/*
 * timeRDTSC:
 *
 * Calibrate the TSC against PIT channel 2.  A counting window of
 * CLKNUM/20 PIT clocks (nominally 1/20 second) is timed with the TSC;
 * the residual PIT count (0..5) selects a precomputed correction
 * factor.  Runs with interrupts disabled.  Returns the number of TSC
 * cycles elapsed over the window, i.e. cycles per 1/20 second.
 *
 * Fix: panic message spelled "calibation"; corrected to "calibration".
 */
static uint64_t
timeRDTSC(void)
{
	int		attempts = 0;
	uint64_t	latchTime;
	uint64_t	saveTime, intermediate;
	unsigned int	timerValue, lastValue;
	boolean_t	int_enabled;
#define SAMPLE_CLKS_EXACT	(((double) CLKNUM) / 20.0)
#define SAMPLE_CLKS_INT		((int) CLKNUM / 20)
#define SAMPLE_NSECS		(2000000000LL)
#define SAMPLE_MULTIPLIER	(((double)SAMPLE_NSECS)*SAMPLE_CLKS_EXACT)
#define ROUND64(x)		((uint64_t)((x) + 0.5))
	/* Correction factors for a residual PIT count of 0..5. */
	uint64_t	scale[6] = {
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-0)),
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-1)),
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-2)),
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-3)),
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-4)),
		ROUND64(SAMPLE_MULTIPLIER/(double)(SAMPLE_CLKS_INT-5))
	};

	int_enabled = ml_set_interrupts_enabled(FALSE);

restart:
	if (attempts >= 2)		/* give up after two bad samples */
		panic("timeRDTSC() calibration failed with %d attempts\n", attempts);
	attempts++;

	/* Measure the overhead of latching/reading the PIT itself. */
	enable_PIT2();
	set_PIT2(0);
	latchTime = rdtsc64();
	latchTime = get_PIT2(&timerValue) - latchTime;

	/* Open the sampling window and timestamp its start. */
	set_PIT2(SAMPLE_CLKS_INT);
	saveTime = rdtsc64();
	get_PIT2(&lastValue);
	get_PIT2(&lastValue);

	/* Spin until the PIT has counted (nearly) all the way down. */
	do {
		intermediate = get_PIT2(&timerValue);
		if (timerValue > lastValue) {
			/* Counter moved the wrong way: sample is invalid. */
			printf("Hey we are going backwards! %u -> %u, restarting timing\n",
				timerValue,lastValue);
			set_PIT2(0);
			disable_PIT2();
			goto restart;
		}
		lastValue = timerValue;
	} while (timerValue > 5);

	kprintf("timerValue %d\n",timerValue);
	kprintf("intermediate 0x%016llx\n",intermediate);
	kprintf("saveTime 0x%016llx\n",saveTime);

	/* Scale the raw TSC delta by the residual-count correction. */
	intermediate -= saveTime;
	intermediate *= scale[timerValue];
	intermediate /= SAMPLE_NSECS;
	intermediate += latchTime;

	set_PIT2(0);
	disable_PIT2();
	ml_set_interrupts_enabled(int_enabled);
	return intermediate;
}
/*
 * tsc_to_nanoseconds:
 *
 * Convert a TSC cycle count to nanoseconds using the timebase
 * constants installed by rtc_set_cyc_per_sec().  The RTC_FAST_DENOM
 * sentinel selects the 32.32 fixed-point multiply; otherwise a full
 * 96-bit multiply followed by a 96/32 divide is performed.
 */
static uint64_t
tsc_to_nanoseconds(uint64_t abstime)
{
	uint32_t	numer;
	uint32_t	denom;
	uint32_t	intermediate[3];

	numer = rtclock.timebase_const.numer;
	denom = rtclock.timebase_const.denom;
	if (denom == RTC_FAST_DENOM) {
		abstime = fast_get_nano_from_abs(abstime, numer);
	} else {
		longmul(&abstime, numer, intermediate);
		abstime = longdiv(intermediate, denom);
	}
	return abstime;
}
/*
 * tsc_to_timespec:
 *
 * Return the current nanotime as a mach_timespec_t.
 */
inline static mach_timespec_t
tsc_to_timespec(void)
{
	return nanos_to_timespec(rtc_nanotime_read());
}
#define DECREMENTER_MAX		UINT_MAX
/*
 * deadline_to_decrementer:
 *
 * Compute the decrementer value for a deadline relative to 'now',
 * clamped to [rtc_decrementer_min, DECREMENTER_MAX].  A deadline
 * already in the past yields the minimum decrement.
 */
static uint32_t
deadline_to_decrementer(
	uint64_t	deadline,
	uint64_t	now)
{
	uint64_t	delta;

	if (deadline <= now)
		return rtc_decrementer_min;

	delta = deadline - now;
	if (delta < rtc_decrementer_min)
		delta = rtc_decrementer_min;
	if (delta > DECREMENTER_MAX)
		delta = DECREMENTER_MAX;
	return delta;
}
/*
 * lapic_time_countdown:
 *
 * Program the local APIC timer one-shot with initial_count, busy-wait
 * for it to reach zero, and return the elapsed wall time in
 * nanoseconds (measured with the TSC).  Runs with interrupts disabled.
 */
static inline uint64_t
lapic_time_countdown(uint32_t initial_count)
{
	boolean_t		state;
	uint64_t		start_time;
	uint64_t		stop_time;
	lapic_timer_count_t	count;

	state = ml_set_interrupts_enabled(FALSE);
	lapic_set_timer(FALSE, one_shot, divide_by_1, initial_count);
	start_time = rdtsc64();
	do {
		lapic_get_timer(NULL, NULL, NULL, &count);
	} while (count > 0);
	stop_time = rdtsc64();
	ml_set_interrupts_enabled(state);

	return tsc_to_nanoseconds(stop_time - start_time);
}
/*
 * rtc_lapic_timer_calibrate:
 *
 * Derive the LAPIC-timer-ticks to nanoseconds ratio by timing two
 * countdowns: the first gives a rough tick rate, the second refines
 * it with a countdown sized to take ~rtc_intr_nsec ns.  The ratio is
 * stored in rtc_lapic_scale for rtc_lapic_set_timer().  No-op if the
 * CPU has no APIC.  NOTE(review): divides by the measured nsecs —
 * assumes the countdown takes a nonzero time; confirm.
 */
static void
rtc_lapic_timer_calibrate(void)
{
	uint32_t	nsecs;
	uint64_t	countdown;

	if (!(cpuid_features() & CPUID_FEATURE_APIC))
		return;

	nsecs = (uint32_t) lapic_time_countdown(rtc_intr_nsec);
	countdown = (uint64_t)rtc_intr_nsec * (uint64_t)rtc_intr_nsec / nsecs;
	nsecs = (uint32_t) lapic_time_countdown((uint32_t) countdown);
	/* numer is truncated to 32 bits here. */
	rtc_lapic_scale.numer = countdown;
	rtc_lapic_scale.denom = nsecs;
	kprintf("rtc_lapic_timer_calibrate() scale: %d/%d\n",
		(uint32_t) countdown, nsecs);
}
/*
 * rtc_lapic_set_timer:
 *
 * Arm the local APIC one-shot timer to fire after 'interval'
 * nanoseconds, converting to timer ticks with the calibrated
 * rtc_lapic_scale ratio.
 */
static void
rtc_lapic_set_timer(
	uint32_t	interval)
{
	uint64_t	ticks;

	assert(rtc_lapic_scale.denom);

	ticks = (interval * (uint64_t) rtc_lapic_scale.numer) / rtc_lapic_scale.denom;
	lapic_set_timer(TRUE, one_shot, divide_by_1, (uint32_t) ticks);
}
/*
 * rtc_lapic_start_ticking:
 *
 * Schedule this cpu's first periodic tick one NSEC_PER_HZ in the
 * future and arm the LAPIC timer for it.
 */
static void
rtc_lapic_start_ticking(void)
{
	uint64_t	abstime;
	uint64_t	first_tick;
	uint64_t	decr;

	abstime = mach_absolute_time();
	first_tick = abstime + NSEC_PER_HZ;
	current_cpu_datap()->cpu_rtc_tick_deadline = first_tick;
	decr = deadline_to_decrementer(first_tick, abstime);
	rtc_lapic_set_timer(decr);
}
/*
 * sysclk_config:
 *
 * Configure the system clock device.  Only the master cpu performs
 * the one-time setup (alarm timer call, rtclock lock); other cpus
 * return immediately.  Always reports success (1).
 */
int
sysclk_config(void)
{
	mp_disable_preemption();
	if (cpu_number() != master_cpu) {
		mp_enable_preemption();
		return(1);
	}
	mp_enable_preemption();

	timer_call_setup(&rtclock_alarm_timer, rtclock_alarm_expire, NULL);
	simple_lock_init(&rtclock.lock, 0);
	return (1);
}
/*
 * rtc_nanotime_set_commpage:
 *
 * Publish the given rtc_nanotime record to the commpage so user
 * space can compute nanotime.  Only the master cpu publishes; calls
 * from other cpus are ignored.
 */
static inline void
rtc_nanotime_set_commpage(rtc_nanotime_t *rntp)
{
	commpage_nanotime_t	cp_nanotime;

	/* Only the master cpu updates the commpage struct. */
	if (cpu_number() != master_cpu)
		return;

	cp_nanotime.nt_base_tsc = rntp->rnt_tsc;
	cp_nanotime.nt_base_ns = rntp->rnt_nanos;
	cp_nanotime.nt_scale = rntp->rnt_scale;
	cp_nanotime.nt_shift = rntp->rnt_shift;
	commpage_set_nanotime(&cp_nanotime);
}
/*
 * rtc_nanotime_init:
 *
 * Initialize this cpu's rtc_nanotime record.  The master cpu seeds
 * its record directly from the TSC and the calibrated scale/shift;
 * other cpus copy the master's record, retrying until a consistent
 * snapshot is obtained (rnt_tsc unchanged across the copy).
 *
 * Fix: the current-cpu pointer expression had been mojibake-garbled
 * ("¤t_cpu_datap()"); restored to "&current_cpu_datap()".
 */
static void
rtc_nanotime_init(void)
{
	rtc_nanotime_t	*rntp = &current_cpu_datap()->cpu_rtc_nanotime;
	rtc_nanotime_t	*master_rntp = &cpu_datap(master_cpu)->cpu_rtc_nanotime;

	if (cpu_number() == master_cpu) {
		rntp->rnt_tsc = rdtsc64();
		rntp->rnt_nanos = tsc_to_nanoseconds(rntp->rnt_tsc);
		rntp->rnt_scale = rtc_quant_scale;
		rntp->rnt_shift = rtc_quant_shift;
		rntp->rnt_step_tsc = 0ULL;
		rntp->rnt_step_nanos = 0ULL;
	} else {
		/* Snapshot the master's record consistently. */
		do {
			*rntp = *master_rntp;
		} while (rntp->rnt_tsc != master_rntp->rnt_tsc);
	}
}
/*
 * _rtc_nanotime_update:
 *
 * Advance a nanotime record to the given TSC value: nanos is
 * recomputed from the last step point plus the converted cycle
 * delta since that step.
 */
static inline void
_rtc_nanotime_update(rtc_nanotime_t *rntp, uint64_t tsc)
{
	uint64_t	cycles_since_step = tsc - rntp->rnt_step_tsc;

	rntp->rnt_nanos = rntp->rnt_step_nanos + tsc_to_nanoseconds(cycles_since_step);
	rntp->rnt_tsc = tsc;
}
/*
 * rtc_nanotime_update:
 *
 * Bring this cpu's nanotime record up to the current TSC and publish
 * it to the commpage.  Must be called with preemption disabled and
 * interrupts off (asserted).
 *
 * Fix: the current-cpu pointer expression had been mojibake-garbled
 * ("¤t_cpu_datap()"); restored to "&current_cpu_datap()".
 */
static void
rtc_nanotime_update(void)
{
	rtc_nanotime_t	*rntp = &current_cpu_datap()->cpu_rtc_nanotime;

	assert(get_preemption_level() > 0);
	assert(!ml_get_interrupts_enabled());

	_rtc_nanotime_update(rntp, rdtsc64());
	rtc_nanotime_set_commpage(rntp);
}
/*
 * rtc_nanotime_scale_update:
 *
 * Re-base this cpu's nanotime record after a frequency change:
 * advance nanos to the current TSC under the old scale, install the
 * new scale/shift, reset the step point to "now", and republish to
 * the commpage.  Interrupts must be off (asserted).
 *
 * Fix: the current-cpu pointer expression had been mojibake-garbled
 * ("¤t_cpu_datap()"); restored to "&current_cpu_datap()".
 */
static void
rtc_nanotime_scale_update(void)
{
	rtc_nanotime_t	*rntp = &current_cpu_datap()->cpu_rtc_nanotime;
	uint64_t	tsc = rdtsc64();

	assert(!ml_get_interrupts_enabled());

	/* Close out the old-scale epoch... */
	_rtc_nanotime_update(rntp, tsc);
	/* ...then start a new one with the updated conversion factors. */
	rntp->rnt_scale = rtc_quant_scale;
	rntp->rnt_shift = rtc_quant_shift;
	rntp->rnt_step_tsc = rntp->rnt_tsc;
	rntp->rnt_step_nanos = rntp->rnt_nanos;
	rtc_nanotime_set_commpage(rntp);
}
/*
 * _rtc_nanotime_read:
 *
 * Compute the current nanotime from this cpu's record: base nanos
 * plus the TSC delta converted via the fixed-point scale.  Deltas
 * too large for the 32-bit fast path fall back to the full
 * conversion.  Returns 0 if the record is not yet initialized
 * (scale == 0).
 *
 * Fix: the current-cpu pointer expression had been mojibake-garbled
 * ("¤t_cpu_datap()"); restored to "&current_cpu_datap()".
 */
static uint64_t
_rtc_nanotime_read(void)
{
	rtc_nanotime_t	*rntp = &current_cpu_datap()->cpu_rtc_nanotime;
	uint64_t	rnt_tsc;
	uint32_t	rnt_scale;
	uint32_t	rnt_shift;
	uint64_t	rnt_nanos;
	uint64_t	tsc;
	uint64_t	tsc_delta;

	rnt_scale = rntp->rnt_scale;
	if (rnt_scale == 0)
		return 0ULL;		/* not calibrated yet */

	rnt_shift = rntp->rnt_shift;
	rnt_nanos = rntp->rnt_nanos;
	rnt_tsc = rntp->rnt_tsc;
	tsc = rdtsc64();

	tsc_delta = tsc - rnt_tsc;
	/* Large deltas would overflow the fixed-point multiply. */
	if ((tsc_delta >> 32) != 0)
		return rnt_nanos + tsc_to_nanoseconds(tsc_delta);

	if (rnt_shift == 32)
		return rnt_nanos + ((tsc_delta * rnt_scale) >> 32);
	else
		return rnt_nanos + ((tsc_delta * rnt_scale) >> rnt_shift);
}
/*
 * rtc_nanotime_read:
 *
 * Lock-free read of the current nanotime: retry while the record's
 * base TSC changes underneath us (i.e. a concurrent update raced
 * with the read).
 *
 * Fix: the current-cpu pointer expression had been mojibake-garbled
 * ("¤t_cpu_datap()"); restored to "&current_cpu_datap()".
 */
uint64_t
rtc_nanotime_read(void)
{
	uint64_t	result;
	uint64_t	rnt_tsc;
	rtc_nanotime_t	*rntp = &current_cpu_datap()->cpu_rtc_nanotime;

	do {
		rnt_tsc = rntp->rnt_tsc;
		result = _rtc_nanotime_read();
	} while (rnt_tsc != rntp->rnt_tsc);

	return result;
}
/*
 * rtc_clock_stepping:
 *
 * Called as a speed-step begins: close out the current nanotime
 * epoch (with interrupts off) so time stays monotonic across the
 * frequency change.  The frequency arguments are unused here.
 */
void
rtc_clock_stepping(__unused uint32_t new_frequency,
		   __unused uint32_t old_frequency)
{
	boolean_t	saved_state;

	saved_state = ml_set_interrupts_enabled(FALSE);
	rtc_nanotime_scale_update();
	ml_set_interrupts_enabled(saved_state);
}
/*
 * rtc_clock_stepped:
 *
 * Called after a speed-step completes: rescale the cycle count to
 * the new frequency (relative to the boot frequency, recorded on
 * first use) and re-base the nanotime record.
 */
void
rtc_clock_stepped(uint32_t new_frequency, uint32_t old_frequency)
{
	boolean_t	saved_state;

	saved_state = ml_set_interrupts_enabled(FALSE);

	/* Remember the frequency we booted at, first time through. */
	if (rtc_boot_frequency == 0)
		rtc_boot_frequency = old_frequency;

	(void) rtc_set_cyc_per_sec(rtc_cycle_count * new_frequency /
				   rtc_boot_frequency);
	rtc_nanotime_scale_update();

	ml_set_interrupts_enabled(saved_state);
}
/*
 * rtc_sleep_wakeup:
 *
 * Re-base the nanotime record after the TSC was reset by sleep:
 * restart the step epoch at the current (post-reset) TSC, keeping
 * the accumulated nanosecond count, republish the commpage data,
 * and restart the periodic tick.
 *
 * Fix: the current-cpu pointer expression had been mojibake-garbled
 * ("¤t_cpu_datap()"); restored to "&current_cpu_datap()".
 */
void
rtc_sleep_wakeup(void)
{
	rtc_nanotime_t	*rntp = &current_cpu_datap()->cpu_rtc_nanotime;
	boolean_t	istate;

	istate = ml_set_interrupts_enabled(FALSE);

	rntp->rnt_tsc = rdtsc64();
	rntp->rnt_step_tsc = 0ULL;
	rntp->rnt_step_nanos = rntp->rnt_nanos;
	rtc_nanotime_set_commpage(rntp);

	rtc_lapic_start_ticking();

	ml_set_interrupts_enabled(istate);
}
/*
 * sysclk_init:
 *
 * Initialize the system clock on this cpu.  The master cpu performs
 * the TSC calibration, LAPIC timer calibration, and global setup;
 * every cpu then initializes its own nanotime record and starts its
 * periodic tick.  Always reports success (1).
 */
int
sysclk_init(void)
{
	uint64_t	cycles;

	mp_disable_preemption();
	if (cpu_number() == master_cpu) {
		/* Calibrate TSC cycles per 1/20 s and derive conversion constants. */
		rtc_cycle_count = timeRDTSC();
		cycles = rtc_set_cyc_per_sec(rtc_cycle_count);
		gPEClockFrequencyInfo.cpu_frequency_min_hz = cycles;
		gPEClockFrequencyInfo.cpu_frequency_max_hz = cycles;
		printf("[RTCLOCK] frequency %llu (%llu)\n",
			cycles, rtc_cyc_per_sec);

		rtc_lapic_timer_calibrate();

		/* Minimum decrement corresponds to one microsecond. */
		rtc_decrementer_min = deadline_to_decrementer(NSEC_PER_USEC,
							      0ULL);
		lapic_set_timer_func((i386_intr_func_t) rtclock_intr);
		clock_timebase_init();
		rtc_initialized = TRUE;
	}

	rtc_nanotime_init();
	rtc_lapic_start_ticking();
	mp_enable_preemption();
	return (1);
}
/*
 * sysclk_gettime_internal:
 *
 * Fill *cur_time with the current system time; always succeeds.
 */
static kern_return_t
sysclk_gettime_internal(
	mach_timespec_t	*cur_time)
{
	mach_timespec_t	now = tsc_to_timespec();

	*cur_time = now;
	return KERN_SUCCESS;
}
/*
 * sysclk_gettime:
 *
 * Public entry: return the current system clock time.
 */
kern_return_t
sysclk_gettime(
	mach_timespec_t	*cur_time)
{
	return (sysclk_gettime_internal(cur_time));
}
/*
 * sysclk_gettime_interrupts_disabled:
 *
 * Read the system clock for callers already running with interrupts
 * disabled; the (always-successful) return status is discarded.
 */
void
sysclk_gettime_interrupts_disabled(
	mach_timespec_t	*cur_time)
{
	(void) sysclk_gettime_internal(cur_time);
}
/*
 * rtc_set_cyc_per_sec:
 *
 * Install TSC->ns conversion constants from a cycles-per-1/20-second
 * measurement.  Fast CPUs (> 1 GHz, i.e. cycles > NSEC_PER_SEC/20)
 * use a 32.32 fixed-point multiplier flagged by RTC_FAST_DENOM;
 * slower ones use a 26-bit shift with an exact numer/denom pair.
 * Also publishes the (rounded) CPU frequency to gPEClockFrequencyInfo.
 * Returns the rounded cycles-per-second value.
 */
static uint64_t
rtc_set_cyc_per_sec(uint64_t cycles)
{
	if (cycles > (NSEC_PER_SEC/20)) {
		/* Fast path: single multiply, denom is a sentinel. */
		rtc_quant_shift = 32;
		rtc_quant_scale = create_mul_quant_GHZ(rtc_quant_shift, cycles);
		rtclock.timebase_const.numer = rtc_quant_scale;
		rtclock.timebase_const.denom = RTC_FAST_DENOM;
	} else {
		/* Slow path: exact ratio, 26-bit fixed point. */
		rtc_quant_shift = 26;
		rtc_quant_scale = create_mul_quant_GHZ(rtc_quant_shift, cycles);
		rtclock.timebase_const.numer = NSEC_PER_SEC/20;
		rtclock.timebase_const.denom = cycles;
	}
	rtc_cyc_per_sec = cycles*20;	/* scale from 1/20 s to 1 s */

	/* Round the advertised frequency to UI_CPUFREQ_ROUNDING_FACTOR. */
	cycles = ((rtc_cyc_per_sec + (UI_CPUFREQ_ROUNDING_FACTOR/2))
			/ UI_CPUFREQ_ROUNDING_FACTOR)
				* UI_CPUFREQ_ROUNDING_FACTOR;

	/* cpu_clock_rate_hz is 32-bit; saturate rather than truncate. */
	if (cycles >= 0x100000000ULL) {
		gPEClockFrequencyInfo.cpu_clock_rate_hz = 0xFFFFFFFFUL;
	} else {
		gPEClockFrequencyInfo.cpu_clock_rate_hz = (unsigned long)cycles;
	}
	gPEClockFrequencyInfo.cpu_frequency_hz = cycles;

	kprintf("[RTCLOCK] frequency %llu (%llu)\n", cycles, rtc_cyc_per_sec);
	return(cycles);
}
/*
 * clock_get_system_microtime:
 *
 * Return the system clock as seconds and microseconds.
 */
void
clock_get_system_microtime(
	uint32_t	*secs,
	uint32_t	*microsecs)
{
	mach_timespec_t	ts;

	(void) sysclk_gettime_internal(&ts);

	*secs = ts.tv_sec;
	*microsecs = ts.tv_nsec / NSEC_PER_USEC;
}
/*
 * clock_get_system_nanotime:
 *
 * Return the system clock as seconds and nanoseconds.
 */
void
clock_get_system_nanotime(
	uint32_t	*secs,
	uint32_t	*nanosecs)
{
	mach_timespec_t	ts;

	(void) sysclk_gettime_internal(&ts);

	*secs = ts.tv_sec;
	*nanosecs = ts.tv_nsec;
}
/*
 * sysclk_getattr:
 *
 * Report an attribute of the system clock.  Only single-value
 * queries are supported (*count must be 1); the time resolution is
 * the tick interval, alarm resolutions are reported as 0.
 */
kern_return_t
sysclk_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,
	mach_msg_type_number_t	*count)
{
	clock_res_t	*resp = (clock_res_t *) attr;

	if (*count != 1)
		return KERN_FAILURE;

	switch (flavor) {

	case CLOCK_GET_TIME_RES:
		*resp = rtc_intr_nsec;
		break;

	case CLOCK_ALARM_CURRES:
	case CLOCK_ALARM_MAXRES:
	case CLOCK_ALARM_MINRES:
		*resp = 0;
		break;

	default:
		return KERN_INVALID_VALUE;
	}
	return KERN_SUCCESS;
}
/*
 * sysclk_setalarm:
 *
 * Arm the clock alarm to fire at the given absolute timespec,
 * expressed as a nanosecond deadline.
 */
void
sysclk_setalarm(
	mach_timespec_t	*alarm_time)
{
	uint64_t	deadline;

	deadline = (uint64_t) alarm_time->tv_sec * NSEC_PER_SEC
			+ alarm_time->tv_nsec;
	timer_call_enter(&rtclock_alarm_timer, deadline);
}
/*
 * calend_config:
 *
 * Configure the calendar clock; delegates to the battery-backed
 * clock driver.
 */
int
calend_config(void)
{
	return (bbc_config());
}
/*
 * calend_init:
 *
 * Initialize the calendar clock; nothing to do, always succeeds.
 */
int
calend_init(void)
{
	return 1;
}
/*
 * calend_gettime:
 *
 * Return the calendar time: system clock plus the calendar offset,
 * under the rtclock lock.  Fails if the calendar has not been set.
 */
kern_return_t
calend_gettime(
	mach_timespec_t	*cur_time)
{
	spl_t	s;

	RTC_LOCK(s);
	if (!rtclock.calend_is_set) {
		RTC_UNLOCK(s);
		return (KERN_FAILURE);
	}

	(void) sysclk_gettime_internal(cur_time);
	ADD_MACH_TIMESPEC(cur_time, &rtclock.calend_offset);
	RTC_UNLOCK(s);

	return (KERN_SUCCESS);
}
/*
 * clock_get_calendar_microtime:
 *
 * Return the calendar clock as seconds and microseconds.
 */
void
clock_get_calendar_microtime(
	uint32_t	*secs,
	uint32_t	*microsecs)
{
	mach_timespec_t	ts;

	calend_gettime(&ts);

	*secs = ts.tv_sec;
	*microsecs = ts.tv_nsec / NSEC_PER_USEC;
}
/*
 * clock_get_calendar_nanotime:
 *
 * Return the calendar clock as seconds and nanoseconds.
 */
void
clock_get_calendar_nanotime(
	uint32_t	*secs,
	uint32_t	*nanosecs)
{
	mach_timespec_t	ts;

	calend_gettime(&ts);

	*secs = ts.tv_sec;
	*nanosecs = ts.tv_nsec;
}
/*
 * clock_set_calendar_microtime:
 *
 * Set the calendar clock to the given seconds/microseconds: recompute
 * the calendar offset relative to the system clock, adjust the
 * recorded boot time by the change, write the new time through to the
 * battery-backed clock, and notify interested parties.
 */
void
clock_set_calendar_microtime(
	uint32_t	secs,
	uint32_t	microsecs)
{
	mach_timespec_t	new_time, curr_time;
	uint32_t	old_offset;
	spl_t		s;

	new_time.tv_sec = secs;
	new_time.tv_nsec = microsecs * NSEC_PER_USEC;

	RTC_LOCK(s);
	old_offset = rtclock.calend_offset.tv_sec;
	(void) sysclk_gettime_internal(&curr_time);
	/* offset = requested calendar time - current system time */
	rtclock.calend_offset = new_time;
	SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
	/* Keep boottime consistent with the shifted calendar. */
	rtclock.boottime += rtclock.calend_offset.tv_sec - old_offset;
	rtclock.calend_is_set = TRUE;
	RTC_UNLOCK(s);

	(void) bbc_settime(&new_time);

	host_notify_calendar_change();
}
/*
 * calend_getattr:
 *
 * Report an attribute of the calendar clock.  Only single-value
 * queries are supported (*count must be 1); the time resolution is
 * the tick interval, alarm resolutions are reported as 0.
 */
kern_return_t
calend_getattr(
	clock_flavor_t		flavor,
	clock_attr_t		attr,
	mach_msg_type_number_t	*count)
{
	clock_res_t	*resp = (clock_res_t *) attr;

	if (*count != 1)
		return KERN_FAILURE;

	switch (flavor) {

	case CLOCK_GET_TIME_RES:
		*resp = rtc_intr_nsec;
		break;

	case CLOCK_ALARM_CURRES:
	case CLOCK_ALARM_MINRES:
	case CLOCK_ALARM_MAXRES:
		*resp = 0;
		break;

	default:
		return KERN_INVALID_VALUE;
	}
	return KERN_SUCCESS;
}
/* Per-tick calendar slew step (ns) and the threshold beyond which
 * the step is multiplied by 10. */
#define tickadj		(40*NSEC_PER_USEC)
#define bigadj		(NSEC_PER_SEC)

/*
 * clock_set_calendar_adjtime:
 *
 * Begin (or cancel, if zero) a gradual calendar adjustment of
 * *secs/*microsecs, applied in per-tick steps by
 * clock_adjust_calendar().  On return, *secs/*microsecs hold the
 * previously outstanding adjustment.  Returns the tick interval (ns)
 * if an adjustment is in progress, else 0.
 */
uint32_t
clock_set_calendar_adjtime(
	int32_t		*secs,
	int32_t		*microsecs)
{
	int64_t		total, ototal;
	uint32_t	interval = 0;
	spl_t		s;

	total = (int64_t)*secs * NSEC_PER_SEC + *microsecs * NSEC_PER_USEC;

	RTC_LOCK(s);
	ototal = rtclock.calend_adjtotal;

	if (total != 0) {
		int32_t		delta = tickadj;

		/* Pick the per-tick step: +/- tickadj (x10 for large
		 * totals), but never overshoot the total. */
		if (total > 0) {
			if (total > bigadj)
				delta *= 10;
			if (delta > total)
				delta = total;
		}
		else {
			if (total < -bigadj)
				delta *= 10;
			delta = -delta;
			if (delta < total)
				delta = total;
		}

		rtclock.calend_adjtotal = total;
		rtclock.calend_adjdelta = delta;

		interval = NSEC_PER_HZ;
	}
	else
		rtclock.calend_adjdelta = rtclock.calend_adjtotal = 0;

	RTC_UNLOCK(s);

	/* Report the adjustment that was previously outstanding. */
	if (ototal == 0)
		*secs = *microsecs = 0;
	else {
		*secs = ototal / NSEC_PER_SEC;
		*microsecs = ototal % NSEC_PER_SEC;
	}

	return (interval);
}
/*
 * clock_adjust_calendar:
 *
 * Apply one step of an in-progress calendar adjustment: add the
 * current delta to the calendar offset, reduce the outstanding
 * total, and shrink the final step so it lands exactly on zero.
 * Returns the tick interval if more adjustment remains, else 0.
 */
uint32_t
clock_adjust_calendar(void)
{
	uint32_t	interval = 0;
	int32_t		delta;
	spl_t		s;

	RTC_LOCK(s);
	delta = rtclock.calend_adjdelta;
	ADD_MACH_TIMESPEC_NSEC(&rtclock.calend_offset, delta);

	rtclock.calend_adjtotal -= delta;

	/* Clamp the next step to whatever remains. */
	if (delta > 0) {
		if (delta > rtclock.calend_adjtotal)
			rtclock.calend_adjdelta = rtclock.calend_adjtotal;
	}
	else
	if (delta < 0) {
		if (delta < rtclock.calend_adjtotal)
			rtclock.calend_adjdelta = rtclock.calend_adjtotal;
	}

	if (rtclock.calend_adjdelta != 0)
		interval = NSEC_PER_HZ;

	RTC_UNLOCK(s);

	return (interval);
}
/*
 * clock_initialize_calendar:
 *
 * Seed the calendar clock from the battery-backed clock: record the
 * boot time (first call only) and compute the calendar offset as
 * bbc time minus current system time.  Does nothing if the bbc
 * cannot be read.
 */
void
clock_initialize_calendar(void)
{
	mach_timespec_t	bbc_time, curr_time;
	spl_t		s;

	if (bbc_gettime(&bbc_time) != KERN_SUCCESS)
		return;

	RTC_LOCK(s);
	if (rtclock.boottime == 0)
		rtclock.boottime = bbc_time.tv_sec;
	(void) sysclk_gettime_internal(&curr_time);
	rtclock.calend_offset = bbc_time;
	SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time);
	rtclock.calend_is_set = TRUE;
	RTC_UNLOCK(s);

	host_notify_calendar_change();
}
/*
 * clock_get_boottime_nanotime:
 *
 * Return the recorded boot time; only whole seconds are kept, so
 * the nanosecond part is always zero.
 */
void
clock_get_boottime_nanotime(
	uint32_t	*secs,
	uint32_t	*nanosecs)
{
	*secs = rtclock.boottime;
	*nanosecs = 0;
}
/*
 * clock_timebase_info:
 *
 * Absolute time is already in nanoseconds on this platform, so the
 * timebase ratio is 1/1.
 */
void
clock_timebase_info(
	mach_timebase_info_t	info)
{
	info->numer = 1;
	info->denom = 1;
}
/*
 * clock_set_timer_deadline:
 *
 * Set this cpu's one-shot timer deadline.  If the deadline precedes
 * the next periodic tick, the LAPIC timer is re-armed for it now;
 * otherwise the interrupt path will pick it up.  Skipped while the
 * expiration callback is running (has_expired), which re-arms on
 * return.  Requires preemption disabled and a registered callback.
 */
void
clock_set_timer_deadline(
	uint64_t	deadline)
{
	spl_t		s;
	cpu_data_t	*pp = current_cpu_datap();
	rtclock_timer_t	*mytimer = &pp->cpu_rtc_timer;
	uint64_t	abstime;
	uint64_t	decr;

	assert(get_preemption_level() > 0);
	assert(rtclock_timer_expire);

	RTC_INTRS_OFF(s);
	mytimer->deadline = deadline;
	mytimer->is_set = TRUE;
	if (!mytimer->has_expired) {
		abstime = mach_absolute_time();
		/* Re-arm only if this deadline beats the next tick. */
		if (mytimer->deadline < pp->cpu_rtc_tick_deadline) {
			decr = deadline_to_decrementer(mytimer->deadline,
						       abstime);
			rtc_lapic_set_timer(decr);
			pp->cpu_rtc_intr_deadline = mytimer->deadline;
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1) |
					DBG_FUNC_NONE, decr, 2, 0, 0, 0);
		}
	}
	RTC_INTRS_ON(s);
}
/*
 * clock_set_timer_func:
 *
 * Register the timer-expiration callback; only the first
 * registration takes effect.
 */
void
clock_set_timer_func(
	clock_timer_func_t	func)
{
	if (rtclock_timer_expire == NULL)
		rtclock_timer_expire = func;
}
/*
 * rtclock_intr:
 *
 * LAPIC timer interrupt handler: services the periodic hertz tick
 * if due, fires the one-shot timer callback if its deadline passed,
 * then re-arms the LAPIC for whichever of the two deadlines comes
 * first.  Runs with interrupts off and preemption disabled.
 */
void
rtclock_intr(struct i386_interrupt_state *regs)
{
	uint64_t	abstime;
	uint32_t	latency;
	uint64_t	decr;
	uint64_t	decr_tick;
	uint64_t	decr_timer;
	cpu_data_t	*pp = current_cpu_datap();
	rtclock_timer_t	*mytimer = &pp->cpu_rtc_timer;

	assert(get_preemption_level() > 0);
	assert(!ml_get_interrupts_enabled());

	abstime = _rtc_nanotime_read();
	/* How late we are relative to the programmed deadline.
	 * NOTE(review): the cast applies to abstime before the subtract —
	 * confirm the intended truncation semantics. */
	latency = (uint32_t) abstime - pp->cpu_rtc_intr_deadline;

	/* Periodic tick processing, when its deadline has arrived. */
	if (pp->cpu_rtc_tick_deadline <= abstime) {
		rtc_nanotime_update();
		clock_deadline_for_periodic_event(
			NSEC_PER_HZ, abstime, &pp->cpu_rtc_tick_deadline);
		hertz_tick(
#if STAT_TIME
			NSEC_PER_HZ,
#endif
			/* user mode if V86 or a non-kernel code segment */
			(regs->efl & EFL_VM) || ((regs->cs & 0x03) != 0),
			regs->eip);
	}

	abstime = _rtc_nanotime_read();
	/* One-shot timer expiration; has_expired guards re-arming by
	 * clock_set_timer_deadline() during the callback. */
	if (mytimer->is_set && mytimer->deadline <= abstime) {
		mytimer->has_expired = TRUE;
		mytimer->is_set = FALSE;
		(*rtclock_timer_expire)(abstime);
		assert(!ml_get_interrupts_enabled());
		mytimer->has_expired = FALSE;
	}

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) | DBG_FUNC_NONE,
		-latency, (uint32_t)regs->eip, 0, 0, 0);

	/* Re-arm for the earlier of the tick and timer deadlines. */
	abstime = _rtc_nanotime_read();
	decr_tick = deadline_to_decrementer(pp->cpu_rtc_tick_deadline, abstime);
	decr_timer = (mytimer->is_set) ?
			deadline_to_decrementer(mytimer->deadline, abstime) :
			DECREMENTER_MAX;
	decr = MIN(decr_tick, decr_timer);
	pp->cpu_rtc_intr_deadline = abstime + decr;

	rtc_lapic_set_timer(decr);

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1) | DBG_FUNC_NONE,
		decr, 3, 0, 0, 0);
}
/*
 * rtclock_alarm_expire:
 *
 * Timer-call handler for clock alarms: deliver the alarm interrupt
 * with the current system time.  Both parameters are unused.
 */
static void
rtclock_alarm_expire(
	__unused timer_call_param_t	p0,
	__unused timer_call_param_t	p1)
{
	mach_timespec_t	now;

	(void) sysclk_gettime_internal(&now);
	clock_alarm_intr(SYSTEM_CLOCK, &now);
}
/*
 * clock_get_uptime:
 *
 * Store the current absolute time (nanoseconds) in *result.
 */
void
clock_get_uptime(
	uint64_t	*result)
{
	*result = rtc_nanotime_read();
}
/*
 * mach_absolute_time:
 *
 * Absolute time is nanoseconds on this platform; return it directly.
 */
uint64_t
mach_absolute_time(void)
{
	return (rtc_nanotime_read());
}
/*
 * absolutetime_to_microtime:
 *
 * Split a 64-bit nanosecond count into seconds and microseconds with
 * two 32-bit divides.  NOTE(review): divl with a 64-bit dividend
 * faults if the quotient exceeds 32 bits — assumes the seconds count
 * fits in 32 bits; confirm.
 */
void
absolutetime_to_microtime(
	uint64_t	abstime,
	uint32_t	*secs,
	uint32_t	*microsecs)
{
	uint32_t	remain;

	/* *secs = abstime / NSEC_PER_SEC; remain = abstime % NSEC_PER_SEC */
	asm volatile(
			"divl %3"
				: "=a" (*secs), "=d" (remain)
				: "A" (abstime), "r" (NSEC_PER_SEC));
	/* *microsecs = remain / NSEC_PER_USEC */
	asm volatile(
			"divl %3"
				: "=a" (*microsecs)
				: "0" (remain), "d" (0), "r" (NSEC_PER_USEC));
}
/*
 * clock_interval_to_deadline:
 *
 * Compute an absolute deadline 'interval * scale_factor' units from
 * now.
 */
void
clock_interval_to_deadline(
	uint32_t	interval,
	uint32_t	scale_factor,
	uint64_t	*result)
{
	uint64_t	now, span;

	clock_get_uptime(&now);
	clock_interval_to_absolutetime_interval(interval, scale_factor, &span);
	*result = now + span;
}
/*
 * clock_interval_to_absolutetime_interval:
 *
 * Scale an interval into absolute-time units (nanoseconds here):
 * a simple widening multiply.
 */
void
clock_interval_to_absolutetime_interval(
	uint32_t	interval,
	uint32_t	scale_factor,
	uint64_t	*result)
{
	uint64_t	scaled = (uint64_t)interval * scale_factor;

	*result = scaled;
}
/*
 * clock_absolutetime_interval_to_deadline:
 *
 * Compute an absolute deadline 'abstime' units from now.
 */
void
clock_absolutetime_interval_to_deadline(
	uint64_t	abstime,
	uint64_t	*result)
{
	uint64_t	now;

	clock_get_uptime(&now);
	*result = now + abstime;
}
/*
 * absolutetime_to_nanoseconds:
 *
 * Absolute time is already nanoseconds on this platform: identity.
 */
void
absolutetime_to_nanoseconds(
	uint64_t	abstime,
	uint64_t	*result)
{
	*result = abstime;
}
/*
 * nanoseconds_to_absolutetime:
 *
 * Absolute time is already nanoseconds on this platform: identity.
 */
void
nanoseconds_to_absolutetime(
	uint64_t	nanoseconds,
	uint64_t	*result)
{
	*result = nanoseconds;
}
/*
 * machine_delay_until:
 *
 * Spin (with cpu_pause hints) until the absolute deadline passes.
 */
void
machine_delay_until(
	uint64_t	deadline)
{
	uint64_t	t;

	do {
		cpu_pause();
		t = mach_absolute_time();
	} while (t < deadline);
}