#include <mach/mach_types.h>
#include <kern/lock.h>
#include <kern/spl.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <kern/host_notify.h>
#include <IOKit/IOPlatformExpert.h>
#include <machine/commpage.h>
#include <mach/mach_traps.h>
#include <mach/mach_time.h>
/* Length of one scheduler tick (10 ms) in absolute-time units; set for real
 * in clock_timebase_init(). */
uint32_t hz_tick_interval = 1;
/* Spin lock protecting clock_calend and the calendar-adjustment state below.
 * Always taken at splclock() so clock interrupts cannot deadlock against it. */
decl_simple_lock_data(,clock_lock)
#define clock_lock() \
simple_lock(&clock_lock)
#define clock_unlock() \
simple_unlock(&clock_lock)
#define clock_lock_init() \
simple_lock_init(&clock_lock, 0)
/*
 * The calendar: wall-clock time is derived as
 *   epoch (seconds) + offset (absolute time), corrected while a
 * negative adjustment is in progress (see the calendar getters).
 */
static struct clock_calend {
uint64_t epoch; /* calendar epoch, in seconds */
uint64_t offset; /* sub-epoch calendar offset, absolute-time units */
int32_t adjdelta; /* per-period adjustment step, ns (sign = direction) */
uint64_t adjstart; /* abs time the current negative step began */
uint32_t adjoffset; /* one step, converted to absolute-time units */
} clock_calend;
#if CONFIG_DTRACE
/* Double-buffered, generation-stamped copy of clock_calend for lock-free
 * readers (dtrace probe context).  Odd gen == slot is being updated. */
static struct unlocked_clock_calend {
struct clock_calend calend;
uint32_t gen;
} flipflop[ 2];
static void clock_track_calend_nowait(void);
#endif
/* Calendar adjustment (adjtime) tuning: step period, per-period skew,
 * and the threshold above which the skew is scaled up 10x. */
#define calend_adjperiod (NSEC_PER_SEC / 100)
#define calend_adjskew (40 * NSEC_PER_USEC)
#define calend_adjbig (NSEC_PER_SEC)
static int64_t calend_adjtotal; /* remaining adjustment, ns */
static uint64_t calend_adjdeadline; /* next adjustment timer deadline */
static uint32_t calend_adjinterval; /* calend_adjperiod in abs-time units */
static timer_call_data_t calend_adjcall;
static uint32_t calend_adjactive; /* outstanding adjustment callouts */
static uint32_t calend_set_adjustment(
long *secs,
int *microsecs);
static void calend_adjust_call(void);
static uint32_t calend_adjust(void);
static thread_call_data_t calend_wakecall; /* RTC resync after sleep/wake */
extern void IOKitResetTime(void);
void _clock_delay_until_deadline(uint64_t interval,
uint64_t deadline);
/* Calendar value (seconds) at boot; preserved across calendar sets so
 * uptime-since-boot can be derived. */
static uint64_t clock_boottime;
/* Add secs/frac to rsecs/rfrac with carry; unit is the fraction radix. */
#define TIME_ADD(rsecs, secs, rfrac, frac, unit) \
MACRO_BEGIN \
if (((rfrac) += (frac)) >= (unit)) { \
(rfrac) -= (unit); \
(rsecs) += 1; \
} \
(rsecs) += (secs); \
MACRO_END
/* Subtract secs/frac from rsecs/rfrac with borrow.  NOTE(review): the (int)
 * cast assumes the fractional type is no wider than int — confirm. */
#define TIME_SUB(rsecs, secs, rfrac, frac, unit) \
MACRO_BEGIN \
if ((int)((rfrac) -= (frac)) < 0) { \
(rfrac) += (unit); \
(rsecs) -= 1; \
} \
(rsecs) -= (secs); \
MACRO_END
/*
 * clock_config:
 *
 * One-time boot configuration of the clock subsystem: initializes the
 * calendar lock, the timer call that drives gradual (adjtime) calendar
 * adjustment, and the thread call used to resync the calendar after a
 * sleep/wake transition, then runs the legacy configuration path.
 */
void
clock_config(void)
{
clock_lock_init();
/* Periodic application of adjtime deltas (see calend_adjust_call). */
timer_call_setup(&calend_adjcall, (timer_call_func_t)calend_adjust_call, NULL);
/* IOKitResetTime re-reads the platform clock on wake. */
thread_call_setup(&calend_wakecall, (thread_call_func_t)IOKitResetTime, NULL);
clock_oldconfig();
}
/*
 * clock_init:
 *
 * Per-boot initialization; defers to the legacy init path.
 */
void
clock_init(void)
{
clock_oldinit();
}
/*
 * clock_timebase_init:
 *
 * Called once the absolute-time conversion factors are known:
 * precomputes the adjustment period and the 10 ms tick length in
 * absolute-time units, then initializes the scheduler timebase.
 */
void
clock_timebase_init(void)
{
uint64_t abstime;
nanoseconds_to_absolutetime(calend_adjperiod, &abstime);
calend_adjinterval = (uint32_t)abstime;
/* NSEC_PER_SEC / 100 == 10 ms, the apparent hz tick. */
nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
hz_tick_interval = (uint32_t)abstime;
sched_timebase_init();
}
kern_return_t
mach_timebase_info_trap(
struct mach_timebase_info_trap_args *args)
{
mach_vm_address_t out_info_addr = args->info;
mach_timebase_info_data_t info;
clock_timebase_info(&info);
copyout((void *)&info, out_info_addr, sizeof (info));
return (KERN_SUCCESS);
}
/*
 * clock_get_calendar_microtime:
 *
 * Returns the current calendar value in seconds and microseconds;
 * thin wrapper that discards the absolute-time output.
 */
void
clock_get_calendar_microtime(
clock_sec_t *secs,
clock_usec_t *microsecs)
{
clock_get_calendar_absolute_and_microtime(secs, microsecs, NULL);
}
/*
 * clock_get_calendar_absolute_and_microtime:
 *
 * Returns the current calendar value in seconds/microseconds and,
 * when abstime is non-NULL, the absolute time it was sampled at.
 * Snapshot is taken with the calendar lock held at splclock().
 */
void
clock_get_calendar_absolute_and_microtime(
clock_sec_t *secs,
clock_usec_t *microsecs,
uint64_t *abstime)
{
uint64_t now;
spl_t s;
s = splclock();
clock_lock();
now = mach_absolute_time();
if (abstime)
*abstime = now;
/*
 * While a negative adjustment step is in progress, withhold up to
 * one step (adjoffset) of elapsed time: once a full step has passed
 * since adjstart subtract the whole step, otherwise hold the value
 * at the start of the step so the calendar never runs backward.
 */
if (clock_calend.adjdelta < 0) {
uint32_t t32;
if (now > clock_calend.adjstart) {
t32 = (uint32_t)(now - clock_calend.adjstart);
if (t32 > clock_calend.adjoffset)
now -= clock_calend.adjoffset;
else
now = clock_calend.adjstart;
}
}
/* Calendar value = epoch seconds + (offset + adjusted abs time). */
now += clock_calend.offset;
absolutetime_to_microtime(now, secs, microsecs);
*secs += (clock_sec_t)clock_calend.epoch;
clock_unlock();
splx(s);
}
/*
 * clock_get_calendar_nanotime:
 *
 * Returns the current calendar value in seconds/nanoseconds.
 * Granularity is limited to microseconds: the value is computed with
 * absolutetime_to_microtime() and scaled up by NSEC_PER_USEC.
 */
void
clock_get_calendar_nanotime(
clock_sec_t *secs,
clock_nsec_t *nanosecs)
{
uint64_t now;
spl_t s;
s = splclock();
clock_lock();
now = mach_absolute_time();
/* Withhold time while a negative adjustment step is in progress;
 * see clock_get_calendar_absolute_and_microtime(). */
if (clock_calend.adjdelta < 0) {
uint32_t t32;
if (now > clock_calend.adjstart) {
t32 = (uint32_t)(now - clock_calend.adjstart);
if (t32 > clock_calend.adjoffset)
now -= clock_calend.adjoffset;
else
now = clock_calend.adjstart;
}
}
now += clock_calend.offset;
/* Microsecond result placed in *nanosecs, then scaled to ns. */
absolutetime_to_microtime(now, secs, nanosecs);
*nanosecs *= NSEC_PER_USEC;
*secs += (clock_sec_t)clock_calend.epoch;
clock_unlock();
splx(s);
}
/*
 * clock_gettimeofday:
 *
 * Kernel interface for commpage implementation of gettimeofday().
 * In the common (non-negative adjustment) case the calendar state is
 * also published to the commpage so user space can compute the time
 * without trapping; otherwise the value is computed here with the
 * negative-adjustment withholding applied.
 */
void
clock_gettimeofday(
clock_sec_t *secs,
clock_usec_t *microsecs)
{
uint64_t now;
spl_t s;
s = splclock();
clock_lock();
now = mach_absolute_time();
if (clock_calend.adjdelta >= 0) {
/* Fast path: compute and publish via the commpage. */
clock_gettimeofday_set_commpage(now, clock_calend.epoch, clock_calend.offset, secs, microsecs);
}
else {
uint32_t t32;
/* Withhold time during a negative adjustment step; see
 * clock_get_calendar_absolute_and_microtime(). */
if (now > clock_calend.adjstart) {
t32 = (uint32_t)(now - clock_calend.adjstart);
if (t32 > clock_calend.adjoffset)
now -= clock_calend.adjoffset;
else
now = clock_calend.adjstart;
}
now += clock_calend.offset;
absolutetime_to_microtime(now, secs, microsecs);
*secs += (clock_sec_t)clock_calend.epoch;
}
clock_unlock();
splx(s);
}
/*
 * clock_set_calendar_microtime:
 *
 * Sets the current calendar value by recalculating the epoch and
 * offset from the system clock.  Also adjusts clock_boottime by the
 * same delta (preserving uptime accounting), cancels any adjtime
 * adjustment in progress, writes the new value to the platform
 * clock (whole seconds only), and sends calendar change
 * notifications.
 */
void
clock_set_calendar_microtime(
	clock_sec_t	secs,
	clock_usec_t	microsecs)
{
	clock_sec_t	sys;
	clock_usec_t	microsys;
	clock_sec_t	newsecs;
	spl_t		s;

	/*
	 * The platform clock holds whole seconds; round to nearest.
	 * (Was `microsecs < 500*USEC_PER_SEC`, which is always true
	 * since microsecs < USEC_PER_SEC — the intended threshold is
	 * half a second.)
	 */
	newsecs = (microsecs < (USEC_PER_SEC / 2))? secs: secs + 1;

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	/*
	 * Calculate the new calendar epoch based on the
	 * requested value and the system clock.
	 * (Second argument was mojibake: "µsys" for "&microsys".)
	 */
	clock_get_system_microtime(&sys, &microsys);
	TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

	/* Adjust the boottime by the epoch delta. */
	clock_boottime += secs - clock_calend.epoch;

	/* Set the new calendar epoch. */
	clock_calend.epoch = secs;

	nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

	/* Cancel any adjustment in progress. */
	calend_adjtotal = clock_calend.adjdelta = 0;

	clock_unlock();

	/* Set the new value for the platform clock. */
	PESetGMTTimeOfDay(newsecs);

	splx(s);

	/* Send host notifications. */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}
/*
 * clock_initialize_calendar:
 *
 * Set the calendar and related clocks from the platform clock at
 * boot (and after wake, via the wake thread call).  Also sends
 * calendar change notifications.
 */
void
clock_initialize_calendar(void)
{
	clock_sec_t	sys, secs = PEGetGMTTimeOfDay();
	clock_usec_t	microsys, microsecs = 0;
	spl_t		s;

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	/* Ignore a platform clock that is behind the recorded boottime. */
	if ((long)secs >= (long)clock_boottime) {
		/* Initialize the boot time the first time through. */
		if (clock_boottime == 0)
			clock_boottime = secs;

		/*
		 * Calculate the new calendar epoch based on the
		 * platform clock and the system clock.
		 * (Second argument was mojibake: "µsys" for "&microsys".)
		 */
		clock_get_system_microtime(&sys, &microsys);
		TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

		/* Set the new calendar epoch. */
		clock_calend.epoch = secs;

		nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

		/* Cancel any adjustment in progress. */
		calend_adjtotal = clock_calend.adjdelta = 0;
	}

	clock_unlock();
	splx(s);

	/* Send host notifications. */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}
/*
 * clock_get_boottime_nanotime:
 *
 * Return the boot-time calendar value.  Only whole seconds are
 * tracked, so the nanosecond component is always reported as zero.
 */
void
clock_get_boottime_nanotime(
	clock_sec_t *secs,
	clock_nsec_t *nanosecs)
{
	spl_t intr_state;

	intr_state = splclock();
	clock_lock();

	/* clock_boottime is protected by the calendar lock. */
	*secs = (clock_sec_t)clock_boottime;
	*nanosecs = 0;

	clock_unlock();
	splx(intr_state);
}
/*
 * clock_adjtime:
 *
 * Interface to the adjtime() system call: requests a gradual calendar
 * adjustment of *secs/*microsecs and returns the previously
 * outstanding adjustment in the same parameters.  Arms the periodic
 * adjustment timer when a new adjustment begins, or cancels it when
 * the request is zero.
 */
void
clock_adjtime(
long *secs,
int *microsecs)
{
uint32_t interval;
spl_t s;
s = splclock();
clock_lock();
interval = calend_set_adjustment(secs, microsecs);
if (interval != 0) {
calend_adjdeadline = mach_absolute_time() + interval;
/* timer_call_enter returns TRUE if the call was already queued;
 * only count a newly armed callout as active. */
if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_SYS_CRITICAL))
calend_adjactive++;
}
else
if (timer_call_cancel(&calend_adjcall))
calend_adjactive--;
clock_unlock();
splx(s);
}
/*
 * calend_set_adjustment:
 *
 * Begin (or, for a zero request, cancel) a gradual calendar
 * adjustment of *secs/*microsecs, returning the previously
 * outstanding adjustment in the same parameters.  Returns the
 * interval at which calend_adjust() should be driven, or 0 if no
 * adjustment remains.
 *
 * Called from clock_adjtime() with the calendar lock held.
 */
static uint32_t
calend_set_adjustment(
long *secs,
int *microsecs)
{
uint64_t now, t64;
int64_t total, ototal;
uint32_t interval = 0;
/* Total requested adjustment, in nanoseconds. */
total = ((int64_t)*secs * (int64_t)NSEC_PER_SEC) + (*microsecs * (int64_t)NSEC_PER_USEC);
/* Invalidate the commpage timestamp so user space traps to the
 * kernel while the calendar is changing. */
commpage_disable_timestamp();
now = mach_absolute_time();
ototal = calend_adjtotal;
if (total != 0) {
/* Step size per period: calend_adjskew ns, scaled 10x for large
 * (>= calend_adjbig) requests, capped at the remaining total. */
int32_t delta = calend_adjskew;
if (total > 0) {
if (total > (int64_t) calend_adjbig)
delta *= 10;
if (delta > total)
delta = (int32_t)total;
/* One step, converted to absolute-time units. */
nanoseconds_to_absolutetime((uint64_t)delta, &t64);
clock_calend.adjoffset = (uint32_t)t64;
}
else {
if (total < (int64_t) -calend_adjbig)
delta *= 10;
delta = -delta;
if (delta < total)
delta = (int32_t)total;
/* Negative adjustments also record the step start so the
 * calendar getters can withhold elapsed time. */
clock_calend.adjstart = now;
nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
clock_calend.adjoffset = (uint32_t)t64;
}
calend_adjtotal = total;
clock_calend.adjdelta = delta;
interval = calend_adjinterval;
}
else {
/* Zero request: cancel any adjustment in progress. */
calend_adjtotal = clock_calend.adjdelta = 0;
}
/* Hand back whatever adjustment was previously outstanding. */
if (ototal != 0) {
*secs = (long)(ototal / (long)NSEC_PER_SEC);
*microsecs = (int)((ototal % (int)NSEC_PER_SEC) / (int)NSEC_PER_USEC);
}
else
*secs = *microsecs = 0;
#if CONFIG_DTRACE
clock_track_calend_nowait();
#endif
return (interval);
}
/*
 * calend_adjust_call:
 *
 * Timer callout driving the gradual calendar adjustment: applies one
 * step via calend_adjust() and re-arms the timer while work remains.
 * The calend_adjactive counter ensures only the most recently armed
 * callout does any work if clock_adjtime() re-armed concurrently.
 */
static void
calend_adjust_call(void)
{
uint32_t interval;
spl_t s;
s = splclock();
clock_lock();
if (--calend_adjactive == 0) {
interval = calend_adjust();
if (interval != 0) {
clock_deadline_for_periodic_event(interval, mach_absolute_time(), &calend_adjdeadline);
if (!timer_call_enter(&calend_adjcall, calend_adjdeadline, TIMER_CALL_SYS_CRITICAL))
calend_adjactive++;
}
}
clock_unlock();
splx(s);
}
/*
 * calend_adjust:
 *
 * Apply one period's worth of the outstanding calendar adjustment:
 * fold adjoffset into the calendar offset and reduce calend_adjtotal
 * by adjdelta, shrinking the final step when less than a full step
 * remains.  Returns the interval at which to re-arm the timer, or 0
 * when the adjustment has completed.
 *
 * Called from calend_adjust_call() with the calendar lock held.
 */
static uint32_t
calend_adjust(void)
{
uint64_t now, t64;
int32_t delta;
uint32_t interval = 0;
commpage_disable_timestamp();
now = mach_absolute_time();
delta = clock_calend.adjdelta;
if (delta > 0) {
/* Advancing: credit one step to the calendar offset. */
clock_calend.offset += clock_calend.adjoffset;
calend_adjtotal -= delta;
/* Less than a full step left: shrink the final step. */
if (delta > calend_adjtotal) {
clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;
nanoseconds_to_absolutetime((uint64_t)delta, &t64);
clock_calend.adjoffset = (uint32_t)t64;
}
}
else
if (delta < 0) {
/* Retarding: debit one step from the calendar offset. */
clock_calend.offset -= clock_calend.adjoffset;
calend_adjtotal -= delta;
/* Less than a full step (in magnitude) left: shrink it. */
if (delta < calend_adjtotal) {
clock_calend.adjdelta = delta = (int32_t)calend_adjtotal;
nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
clock_calend.adjoffset = (uint32_t)t64;
}
/* Restart the withholding window for the next negative step. */
if (clock_calend.adjdelta != 0)
clock_calend.adjstart = now;
}
if (clock_calend.adjdelta != 0)
interval = calend_adjinterval;
#if CONFIG_DTRACE
clock_track_calend_nowait();
#endif
return (interval);
}
/*
 * clock_wakeup_calendar:
 *
 * Kick the wake thread call so the calendar is resynchronized with
 * the platform clock (IOKitResetTime) after a sleep/wake transition.
 */
void
clock_wakeup_calendar(void)
{
thread_call_enter(&calend_wakecall);
}
/*
 * mach_wait_until_continue:
 *
 * Continuation for mach_wait_until_trap(): translate the wait result
 * into the trap's return code and return to user space.
 */
static void
mach_wait_until_continue(
	__unused void *parameter,
	wait_result_t wresult)
{
	kern_return_t kr;

	if (wresult == THREAD_INTERRUPTED)
		kr = KERN_ABORTED;
	else
		kr = KERN_SUCCESS;

	thread_syscall_return(kr);
	/*NOTREACHED*/
}
/*
 * mach_wait_until_trap:
 *
 * Suspend the calling thread until the supplied absolute-time
 * deadline has passed.  Returns KERN_ABORTED if the wait was
 * interrupted, KERN_SUCCESS otherwise.
 */
kern_return_t
mach_wait_until_trap(
struct mach_wait_until_trap_args *args)
{
uint64_t deadline = args->deadline;
wait_result_t wresult;
wresult = assert_wait_deadline_with_leeway((event_t)mach_wait_until_trap, THREAD_ABORTSAFE,
TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
/* If the thread actually blocks it may resume in
 * mach_wait_until_continue rather than returning here. */
if (wresult == THREAD_WAITING)
wresult = thread_block(mach_wait_until_continue);
return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
}
/*
 * clock_delay_until:
 *
 * Delay the caller until the given absolute-time deadline; returns
 * immediately if the deadline has already passed.
 */
void
clock_delay_until(
	uint64_t deadline)
{
	uint64_t ctime = mach_absolute_time();

	if (ctime < deadline)
		_clock_delay_until_deadline(deadline - ctime, deadline);
}
/*
 * _clock_delay_until_deadline:
 *
 * Wait until the given deadline.  interval is the remaining time
 * (deadline - now) in absolute-time units; a zero interval returns
 * immediately.  Spins when the machine layer says a spin is cheaper,
 * or when blocking is unsafe (preemption disabled or interrupts
 * off); otherwise blocks on an assert_wait with the deadline.
 */
void
_clock_delay_until_deadline(
uint64_t interval,
uint64_t deadline)
{
if (interval == 0)
return;
if ( ml_delay_should_spin(interval) ||
get_preemption_level() != 0 ||
ml_get_interrupts_enabled() == FALSE ) {
machine_delay_until(interval, deadline);
} else {
assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline);
thread_block(THREAD_CONTINUE_NULL);
}
}
/*
 * delay_for_interval:
 *
 * Delay the caller for interval units of scale_factor nanoseconds
 * each (e.g. scale_factor == NSEC_PER_USEC delays for microseconds).
 */
void
delay_for_interval(
	uint32_t interval,
	uint32_t scale_factor)
{
	uint64_t ival_abs;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &ival_abs);

	_clock_delay_until_deadline(ival_abs, mach_absolute_time() + ival_abs);
}
/*
 * delay:
 *
 * Delay the caller for |usec| microseconds; historical callers pass
 * negative counts, which are treated as magnitudes.
 */
void
delay(
	int usec)
{
	/*
	 * Negate through a wider type: the old `-usec` was undefined
	 * behavior for usec == INT_MIN (signed overflow).
	 */
	uint32_t usecs = (usec < 0)? (uint32_t)(-(int64_t)usec): (uint32_t)usec;

	delay_for_interval(usecs, NSEC_PER_USEC);
}
/*
 * clock_interval_to_deadline:
 *
 * Convert interval units of scale_factor nanoseconds each into an
 * absolute-time deadline measured from the current time.
 */
void
clock_interval_to_deadline(
	uint32_t interval,
	uint32_t scale_factor,
	uint64_t *result)
{
	uint64_t ival_abs;

	clock_interval_to_absolutetime_interval(interval, scale_factor, &ival_abs);

	*result = mach_absolute_time() + ival_abs;
}
/*
 * clock_absolutetime_interval_to_deadline:
 *
 * Convert an absolute-time interval into a deadline measured from
 * the current absolute time.
 */
void
clock_absolutetime_interval_to_deadline(
uint64_t abstime,
uint64_t *result)
{
*result = mach_absolute_time() + abstime;
}
/*
 * clock_get_uptime:
 *
 * Return the current absolute time (mach_absolute_time).
 */
void
clock_get_uptime(
uint64_t *result)
{
*result = mach_absolute_time();
}
/*
 * clock_deadline_for_periodic_event:
 *
 * Advance *deadline by interval for a periodic event.  If the new
 * deadline has already passed relative to abstime (processing
 * slipped by more than a period), re-base it first on abstime and,
 * if that is also stale, on a fresh reading of the clock, so the
 * returned deadline always lies in the future.
 */
void
clock_deadline_for_periodic_event(
uint64_t interval,
uint64_t abstime,
uint64_t *deadline)
{
assert(interval != 0);
*deadline += interval;
if (*deadline <= abstime) {
*deadline = abstime + interval;
abstime = mach_absolute_time();
if (*deadline <= abstime)
*deadline = abstime + interval;
}
}
#if CONFIG_DTRACE
/*
 * clock_get_calendar_nanotime_nowait:
 *
 * Lock-free variant of clock_get_calendar_nanotime() for contexts
 * (e.g. dtrace probe context) that must not take the calendar lock.
 * Reads a consistent snapshot from the flipflop[] pair maintained by
 * clock_track_calend_nowait().  Like the locked variant, granularity
 * is limited to microseconds, scaled up to nanoseconds.
 */
void
clock_get_calendar_nanotime_nowait(
clock_sec_t *secs,
clock_nsec_t *nanosecs)
{
int i = 0;
uint64_t now;
struct unlocked_clock_calend stable;
for (;;) {
/* Copy one side of the flipflop. */
stable = flipflop[i];
/* Clear the in-update bit in our copy's generation. */
(void)hw_atomic_and(&stable.gen, ~(uint32_t)1);
/* If the slot's generation still matches, no writer touched the
 * slot while we copied it: the snapshot is consistent. */
if (flipflop[i].gen == stable.gen)
break;
/* Torn read: try the other side. */
i ^= 1;
}
now = mach_absolute_time();
/* Withhold time during a negative adjustment step, mirroring the
 * locked calendar getters. */
if (stable.calend.adjdelta < 0) {
uint32_t t32;
if (now > stable.calend.adjstart) {
t32 = (uint32_t)(now - stable.calend.adjstart);
if (t32 > stable.calend.adjoffset)
now -= stable.calend.adjoffset;
else
now = stable.calend.adjstart;
}
}
now += stable.calend.offset;
absolutetime_to_microtime(now, secs, nanosecs);
*nanosecs *= NSEC_PER_USEC;
*secs += (clock_sec_t)stable.calend.epoch;
}
/*
 * clock_track_calend_nowait:
 *
 * Publish the current calendar state into both flipflop[] slots for
 * lock-free readers.  Each slot's generation is made odd (or with 1)
 * before the write and bumped back to even afterwards, letting
 * clock_get_calendar_nanotime_nowait() detect a torn snapshot.
 * Invoked wherever clock_calend is changed.
 */
static void
clock_track_calend_nowait(void)
{
int i;
for (i = 0; i < 2; i++) {
struct clock_calend tmp = clock_calend;
/* Mark slot as in-update (generation becomes odd). */
(void)hw_atomic_or(&flipflop[i].gen, 1);
flipflop[i].calend = tmp;
/* Mark slot as stable again (generation becomes even). */
(void)hw_atomic_add(&flipflop[i].gen, 1);
}
}
#endif