#include <mach/mach_types.h>
#include <kern/lock.h>
#include <kern/spl.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <kern/host_notify.h>
#include <IOKit/IOPlatformExpert.h>
#include <machine/commpage.h>
#include <mach/mach_traps.h>
#include <mach/mach_time.h>
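/* Length of a 10 ms clock tick in absolute-time units; computed in clock_timebase_init(). */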
uint32_t hz_tick_interval = 1;
#if CONFIG_DTRACE
static void clock_track_calend_nowait(void);
#endif
decl_simple_lock_data(static,clock_lock)
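/*
 * Calendar state.  Calendar (wall-clock) time is derived as
 * epoch (seconds) plus mach_absolute_time() adjusted by offset,
 * with any adjtime-style correction applied gradually, one small
 * step per adjustment period, by the calend_adjust machinery below.
 */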
static struct clock_calend {
uint64_t epoch;			/* calendar epoch, in seconds */
uint64_t offset;		/* offset added to mach_absolute_time(), in absolute-time units */
int64_t adjtotal;		/* remaining adjustment, in nanoseconds */
uint64_t adjdeadline;		/* absolute time of the next adjustment period */
uint32_t adjinterval;		/* length of an adjustment period, in absolute-time units */
int32_t adjdelta;		/* nanoseconds applied per adjustment period */
uint64_t adjstart;		/* absolute time at which the current period began */
uint32_t adjoffset;		/* |adjdelta| converted to absolute-time units */
uint32_t adjactive;		/* number of outstanding adjustment callouts */
timer_call_data_t adjcall;	/* timer call driving the periodic adjustment */
} clock_calend;
#if CONFIG_DTRACE
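/*
 * Generation-counted snapshots of clock_calend, written by
 * clock_track_calend_nowait() and read without the clock lock
 * by clock_get_calendar_nanotime_nowait() (DTrace support).
 */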
static struct unlocked_clock_calend {
struct clock_calend calend;
uint32_t gen;
} flipflop[2];
#endif
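/*
 * Adjustment tuning: corrections are applied every 10 ms
 * (calend_adjperiod), normally 40 us at a time (calend_adjskew),
 * or ten times that when the outstanding total exceeds one
 * second (calend_adjbig).
 */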
#define calend_adjperiod (NSEC_PER_SEC / 100)
#define calend_adjskew (40 * NSEC_PER_USEC)
#define calend_adjbig (NSEC_PER_SEC)
static uint32_t calend_set_adjustment(
int32_t *secs,
int32_t *microsecs);
static void calend_adjust_call(void);
static uint32_t calend_adjust(void);
static thread_call_data_t calend_wakecall;
extern void IOKitResetTime(void);
static uint64_t clock_boottime;	/* calendar time (seconds) at boot */
#define TIME_ADD(rsecs, secs, rfrac, frac, unit) \
MACRO_BEGIN \
if (((rfrac) += (frac)) >= (unit)) { \
(rfrac) -= (unit); \
(rsecs) += 1; \
} \
(rsecs) += (secs); \
MACRO_END
#define TIME_SUB(rsecs, secs, rfrac, frac, unit) \
MACRO_BEGIN \
if ((int32_t)((rfrac) -= (frac)) < 0) { \
(rfrac) += (unit); \
(rsecs) -= 1; \
} \
(rsecs) -= (secs); \
MACRO_END
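/*
 * clock_config:
 *
 * Called once at boot to configure the clock subsystem:
 * initializes the clock lock, the calendar adjustment timer call,
 * the wake thread call, and the timer callout layer.
 */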
void
clock_config(void)
{
simple_lock_init(&clock_lock, 0);
timer_call_setup(&clock_calend.adjcall, (timer_call_func_t)calend_adjust_call, NULL);
thread_call_setup(&calend_wakecall, (thread_call_func_t)IOKitResetTime, NULL);
clock_oldconfig();
timer_call_initialize();
}
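/*
 * clock_init:
 *
 * Second-stage clock initialization; defers to the legacy
 * clock layer via clock_oldinit().
 */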
void
clock_init(void)
{
clock_oldinit();
}
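/*
 * clock_timebase_init:
 *
 * Called once the timebase is known, to convert the adjustment
 * period and the 10 ms tick into absolute-time units.
 */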
void
clock_timebase_init(void)
{
uint64_t abstime;
nanoseconds_to_absolutetime(calend_adjperiod, &abstime);
clock_calend.adjinterval = abstime;
nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
hz_tick_interval = abstime;
sched_timebase_init();
}
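/*
 * mach_timebase_info_trap:
 *
 * User trap backing mach_timebase_info(); copies the timebase
 * scale factor (numer/denom) out to user space.  Typical user-side
 * usage (illustrative only):
 *
 *	mach_timebase_info_data_t tb;
 *	mach_timebase_info(&tb);
 *	uint64_t nanos = mach_absolute_time() * tb.numer / tb.denom;
 */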
kern_return_t
mach_timebase_info_trap(
struct mach_timebase_info_trap_args *args)
{
mach_vm_address_t out_info_addr = args->info;
mach_timebase_info_data_t info;
clock_timebase_info(&info);
copyout((void *)&info, out_info_addr, sizeof (info));
return (KERN_SUCCESS);
}
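/*
 * clock_get_calendar_microtime:
 *
 * Returns the calendar (wall-clock) time as seconds and
 * microseconds.  While a negative adjustment is in progress the
 * result is interpolated from adjstart/adjoffset so that the
 * calendar never appears to run backwards.
 */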
void
clock_get_calendar_microtime(
uint32_t *secs,
uint32_t *microsecs)
{
uint64_t now;
spl_t s;
s = splclock();
simple_lock(&clock_lock);
now = mach_absolute_time();
if (clock_calend.adjdelta < 0) {
uint32_t t32;
if (now > clock_calend.adjstart) {
t32 = now - clock_calend.adjstart;
if (t32 > clock_calend.adjoffset)
now -= clock_calend.adjoffset;
else
now = clock_calend.adjstart;
}
}
now += clock_calend.offset;
absolutetime_to_microtime(now, secs, microsecs);
*secs += clock_calend.epoch;
simple_unlock(&clock_lock);
splx(s);
}
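/*
 * clock_get_calendar_nanotime:
 *
 * Returns the calendar time as seconds and nanoseconds.  The
 * value is derived from the microsecond path, so the effective
 * resolution is one microsecond.
 */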
void
clock_get_calendar_nanotime(
uint32_t *secs,
uint32_t *nanosecs)
{
uint64_t now;
spl_t s;
s = splclock();
simple_lock(&clock_lock);
now = mach_absolute_time();
if (clock_calend.adjdelta < 0) {
uint32_t t32;
if (now > clock_calend.adjstart) {
t32 = now - clock_calend.adjstart;
if (t32 > clock_calend.adjoffset)
now -= clock_calend.adjoffset;
else
now = clock_calend.adjstart;
}
}
now += clock_calend.offset;
absolutetime_to_microtime(now, secs, nanosecs);
*nanosecs *= NSEC_PER_USEC;
*secs += clock_calend.epoch;
simple_unlock(&clock_lock);
splx(s);
}
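/*
 * clock_gettimeofday:
 *
 * gettimeofday() service.  When no negative adjustment is in
 * progress the result comes from clock_gettimeofday_set_commpage(),
 * which also refreshes the commpage timestamp; otherwise the
 * interpolated slow path is used.
 */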
void
clock_gettimeofday(
uint32_t *secs,
uint32_t *microsecs)
{
uint64_t now;
spl_t s;
s = splclock();
simple_lock(&clock_lock);
now = mach_absolute_time();
if (clock_calend.adjdelta >= 0) {
clock_gettimeofday_set_commpage(now, clock_calend.epoch, clock_calend.offset, secs, microsecs);
}
else {
uint32_t t32;
if (now > clock_calend.adjstart) {
t32 = now - clock_calend.adjstart;
if (t32 > clock_calend.adjoffset)
now -= clock_calend.adjoffset;
else
now = clock_calend.adjstart;
}
now += clock_calend.offset;
absolutetime_to_microtime(now, secs, microsecs);
*secs += clock_calend.epoch;
}
simple_unlock(&clock_lock);
splx(s);
}
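/*
 * clock_set_calendar_microtime:
 *
 * Sets the calendar to the given time: recomputes the epoch
 * relative to current system uptime, advances clock_boottime by
 * the change, zeroes any adjustment in progress, invalidates the
 * commpage timestamp, writes the platform clock, and posts a
 * calendar-change notification.
 */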
void
clock_set_calendar_microtime(
uint32_t secs,
uint32_t microsecs)
{
uint32_t sys, microsys;
uint32_t newsecs;
spl_t s;
/* round to the nearest second for the platform clock */
newsecs = (microsecs < USEC_PER_SEC / 2)? secs: secs + 1;
s = splclock();
simple_lock(&clock_lock);
commpage_disable_timestamp();
clock_get_system_microtime(&sys, &microsys);
TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);
clock_boottime += secs - clock_calend.epoch;
clock_calend.epoch = secs;
nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);
clock_calend.adjdelta = clock_calend.adjtotal = 0;
simple_unlock(&clock_lock);
PESetGMTTimeOfDay(newsecs);
splx(s);
host_notify_calendar_change();
#if CONFIG_DTRACE
clock_track_calend_nowait();
#endif
}
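/*
 * clock_initialize_calendar:
 *
 * Sets the calendar from the platform clock.  The value is
 * ignored if it is earlier than the recorded boottime; the first
 * call records clock_boottime.
 */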
void
clock_initialize_calendar(void)
{
uint32_t sys, microsys;
uint32_t microsecs = 0, secs = PEGetGMTTimeOfDay();
spl_t s;
s = splclock();
simple_lock(&clock_lock);
commpage_disable_timestamp();
if ((int32_t)secs >= (int32_t)clock_boottime) {
if (clock_boottime == 0)
clock_boottime = secs;
clock_get_system_microtime(&sys, &microsys);
TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);
clock_calend.epoch = secs;
nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);
clock_calend.adjdelta = clock_calend.adjtotal = 0;
}
simple_unlock(&clock_lock);
splx(s);
host_notify_calendar_change();
#if CONFIG_DTRACE
clock_track_calend_nowait();
#endif
}
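/*
 * clock_get_boottime_nanotime:
 *
 * Returns the boot-time epoch in seconds; the nanosecond part is
 * always zero at this granularity.
 */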
void
clock_get_boottime_nanotime(
uint32_t *secs,
uint32_t *nanosecs)
{
*secs = clock_boottime;
*nanosecs = 0;
}
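/*
 * clock_adjtime:
 *
 * adjtime()-style interface: installs a new gradual adjustment,
 * returns the previously outstanding adjustment through the
 * arguments, and arms or cancels the periodic adjustment timer.
 */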
void
clock_adjtime(
int32_t *secs,
int32_t *microsecs)
{
uint32_t interval;
spl_t s;
s = splclock();
simple_lock(&clock_lock);
interval = calend_set_adjustment(secs, microsecs);
if (interval != 0) {
clock_calend.adjdeadline = mach_absolute_time() + interval;
if (!timer_call_enter(&clock_calend.adjcall, clock_calend.adjdeadline))
clock_calend.adjactive++;
}
else
if (timer_call_cancel(&clock_calend.adjcall))
clock_calend.adjactive--;
simple_unlock(&clock_lock);
splx(s);
}
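/*
 * calend_set_adjustment:
 *
 * Converts the requested adjustment to nanoseconds and picks the
 * per-period delta: calend_adjskew, or ten times that when the
 * total exceeds calend_adjbig.  For negative totals, adjstart is
 * recorded so readers can interpolate.  Returns the adjustment
 * period interval, or zero if nothing remains to be done.
 */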
static uint32_t
calend_set_adjustment(
int32_t *secs,
int32_t *microsecs)
{
uint64_t now, t64;
int64_t total, ototal;
uint32_t interval = 0;
total = (int64_t)*secs * NSEC_PER_SEC + *microsecs * NSEC_PER_USEC;
commpage_disable_timestamp();
now = mach_absolute_time();
ototal = clock_calend.adjtotal;
if (total != 0) {
int32_t delta = calend_adjskew;
if (total > 0) {
if (total > calend_adjbig)
delta *= 10;
if (delta > total)
delta = total;
nanoseconds_to_absolutetime((uint64_t)delta, &t64);
clock_calend.adjoffset = t64;
}
else {
if (total < -calend_adjbig)
delta *= 10;
delta = -delta;
if (delta < total)
delta = total;
clock_calend.adjstart = now;
nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
clock_calend.adjoffset = t64;
}
clock_calend.adjtotal = total;
clock_calend.adjdelta = delta;
interval = clock_calend.adjinterval;
}
else
clock_calend.adjdelta = clock_calend.adjtotal = 0;
if (ototal != 0) {
*secs = ototal / NSEC_PER_SEC;
*microsecs = (ototal % NSEC_PER_SEC) / NSEC_PER_USEC;
}
else
*secs = *microsecs = 0;
#if CONFIG_DTRACE
clock_track_calend_nowait();
#endif
return (interval);
}
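/*
 * calend_adjust_call:
 *
 * Timer callout: when the last outstanding callout fires, applies
 * one adjustment period and re-arms the timer if adjustment
 * remains.
 */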
static void
calend_adjust_call(void)
{
uint32_t interval;
spl_t s;
s = splclock();
simple_lock(&clock_lock);
if (--clock_calend.adjactive == 0) {
interval = calend_adjust();
if (interval != 0) {
clock_deadline_for_periodic_event(interval, mach_absolute_time(),
&clock_calend.adjdeadline);
if (!timer_call_enter(&clock_calend.adjcall, clock_calend.adjdeadline))
clock_calend.adjactive++;
}
}
simple_unlock(&clock_lock);
splx(s);
}
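/*
 * calend_adjust:
 *
 * Applies one period's worth of adjustment: folds adjoffset into
 * the calendar offset, reduces adjtotal, and clamps the final
 * partial period.  Returns the interval for the next period, or
 * zero when the adjustment is complete.
 */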
static uint32_t
calend_adjust(void)
{
uint64_t now, t64;
int32_t delta;
uint32_t interval = 0;
commpage_disable_timestamp();
now = mach_absolute_time();
delta = clock_calend.adjdelta;
if (delta > 0) {
clock_calend.offset += clock_calend.adjoffset;
clock_calend.adjtotal -= delta;
if (delta > clock_calend.adjtotal) {
clock_calend.adjdelta = delta = clock_calend.adjtotal;
nanoseconds_to_absolutetime((uint64_t)delta, &t64);
clock_calend.adjoffset = t64;
}
}
else
if (delta < 0) {
clock_calend.offset -= clock_calend.adjoffset;
clock_calend.adjtotal -= delta;
if (delta < clock_calend.adjtotal) {
clock_calend.adjdelta = delta = clock_calend.adjtotal;
nanoseconds_to_absolutetime((uint64_t)-delta, &t64);
clock_calend.adjoffset = t64;
}
if (clock_calend.adjdelta != 0)
clock_calend.adjstart = now;
}
if (clock_calend.adjdelta != 0)
interval = clock_calend.adjinterval;
#if CONFIG_DTRACE
clock_track_calend_nowait();
#endif
return (interval);
}
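/*
 * clock_wakeup_calendar:
 *
 * Queues a thread call to IOKitResetTime so the calendar is
 * resynchronized with the platform clock, typically after the
 * system wakes from sleep.
 */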
void
clock_wakeup_calendar(void)
{
thread_call_enter(&calend_wakecall);
}
static void
mach_wait_until_continue(
__unused void *parameter,
wait_result_t wresult)
{
thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
}
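/*
 * mach_wait_until_trap:
 *
 * User trap backing mach_wait_until(); blocks the calling thread
 * (abort-safe) until the given absolute deadline, returning
 * KERN_ABORTED if the wait is interrupted.  Typical user-side
 * usage (illustrative only):
 *
 *	uint64_t deadline = mach_absolute_time() + delta;
 *	mach_wait_until(deadline);
 */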
kern_return_t
mach_wait_until_trap(
struct mach_wait_until_trap_args *args)
{
uint64_t deadline = args->deadline;
wait_result_t wresult;
wresult = assert_wait_deadline((event_t)mach_wait_until_trap, THREAD_ABORTSAFE, deadline);
if (wresult == THREAD_WAITING)
wresult = thread_block(mach_wait_until_continue);
return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
}
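/*
 * clock_delay_until:
 *
 * Delays until the given absolute deadline.  Short waits (less
 * than eight context-switch times), or waits with preemption
 * disabled or interrupts off, spin in machine_delay_until();
 * otherwise the thread blocks until just before the deadline.
 */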
void
clock_delay_until(
uint64_t deadline)
{
uint64_t now = mach_absolute_time();
if (now >= deadline)
return;
if ( (deadline - now) < (8 * sched_cswtime) ||
get_preemption_level() != 0 ||
ml_get_interrupts_enabled() == FALSE )
machine_delay_until(deadline);
else {
assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline - sched_cswtime);
thread_block(THREAD_CONTINUE_NULL);
}
}
void
delay_for_interval(
uint32_t interval,
uint32_t scale_factor)
{
uint64_t end;
clock_interval_to_deadline(interval, scale_factor, &end);
clock_delay_until(end);
}
void
delay(
int usec)
{
delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
}
void
clock_interval_to_deadline(
uint32_t interval,
uint32_t scale_factor,
uint64_t *result)
{
uint64_t abstime;
clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
*result = mach_absolute_time() + abstime;
}
void
clock_absolutetime_interval_to_deadline(
uint64_t abstime,
uint64_t *result)
{
*result = mach_absolute_time() + abstime;
}
void
clock_get_uptime(
uint64_t *result)
{
*result = mach_absolute_time();
}
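/*
 * clock_deadline_for_periodic_event:
 *
 * Advances *deadline by interval; if the result has already
 * passed, resynchronizes it to one interval past the current
 * time so periodic events do not pile up.
 */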
void
clock_deadline_for_periodic_event(
uint64_t interval,
uint64_t abstime,
uint64_t *deadline)
{
assert(interval != 0);
*deadline += interval;
if (*deadline <= abstime) {
*deadline = abstime + interval;
abstime = mach_absolute_time();
if (*deadline <= abstime)
*deadline = abstime + interval;
}
}
#if CONFIG_DTRACE
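/*
 * clock_get_calendar_nanotime_nowait:
 *
 * Lock-free calendar read for DTrace: takes a snapshot from one
 * half of the flipflop buffer and retries on the other half if
 * the generation count shows an update was in progress.  As with
 * the locked nanotime path, resolution is one microsecond.
 */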
void
clock_get_calendar_nanotime_nowait(
uint32_t *secs,
uint32_t *nanosecs)
{
int i = 0;
uint64_t now;
struct unlocked_clock_calend stable;
for (;;) {
stable = flipflop[i];
(void)hw_atomic_and(&stable.gen, ~(uint32_t)1);
if (flipflop[i].gen == stable.gen)
break;
i ^= 1;
}
now = mach_absolute_time();
if (stable.calend.adjdelta < 0) {
uint32_t t32;
if (now > stable.calend.adjstart) {
t32 = now - stable.calend.adjstart;
if (t32 > stable.calend.adjoffset)
now -= stable.calend.adjoffset;
else
now = stable.calend.adjstart;
}
}
now += stable.calend.offset;
absolutetime_to_microtime(now, secs, nanosecs);
*nanosecs *= NSEC_PER_USEC;
*secs += stable.calend.epoch;
}
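/*
 * clock_track_calend_nowait:
 *
 * Publisher side of the lock-free snapshots: for each buffer,
 * sets the low generation bit to mark an update in progress,
 * copies the calendar, then bumps the generation to publish.
 */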
static void
clock_track_calend_nowait(void)
{
int i;
for (i = 0; i < 2; i++) {
struct clock_calend tmp = clock_calend;
(void)hw_atomic_or(&flipflop[i].gen, 1);
flipflop[i].calend = tmp;
(void)hw_atomic_add(&flipflop[i].gen, 1);
}
}
#endif