#include <mach/mach_types.h>
#include <kern/spl.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <kern/host_notify.h>
#include <kern/thread_call.h>
#include <libkern/OSAtomic.h>
#include <IOKit/IOPlatformExpert.h>
#include <machine/commpage.h>
#include <machine/config.h>
#include <machine/machine_routines.h>
#include <mach/mach_traps.h>
#include <mach/mach_time.h>
#include <sys/kdebug.h>
#include <sys/timex.h>
#include <kern/arithmetic_128.h>
#include <os/log.h>
uint32_t hz_tick_interval = 1;
static uint64_t has_monotonic_clock = 0;
decl_simple_lock_data(,clock_lock)
lck_grp_attr_t * settime_lock_grp_attr;
lck_grp_t * settime_lock_grp;
lck_attr_t * settime_lock_attr;
lck_mtx_t settime_lock;
#define clock_lock() \
simple_lock(&clock_lock)
#define clock_unlock() \
simple_unlock(&clock_lock)
#define clock_lock_init() \
simple_lock_init(&clock_lock, 0)
#ifdef kdp_simple_lock_is_acquired
boolean_t kdp_clock_is_locked()
{
return kdp_simple_lock_is_acquired(&clock_lock);
}
#endif
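/*
 * FreeBSD-style binary time: whole seconds plus a 64-bit binary fraction
 * of a second (units of 2^-64 s).  The helpers below add or subtract the
 * fractional part and propagate carry/borrow into the seconds field.
 */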
struct bintime {
time_t sec;
uint64_t frac;
};
static __inline void
bintime_addx(struct bintime *_bt, uint64_t _x)
{
uint64_t _u;
_u = _bt->frac;
_bt->frac += _x;
if (_u > _bt->frac)
_bt->sec++;
}
static __inline void
bintime_subx(struct bintime *_bt, uint64_t _x)
{
uint64_t _u;
_u = _bt->frac;
_bt->frac -= _x;
if (_u < _bt->frac)
_bt->sec--;
}
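/*
 * Add/subtract a nanosecond count: whole seconds are split off first and
 * the sub-second remainder is converted to a 2^-64 fraction of a second.
 * 18446744073 is 2^64 / NSEC_PER_SEC, truncated.
 */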
static __inline void
bintime_addns(struct bintime *bt, uint64_t ns)
{
bt->sec += ns/ (uint64_t)NSEC_PER_SEC;
ns = ns % (uint64_t)NSEC_PER_SEC;
if (ns) {
ns = ns * (uint64_t)18446744073LL;
bintime_addx(bt, ns);
}
}
static __inline void
bintime_subns(struct bintime *bt, uint64_t ns)
{
bt->sec -= ns/ (uint64_t)NSEC_PER_SEC;
ns = ns % (uint64_t)NSEC_PER_SEC;
if (ns) {
ns = ns * (uint64_t)18446744073LL;
bintime_subx(bt, ns);
}
}
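/*
 * Add a * xns to the bintime, where xns is a signed 64-bit binary fraction
 * of a nanosecond.  multi_overflow() yields the high 64 bits of the
 * a * |xns| product (the whole nanoseconds that would otherwise be lost);
 * the low 64 bits are converted to a 2^-64 fraction of a second and folded in.
 */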
static __inline void
bintime_addxns(struct bintime *bt, uint64_t a, int64_t xns)
{
uint64_t uxns = (xns > 0)?(uint64_t )xns:(uint64_t)-xns;
uint64_t ns = multi_overflow(a, uxns);
if (xns > 0) {
if (ns)
bintime_addns(bt, ns);
ns = (a * uxns) / (uint64_t)NSEC_PER_SEC;
bintime_addx(bt, ns);
}
else{
if (ns)
bintime_subns(bt, ns);
ns = (a * uxns) / (uint64_t)NSEC_PER_SEC;
bintime_subx(bt,ns);
}
}
static __inline void
bintime_add(struct bintime *_bt, const struct bintime *_bt2)
{
uint64_t _u;
_u = _bt->frac;
_bt->frac += _bt2->frac;
if (_u > _bt->frac)
_bt->sec++;
_bt->sec += _bt2->sec;
}
static __inline void
bintime_sub(struct bintime *_bt, const struct bintime *_bt2)
{
uint64_t _u;
_u = _bt->frac;
_bt->frac -= _bt2->frac;
if (_u < _bt->frac)
_bt->sec--;
_bt->sec -= _bt2->sec;
}
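/*
 * Conversions between (sec, usec/nsec) pairs and bintime.
 * 18446744073709 is 2^64 / USEC_PER_SEC, truncated; the reverse conversions
 * take the top 32 fraction bits and scale them back to usec/nsec.
 */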
static __inline void
clock2bintime(const clock_sec_t *secs, const clock_usec_t *microsecs, struct bintime *_bt)
{
_bt->sec = *secs;
_bt->frac = *microsecs * (uint64_t)18446744073709LL;
}
static __inline void
bintime2usclock(const struct bintime *_bt, clock_sec_t *secs, clock_usec_t *microsecs)
{
*secs = _bt->sec;
*microsecs = ((uint64_t)USEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32;
}
static __inline void
bintime2nsclock(const struct bintime *_bt, clock_sec_t *secs, clock_usec_t *nanosecs)
{
*secs = _bt->sec;
*nanosecs = ((uint64_t)NSEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32;
}
static __inline void
bintime2absolutetime(const struct bintime *_bt, uint64_t *abs)
{
uint64_t nsec;
nsec = (uint64_t) _bt->sec * (uint64_t)NSEC_PER_SEC + (((uint64_t)NSEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32);
nanoseconds_to_absolutetime(nsec, abs);
}
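/*
 * Layout returned by the kern.monotonicclock_usecs sysctl: a monotonic
 * (valid-across-sleep) timestamp in microseconds, latched together with the
 * mach_absolute_time() value at which it was read.
 */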
struct latched_time {
uint64_t monotonic_time_usec;
uint64_t mach_time;
};
extern int
kernel_sysctlbyname(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
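/*
 * Calendar state, protected by clock_lock:
 *   tick_scale_x   2^-64 fractions of a second per mach tick, including the
 *                  current NTP adjustment (used for sub-second deltas).
 *   s_scale_ns,    per-second scale factors: nanoseconds (plus a residual
 *   s_adj_nsx      64-bit fraction of a nanosecond) applied to each fully
 *                  elapsed second of a delta.
 *   offset_count   mach_absolute_time() at the last calendar update.
 *   offset         scaled uptime accumulated up to offset_count.
 *   bintime        UTC time at offset_count (boottime + offset).
 *   boottime       UTC time of boot.
 *   basesleep      reference (monotonic- or PMU-based) used across sleep to
 *                  compute how long the system slept.
 */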
static struct clock_calend {
uint64_t s_scale_ns;
int64_t s_adj_nsx;
uint64_t tick_scale_x;
uint64_t offset_count;
struct bintime offset;
struct bintime bintime;
struct bintime boottime;
struct bintime basesleep;
} clock_calend;
static uint64_t ticks_per_sec;
#if DEVELOPMENT || DEBUG
clock_sec_t last_utc_sec = 0;
clock_usec_t last_utc_usec = 0;
clock_sec_t max_utc_sec = 0;
clock_sec_t last_sys_sec = 0;
clock_usec_t last_sys_usec = 0;
#endif
#if DEVELOPMENT || DEBUG
extern int g_should_log_clock_adjustments;
static void print_all_clock_variables(const char*, clock_sec_t* pmu_secs, clock_usec_t* pmu_usec, clock_sec_t* sys_secs, clock_usec_t* sys_usec, struct clock_calend* calend_cp);
static void print_all_clock_variables_internal(const char *, struct clock_calend* calend_cp);
#else
#define print_all_clock_variables(...) do { } while (0)
#define print_all_clock_variables_internal(...) do { } while (0)
#endif
#if CONFIG_DTRACE
static struct unlocked_clock_calend {
struct clock_calend calend;
uint32_t gen;
} flipflop[ 2];
static void clock_track_calend_nowait(void);
#endif
void _clock_delay_until_deadline(uint64_t interval, uint64_t deadline);
void _clock_delay_until_deadline_with_leeway(uint64_t interval, uint64_t deadline, uint64_t leeway);
static uint64_t clock_boottime;
static uint32_t clock_boottime_usec;
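/*
 * Add/subtract a (secs, frac) pair to/from a running (rsecs, rfrac) pair,
 * carrying or borrowing whenever the fractional part crosses 'unit'
 * (USEC_PER_SEC or NSEC_PER_SEC).
 */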
#define TIME_ADD(rsecs, secs, rfrac, frac, unit) \
MACRO_BEGIN \
if (((rfrac) += (frac)) >= (unit)) { \
(rfrac) -= (unit); \
(rsecs) += 1; \
} \
(rsecs) += (secs); \
MACRO_END
#define TIME_SUB(rsecs, secs, rfrac, frac, unit) \
MACRO_BEGIN \
if ((int)((rfrac) -= (frac)) < 0) { \
(rfrac) += (unit); \
(rsecs) -= 1; \
} \
(rsecs) -= (secs); \
MACRO_END
void
clock_config(void)
{
clock_lock_init();
settime_lock_grp_attr = lck_grp_attr_alloc_init();
settime_lock_grp = lck_grp_alloc_init("settime grp", settime_lock_grp_attr);
settime_lock_attr = lck_attr_alloc_init();
lck_mtx_init(&settime_lock, settime_lock_grp, settime_lock_attr);
clock_oldconfig();
ntp_init();
nanoseconds_to_absolutetime((uint64_t)NSEC_PER_SEC, &ticks_per_sec);
}
void
clock_init(void)
{
clock_oldinit();
}
void
clock_timebase_init(void)
{
uint64_t abstime;
nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
hz_tick_interval = (uint32_t)abstime;
sched_timebase_init();
}
kern_return_t
mach_timebase_info_trap(
struct mach_timebase_info_trap_args *args)
{
mach_vm_address_t out_info_addr = args->info;
mach_timebase_info_data_t info = {};
clock_timebase_info(&info);
copyout((void *)&info, out_info_addr, sizeof (info));
return (KERN_SUCCESS);
}
void
clock_get_calendar_microtime(
clock_sec_t *secs,
clock_usec_t *microsecs)
{
clock_get_calendar_absolute_and_microtime(secs, microsecs, NULL);
}
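/*
 * Derive the calendar scale factors from an NTP adjustment expressed as
 * nanoseconds per second with a 32-bit binary fraction:
 *   tick_scale_x  ~= (2^64 + adjustment * 2^32 / 10^9) / ticks_per_sec;
 *                 2199/512 approximates 2^32 / 10^9, and dividing before
 *                 multiplying avoids 64-bit overflow at the cost of a small,
 *                 systematic undercompensation.
 *   s_scale_ns    nanoseconds per elapsed second, including the integer part
 *                 of the adjustment.
 *   s_adj_nsx     the remaining fractional nanoseconds (as a 64-bit binary
 *                 fraction) to apply per elapsed second.
 */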
static void
get_scale_factors_from_adj(int64_t adjustment, uint64_t* tick_scale_x, uint64_t* s_scale_ns, int64_t* s_adj_nsx)
{
uint64_t scale;
int64_t nano, frac;
scale = (uint64_t)1 << 63;
scale += (adjustment / 1024) * 2199;
scale /= ticks_per_sec;
*tick_scale_x = scale * 2;
nano = (adjustment > 0)? adjustment >> 32 : -((-adjustment) >> 32);
scale = (uint64_t) NSEC_PER_SEC;
scale += nano;
*s_scale_ns = scale;
frac = (adjustment > 0)? ((uint32_t) adjustment) : -((uint32_t) (-adjustment));
*s_adj_nsx = (frac>0)? frac << 32 : -( (-frac) << 32);
return;
}
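/*
 * Convert a delta in mach absolute-time ticks into a bintime, applying the
 * current adjustment factors.  Fully elapsed seconds are scaled with
 * s_scale_ns/s_adj_nsx; the sub-second remainder is scaled with tick_scale_x,
 * with multi_overflow() supplying the whole seconds of the 128-bit product.
 */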
static struct bintime
scale_delta(uint64_t delta, uint64_t tick_scale_x, uint64_t s_scale_ns, int64_t s_adj_nsx)
{
uint64_t sec, new_ns, over;
struct bintime bt;
bt.sec = 0;
bt.frac = 0;
if (delta > ticks_per_sec) {
sec = (delta/ticks_per_sec);
new_ns = sec * s_scale_ns;
bintime_addns(&bt, new_ns);
if (s_adj_nsx) {
if (sec == 1) {
if (s_adj_nsx > 0)
bintime_addx(&bt, (uint64_t)s_adj_nsx/ (uint64_t)NSEC_PER_SEC);
else
bintime_subx(&bt, (uint64_t)-s_adj_nsx/ (uint64_t)NSEC_PER_SEC);
}
else{
bintime_addxns(&bt, sec, s_adj_nsx);
}
}
delta = (delta % ticks_per_sec);
}
over = multi_overflow(tick_scale_x, delta);
if(over){
bt.sec += over;
}
bintime_addx(&bt, delta * tick_scale_x);
return bt;
}
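/*
 * Scaled time elapsed since the last calendar update (clock_calend.offset_count).
 */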
static struct bintime
get_scaled_time(uint64_t now)
{
uint64_t delta;
delta = now - clock_calend.offset_count;
return scale_delta(delta, clock_calend.tick_scale_x, clock_calend.s_scale_ns, clock_calend.s_adj_nsx);
}
static void
clock_get_calendar_absolute_and_microtime_locked(
clock_sec_t *secs,
clock_usec_t *microsecs,
uint64_t *abstime)
{
uint64_t now;
struct bintime bt;
now = mach_absolute_time();
if (abstime)
*abstime = now;
bt = get_scaled_time(now);
bintime_add(&bt, &clock_calend.bintime);
bintime2usclock(&bt, secs, microsecs);
}
static void
clock_get_calendar_absolute_and_nanotime_locked(
clock_sec_t *secs,
clock_usec_t *nanosecs,
uint64_t *abstime)
{
uint64_t now;
struct bintime bt;
now = mach_absolute_time();
if (abstime)
*abstime = now;
bt = get_scaled_time(now);
bintime_add(&bt, &clock_calend.bintime);
bintime2nsclock(&bt, secs, nanosecs);
}
void
clock_get_calendar_absolute_and_microtime(
clock_sec_t *secs,
clock_usec_t *microsecs,
uint64_t *abstime)
{
spl_t s;
s = splclock();
clock_lock();
clock_get_calendar_absolute_and_microtime_locked(secs, microsecs, abstime);
clock_unlock();
splx(s);
}
void
clock_get_calendar_nanotime(
clock_sec_t *secs,
clock_nsec_t *nanosecs)
{
spl_t s;
s = splclock();
clock_lock();
clock_get_calendar_absolute_and_nanotime_locked(secs, nanosecs, NULL);
clock_unlock();
splx(s);
}
void
clock_gettimeofday(
clock_sec_t *secs,
clock_usec_t *microsecs)
{
clock_gettimeofday_and_absolute_time(secs, microsecs, NULL);
}
void
clock_gettimeofday_and_absolute_time(
clock_sec_t *secs,
clock_usec_t *microsecs,
uint64_t *mach_time)
{
uint64_t now;
spl_t s;
struct bintime bt;
s = splclock();
clock_lock();
now = mach_absolute_time();
bt = get_scaled_time(now);
bintime_add(&bt, &clock_calend.bintime);
bintime2usclock(&bt, secs, microsecs);
clock_gettimeofday_set_commpage(now, bt.sec, bt.frac, clock_calend.tick_scale_x, ticks_per_sec);
clock_unlock();
splx(s);
if (mach_time) {
*mach_time = now;
}
}
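/*
 * Without a monotonic clock, basesleep is derived from the settable PMU/RTC
 * time, so calendar changes have to be mirrored into it; with a monotonic
 * clock, basesleep is independent of wall-clock changes and is left alone.
 */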
static void
update_basesleep(struct bintime delta, bool forward)
{
if (!has_monotonic_clock) {
if (forward)
bintime_add(&clock_calend.basesleep, &delta);
else
bintime_sub(&clock_calend.basesleep, &delta);
}
}
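/*
 * Set the UTC calendar to secs/microsecs.  The delta from the current
 * calendar value is applied to boottime (and, if needed, basesleep) so that
 * uptime is preserved, the commpage timestamp and boottime are refreshed,
 * and the new time is pushed to the platform RTC via PESetUTCTimeOfDay().
 */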
void
clock_set_calendar_microtime(
clock_sec_t secs,
clock_usec_t microsecs)
{
uint64_t absolutesys;
clock_sec_t newsecs;
clock_sec_t oldsecs;
clock_usec_t newmicrosecs;
clock_usec_t oldmicrosecs;
uint64_t commpage_value;
spl_t s;
struct bintime bt;
clock_sec_t deltasecs;
clock_usec_t deltamicrosecs;
newsecs = secs;
newmicrosecs = microsecs;
lck_mtx_lock(&settime_lock);
s = splclock();
clock_lock();
#if DEVELOPMENT || DEBUG
struct clock_calend clock_calend_cp = clock_calend;
#endif
commpage_disable_timestamp();
clock_get_calendar_absolute_and_microtime_locked(&oldsecs, &oldmicrosecs, &absolutesys);
#if DEVELOPMENT || DEBUG
if (g_should_log_clock_adjustments) {
os_log(OS_LOG_DEFAULT, "%s wall %lu s %d u computed with %llu abs\n",
__func__, (unsigned long)oldsecs, oldmicrosecs, absolutesys);
os_log(OS_LOG_DEFAULT, "%s requested %lu s %d u\n",
__func__, (unsigned long)secs, microsecs );
}
#endif
if (oldsecs < secs || (oldsecs == secs && oldmicrosecs < microsecs)) {
deltasecs = secs;
deltamicrosecs = microsecs;
TIME_SUB(deltasecs, oldsecs, deltamicrosecs, oldmicrosecs, USEC_PER_SEC);
#if DEVELOPMENT || DEBUG
if (g_should_log_clock_adjustments) {
os_log(OS_LOG_DEFAULT, "%s delta requested %lu s %d u\n",
__func__, (unsigned long)deltasecs, deltamicrosecs);
}
#endif
TIME_ADD(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
clock2bintime(&deltasecs, &deltamicrosecs, &bt);
bintime_add(&clock_calend.boottime, &bt);
update_basesleep(bt, TRUE);
} else {
deltasecs = oldsecs;
deltamicrosecs = oldmicrosecs;
TIME_SUB(deltasecs, secs, deltamicrosecs, microsecs, USEC_PER_SEC);
#if DEVELOPMENT || DEBUG
if (g_should_log_clock_adjustments) {
os_log(OS_LOG_DEFAULT, "%s negative delta requested %lu s %d u\n",
__func__, (unsigned long)deltasecs, deltamicrosecs);
}
#endif
TIME_SUB(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
clock2bintime(&deltasecs, &deltamicrosecs, &bt);
bintime_sub(&clock_calend.boottime, &bt);
update_basesleep(bt, FALSE);
}
clock_calend.bintime = clock_calend.boottime;
bintime_add(&clock_calend.bintime, &clock_calend.offset);
clock2bintime((clock_sec_t *) &secs, (clock_usec_t *) &microsecs, &bt);
clock_gettimeofday_set_commpage(absolutesys, bt.sec, bt.frac, clock_calend.tick_scale_x, ticks_per_sec);
#if DEVELOPMENT || DEBUG
struct clock_calend clock_calend_cp1 = clock_calend;
#endif
commpage_value = clock_boottime * USEC_PER_SEC + clock_boottime_usec;
clock_unlock();
splx(s);
#if DEVELOPMENT || DEBUG
uint64_t now_b = mach_absolute_time();
#endif
PESetUTCTimeOfDay(newsecs, newmicrosecs);
#if DEVELOPMENT || DEBUG
uint64_t now_a = mach_absolute_time();
if (g_should_log_clock_adjustments) {
os_log(OS_LOG_DEFAULT, "%s mach bef PESet %llu mach aft %llu \n", __func__, now_b, now_a);
}
#endif
print_all_clock_variables_internal(__func__, &clock_calend_cp);
print_all_clock_variables_internal(__func__, &clock_calend_cp1);
commpage_update_boottime(commpage_value);
host_notify_calendar_change();
host_notify_calendar_set();
#if CONFIG_DTRACE
clock_track_calend_nowait();
#endif
lck_mtx_unlock(&settime_lock);
}
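/*
 * Total and most recent time spent asleep, in mach absolute-time units.
 * mach_continuous_time() == mach_absolute_time() + mach_absolutetime_asleep.
 */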
uint64_t mach_absolutetime_asleep = 0;
uint64_t mach_absolutetime_last_sleep = 0;
void
clock_get_calendar_uptime(clock_sec_t *secs)
{
uint64_t now;
spl_t s;
struct bintime bt;
s = splclock();
clock_lock();
now = mach_absolute_time();
bt = get_scaled_time(now);
bintime_add(&bt, &clock_calend.offset);
*secs = bt.sec;
clock_unlock();
splx(s);
}
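/*
 * Periodic calendar update: fold the time elapsed since the last update into
 * offset/bintime, obtain the next one-second adjustment from NTP, recompute
 * the scale factors, and refresh the commpage timestamp.
 */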
void
clock_update_calendar(void)
{
uint64_t now, delta;
struct bintime bt;
spl_t s;
int64_t adjustment;
s = splclock();
clock_lock();
now = mach_absolute_time();
bt = get_scaled_time(now);
bintime_add(&clock_calend.offset, &bt);
delta = now - clock_calend.offset_count;
clock_calend.offset_count += delta;
clock_calend.bintime = clock_calend.offset;
bintime_add(&clock_calend.bintime, &clock_calend.boottime);
ntp_update_second(&adjustment, clock_calend.bintime.sec);
#if DEVELOPMENT || DEBUG
if (g_should_log_clock_adjustments) {
os_log(OS_LOG_DEFAULT, "%s adjustment %lld\n", __func__, adjustment);
}
#endif
get_scale_factors_from_adj(adjustment, &clock_calend.tick_scale_x, &clock_calend.s_scale_ns, &clock_calend.s_adj_nsx);
clock_gettimeofday_set_commpage(now, clock_calend.bintime.sec, clock_calend.bintime.frac, clock_calend.tick_scale_x, ticks_per_sec);
#if DEVELOPMENT || DEBUG
struct clock_calend calend_cp = clock_calend;
#endif
clock_unlock();
splx(s);
print_all_clock_variables(__func__, NULL,NULL,NULL,NULL, &calend_cp);
}
#if DEVELOPMENT || DEBUG
void print_all_clock_variables_internal(const char* func, struct clock_calend* clock_calend_cp)
{
clock_sec_t offset_secs;
clock_usec_t offset_microsecs;
clock_sec_t bintime_secs;
clock_usec_t bintime_microsecs;
clock_sec_t bootime_secs;
clock_usec_t bootime_microsecs;
if (!g_should_log_clock_adjustments)
return;
bintime2usclock(&clock_calend_cp->offset, &offset_secs, &offset_microsecs);
bintime2usclock(&clock_calend_cp->bintime, &bintime_secs, &bintime_microsecs);
bintime2usclock(&clock_calend_cp->boottime, &bootime_secs, &bootime_microsecs);
os_log(OS_LOG_DEFAULT, "%s s_scale_ns %llu s_adj_nsx %lld tick_scale_x %llu offset_count %llu\n",
func , clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx,
clock_calend_cp->tick_scale_x, clock_calend_cp->offset_count);
os_log(OS_LOG_DEFAULT, "%s offset.sec %ld offset.frac %llu offset_secs %lu offset_microsecs %d\n",
func, clock_calend_cp->offset.sec, clock_calend_cp->offset.frac,
(unsigned long)offset_secs, offset_microsecs);
os_log(OS_LOG_DEFAULT, "%s bintime.sec %ld bintime.frac %llu bintime_secs %lu bintime_microsecs %d\n",
func, clock_calend_cp->bintime.sec, clock_calend_cp->bintime.frac,
(unsigned long)bintime_secs, bintime_microsecs);
os_log(OS_LOG_DEFAULT, "%s bootime.sec %ld bootime.frac %llu bootime_secs %lu bootime_microsecs %d\n",
func, clock_calend_cp->boottime.sec, clock_calend_cp->boottime.frac,
(unsigned long)bootime_secs, bootime_microsecs);
clock_sec_t basesleep_secs;
clock_usec_t basesleep_microsecs;
bintime2usclock(&clock_calend_cp->basesleep, &basesleep_secs, &basesleep_microsecs);
os_log(OS_LOG_DEFAULT, "%s basesleep.sec %ld basesleep.frac %llu basesleep_secs %lu basesleep_microsecs %d\n",
func, clock_calend_cp->basesleep.sec, clock_calend_cp->basesleep.frac,
(unsigned long)basesleep_secs, basesleep_microsecs);
}
void print_all_clock_variables(const char* func, clock_sec_t* pmu_secs, clock_usec_t* pmu_usec, clock_sec_t* sys_secs, clock_usec_t* sys_usec, struct clock_calend* clock_calend_cp)
{
if (!g_should_log_clock_adjustments)
return;
struct bintime bt;
clock_sec_t wall_secs;
clock_usec_t wall_microsecs;
uint64_t now;
uint64_t delta;
if (pmu_secs) {
os_log(OS_LOG_DEFAULT, "%s PMU %lu s %d u \n", func, (unsigned long)*pmu_secs, *pmu_usec);
}
if (sys_secs) {
os_log(OS_LOG_DEFAULT, "%s sys %lu s %d u \n", func, (unsigned long)*sys_secs, *sys_usec);
}
print_all_clock_variables_internal(func, clock_calend_cp);
now = mach_absolute_time();
delta = now - clock_calend_cp->offset_count;
bt = scale_delta(delta, clock_calend_cp->tick_scale_x, clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx);
bintime_add(&bt, &clock_calend_cp->bintime);
bintime2usclock(&bt, &wall_secs, &wall_microsecs);
os_log(OS_LOG_DEFAULT, "%s wall %lu s %d u computed with %llu abs\n",
func, (unsigned long)wall_secs, wall_microsecs, now);
}
#endif
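/*
 * Boot-time calendar initialization: read the UTC time from the platform
 * RTC/PMU and, if present, latch the monotonic clock; compute the UTC time
 * of boot, start with unadjusted scale factors, and record the sleep
 * reference (basesleep).
 */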
void
clock_initialize_calendar(void)
{
clock_sec_t sys; clock_sec_t secs; clock_sec_t utc_offset_secs; clock_usec_t microsys;
clock_usec_t microsecs;
clock_usec_t utc_offset_microsecs;
spl_t s;
struct bintime bt;
struct bintime monotonic_bt;
struct latched_time monotonic_time;
uint64_t monotonic_usec_total;
clock_sec_t sys2, monotonic_sec;
clock_usec_t microsys2, monotonic_usec;
size_t size;
PEGetUTCTimeOfDay(&secs, &microsecs);
clock_get_system_microtime(&sys, &microsys);
size = sizeof(monotonic_time);
if (kernel_sysctlbyname("kern.monotonicclock_usecs", &monotonic_time, &size, NULL, 0) != 0) {
has_monotonic_clock = 0;
os_log(OS_LOG_DEFAULT, "%s system does not have monotonic clock.\n", __func__);
} else {
has_monotonic_clock = 1;
monotonic_usec_total = monotonic_time.monotonic_time_usec;
absolutetime_to_microtime(monotonic_time.mach_time, &sys2, &microsys2);
os_log(OS_LOG_DEFAULT, "%s system has monotonic clock.\n", __func__);
}
s = splclock();
clock_lock();
commpage_disable_timestamp();
utc_offset_secs = secs;
utc_offset_microsecs = microsecs;
#if DEVELOPMENT || DEBUG
last_utc_sec = secs;
last_utc_usec = microsecs;
last_sys_sec = sys;
last_sys_usec = microsys;
if (secs > max_utc_sec)
max_utc_sec = secs;
#endif
if ((sys > secs) || ((sys == secs) && (microsys > microsecs))) {
os_log(OS_LOG_DEFAULT, "%s WARNING: PMU offset is less then sys PMU %lu s %d u sys %lu s %d u\n",
__func__, (unsigned long) secs, microsecs, (unsigned long)sys, microsys);
secs = utc_offset_secs = sys;
microsecs = utc_offset_microsecs = microsys;
}
TIME_SUB(utc_offset_secs, sys, utc_offset_microsecs, microsys, USEC_PER_SEC);
clock2bintime(&utc_offset_secs, &utc_offset_microsecs, &bt);
clock_boottime = secs;
clock_boottime_usec = microsecs;
commpage_update_boottime(clock_boottime * USEC_PER_SEC + clock_boottime_usec);
nanoseconds_to_absolutetime((uint64_t)NSEC_PER_SEC, &ticks_per_sec);
clock_calend.boottime = bt;
clock_calend.bintime = bt;
clock_calend.offset.sec = 0;
clock_calend.offset.frac = 0;
clock_calend.tick_scale_x = (uint64_t)1 << 63;
clock_calend.tick_scale_x /= ticks_per_sec;
clock_calend.tick_scale_x *= 2;
clock_calend.s_scale_ns = NSEC_PER_SEC;
clock_calend.s_adj_nsx = 0;
if (has_monotonic_clock) {
monotonic_sec = monotonic_usec_total / (clock_sec_t)USEC_PER_SEC;
monotonic_usec = monotonic_usec_total % (clock_usec_t)USEC_PER_SEC;
TIME_SUB(monotonic_sec, sys2, monotonic_usec, microsys2, USEC_PER_SEC);
clock2bintime(&monotonic_sec, &monotonic_usec, &monotonic_bt);
clock_calend.basesleep = monotonic_bt;
} else {
clock_calend.basesleep = bt;
}
commpage_update_mach_continuous_time(mach_absolutetime_asleep);
#if DEVELOPMENT || DEBUG
struct clock_calend clock_calend_cp = clock_calend;
#endif
clock_unlock();
splx(s);
print_all_clock_variables(__func__, &secs, &microsecs, &sys, &microsys, &clock_calend_cp);
host_notify_calendar_change();
#if CONFIG_DTRACE
clock_track_calend_nowait();
#endif
}
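/*
 * Called on wake from sleep: re-read the monotonic clock (or PMU), subtract
 * the mach-time-derived uptime to get the new sleep reference, and credit
 * the increase over basesleep as time slept by advancing offset, bintime,
 * mach_absolutetime_asleep and the continuous-time commpage value.
 */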
void
clock_wakeup_calendar(void)
{
clock_sec_t sys;
clock_sec_t secs;
clock_usec_t microsys;
clock_usec_t microsecs;
spl_t s;
struct bintime bt, last_sleep_bt;
clock_sec_t basesleep_s, last_sleep_sec;
clock_usec_t basesleep_us, last_sleep_usec;
struct latched_time monotonic_time;
uint64_t monotonic_usec_total;
size_t size;
clock_sec_t secs_copy;
clock_usec_t microsecs_copy;
#if DEVELOPMENT || DEBUG
clock_sec_t utc_sec;
clock_usec_t utc_usec;
PEGetUTCTimeOfDay(&utc_sec, &utc_usec);
#endif
if (has_monotonic_clock) {
size = sizeof(monotonic_time);
if (kernel_sysctlbyname("kern.monotonicclock_usecs", &monotonic_time, &size, NULL, 0) != 0) {
panic("%s: could not call kern.monotonicclock_usecs", __func__);
}
monotonic_usec_total = monotonic_time.monotonic_time_usec;
absolutetime_to_microtime(monotonic_time.mach_time, &sys, &microsys);
secs = monotonic_usec_total / (clock_sec_t)USEC_PER_SEC;
microsecs = monotonic_usec_total % (clock_usec_t)USEC_PER_SEC;
} else {
PEGetUTCTimeOfDay(&secs, &microsecs);
clock_get_system_microtime(&sys, &microsys);
}
s = splclock();
clock_lock();
commpage_disable_timestamp();
secs_copy = secs;
microsecs_copy = microsecs;
#if DEVELOPMENT || DEBUG
struct clock_calend clock_calend_cp1 = clock_calend;
#endif
#if DEVELOPMENT || DEBUG
last_utc_sec = secs;
last_utc_usec = microsecs;
last_sys_sec = sys;
last_sys_usec = microsys;
if (secs > max_utc_sec)
max_utc_sec = secs;
#endif
if ((sys > secs) || ((sys == secs) && (microsys > microsecs))) {
os_log(OS_LOG_DEFAULT, "%s WARNING: %s is less then sys %s %lu s %d u sys %lu s %d u\n",
__func__, (has_monotonic_clock)?"monotonic":"PMU", (has_monotonic_clock)?"monotonic":"PMU", (unsigned long)secs, microsecs, (unsigned long)sys, microsys);
secs = sys;
microsecs = microsys;
}
TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);
clock2bintime(&secs, &microsecs, &bt);
if ((bt.sec > clock_calend.basesleep.sec) ||
((bt.sec == clock_calend.basesleep.sec) && (bt.frac > clock_calend.basesleep.frac))) {
last_sleep_bt = bt;
bintime_sub(&last_sleep_bt, &clock_calend.basesleep);
clock_calend.basesleep = bt;
bintime2usclock(&last_sleep_bt, &last_sleep_sec, &last_sleep_usec);
bintime2absolutetime(&last_sleep_bt, &mach_absolutetime_last_sleep);
mach_absolutetime_asleep += mach_absolutetime_last_sleep;
bintime_add(&clock_calend.offset, &last_sleep_bt);
bintime_add(&clock_calend.bintime, &last_sleep_bt);
} else{
mach_absolutetime_last_sleep = 0;
last_sleep_sec = last_sleep_usec = 0;
bintime2usclock(&clock_calend.basesleep, &basesleep_s, &basesleep_us);
os_log(OS_LOG_DEFAULT, "%s WARNING: basesleep (%lu s %d u) > %s-sys (%lu s %d u) \n",
__func__, (unsigned long) basesleep_s, basesleep_us, (has_monotonic_clock)?"monotonic":"PMU", (unsigned long) secs_copy, microsecs_copy );
}
KERNEL_DEBUG_CONSTANT(
MACHDBG_CODE(DBG_MACH_CLOCK,MACH_EPOCH_CHANGE) | DBG_FUNC_NONE,
(uintptr_t) mach_absolutetime_last_sleep,
(uintptr_t) mach_absolutetime_asleep,
(uintptr_t) (mach_absolutetime_last_sleep >> 32),
(uintptr_t) (mach_absolutetime_asleep >> 32),
0);
commpage_update_mach_continuous_time(mach_absolutetime_asleep);
adjust_cont_time_thread_calls();
#if DEVELOPMENT || DEBUG
struct clock_calend clock_calend_cp = clock_calend;
#endif
clock_unlock();
splx(s);
#if DEVELOPMENT || DEBUG
if (g_should_log_clock_adjustments) {
os_log(OS_LOG_DEFAULT, "PMU was %lu s %d u\n",(unsigned long) utc_sec, utc_usec);
os_log(OS_LOG_DEFAULT, "last sleep was %lu s %d u\n",(unsigned long) last_sleep_sec, last_sleep_usec);
print_all_clock_variables("clock_wakeup_calendar:BEFORE",
&secs_copy, &microsecs_copy, &sys, &microsys, &clock_calend_cp1);
print_all_clock_variables("clock_wakeup_calendar:AFTER", NULL, NULL, NULL, NULL, &clock_calend_cp);
}
#endif
host_notify_calendar_change();
#if CONFIG_DTRACE
clock_track_calend_nowait();
#endif
}
void
clock_get_boottime_nanotime(
clock_sec_t *secs,
clock_nsec_t *nanosecs)
{
spl_t s;
s = splclock();
clock_lock();
*secs = (clock_sec_t)clock_boottime;
*nanosecs = (clock_nsec_t)clock_boottime_usec * NSEC_PER_USEC;
clock_unlock();
splx(s);
}
void
clock_get_boottime_microtime(
clock_sec_t *secs,
clock_usec_t *microsecs)
{
spl_t s;
s = splclock();
clock_lock();
*secs = (clock_sec_t)clock_boottime;
*microsecs = (clock_nsec_t)clock_boottime_usec;
clock_unlock();
splx(s);
}
static void
mach_wait_until_continue(
__unused void *parameter,
wait_result_t wresult)
{
thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
}
kern_return_t
mach_wait_until_trap(
struct mach_wait_until_trap_args *args)
{
uint64_t deadline = args->deadline;
wait_result_t wresult;
wresult = assert_wait_deadline_with_leeway((event_t)mach_wait_until_trap, THREAD_ABORTSAFE,
TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
if (wresult == THREAD_WAITING)
wresult = thread_block(mach_wait_until_continue);
return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
}
void
clock_delay_until(
uint64_t deadline)
{
uint64_t now = mach_absolute_time();
if (now >= deadline)
return;
_clock_delay_until_deadline(deadline - now, deadline);
}
void
_clock_delay_until_deadline(
uint64_t interval,
uint64_t deadline)
{
_clock_delay_until_deadline_with_leeway(interval, deadline, 0);
}
void
_clock_delay_until_deadline_with_leeway(
uint64_t interval,
uint64_t deadline,
uint64_t leeway)
{
if (interval == 0)
return;
if ( ml_delay_should_spin(interval) ||
get_preemption_level() != 0 ||
ml_get_interrupts_enabled() == FALSE ) {
machine_delay_until(interval, deadline);
} else {
if (leeway) {
assert_wait_deadline_with_leeway((event_t)clock_delay_until, THREAD_UNINT, TIMEOUT_URGENCY_LEEWAY, deadline, leeway);
} else {
assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline);
}
thread_block(THREAD_CONTINUE_NULL);
}
}
void
delay_for_interval(
uint32_t interval,
uint32_t scale_factor)
{
uint64_t abstime;
clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
_clock_delay_until_deadline(abstime, mach_absolute_time() + abstime);
}
void
delay_for_interval_with_leeway(
uint32_t interval,
uint32_t leeway,
uint32_t scale_factor)
{
uint64_t abstime_interval;
uint64_t abstime_leeway;
clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime_interval);
clock_interval_to_absolutetime_interval(leeway, scale_factor, &abstime_leeway);
_clock_delay_until_deadline_with_leeway(abstime_interval, mach_absolute_time() + abstime_interval, abstime_leeway);
}
void
delay(
int usec)
{
delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
}
void
clock_interval_to_deadline(
uint32_t interval,
uint32_t scale_factor,
uint64_t *result)
{
uint64_t abstime;
clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
*result = mach_absolute_time() + abstime;
}
void
clock_absolutetime_interval_to_deadline(
uint64_t abstime,
uint64_t *result)
{
*result = mach_absolute_time() + abstime;
}
void
clock_continuoustime_interval_to_deadline(
uint64_t conttime,
uint64_t *result)
{
*result = mach_continuous_time() + conttime;
}
void
clock_get_uptime(
uint64_t *result)
{
*result = mach_absolute_time();
}
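/*
 * Advance *deadline by one interval; if the result is already in the past
 * (a missed period), resynchronize it to one interval past the current time.
 */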
void
clock_deadline_for_periodic_event(
uint64_t interval,
uint64_t abstime,
uint64_t *deadline)
{
assert(interval != 0);
*deadline += interval;
if (*deadline <= abstime) {
*deadline = abstime + interval;
abstime = mach_absolute_time();
if (*deadline <= abstime)
*deadline = abstime + interval;
}
}
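/*
 * Lock-free continuous time: absolute time plus accumulated sleep time.
 * The loop retries if mach_absolutetime_asleep changed while it was being
 * read, which can happen when the wake path updates the sleep accounting.
 */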
uint64_t
mach_continuous_time(void)
{
while(1) {
uint64_t read1 = mach_absolutetime_asleep;
uint64_t absolute = mach_absolute_time();
OSMemoryBarrier();
uint64_t read2 = mach_absolutetime_asleep;
if(__builtin_expect(read1 == read2, 1)) {
return absolute + read1;
}
}
}
uint64_t
mach_continuous_approximate_time(void)
{
while(1) {
uint64_t read1 = mach_absolutetime_asleep;
uint64_t absolute = mach_approximate_time();
OSMemoryBarrier();
uint64_t read2 = mach_absolutetime_asleep;
if(__builtin_expect(read1 == read2, 1)) {
return absolute + read1;
}
}
}
uint64_t
continuoustime_to_absolutetime(uint64_t conttime) {
if (conttime <= mach_absolutetime_asleep)
return 0;
else
return conttime - mach_absolutetime_asleep;
}
uint64_t
absolutetime_to_continuoustime(uint64_t abstime) {
return abstime + mach_absolutetime_asleep;
}
#if CONFIG_DTRACE
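/*
 * dtrace may need calendar time from probe context, where clock_lock cannot
 * be taken.  clock_track_calend_nowait() keeps two generation-counted copies
 * of clock_calend (flipflop[]); the low bit of 'gen' marks a copy that is
 * being updated, so a reader retries on the other element until it sees a
 * stable generation.
 */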
void
clock_get_calendar_nanotime_nowait(
clock_sec_t *secs,
clock_nsec_t *nanosecs)
{
int i = 0;
uint64_t now;
struct unlocked_clock_calend stable;
struct bintime bt;
for (;;) {
stable = flipflop[i];
(void)hw_atomic_and(&stable.gen, ~(uint32_t)1);
if (flipflop[i].gen == stable.gen)
break;
i ^= 1;
}
now = mach_absolute_time();
bt = get_scaled_time(now);
bintime_add(&bt, &clock_calend.bintime);
bintime2nsclock(&bt, secs, nanosecs);
}
static void
clock_track_calend_nowait(void)
{
int i;
for (i = 0; i < 2; i++) {
struct clock_calend tmp = clock_calend;
(void)hw_atomic_or(&flipflop[i].gen, 1);
flipflop[i].calend = tmp;
(void)hw_atomic_add(&flipflop[i].gen, 1);
}
}
#endif