#include <mach/mach_types.h>
#include <kern/clock.h>
#include <kern/processor.h>
#include <kern/timer_call.h>
#include <kern/timer_queue.h>
#include <kern/call_entry.h>
#include <kern/thread.h>
#include <sys/kdebug.h>
#if CONFIG_DTRACE
#include <mach/sdt.h>
#endif
/* Enable queue/entry lock assertions on DEBUG kernels only. */
#if DEBUG
#define TIMER_ASSERT 1
#endif

/* Optional kprintf debug logging; compiled out unless TIMER_DBG is set. */
#if TIMER_DBG
#define DBG(x...) kprintf("DBG: " x);
#else
#define DBG(x...)
#endif

/* kdebug tracepoints for the DECR_* timer class; no-op unless TIMER_TRACE. */
#if TIMER_TRACE
#define TIMER_KDEBUG_TRACE KERNEL_DEBUG_CONSTANT_IST
#else
#define TIMER_KDEBUG_TRACE(x...)
#endif

/* Lock groups/attributes for per-CPU timer queues and the long-term queue. */
lck_grp_t timer_call_lck_grp;
lck_attr_t timer_call_lck_attr;
lck_grp_attr_t timer_call_lck_grp_attr;

lck_grp_t timer_longterm_lck_grp;
lck_attr_t timer_longterm_lck_attr;
lck_grp_attr_t timer_longterm_lck_grp_attr;

/* Timer queues are protected by a mutex interlock taken spin-style. */
#define timer_queue_lock_spin(queue) \
lck_mtx_lock_spin_always(&queue->lock_data)

#define timer_queue_unlock(queue) \
lck_mtx_unlock_always(&queue->lock_data)

/* Convenience casts between queue_t / mpqueue_head_t / timer_call_t views. */
#define QUEUE(x) ((queue_t)(x))
#define MPQUEUE(x) ((mpqueue_head_t *)(x))
#define TIMER_CALL(x) ((timer_call_t)(x))
/* Address of the embedded call_entry within a timer_call. */
#define TCE(x) (&(x->call_entry))

/*
 * Long-term threshold: timers further out than this are parked on the
 * long-term queue instead of a per-CPU queue. Disabled (EndOfAllTime)
 * on non-x86_64 configurations.
 */
#define TIMER_LONGTERM_NONE EndOfAllTime
#if defined(__x86_64__)
#define TIMER_LONGTERM_THRESHOLD (1ULL * NSEC_PER_SEC)
#else
#define TIMER_LONGTERM_THRESHOLD TIMER_LONGTERM_NONE
#endif

/* Threshold state/statistics for the long-term mechanism. */
typedef struct {
	uint64_t interval;      /* longterm horizon; TIMER_LONGTERM_NONE disables */
	uint64_t margin;        /* interval/10 margin applied to deadline_set */
	uint64_t deadline;      /* earliest deadline currently on longterm queue */
	uint64_t preempted;     /* a new earlier deadline preempting 'deadline' */
	timer_call_t call;      /* the call owning the threshold deadline */
	uint64_t deadline_set;  /* actual armed deadline (deadline - margin - latency) */
	timer_call_data_t timer;    /* the threshold callout itself */
	uint64_t scans;         /* stats: full queue scans */
	uint64_t preempts;      /* stats: threshold preemptions */
	uint64_t latency;       /* smoothed callout latency (99:1 EWMA) */
	uint64_t latency_min;
	uint64_t latency_max;
} threshold_t;

/* Singleton state for the long-term timer queue. */
typedef struct {
	mpqueue_head_t queue;   /* the long-term queue (unsorted, tail-enqueued) */
	uint64_t enqueues;
	uint64_t dequeues;
	uint64_t escalates;     /* timers moved back to the master queue */
	uint64_t scan_time;     /* abstime of the last scan */
	threshold_t threshold;
} timer_longterm_t;

timer_longterm_t timer_longterm;

/* NULL until timer_longterm_init() publishes &timer_longterm.queue. */
static mpqueue_head_t *timer_longterm_queue = NULL;

static void timer_longterm_init(void);
static void timer_longterm_callout(
timer_call_param_t p0,
timer_call_param_t p1);
extern void timer_longterm_scan(
timer_longterm_t *tlp,
uint64_t now);
static void timer_longterm_update(
timer_longterm_t *tlp);
static void timer_longterm_update_locked(
timer_longterm_t *tlp);
static mpqueue_head_t * timer_longterm_enqueue_unlocked(
timer_call_t call,
uint64_t now,
uint64_t deadline,
mpqueue_head_t ** old_queue,
uint64_t soft_deadline,
uint64_t ttd,
timer_call_param_t param1,
uint32_t callout_flags);
static void timer_longterm_dequeued_locked(
timer_call_t call);

/* Statistics for timers entered with already-past deadlines. */
uint64_t past_deadline_timers;
uint64_t past_deadline_deltas;
uint64_t past_deadline_longest;
uint64_t past_deadline_shortest = ~0ULL;

/* Past-deadline timers are pushed this far (ns) into the future. */
enum {PAST_DEADLINE_TIMER_ADJUSTMENT_NS = 10 * 1000};
uint64_t past_deadline_timer_adjustment;

static boolean_t timer_call_enter_internal(timer_call_t call, timer_call_param_t param1, uint64_t deadline, uint64_t leeway, uint32_t flags, boolean_t ratelimited);
boolean_t mach_timer_coalescing_enabled = TRUE;

mpqueue_head_t *timer_call_enqueue_deadline_unlocked(
timer_call_t call,
mpqueue_head_t *queue,
uint64_t deadline,
uint64_t soft_deadline,
uint64_t ttd,
timer_call_param_t param1,
uint32_t flags);
mpqueue_head_t *timer_call_dequeue_unlocked(
timer_call_t call);

/* Coalescing parameters, converted to abstime in timer_call_init_abstime(). */
timer_coalescing_priority_params_t tcoal_prio_params;

/* Per-class coalescing counters, kept only when TCOAL_PRIO_STATS is set. */
#if TCOAL_PRIO_STATS
int32_t nc_tcl, rt_tcl, bg_tcl, kt_tcl, fp_tcl, ts_tcl, qos_tcl;
#define TCOAL_PRIO_STAT(x) (x++)
#else
#define TCOAL_PRIO_STAT(x)
#endif
static void
timer_call_init_abstime(void)
{
int i;
uint64_t result;
timer_coalescing_priority_params_ns_t * tcoal_prio_params_init = timer_call_get_priority_params();
nanoseconds_to_absolutetime(PAST_DEADLINE_TIMER_ADJUSTMENT_NS, &past_deadline_timer_adjustment);
nanoseconds_to_absolutetime(tcoal_prio_params_init->idle_entry_timer_processing_hdeadline_threshold_ns, &result);
tcoal_prio_params.idle_entry_timer_processing_hdeadline_threshold_abstime = (uint32_t)result;
nanoseconds_to_absolutetime(tcoal_prio_params_init->interrupt_timer_coalescing_ilat_threshold_ns, &result);
tcoal_prio_params.interrupt_timer_coalescing_ilat_threshold_abstime = (uint32_t)result;
nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_resort_threshold_ns, &result);
tcoal_prio_params.timer_resort_threshold_abstime = (uint32_t)result;
tcoal_prio_params.timer_coalesce_rt_shift = tcoal_prio_params_init->timer_coalesce_rt_shift;
tcoal_prio_params.timer_coalesce_bg_shift = tcoal_prio_params_init->timer_coalesce_bg_shift;
tcoal_prio_params.timer_coalesce_kt_shift = tcoal_prio_params_init->timer_coalesce_kt_shift;
tcoal_prio_params.timer_coalesce_fp_shift = tcoal_prio_params_init->timer_coalesce_fp_shift;
tcoal_prio_params.timer_coalesce_ts_shift = tcoal_prio_params_init->timer_coalesce_ts_shift;
nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_coalesce_rt_ns_max,
&tcoal_prio_params.timer_coalesce_rt_abstime_max);
nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_coalesce_bg_ns_max,
&tcoal_prio_params.timer_coalesce_bg_abstime_max);
nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_coalesce_kt_ns_max,
&tcoal_prio_params.timer_coalesce_kt_abstime_max);
nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_coalesce_fp_ns_max,
&tcoal_prio_params.timer_coalesce_fp_abstime_max);
nanoseconds_to_absolutetime(tcoal_prio_params_init->timer_coalesce_ts_ns_max,
&tcoal_prio_params.timer_coalesce_ts_abstime_max);
for (i = 0; i < NUM_LATENCY_QOS_TIERS; i++) {
tcoal_prio_params.latency_qos_scale[i] = tcoal_prio_params_init->latency_qos_scale[i];
nanoseconds_to_absolutetime(tcoal_prio_params_init->latency_qos_ns_max[i],
&tcoal_prio_params.latency_qos_abstime_max[i]);
tcoal_prio_params.latency_tier_rate_limited[i] = tcoal_prio_params_init->latency_tier_rate_limited[i];
}
}
/*
 * timer_call_init: one-time subsystem initialization.
 * Lock attribute/group setup must precede timer_longterm_init(), which
 * creates the long-term queue and its threshold timer.
 */
void
timer_call_init(void)
{
	lck_attr_setdefault(&timer_call_lck_attr);
	lck_grp_attr_setdefault(&timer_call_lck_grp_attr);
	lck_grp_init(&timer_call_lck_grp, "timer_call", &timer_call_lck_grp_attr);

	timer_longterm_init();
	timer_call_init_abstime();
}
/*
 * timer_call_queue_init: initialize one mpqueue timer queue (head, count,
 * interlock) under the shared timer_call lock group.
 */
void
timer_call_queue_init(mpqueue_head_t *queue)
{
	DBG("timer_call_queue_init(%p)\n", queue);
	mpqueue_init(queue, &timer_call_lck_grp, &timer_call_lck_attr);
}
/*
 * timer_call_setup: initialize a caller-owned timer_call with its callout
 * function and first parameter. The call starts unqueued, with its
 * per-entry simple lock initialized and async_dequeue clear.
 */
void
timer_call_setup(
	timer_call_t call,
	timer_call_func_t func,
	timer_call_param_t param0)
{
	DBG("timer_call_setup(%p,%p,%p)\n", call, func, param0);
	call_entry_setup(TCE(call), func, param0);
	simple_lock_init(&(call)->lock, 0);
	call->async_dequeue = FALSE;
}
#if TIMER_ASSERT
/*
 * Remove 'entry' from its current queue and decrement that queue's count.
 * Caller must hold both the entry lock and the queue lock; this variant
 * panics if either is not held. Returns the queue it was removed from.
 */
static __inline__ mpqueue_head_t *
timer_call_entry_dequeue(
	timer_call_t entry)
{
	mpqueue_head_t *old_queue = MPQUEUE(TCE(entry)->queue);

	if (!hw_lock_held((hw_lock_t)&entry->lock))
		panic("_call_entry_dequeue() "
			"entry %p is not locked\n", entry);
	/*
	 * It's also both unnecessary and unsafe to dequeue from a queue
	 * whose lock is not held.
	 */
	if (!hw_lock_held((hw_lock_t)&old_queue->lock_data))
		panic("_call_entry_dequeue() "
			"queue %p is not locked\n", old_queue);

	call_entry_dequeue(TCE(entry));
	old_queue->count--;

	return (old_queue);
}

/*
 * Insert 'entry' into 'queue' in deadline order, then refresh the queue's
 * earliest_soft_deadline from the new head (rate-limited timers contribute
 * their hard deadline instead of the soft one). Caller must hold the entry
 * lock and the queue lock; asserts both, and that the entry is not moving
 * between queues. Returns the entry's previous queue (NULL or 'queue').
 */
static __inline__ mpqueue_head_t *
timer_call_entry_enqueue_deadline(
	timer_call_t entry,
	mpqueue_head_t *queue,
	uint64_t deadline)
{
	mpqueue_head_t *old_queue = MPQUEUE(TCE(entry)->queue);

	if (!hw_lock_held((hw_lock_t)&entry->lock))
		panic("_call_entry_enqueue_deadline() "
			"entry %p is not locked\n", entry);
	if (!hw_lock_held((hw_lock_t)&queue->lock_data))
		panic("_call_entry_enqueue_deadline() "
			"queue %p is not locked\n", queue);
	if (old_queue != NULL && old_queue != queue)
		panic("_call_entry_enqueue_deadline() "
			"old_queue %p != queue", old_queue);

	call_entry_enqueue_deadline(TCE(entry), QUEUE(queue), deadline);

	/* Recompute the queue's cached earliest soft deadline from the head. */
	timer_call_t thead = (timer_call_t)queue_first(&queue->head);
	queue->earliest_soft_deadline = thead->flags & TIMER_CALL_RATELIMITED ? TCE(thead)->deadline : thead->soft_deadline;

	if (old_queue)
		old_queue->count--;
	queue->count++;

	return (old_queue);
}

#else

/* Non-assert variants: identical behavior without the lock checks. */
static __inline__ mpqueue_head_t *
timer_call_entry_dequeue(
	timer_call_t entry)
{
	mpqueue_head_t *old_queue = MPQUEUE(TCE(entry)->queue);

	call_entry_dequeue(TCE(entry));
	old_queue->count--;

	return old_queue;
}

static __inline__ mpqueue_head_t *
timer_call_entry_enqueue_deadline(
	timer_call_t entry,
	mpqueue_head_t *queue,
	uint64_t deadline)
{
	mpqueue_head_t *old_queue = MPQUEUE(TCE(entry)->queue);

	call_entry_enqueue_deadline(TCE(entry), QUEUE(queue), deadline);

	/* Recompute the queue's cached earliest soft deadline from the head. */
	timer_call_t thead = (timer_call_t)queue_first(&queue->head);
	queue->earliest_soft_deadline = thead->flags & TIMER_CALL_RATELIMITED ? TCE(thead)->deadline : thead->soft_deadline;

	if (old_queue)
		old_queue->count--;
	queue->count++;

	return old_queue;
}

#endif
/*
 * Append 'entry' at the tail of 'queue' (no deadline ordering — used for
 * the long-term queue, which is kept unsorted) and bump the element count.
 * Caller is expected to hold the queue lock.
 */
static __inline__ void
timer_call_entry_enqueue_tail(
	timer_call_t entry,
	mpqueue_head_t *queue)
{
	call_entry_enqueue_tail(TCE(entry), QUEUE(queue));
	queue->count++;
}
/*
 * Asynchronously unlink 'entry' from whatever queue holds it, without
 * taking the entry lock (caller holds the queue lock but lost the race
 * for the entry lock). The entry is flagged async_dequeue so the lock
 * holder knows to finish the dequeue bookkeeping itself. No-op when the
 * entry is not on a queue.
 */
static __inline__ void
timer_call_entry_dequeue_async(
	timer_call_t entry)
{
	mpqueue_head_t *host_queue = MPQUEUE(TCE(entry)->queue);

	if (host_queue == NULL)
		return;

	host_queue->count--;
	(void) remque(qe(entry));
	entry->async_dequeue = TRUE;
}
#if TIMER_ASSERT
/* Counters for races resolved via the async_dequeue protocol. */
unsigned timer_call_enqueue_deadline_unlocked_async1;
unsigned timer_call_enqueue_deadline_unlocked_async2;
#endif

/*
 * Enqueue 'call' on 'queue' at 'deadline', handling removal from any queue
 * it currently occupies. Lock order is entry lock first, then queue lock;
 * when the call moves between queues the old queue lock is dropped before
 * the new one is taken. A pending async_dequeue (set by a racing dequeuer)
 * is consumed here instead of a normal dequeue. Returns the previous queue
 * (NULL if the call was idle).
 */
__inline__ mpqueue_head_t *
timer_call_enqueue_deadline_unlocked(
	timer_call_t call,
	mpqueue_head_t *queue,
	uint64_t deadline,
	uint64_t soft_deadline,
	uint64_t ttd,
	timer_call_param_t param1,
	uint32_t callout_flags)
{
	call_entry_t entry = TCE(call);
	mpqueue_head_t *old_queue;

	DBG("timer_call_enqueue_deadline_unlocked(%p,%p,)\n", call, queue);

	simple_lock(&call->lock);

	old_queue = MPQUEUE(entry->queue);

	if (old_queue != NULL) {
		timer_queue_lock_spin(old_queue);
		if (call->async_dequeue) {
			/* Racing dequeuer already unlinked us; just clear state. */
#if TIMER_ASSERT
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
				DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
				call,
				call->async_dequeue,
				TCE(call)->queue,
				0x1c, 0);
			timer_call_enqueue_deadline_unlocked_async1++;
#endif
			call->async_dequeue = FALSE;
			entry->queue = NULL;
		} else if (old_queue != queue) {
			timer_call_entry_dequeue(call);
#if TIMER_ASSERT
			timer_call_enqueue_deadline_unlocked_async2++;
#endif
		}
		if (old_queue == timer_longterm_queue)
			timer_longterm_dequeued_locked(call);
		if (old_queue != queue) {
			timer_queue_unlock(old_queue);
			timer_queue_lock_spin(queue);
		}
	} else {
		timer_queue_lock_spin(queue);
	}

	call->soft_deadline = soft_deadline;
	call->flags = callout_flags;
	TCE(call)->param1 = param1;
	call->ttd = ttd;

	timer_call_entry_enqueue_deadline(call, queue, deadline);
	timer_queue_unlock(queue);
	simple_unlock(&call->lock);

	return (old_queue);
}
#if TIMER_ASSERT
/* Counters for races resolved via the async_dequeue protocol. */
unsigned timer_call_dequeue_unlocked_async1;
unsigned timer_call_dequeue_unlocked_async2;
#endif

/*
 * Remove 'call' from its current queue, if any. Takes the entry lock
 * first, then the queue lock. A pending async_dequeue (a racing party
 * already unlinked the entry) is consumed here rather than performing a
 * second unlink. Returns the queue the call was on, or NULL if idle.
 */
mpqueue_head_t *
timer_call_dequeue_unlocked(
	timer_call_t call)
{
	call_entry_t entry = TCE(call);
	mpqueue_head_t *old_queue;

	DBG("timer_call_dequeue_unlocked(%p)\n", call);

	simple_lock(&call->lock);
	old_queue = MPQUEUE(entry->queue);
#if TIMER_ASSERT
	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
		DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
		call,
		call->async_dequeue,
		TCE(call)->queue,
		0, 0);
#endif
	if (old_queue != NULL) {
		timer_queue_lock_spin(old_queue);
		if (call->async_dequeue) {
			/* Already unlinked by a racing party; just clear state. */
#if TIMER_ASSERT
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
				DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
				call,
				call->async_dequeue,
				TCE(call)->queue,
				0x1c, 0);
			timer_call_dequeue_unlocked_async1++;
#endif
			call->async_dequeue = FALSE;
			entry->queue = NULL;
		} else {
			timer_call_entry_dequeue(call);
		}
		if (old_queue == timer_longterm_queue)
			timer_longterm_dequeued_locked(call);
		timer_queue_unlock(old_queue);
	}
	simple_unlock(&call->lock);
	return (old_queue);
}
/*
 * timer_call_enter_internal: common worker for timer_call_enter*().
 * Computes the coalescing window ("slop") for the deadline, clamps
 * past deadlines forward, selects either the long-term queue or a
 * per-CPU queue, and enqueues the call.
 *
 * Returns TRUE if the call was already armed (this was a re-arm).
 */
static boolean_t
timer_call_enter_internal(
	timer_call_t call,
	timer_call_param_t param1,
	uint64_t deadline,
	uint64_t leeway,
	uint32_t flags,
	boolean_t ratelimited)
{
	mpqueue_head_t *queue = NULL;
	mpqueue_head_t *old_queue;
	spl_t s;
	uint64_t slop;
	uint32_t urgency;
	uint64_t sdeadline, ttd;

	s = splclock();

	sdeadline = deadline;
	uint64_t ctime = mach_absolute_time();

	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
		DECR_TIMER_ENTER | DBG_FUNC_START,
		call,
		param1, deadline, flags, 0);

	urgency = (flags & TIMER_CALL_URGENCY_MASK);

	boolean_t slop_ratelimited = FALSE;
	slop = timer_call_slop(deadline, ctime, urgency, current_thread(), &slop_ratelimited);

	/* A caller-supplied leeway overrides computed slop only if larger. */
	if ((flags & TIMER_CALL_LEEWAY) != 0 && leeway > slop)
		slop = leeway;

	/* Saturate rather than wrap when adding slop near UINT64_MAX. */
	if (UINT64_MAX - deadline <= slop) {
		deadline = UINT64_MAX;
	} else {
		deadline += slop;
	}

	if (__improbable(deadline < ctime)) {
		uint64_t delta = (ctime - deadline);

		past_deadline_timers++;
		past_deadline_deltas += delta;
		/*
		 * Bug fix: record the overshoot (delta) rather than the
		 * absolute deadline, so past_deadline_longest is an actual
		 * delta, comparable with past_deadline_shortest and
		 * consistent with past_deadline_deltas.
		 */
		if (delta > past_deadline_longest)
			past_deadline_longest = delta;
		if (delta < past_deadline_shortest)
			past_deadline_shortest = delta;

		/* Push the deadline slightly into the future so it still fires. */
		deadline = ctime + past_deadline_timer_adjustment;
		sdeadline = deadline;
	}

	if (ratelimited || slop_ratelimited) {
		flags |= TIMER_CALL_RATELIMITED;
	} else {
		flags &= ~TIMER_CALL_RATELIMITED;
	}

	ttd = sdeadline - ctime;
#if CONFIG_DTRACE
	DTRACE_TMR7(callout__create, timer_call_func_t, TCE(call)->func,
		timer_call_param_t, TCE(call)->param0, uint32_t, flags,
		(deadline - sdeadline),
		(ttd >> 32), (unsigned) (ttd & 0xFFFFFFFF), call);
#endif

	/* Try the long-term queue first; it declines local or near timers. */
	if (!ratelimited && !slop_ratelimited) {
		queue = timer_longterm_enqueue_unlocked(call, ctime, deadline, &old_queue, sdeadline, ttd, param1, flags);
	}

	if (queue == NULL) {
		queue = timer_queue_assign(deadline);
		old_queue = timer_call_enqueue_deadline_unlocked(call, queue, deadline, sdeadline, ttd, param1, flags);
	}

#if TIMER_TRACE
	TCE(call)->entry_time = ctime;
#endif

	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
		DECR_TIMER_ENTER | DBG_FUNC_END,
		call,
		(old_queue != NULL), deadline, queue->count, 0);

	splx(s);

	return (old_queue != NULL);
}
/*
 * Arm 'call' to fire at 'deadline' with no second parameter and no
 * caller-specified leeway. Returns TRUE if the call was already armed.
 */
boolean_t
timer_call_enter(
	timer_call_t call,
	uint64_t deadline,
	uint32_t flags)
{
	boolean_t was_armed;

	was_armed = timer_call_enter_internal(call, NULL, deadline, 0, flags, FALSE);
	return was_armed;
}
/*
 * Arm 'call' to fire at 'deadline', passing 'param1' to the callout as
 * its second argument. No caller-specified leeway. Returns TRUE if the
 * call was already armed.
 */
boolean_t
timer_call_enter1(
	timer_call_t call,
	timer_call_param_t param1,
	uint64_t deadline,
	uint32_t flags)
{
	boolean_t was_armed;

	was_armed = timer_call_enter_internal(call, param1, deadline, 0, flags, FALSE);
	return was_armed;
}
/*
 * Arm 'call' with an explicit leeway hint and an optional rate-limited
 * designation. The leeway takes effect only when TIMER_CALL_LEEWAY is set
 * in 'flags'. Returns TRUE if the call was already armed.
 */
boolean_t
timer_call_enter_with_leeway(
	timer_call_t call,
	timer_call_param_t param1,
	uint64_t deadline,
	uint64_t leeway,
	uint32_t flags,
	boolean_t ratelimited)
{
	boolean_t was_armed;

	was_armed = timer_call_enter_internal(call, param1, deadline, leeway, flags, ratelimited);
	return was_armed;
}
/*
 * timer_call_cancel: disarm 'call'. After dequeueing, notify the platform
 * layer (timer_queue_cancel) of the queue's new earliest deadline and
 * refresh its cached earliest_soft_deadline. Returns TRUE if the call was
 * armed when cancelled.
 */
boolean_t
timer_call_cancel(
	timer_call_t call)
{
	mpqueue_head_t *old_queue;
	spl_t s;

	s = splclock();

	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
		DECR_TIMER_CANCEL | DBG_FUNC_START,
		call,
		TCE(call)->deadline, call->soft_deadline, call->flags, 0);

	old_queue = timer_call_dequeue_unlocked(call);

	if (old_queue != NULL) {
		timer_queue_lock_spin(old_queue);
		if (!queue_empty(&old_queue->head)) {
			timer_queue_cancel(old_queue, TCE(call)->deadline, CE(queue_first(&old_queue->head))->deadline);
			timer_call_t thead = (timer_call_t)queue_first(&old_queue->head);
			old_queue->earliest_soft_deadline = thead->flags & TIMER_CALL_RATELIMITED ? TCE(thead)->deadline : thead->soft_deadline;
		}
		else {
			/* Queue is now empty: no next deadline. */
			timer_queue_cancel(old_queue, TCE(call)->deadline, UINT64_MAX);
			old_queue->earliest_soft_deadline = UINT64_MAX;
		}
		timer_queue_unlock(old_queue);
	}
	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
		DECR_TIMER_CANCEL | DBG_FUNC_END,
		call,
		old_queue,
		TCE(call)->deadline - mach_absolute_time(),
		TCE(call)->deadline - TCE(call)->entry_time, 0);
	splx(s);

#if CONFIG_DTRACE
	DTRACE_TMR6(callout__cancel, timer_call_func_t, TCE(call)->func,
		timer_call_param_t, TCE(call)->param0, uint32_t, call->flags, 0,
		(call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF));
#endif

	return (old_queue != NULL);
}
static uint32_t timer_queue_shutdown_lock_skips;
static uint32_t timer_queue_shutdown_discarded;

/*
 * timer_queue_shutdown: drain 'queue' (e.g. when its CPU goes offline).
 * Non-local timers are reassigned to another queue; local timers are
 * discarded (only thread_quantum_expire is expected among them — see the
 * assert). The queue lock is retaken at the top of every iteration since
 * it is dropped to migrate each entry.
 */
void
timer_queue_shutdown(
	mpqueue_head_t *queue)
{
	timer_call_t call;
	mpqueue_head_t *new_queue;
	spl_t s;

	DBG("timer_queue_shutdown(%p)\n", queue);

	s = splclock();

	/* Note comma operator in while expression re-locking each time */
	while (timer_queue_lock_spin(queue), !queue_empty(&queue->head)) {
		call = TIMER_CALL(queue_first(&queue->head));

		if (!simple_lock_try(&call->lock)) {
			/*
			 * Entry lock contended: unlink it asynchronously and let
			 * the lock holder complete the dequeue bookkeeping.
			 */
			timer_queue_shutdown_lock_skips++;
			timer_call_entry_dequeue_async(call);
#if TIMER_ASSERT
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
				DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
				call,
				call->async_dequeue,
				TCE(call)->queue,
				0x2b, 0);
#endif
			timer_queue_unlock(queue);
			continue;
		}

		boolean_t call_local = ((call->flags & TIMER_CALL_LOCAL) != 0);

		/* remove entry from old queue */
		timer_call_entry_dequeue(call);
		timer_queue_unlock(queue);

		if (call_local == FALSE) {
			/* and queue it on new, discarding LOCAL timers */
			new_queue = timer_queue_assign(TCE(call)->deadline);
			timer_queue_lock_spin(new_queue);
			timer_call_entry_enqueue_deadline(
				call, new_queue, TCE(call)->deadline);
			timer_queue_unlock(new_queue);
		} else {
			timer_queue_shutdown_discarded++;
		}

		assert((call_local == FALSE) ||
		    (TCE(call)->func == thread_quantum_expire));

		simple_unlock(&call->lock);
	}

	timer_queue_unlock(queue);
	splx(s);
}
static uint32_t timer_queue_expire_lock_skips;

/*
 * timer_queue_expire_with_options: fire all timers on 'queue' whose soft
 * deadline has passed. The queue lock is dropped around each callout
 * invocation and retaken afterwards. With rescan==TRUE (interrupt context
 * presumed quiesced), rate-limited timers whose hard deadline is still in
 * the future are walked past instead of stopping the sweep, and badly
 * skewed entries are re-sorted by soft deadline. Returns the next hard
 * deadline on the queue (UINT64_MAX if empty).
 */
uint64_t
timer_queue_expire_with_options(
	mpqueue_head_t *queue,
	uint64_t deadline,
	boolean_t rescan)
{
	timer_call_t call = NULL;
	uint32_t tc_iterations = 0;
	DBG("timer_queue_expire(%p,)\n", queue);

	uint64_t cur_deadline = deadline;
	timer_queue_lock_spin(queue);

	while (!queue_empty(&queue->head)) {
		/* Upon processing one or more timer calls, refresh the
		 * deadline to account for time elapsed in the callout
		 */
		if (++tc_iterations > 1)
			cur_deadline = mach_absolute_time();

		if (call == NULL)
			call = TIMER_CALL(queue_first(&queue->head));

		if (call->soft_deadline <= cur_deadline) {
			timer_call_func_t func;
			timer_call_param_t param0, param1;

			TCOAL_DEBUG(0xDDDD0000, queue->earliest_soft_deadline, call->soft_deadline, 0, 0, 0);
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
				DECR_TIMER_EXPIRE | DBG_FUNC_NONE,
				call,
				call->soft_deadline,
				TCE(call)->deadline,
				TCE(call)->entry_time, 0);

			/* Rate-limited timers may fire no earlier than their hard
			 * deadline unless we are rescanning.
			 */
			if ((call->flags & TIMER_CALL_RATELIMITED) &&
			    (TCE(call)->deadline > cur_deadline)) {
				if (rescan == FALSE)
					break;
			}

			if (!simple_lock_try(&call->lock)) {
				/* Contended: async-unlink and move on. */
				timer_queue_expire_lock_skips++;
				timer_call_entry_dequeue_async(call);
				call = NULL;
				continue;
			}

			timer_call_entry_dequeue(call);

			func = TCE(call)->func;
			param0 = TCE(call)->param0;
			param1 = TCE(call)->param1;

			simple_unlock(&call->lock);
			/* Drop the queue lock across the callout invocation. */
			timer_queue_unlock(queue);

			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
				DECR_TIMER_CALLOUT | DBG_FUNC_START,
				call, VM_KERNEL_UNSLIDE(func), param0, param1, 0);

#if CONFIG_DTRACE
			DTRACE_TMR7(callout__start, timer_call_func_t, func,
				timer_call_param_t, param0, unsigned, call->flags,
				0, (call->ttd >> 32),
				(unsigned) (call->ttd & 0xFFFFFFFF), call);
#endif
			/* Publish the running callout's ttd for observability. */
			uint64_t *ttdp = &(PROCESSOR_DATA(current_processor(), timer_call_ttd));
			*ttdp = call->ttd;
			(*func)(param0, param1);
			*ttdp = 0;
#if CONFIG_DTRACE
			DTRACE_TMR4(callout__end, timer_call_func_t, func,
				param0, param1, call);
#endif

			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
				DECR_TIMER_CALLOUT | DBG_FUNC_END,
				call, VM_KERNEL_UNSLIDE(func), param0, param1, 0);
			call = NULL;
			timer_queue_lock_spin(queue);
		} else {
			if (__probable(rescan == FALSE)) {
				break;
			} else {
				/* Re-sort a rate-limited entry whose hard deadline has
				 * drifted too far from its soft deadline.
				 */
				int64_t skew = TCE(call)->deadline - call->soft_deadline;
				assert(TCE(call)->deadline >= call->soft_deadline);

				if (timer_resort_threshold(skew)) {
					if (__probable(simple_lock_try(&call->lock))) {
						timer_call_entry_dequeue(call);
						timer_call_entry_enqueue_deadline(call, queue, call->soft_deadline);
						simple_unlock(&call->lock);
						call = NULL;
					}
				}
				if (call) {
					call = TIMER_CALL(queue_next(qe(call)));
					if (queue_end(&queue->head, qe(call)))
						break;
				}
			}
		}
	}

	/* Refresh the queue's next hard deadline and cached soft deadline. */
	if (!queue_empty(&queue->head)) {
		call = TIMER_CALL(queue_first(&queue->head));
		cur_deadline = TCE(call)->deadline;
		queue->earliest_soft_deadline = (call->flags & TIMER_CALL_RATELIMITED) ? TCE(call)->deadline: call->soft_deadline;
	} else {
		queue->earliest_soft_deadline = cur_deadline = UINT64_MAX;
	}

	timer_queue_unlock(queue);

	return (cur_deadline);
}
/*
 * Fire expired timers on 'queue' without the rescan behavior.
 * Returns the next hard deadline on the queue.
 */
uint64_t
timer_queue_expire(
	mpqueue_head_t *queue,
	uint64_t deadline)
{
	uint64_t next_deadline;

	next_deadline = timer_queue_expire_with_options(queue, deadline, FALSE);
	return next_deadline;
}
extern int serverperfmode;
static uint32_t timer_queue_migrate_lock_skips;

/*
 * timer_queue_migrate: move all timers from 'queue_from' to 'queue_to'
 * (used for idle-time consolidation). Declines (negative return) when:
 * serverperfmode is set (-4), the target queue is empty (-1), the source
 * is empty (-2), the source's earliest deadline beats the target's head
 * (would shorten the target's deadline), or any timer is LOCAL (-3).
 * Locks queue_to then queue_from; interrupts must be disabled.
 * Returns the number of timers migrated, or the negative code.
 */
int
timer_queue_migrate(mpqueue_head_t *queue_from, mpqueue_head_t *queue_to)
{
	timer_call_t call;
	timer_call_t head_to;
	int timers_migrated = 0;

	DBG("timer_queue_migrate(%p,%p)\n", queue_from, queue_to);

	assert(!ml_get_interrupts_enabled());
	assert(queue_from != queue_to);

	if (serverperfmode) {
		/*
		 * if we're running a high end server
		 * avoid migrations... they add latency
		 * and don't save us power under typical
		 * server workloads
		 */
		return -4;
	}

	timer_queue_lock_spin(queue_to);

	/* queue_first of an empty queue is its head; only valid to
	 * dereference after the emptiness check below.
	 */
	head_to = TIMER_CALL(queue_first(&queue_to->head));
	if (queue_empty(&queue_to->head)) {
		timers_migrated = -1;
		goto abort1;
	}

	timer_queue_lock_spin(queue_from);

	if (queue_empty(&queue_from->head)) {
		timers_migrated = -2;
		goto abort2;
	}

	call = TIMER_CALL(queue_first(&queue_from->head));
	if (TCE(call)->deadline < TCE(head_to)->deadline) {
		timers_migrated = 0;
		goto abort2;
	}

	/* perform scan for non-migratable timers */
	do {
		if (call->flags & TIMER_CALL_LOCAL) {
			timers_migrated = -3;
			goto abort2;
		}
		call = TIMER_CALL(queue_next(qe(call)));
	} while (!queue_end(&queue_from->head, qe(call)));

	/* migration loop itself -- both queues are locked */
	while (!queue_empty(&queue_from->head)) {
		call = TIMER_CALL(queue_first(&queue_from->head));
		if (!simple_lock_try(&call->lock)) {
			/* case (2b) lock order inversion, dequeue only */
#ifdef TIMER_ASSERT
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
				DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
				call,
				TCE(call)->queue,
				call->lock.interlock.lock_data,
				0x2b, 0);
#endif
			timer_queue_migrate_lock_skips++;
			timer_call_entry_dequeue_async(call);
			continue;
		}
		timer_call_entry_dequeue(call);
		timer_call_entry_enqueue_deadline(
			call, queue_to, TCE(call)->deadline);
		timers_migrated++;
		simple_unlock(&call->lock);
	}
	queue_from->earliest_soft_deadline = UINT64_MAX;
abort2:
	timer_queue_unlock(queue_from);
abort1:
	timer_queue_unlock(queue_to);

	return timers_migrated;
}
/*
 * Emit a kdebug dump of cpu 'ncpu's timer queue by invoking
 * timer_queue_trace on that cpu (no completion synchronization).
 */
void
timer_queue_trace_cpu(int ncpu)
{
	timer_call_nosync_cpu(
		ncpu,
		(void(*)())timer_queue_trace,
		(void*) timer_queue_cpu(ncpu));
}
/*
 * timer_queue_trace: walk 'queue' under its lock and emit one kdebug
 * record per entry (soft deadline, hard deadline, entry time, func).
 * No-op when kdebug tracing is disabled.
 */
void
timer_queue_trace(
	mpqueue_head_t *queue)
{
	timer_call_t call;
	spl_t s;

	if (!kdebug_enable)
		return;

	s = splclock();
	timer_queue_lock_spin(queue);

	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
		DECR_TIMER_QUEUE | DBG_FUNC_START,
		queue->count, mach_absolute_time(), 0, 0, 0);

	if (!queue_empty(&queue->head)) {
		call = TIMER_CALL(queue_first(&queue->head));
		do {
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
				DECR_TIMER_QUEUE | DBG_FUNC_NONE,
				call->soft_deadline,
				TCE(call)->deadline,
				TCE(call)->entry_time,
				TCE(call)->func,
				0);
			call = TIMER_CALL(queue_next(qe(call)));
		} while (!queue_end(&queue->head, qe(call)));
	}

	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
		DECR_TIMER_QUEUE | DBG_FUNC_END,
		queue->count, mach_absolute_time(), 0, 0, 0);

	timer_queue_unlock(queue);
	splx(s);
}
/*
 * Bookkeeping when a call leaves the long-term queue: count the dequeue
 * and, if this call owned the threshold deadline, forget it so the next
 * scan re-derives the threshold. Caller holds the long-term queue lock.
 */
void
timer_longterm_dequeued_locked(timer_call_t call)
{
	timer_longterm_t *tlp = &timer_longterm;

	tlp->dequeues++;
	if (tlp->threshold.call == call)
		tlp->threshold.call = NULL;
}
/*
 * timer_longterm_enqueue_unlocked: place 'call' on the long-term queue if
 * it qualifies — i.e. it is not LOCAL, the long-term mechanism is enabled,
 * and its deadline lies beyond now + threshold interval. Returns NULL
 * (caller should use a per-CPU queue) when it does not qualify, otherwise
 * the long-term queue. *old_queue receives the call's previous queue.
 * If the new deadline beats the current threshold, the master CPU is asked
 * to re-evaluate via timer_longterm_update.
 */
mpqueue_head_t *
timer_longterm_enqueue_unlocked(timer_call_t call,
	uint64_t now,
	uint64_t deadline,
	mpqueue_head_t **old_queue,
	uint64_t soft_deadline,
	uint64_t ttd,
	timer_call_param_t param1,
	uint32_t callout_flags)
{
	timer_longterm_t *tlp = &timer_longterm;
	boolean_t update_required = FALSE;
	uint64_t longterm_threshold;

	longterm_threshold = now + tlp->threshold.interval;

	/*
	 * Return NULL without doing anything if:
	 *  - this timer is local, or
	 *  - the longterm mechanism is disabled, or
	 *  - this deadline is too short.
	 */
	if ((callout_flags & TIMER_CALL_LOCAL) != 0 ||
	    (tlp->threshold.interval == TIMER_LONGTERM_NONE) ||
	    (deadline <= longterm_threshold))
		return NULL;

	/* Remove from its current queue first, then take entry + queue locks. */
	*old_queue = timer_call_dequeue_unlocked(call);
	assert(!ml_get_interrupts_enabled());
	simple_lock(&call->lock);
	timer_queue_lock_spin(timer_longterm_queue);
	TCE(call)->deadline = deadline;
	TCE(call)->param1 = param1;
	call->ttd = ttd;
	call->soft_deadline = soft_deadline;
	call->flags = callout_flags;
	/* The long-term queue is unsorted: tail-enqueue is sufficient. */
	timer_call_entry_enqueue_tail(call, timer_longterm_queue);

	tlp->enqueues++;

	/*
	 * We'll want to update the current threshold timer if the new
	 * deadline is sooner and no sooner update is in flight.
	 */
	if (deadline < tlp->threshold.deadline &&
	    deadline < tlp->threshold.preempted) {
		tlp->threshold.preempted = deadline;
		tlp->threshold.call = call;
		update_required = TRUE;
	}
	timer_queue_unlock(timer_longterm_queue);
	simple_unlock(&call->lock);

	if (update_required) {
		timer_call_nosync_cpu(
			master_cpu,
			(void (*)(void *)) timer_longterm_update,
			(void *)tlp);
	}

	return timer_longterm_queue;
}
/*
 * timer_longterm_scan: walk the long-term queue on the master CPU.
 * Timers whose soft deadline now falls inside the threshold horizon are
 * escalated: moved onto the master CPU's queue at their hard deadline.
 * The remainder determine the new threshold deadline/call. Runs with
 * interrupts disabled; takes the master queue lock for the duration
 * (caller holds the long-term queue lock — see timer_longterm_update).
 */
void
timer_longterm_scan(timer_longterm_t *tlp,
	uint64_t now)
{
	queue_entry_t qe;
	timer_call_t call;
	uint64_t threshold;
	uint64_t deadline;
	mpqueue_head_t *timer_master_queue;

	assert(!ml_get_interrupts_enabled());
	assert(cpu_number() == master_cpu);

	if (tlp->threshold.interval != TIMER_LONGTERM_NONE)
		threshold = now + tlp->threshold.interval;
	else
		threshold = TIMER_LONGTERM_NONE;

	/* Reset; re-derived from surviving entries below. */
	tlp->threshold.deadline = TIMER_LONGTERM_NONE;
	tlp->threshold.call = NULL;

	if (queue_empty(&timer_longterm_queue->head))
		return;

	timer_master_queue = timer_queue_cpu(master_cpu);
	timer_queue_lock_spin(timer_master_queue);

	qe = queue_first(&timer_longterm_queue->head);
	while (!queue_end(&timer_longterm_queue->head, qe)) {
		call = TIMER_CALL(qe);
		deadline = call->soft_deadline;
		/* Advance before any possible unlink of 'call'. */
		qe = queue_next(qe);
		if (!simple_lock_try(&call->lock)) {
			/* case (2c) lock order inversion, dequeue only */
#ifdef TIMER_ASSERT
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
				DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
				call,
				TCE(call)->queue,
				call->lock.interlock.lock_data,
				0x2c, 0);
#endif
			timer_call_entry_dequeue_async(call);
			continue;
		}
		if (deadline < threshold) {
			/*
			 * Escalate timers coming due within the threshold.
			 */
#ifdef TIMER_ASSERT
			if (deadline < now)
				TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
					DECR_TIMER_OVERDUE | DBG_FUNC_NONE,
					call,
					deadline,
					now,
					threshold,
					0);
#endif
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
				DECR_TIMER_ESCALATE | DBG_FUNC_NONE,
				call,
				TCE(call)->deadline,
				TCE(call)->entry_time,
				TCE(call)->func,
				0);
			tlp->escalates++;
			timer_call_entry_dequeue(call);
			timer_call_entry_enqueue_deadline(
				call, timer_master_queue, TCE(call)->deadline);
			/*
			 * Inform the platform layer a deadline may have changed.
			 */
			(void) timer_queue_assign(deadline);
		} else {
			/* Survivor: track the earliest remaining deadline. */
			if (deadline < tlp->threshold.deadline) {
				tlp->threshold.deadline = deadline;
				tlp->threshold.call = call;
			}
		}
		simple_unlock(&call->lock);
	}

	timer_queue_unlock(timer_master_queue);
}
/*
 * Callout for the long-term threshold timer: trigger a scan/update of the
 * long-term mechanism. p0 is the timer_longterm_t installed at setup time.
 */
void
timer_longterm_callout(timer_call_param_t p0, __unused timer_call_param_t p1)
{
	timer_longterm_update((timer_longterm_t *) p0);
}
/*
 * timer_longterm_update_locked: core of a threshold update. Called on the
 * master CPU with the long-term queue locked. If a preempting (earlier)
 * deadline is pending, adopt it without scanning; otherwise update latency
 * statistics for this firing and rescan the whole queue. Finally derive
 * deadline_set = deadline - margin - smoothed latency, so the threshold
 * timer fires slightly early.
 */
void
timer_longterm_update_locked(timer_longterm_t *tlp)
{
	uint64_t latency;

	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
		DECR_TIMER_UPDATE | DBG_FUNC_START,
		&tlp->queue,
		tlp->threshold.deadline,
		tlp->threshold.preempted,
		tlp->queue.count, 0);

	tlp->scan_time = mach_absolute_time();
	if (tlp->threshold.preempted != TIMER_LONGTERM_NONE) {
		tlp->threshold.preempts++;
		tlp->threshold.deadline = tlp->threshold.preempted;
		tlp->threshold.preempted = TIMER_LONGTERM_NONE;
	} else {
		tlp->threshold.scans++;

		/*
		 * Keep a moving average of our wakeup latency.
		 * Clamp latency to 0 if we fired early (deadline_set in future).
		 */
		if (tlp->scan_time > tlp->threshold.deadline_set)
			latency = tlp->scan_time - tlp->threshold.deadline_set;
		else
			latency = 0;
		if (latency < tlp->threshold.interval) {
			tlp->threshold.latency_min =
				MIN(tlp->threshold.latency_min, latency);
			tlp->threshold.latency_max =
				MAX(tlp->threshold.latency_max, latency);
			/* 99:1 exponentially-weighted moving average. */
			tlp->threshold.latency =
				(tlp->threshold.latency*99 + latency) / 100;
		}

		timer_longterm_scan(tlp, tlp->scan_time);
	}

	tlp->threshold.deadline_set = tlp->threshold.deadline;
	/* The next deadline timer to be set is adjusted */
	if (tlp->threshold.deadline != TIMER_LONGTERM_NONE) {
		tlp->threshold.deadline_set -= tlp->threshold.margin;
		tlp->threshold.deadline_set -= tlp->threshold.latency;
	}

	TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
		DECR_TIMER_UPDATE | DBG_FUNC_END,
		&tlp->queue,
		tlp->threshold.deadline,
		tlp->threshold.scans,
		tlp->queue.count, 0);
}
/*
 * timer_longterm_update: lock the long-term queue, perform the threshold
 * update/scan, and (re)arm the threshold timer at the derived deadline.
 * Must execute on the master CPU (panics otherwise).
 */
void
timer_longterm_update(timer_longterm_t *tlp)
{
	spl_t s = splclock();

	timer_queue_lock_spin(timer_longterm_queue);

	if (cpu_number() != master_cpu)
		panic("timer_longterm_update_master() on non-boot cpu");

	timer_longterm_update_locked(tlp);

	if (tlp->threshold.deadline != TIMER_LONGTERM_NONE)
		timer_call_enter(
			&tlp->threshold.timer,
			tlp->threshold.deadline_set,
			TIMER_CALL_LOCAL | TIMER_CALL_SYS_CRITICAL);

	timer_queue_unlock(timer_longterm_queue);
	splx(s);
}
/*
 * timer_longterm_init: set up the long-term mechanism. The threshold
 * interval defaults to TIMER_LONGTERM_THRESHOLD (disabled entirely in
 * serverperfmode) and may be overridden in milliseconds via the
 * "longterm" boot-arg (0 disables). Creates the queue and the threshold
 * timer, then publishes timer_longterm_queue.
 */
void
timer_longterm_init(void)
{
	uint32_t longterm;
	timer_longterm_t *tlp = &timer_longterm;

	DBG("timer_longterm_init() tlp: %p, queue: %p\n", tlp, &tlp->queue);

	/*
	 * Set the longterm timer threshold. Defaults to TIMER_LONGTERM_THRESHOLD;
	 * overridden longterm boot-arg
	 */
	tlp->threshold.interval = serverperfmode ? TIMER_LONGTERM_NONE
		: TIMER_LONGTERM_THRESHOLD;
	if (PE_parse_boot_argn("longterm", &longterm, sizeof (longterm))) {
		tlp->threshold.interval = (longterm == 0) ?
			TIMER_LONGTERM_NONE :
			longterm * NSEC_PER_MSEC;
	}
	if (tlp->threshold.interval != TIMER_LONGTERM_NONE) {
		printf("Longterm timer threshold: %llu ms\n",
			tlp->threshold.interval / NSEC_PER_MSEC);
		kprintf("Longterm timer threshold: %llu ms\n",
			tlp->threshold.interval / NSEC_PER_MSEC);
		/* Convert the interval from nanoseconds to abstime in place. */
		nanoseconds_to_absolutetime(tlp->threshold.interval,
			&tlp->threshold.interval);
		tlp->threshold.margin = tlp->threshold.interval / 10;
		tlp->threshold.latency_min = EndOfAllTime;
		tlp->threshold.latency_max = 0;
	}

	tlp->threshold.preempted = TIMER_LONGTERM_NONE;
	tlp->threshold.deadline = TIMER_LONGTERM_NONE;

	lck_attr_setdefault(&timer_longterm_lck_attr);
	lck_grp_attr_setdefault(&timer_longterm_lck_grp_attr);
	lck_grp_init(&timer_longterm_lck_grp,
		"timer_longterm", &timer_longterm_lck_grp_attr);
	mpqueue_init(&tlp->queue,
		&timer_longterm_lck_grp, &timer_longterm_lck_attr);

	timer_call_setup(&tlp->threshold.timer,
		timer_longterm_callout, (timer_call_param_t) tlp);

	timer_longterm_queue = &tlp->queue;
}
/* Sysctl OIDs for long-term timer statistics. */
enum {
	THRESHOLD, QCOUNT,
	ENQUEUES, DEQUEUES, ESCALATES, SCANS, PREEMPTS,
	LATENCY, LATENCY_MIN, LATENCY_MAX
};

/*
 * timer_sysctl_get: read one long-term timer statistic. THRESHOLD is
 * reported in milliseconds (0 meaning disabled); all other values are
 * raw counters/abstime. Unknown OIDs return 0.
 */
uint64_t
timer_sysctl_get(int oid)
{
	timer_longterm_t *tlp = &timer_longterm;

	switch (oid) {
	case THRESHOLD:
		return (tlp->threshold.interval == TIMER_LONGTERM_NONE) ?
			0 : tlp->threshold.interval / NSEC_PER_MSEC;
	case QCOUNT:
		return tlp->queue.count;
	case ENQUEUES:
		return tlp->enqueues;
	case DEQUEUES:
		return tlp->dequeues;
	case ESCALATES:
		return tlp->escalates;
	case SCANS:
		return tlp->threshold.scans;
	case PREEMPTS:
		return tlp->threshold.preempts;
	case LATENCY:
		return tlp->threshold.latency;
	case LATENCY_MIN:
		return tlp->threshold.latency_min;
	case LATENCY_MAX:
		return tlp->threshold.latency_max;
	default:
		return 0;
	}
}
/*
 * timer_master_scan: inverse of timer_longterm_scan — walk the master
 * CPU's queue and move any non-LOCAL timer whose hard deadline is beyond
 * the threshold horizon onto the long-term queue, tracking the earliest
 * such deadline for the threshold. Used after the threshold is shortened.
 * Caller holds the long-term queue lock.
 */
static void
timer_master_scan(timer_longterm_t *tlp,
	uint64_t now)
{
	queue_entry_t qe;
	timer_call_t call;
	uint64_t threshold;
	uint64_t deadline;
	mpqueue_head_t *timer_master_queue;

	if (tlp->threshold.interval != TIMER_LONGTERM_NONE)
		threshold = now + tlp->threshold.interval;
	else
		threshold = TIMER_LONGTERM_NONE;

	timer_master_queue = timer_queue_cpu(master_cpu);
	timer_queue_lock_spin(timer_master_queue);

	qe = queue_first(&timer_master_queue->head);
	while (!queue_end(&timer_master_queue->head, qe)) {
		call = TIMER_CALL(qe);
		deadline = TCE(call)->deadline;
		/* Advance before any possible unlink of 'call'. */
		qe = queue_next(qe);
		if ((call->flags & TIMER_CALL_LOCAL) != 0)
			continue;
		if (!simple_lock_try(&call->lock)) {
			/* Contended: async-unlink and move on. */
			timer_call_entry_dequeue_async(call);
			continue;
		}
		if (deadline > threshold) {
			timer_call_entry_dequeue(call);
			timer_call_entry_enqueue_tail(call, timer_longterm_queue);
			if (deadline < tlp->threshold.deadline) {
				tlp->threshold.deadline = deadline;
				tlp->threshold.call = call;
			}
		}
		simple_unlock(&call->lock);
	}
	timer_queue_unlock(timer_master_queue);
}
/*
 * timer_sysctl_set_threshold: change the long-term threshold (value in
 * milliseconds; 0 disables the mechanism). Runs on the master CPU via
 * timer_call_cpu. After cancelling the threshold timer and installing the
 * new interval, rescans: a larger (or disabled) threshold scans the
 * long-term queue for timers that must be escalated; a smaller threshold
 * scans the master queue for timers that now belong on the long-term
 * queue. Statistics are reset.
 */
static void
timer_sysctl_set_threshold(uint64_t value)
{
	timer_longterm_t *tlp = &timer_longterm;
	spl_t s = splclock();
	boolean_t threshold_increase;

	timer_queue_lock_spin(timer_longterm_queue);

	/* Cancel the threshold timer once, up front, for both branches.
	 * (A second, redundant cancel previously issued in the disable
	 * branch has been removed.)
	 */
	timer_call_cancel(&tlp->threshold.timer);

	/*
	 * Set the new threshold and note whether it's increasing.
	 */
	if (value == 0) {
		/*
		 * Disable longterm timer processing.
		 */
		tlp->threshold.interval = TIMER_LONGTERM_NONE;
		threshold_increase = TRUE;
	} else {
		uint64_t old_interval = tlp->threshold.interval;
		tlp->threshold.interval = value * NSEC_PER_MSEC;
		nanoseconds_to_absolutetime(tlp->threshold.interval,
			&tlp->threshold.interval);
		tlp->threshold.margin = tlp->threshold.interval / 10;
		if (old_interval == TIMER_LONGTERM_NONE)
			threshold_increase = FALSE;
		else
			threshold_increase = (tlp->threshold.interval > old_interval);
	}

	if (threshold_increase /* or removal */) {
		/* Escalate timers from the longterm queue */
		timer_longterm_scan(tlp, mach_absolute_time());
	} else {
		/* Threshold reduced or added: scan the master queue for
		 * timers that should now be parked long-term.
		 */
		timer_master_scan(tlp, mach_absolute_time());
	}

	/* Set new timer accordingly */
	tlp->threshold.deadline_set = tlp->threshold.deadline;
	if (tlp->threshold.deadline != TIMER_LONGTERM_NONE) {
		tlp->threshold.deadline_set -= tlp->threshold.margin;
		tlp->threshold.deadline_set -= tlp->threshold.latency;
		timer_call_enter(
			&tlp->threshold.timer,
			tlp->threshold.deadline_set,
			TIMER_CALL_LOCAL | TIMER_CALL_SYS_CRITICAL);
	}

	/* Reset stats */
	tlp->enqueues = 0;
	tlp->dequeues = 0;
	tlp->escalates = 0;
	tlp->threshold.scans = 0;
	tlp->threshold.preempts = 0;
	tlp->threshold.latency = 0;
	tlp->threshold.latency_min = EndOfAllTime;
	tlp->threshold.latency_max = 0;

	timer_queue_unlock(timer_longterm_queue);
	splx(s);
}
/*
 * timer_sysctl_set: write one long-term timer tunable. Only THRESHOLD is
 * writable; the change is applied on the master CPU, synchronously, via
 * timer_call_cpu. Returns KERN_SUCCESS or KERN_INVALID_ARGUMENT.
 */
int
timer_sysctl_set(int oid, uint64_t value)
{
	if (oid != THRESHOLD)
		return KERN_INVALID_ARGUMENT;

	timer_call_cpu(
		master_cpu,
		(void (*)(void *)) timer_sysctl_set_threshold,
		(void *) value);
	return KERN_SUCCESS;
}
/*
 * tcoal_qos_adjust: apply latency-QoS-tier coalescing parameters for
 * thread 't'. Writes shift/max/ratelimited outputs and returns TRUE only
 * when the thread belongs to a task with a non-zero effective latency QoS
 * tier; otherwise leaves the outputs untouched and returns FALSE.
 */
static boolean_t tcoal_qos_adjust(thread_t t, int32_t *tshift, uint64_t *tmax_abstime, boolean_t *pratelimited) {
	uint32_t latency_qos;
	task_t ctask = t->task;

	if (ctask == NULL)
		return FALSE;

	latency_qos = proc_get_effective_thread_policy(t, TASK_POLICY_LATENCY_QOS);
	assert(latency_qos <= NUM_LATENCY_QOS_TIERS);

	if (latency_qos == 0)
		return FALSE;

	/* Tier N maps to parameter slot N-1. */
	*tshift = tcoal_prio_params.latency_qos_scale[latency_qos - 1];
	*tmax_abstime = tcoal_prio_params.latency_qos_abstime_max[latency_qos - 1];
	*pratelimited = tcoal_prio_params.latency_tier_rate_limited[latency_qos - 1];

	return TRUE;
}
/*
 * timer_compute_leeway: select a coalescing shift and window cap for the
 * arming thread. For user-urgency timers, classification precedence is:
 * realtime/critical > darwin-bg or user-background (with latency-QoS
 * override if its cap is at least the BG cap) > kernel priority > fixed
 * sched mode > latency QoS > timeshare > none. Kernel-urgency timers use
 * the BG parameters when SYS_BACKGROUND, else the KT parameters.
 */
static void
timer_compute_leeway(thread_t cthread, int32_t urgency, int32_t *tshift, uint64_t *tmax_abstime, boolean_t *pratelimited) {
	int16_t tpri = cthread->sched_pri;
	if ((urgency & TIMER_CALL_USER_MASK) != 0) {
		if (tpri >= BASEPRI_RTQUEUES ||
		urgency == TIMER_CALL_USER_CRITICAL) {
			*tshift = tcoal_prio_params.timer_coalesce_rt_shift;
			*tmax_abstime = tcoal_prio_params.timer_coalesce_rt_abstime_max;
			TCOAL_PRIO_STAT(rt_tcl);
		} else if (proc_get_effective_thread_policy(cthread, TASK_POLICY_DARWIN_BG) ||
		    (urgency == TIMER_CALL_USER_BACKGROUND)) {
			/* If timer owner is the background task, do not adjust
			 * its QoS upwards: keep the QoS parameters only when
			 * their window is at least as wide as the BG window.
			 */
			if (tcoal_qos_adjust(cthread, tshift, tmax_abstime, pratelimited)) {
				if (*tmax_abstime > tcoal_prio_params.timer_coalesce_bg_abstime_max) {
					return;
				} else {
					*pratelimited = FALSE;
				}
			}
			*tshift = tcoal_prio_params.timer_coalesce_bg_shift;
			*tmax_abstime = tcoal_prio_params.timer_coalesce_bg_abstime_max;
			TCOAL_PRIO_STAT(bg_tcl);
		} else if (tpri >= MINPRI_KERNEL) {
			*tshift = tcoal_prio_params.timer_coalesce_kt_shift;
			*tmax_abstime = tcoal_prio_params.timer_coalesce_kt_abstime_max;
			TCOAL_PRIO_STAT(kt_tcl);
		} else if (cthread->sched_mode == TH_MODE_FIXED) {
			*tshift = tcoal_prio_params.timer_coalesce_fp_shift;
			*tmax_abstime = tcoal_prio_params.timer_coalesce_fp_abstime_max;
			TCOAL_PRIO_STAT(fp_tcl);
		} else if (tcoal_qos_adjust(cthread, tshift, tmax_abstime, pratelimited)) {
			TCOAL_PRIO_STAT(qos_tcl);
		} else if (cthread->sched_mode == TH_MODE_TIMESHARE) {
			*tshift = tcoal_prio_params.timer_coalesce_ts_shift;
			*tmax_abstime = tcoal_prio_params.timer_coalesce_ts_abstime_max;
			TCOAL_PRIO_STAT(ts_tcl);
		} else {
			/* No classification matched: outputs left unchanged. */
			TCOAL_PRIO_STAT(nc_tcl);
		}
	} else if (urgency == TIMER_CALL_SYS_BACKGROUND) {
		*tshift = tcoal_prio_params.timer_coalesce_bg_shift;
		*tmax_abstime = tcoal_prio_params.timer_coalesce_bg_abstime_max;
		TCOAL_PRIO_STAT(bg_tcl);
	} else {
		*tshift = tcoal_prio_params.timer_coalesce_kt_shift;
		*tmax_abstime = tcoal_prio_params.timer_coalesce_kt_abstime_max;
		TCOAL_PRIO_STAT(kt_tcl);
	}
}
/* User idle level knob in [0, 128]; scales coalescing windows upward. */
int timer_user_idle_level;

/*
 * timer_call_slop: compute the coalescing window ("slop") for a timer
 * armed by 'cthread' with the given urgency. Returns 0 when coalescing is
 * disabled, the deadline is not in the future, or the timer is
 * SYS_CRITICAL. Otherwise the window is a shift of the time-to-deadline,
 * capped per class, then widened by up to 2x as the user idle level
 * approaches 128. *pratelimited may be set by the classification.
 */
uint64_t
timer_call_slop(uint64_t deadline, uint64_t now, uint32_t flags, thread_t cthread, boolean_t *pratelimited)
{
	uint32_t urgency = (flags & TIMER_CALL_URGENCY_MASK);

	if (!mach_timer_coalescing_enabled ||
	    (deadline <= now) || (urgency == TIMER_CALL_SYS_CRITICAL))
		return 0;

	int32_t shift = 0;
	uint64_t window_max = 0;
	uint64_t time_to_deadline = deadline - now;
	uint64_t window;

	timer_compute_leeway(cthread, urgency, &shift, &window_max, pratelimited);

	if (shift >= 0)
		window = MIN(time_to_deadline >> shift, window_max);
	else
		window = MIN(time_to_deadline << (-shift), window_max);

	/* Apply adjustments derived from "user idle level" heuristic */
	window += (window * timer_user_idle_level) >> 7;

	return window;
}
/* Return the current user idle level (0..128) used to widen timer slop. */
int
timer_get_user_idle_level(void) {
	return timer_user_idle_level;
}
/*
 * timer_set_user_idle_level: set the user idle level (valid range 0..128).
 * When the level drops, existing timers may deserve narrower windows, so
 * the platform timer state is re-evaluated. Returns KERN_SUCCESS or
 * KERN_INVALID_ARGUMENT for an out-of-range level.
 */
kern_return_t timer_set_user_idle_level(int ilevel) {
	if (ilevel < 0 || ilevel > 128)
		return KERN_INVALID_ARGUMENT;

	boolean_t reevaluate = (ilevel < timer_user_idle_level);

	timer_user_idle_level = ilevel;

	if (reevaluate)
		ml_timer_evaluate();

	return KERN_SUCCESS;
}