#include <mach/mach_types.h>
#include <kern/clock.h>
#include <kern/processor.h>
#include <kern/etimer.h>
#include <kern/timer_call.h>
#include <kern/timer_queue.h>
#include <kern/call_entry.h>
#include <sys/kdebug.h>
#if CONFIG_DTRACE && (DEVELOPMENT || DEBUG )
#include <mach/sdt.h>
#endif
#if DEBUG
#define TIMER_ASSERT 1
#endif
#if TIMER_DBG
#define DBG(x...) kprintf("DBG: " x);
#else
#define DBG(x...)
#endif
lck_grp_t timer_call_lck_grp;
lck_attr_t timer_call_lck_attr;
lck_grp_attr_t timer_call_lck_grp_attr;
#define timer_call_lock_spin(queue) \
lck_mtx_lock_spin_always(&queue->lock_data)
#define timer_call_unlock(queue) \
lck_mtx_unlock_always(&queue->lock_data)
#define QUEUE(x) ((queue_t)(x))
#define MPQUEUE(x) ((mpqueue_head_t *)(x))
#define TIMER_CALL(x) ((timer_call_t)(x))
uint64_t past_deadline_timers;
uint64_t past_deadline_deltas;
uint64_t past_deadline_longest;
uint64_t past_deadline_shortest = ~0ULL;
enum {PAST_DEADLINE_TIMER_ADJUSTMENT_NS = 10 * 1000};
uint64_t past_deadline_timer_adjustment;
static boolean_t timer_call_enter_internal(timer_call_t call, timer_call_param_t param1, uint64_t deadline, uint32_t flags);
boolean_t mach_timer_coalescing_enabled = TRUE;
mpqueue_head_t *timer_call_enqueue_deadline_unlocked(
timer_call_t call,
mpqueue_head_t *queue,
uint64_t deadline);
mpqueue_head_t *timer_call_dequeue_unlocked(
timer_call_t call);
/*
 * timer_call_initialize:
 *
 * One-time module initialization: set up the lock group and attributes
 * shared by all timer-call queues, and precompute the absolute-time
 * adjustment applied to timers whose deadline has already passed.
 */
void
timer_call_initialize(void)
{
	/* Group attributes must be set before the group is initialized. */
	lck_grp_attr_setdefault(&timer_call_lck_grp_attr);
	lck_grp_init(&timer_call_lck_grp, "timer_call", &timer_call_lck_grp_attr);
	lck_attr_setdefault(&timer_call_lck_attr);

	nanotime_to_absolutetime(0, PAST_DEADLINE_TIMER_ADJUSTMENT_NS,
	    &past_deadline_timer_adjustment);
}
/*
 * timer_call_initialize_queue:
 *
 * Initialize one timer-call queue, using the module-wide lock group
 * and attributes set up by timer_call_initialize().
 */
void
timer_call_initialize_queue(mpqueue_head_t *queue)
{
	DBG("timer_call_initialize_queue(%p)\n", queue);

	mpqueue_init(queue, &timer_call_lck_grp, &timer_call_lck_attr);
}
/*
 * timer_call_setup:
 *
 * Prepare 'call' for later arming: record the callout function and its
 * first parameter, clear the asynchronous-dequeue flag, and initialize
 * the per-entry lock.  The call starts out on no queue.
 */
void
timer_call_setup(
	timer_call_t		call,
	timer_call_func_t	func,
	timer_call_param_t	param0)
{
	DBG("timer_call_setup(%p,%p,%p)\n", call, func, param0);

	call_entry_setup(CE(call), func, param0);
	call->async_dequeue = FALSE;
	simple_lock_init(&(call)->lock, 0);
}
#if TIMER_ASSERT
/*
 * Remove 'entry' from the queue it currently occupies (TIMER_ASSERT
 * build: panics unless both the entry's lock and the holding queue's
 * lock are held).  Returns the queue the entry was on.
 */
static __inline__ mpqueue_head_t *
timer_call_entry_dequeue(
	timer_call_t		entry)
{
	mpqueue_head_t	*holding_queue = MPQUEUE(CE(entry)->queue);

	if (!hw_lock_held((hw_lock_t)&entry->lock))
		panic("_call_entry_dequeue() "
		      "entry %p is not locked\n", entry);
	if (!hw_lock_held((hw_lock_t)&holding_queue->lock_data))
		panic("_call_entry_dequeue() "
		      "queue %p is not locked\n", holding_queue);

	call_entry_dequeue(CE(entry));

	return holding_queue;
}
/*
 * Place 'entry' on 'queue' ordered by 'deadline' (TIMER_ASSERT build:
 * panics unless the entry lock and target queue lock are held, and
 * unless the entry is idle or already on the target queue).  Returns
 * the queue the entry was previously on (NULL if idle).
 */
static __inline__ mpqueue_head_t *
timer_call_entry_enqueue_deadline(
	timer_call_t		entry,
	mpqueue_head_t		*queue,
	uint64_t		deadline)
{
	mpqueue_head_t	*prior_queue = MPQUEUE(CE(entry)->queue);

	if (!hw_lock_held((hw_lock_t)&entry->lock))
		panic("_call_entry_enqueue_deadline() "
		      "entry %p is not locked\n", entry);
	if (!hw_lock_held((hw_lock_t)&queue->lock_data))
		panic("_call_entry_enqueue_deadline() "
		      "queue %p is not locked\n", queue);
	if (prior_queue != NULL && prior_queue != queue)
		panic("_call_entry_enqueue_deadline() "
		      "old_queue %p != queue", prior_queue);

	call_entry_enqueue_deadline(CE(entry), QUEUE(queue), deadline);

	return prior_queue;
}
#else
/*
 * Remove 'entry' from its current queue (non-assert build: caller is
 * trusted to hold the required locks).  Returns the prior queue.
 */
static __inline__ mpqueue_head_t *
timer_call_entry_dequeue(
	timer_call_t		entry)
{
	call_entry_t	ce = CE(entry);

	return MPQUEUE(call_entry_dequeue(ce));
}
/*
 * Place 'entry' on 'queue' ordered by 'deadline' (non-assert build:
 * caller is trusted to hold the required locks).  Returns the queue
 * the entry was previously on.
 */
static __inline__ mpqueue_head_t *
timer_call_entry_enqueue_deadline(
	timer_call_t		entry,
	mpqueue_head_t		*queue,
	uint64_t		deadline)
{
	call_entry_t	ce = CE(entry);

	return MPQUEUE(call_entry_enqueue_deadline(ce, QUEUE(queue), deadline));
}
#endif
#if TIMER_ASSERT
unsigned timer_call_enqueue_deadline_unlocked_async1;
unsigned timer_call_enqueue_deadline_unlocked_async2;
#endif
/*
 * timer_call_enqueue_deadline_unlocked:
 *
 * Arm 'call' on 'queue' ordered by 'deadline', migrating it off any
 * queue it currently occupies.  Entered with no locks held; the lock
 * order is always the entry lock first, then queue lock(s).
 *
 * Returns the queue the call was previously on, or NULL if it was idle.
 */
__inline__ mpqueue_head_t *
timer_call_enqueue_deadline_unlocked(
	timer_call_t 			call,
	mpqueue_head_t			*queue,
	uint64_t 			deadline)
{
	call_entry_t			entry = CE(call);
	mpqueue_head_t			*old_queue;

	DBG("timer_call_enqueue_deadline_unlocked(%p,%p,)\n", call, queue);

	simple_lock(&call->lock);
	old_queue = MPQUEUE(entry->queue);
	if (old_queue != NULL) {
		timer_call_lock_spin(old_queue);
		if (call->async_dequeue) {
			/*
			 * Another cpu holding the queue lock already unlinked
			 * us (see the remque/async_dequeue=TRUE paths in
			 * timer_queue_shutdown/expire/migrate) but could not
			 * take our entry lock; finish the dequeue for it.
			 */
			call->async_dequeue = FALSE;
			entry->queue = NULL;
#if TIMER_ASSERT
			timer_call_enqueue_deadline_unlocked_async1++;
#endif
		} else if (old_queue != queue) {
			/* Still linked on a different queue: unlink first. */
			(void)remque(qe(entry));
			entry->queue = NULL;
#if TIMER_ASSERT
			timer_call_enqueue_deadline_unlocked_async2++;
#endif
		}
		if (old_queue != queue) {
			/* Drop the old queue's lock before taking the target's. */
			timer_call_unlock(old_queue);
			timer_call_lock_spin(queue);
		}
	} else {
		timer_call_lock_spin(queue);
	}

	timer_call_entry_enqueue_deadline(call, queue, deadline);
	timer_call_unlock(queue);
	simple_unlock(&call->lock);

	return (old_queue);
}
#if TIMER_ASSERT
unsigned timer_call_dequeue_unlocked_async1;
unsigned timer_call_dequeue_unlocked_async2;
#endif
/*
 * timer_call_dequeue_unlocked:
 *
 * Remove 'call' from whatever queue it is on.  Entered with no locks
 * held; takes the entry lock first, then the queue lock (same order as
 * timer_call_enqueue_deadline_unlocked).
 *
 * Returns the queue the call was removed from, or NULL if it was idle.
 */
mpqueue_head_t *
timer_call_dequeue_unlocked(
	timer_call_t 		call)
{
	call_entry_t		entry = CE(call);
	mpqueue_head_t		*old_queue;

	DBG("timer_call_dequeue_unlocked(%p)\n", call);

	simple_lock(&call->lock);
	old_queue = MPQUEUE(entry->queue);
	if (old_queue != NULL) {
		timer_call_lock_spin(old_queue);
		if (call->async_dequeue) {
			/*
			 * Another cpu already unlinked us while it could not
			 * take our entry lock; just acknowledge the flag.
			 */
			call->async_dequeue = FALSE;
#if TIMER_ASSERT
			timer_call_dequeue_unlocked_async1++;
#endif
		} else {
			(void)remque(qe(entry));
#if TIMER_ASSERT
			timer_call_dequeue_unlocked_async2++;
#endif
		}
		entry->queue = NULL;
		timer_call_unlock(old_queue);
	}
	simple_unlock(&call->lock);

	return (old_queue);
}
/*
 * timer_call_enter_internal:
 *
 * Arm 'call' to fire at 'deadline', recording 'param1' as the callout's
 * second argument.  Unless the timer is marked TIMER_CALL_CRITICAL (or
 * coalescing is globally disabled), a slop value is added to the hard
 * deadline so nearby timers can be batched.  On x86, a deadline already
 * in the past is pulled up to "now + past_deadline_timer_adjustment" so
 * the timer still fires promptly, and statistics on late timers are kept.
 *
 * Returns TRUE if the call was previously armed (i.e. it was found on a
 * queue and re-armed), FALSE if it was idle.  Runs at splclock().
 */
static boolean_t
timer_call_enter_internal(
	timer_call_t 		call,
	timer_call_param_t	param1,
	uint64_t 		deadline,
	uint32_t 		flags)
{
	mpqueue_head_t		*queue;
	mpqueue_head_t		*old_queue;
	spl_t			s;
	uint64_t 		slop = 0;

	s = splclock();

	/* The soft deadline is the caller's requested fire time. */
	call->soft_deadline = deadline;
	call->flags = flags;

	/* Only non-critical timers are eligible for coalescing slop. */
	if ((flags & TIMER_CALL_CRITICAL) == 0 &&
	    mach_timer_coalescing_enabled) {
		slop = timer_call_slop(deadline);
		deadline += slop;
	}

#if defined(__i386__) || defined(__x86_64__)
	uint64_t ctime = mach_absolute_time();
	if (__improbable(deadline < ctime)) {
		uint64_t delta = (ctime - deadline);

		/* Track how often, and by how much, deadlines are missed. */
		past_deadline_timers++;
		past_deadline_deltas += delta;
		if (delta > past_deadline_longest) {
			/*
			 * Record the delta itself (previously this stored
			 * 'deadline', making the statistic meaningless).
			 */
			past_deadline_longest = delta;
		}
		if (delta < past_deadline_shortest) {
			past_deadline_shortest = delta;
		}

		/* Push the deadline slightly into the future. */
		deadline = ctime + past_deadline_timer_adjustment;
		call->soft_deadline = deadline;
	}
#endif
	queue = timer_queue_assign(deadline);

	old_queue = timer_call_enqueue_deadline_unlocked(call, queue, deadline);

	CE(call)->param1 = param1;

	splx(s);

	return (old_queue != NULL);
}
/*
 * timer_call_enter:
 *
 * Arm 'call' to fire at 'deadline' with a NULL second parameter.
 * Returns TRUE if the call was already armed.
 */
boolean_t
timer_call_enter(
	timer_call_t		call,
	uint64_t		deadline,
	uint32_t		flags)
{
	boolean_t	was_armed;

	was_armed = timer_call_enter_internal(call, NULL, deadline, flags);
	return was_armed;
}
/*
 * timer_call_enter1:
 *
 * Arm 'call' to fire at 'deadline', passing 'param1' as the callout's
 * second argument.  Returns TRUE if the call was already armed.
 */
boolean_t
timer_call_enter1(
	timer_call_t		call,
	timer_call_param_t	param1,
	uint64_t		deadline,
	uint32_t		flags)
{
	boolean_t	was_armed;

	was_armed = timer_call_enter_internal(call, param1, deadline, flags);
	return was_armed;
}
/*
 * timer_call_cancel:
 *
 * Disarm 'call'.  If it was queued, notify the timer subsystem via
 * timer_queue_cancel() with the cancelled deadline and the earliest
 * deadline still pending on that queue (UINT64_MAX if none remain).
 *
 * Returns TRUE if the call was actually armed.  Runs at splclock().
 */
boolean_t
timer_call_cancel(
	timer_call_t		call)
{
	mpqueue_head_t		*old_queue;
	spl_t			s;

	s = splclock();

	old_queue = timer_call_dequeue_unlocked(call);

	if (old_queue != NULL) {
		timer_call_lock_spin(old_queue);
		if (!queue_empty(&old_queue->head))
			timer_queue_cancel(old_queue, CE(call)->deadline, CE(queue_first(&old_queue->head))->deadline);
		else
			timer_queue_cancel(old_queue, CE(call)->deadline, UINT64_MAX);
		timer_call_unlock(old_queue);
	}

	splx(s);

	return (old_queue != NULL);
}
uint32_t timer_queue_shutdown_lock_skips;
/*
 * timer_queue_shutdown:
 *
 * Drain 'queue' by re-assigning every pending timer call to whichever
 * queue timer_queue_assign() currently selects for its deadline.
 * Runs at splclock().
 */
void
timer_queue_shutdown(
	mpqueue_head_t		*queue)
{
	timer_call_t		call;
	mpqueue_head_t		*new_queue;
	spl_t			s;

	DBG("timer_queue_shutdown(%p)\n", queue);

	s = splclock();

	/* Comma operator: the queue lock is (re)taken on every iteration. */
	while (timer_call_lock_spin(queue), !queue_empty(&queue->head)) {
		call = TIMER_CALL(queue_first(&queue->head));
		if (!simple_lock_try(&call->lock)) {
			/*
			 * The normal lock order is entry lock then queue
			 * lock; we hold only the queue lock, so trylock can
			 * fail.  Unlink the entry now and let the entry-lock
			 * holder finish the dequeue via async_dequeue.
			 */
			timer_queue_shutdown_lock_skips++;
			(void) remque(qe(call));
			call->async_dequeue = TRUE;
			timer_call_unlock(queue);
			continue;
		}
		/* Move the call to the queue now serving its deadline. */
		timer_call_entry_dequeue(call);
		timer_call_unlock(queue);
		new_queue = timer_queue_assign(CE(call)->deadline);
		timer_call_lock_spin(new_queue);
		timer_call_entry_enqueue_deadline(
			call, new_queue, CE(call)->deadline);
		timer_call_unlock(new_queue);
		simple_unlock(&call->lock);
	}

	timer_call_unlock(queue);
	splx(s);
}
uint32_t timer_queue_expire_lock_skips;
/*
 * timer_queue_expire:
 *
 * Run every callout on 'queue' whose soft deadline is at or before
 * 'deadline'.  Each callout is invoked with both the entry lock and the
 * queue lock dropped.  Returns the hard deadline of the next pending
 * timer on the queue, or UINT64_MAX if the queue is left empty.
 */
uint64_t
timer_queue_expire(
	mpqueue_head_t		*queue,
	uint64_t		deadline)
{
	timer_call_t		call;

	DBG("timer_queue_expire(%p,)\n", queue);

	timer_call_lock_spin(queue);

	while (!queue_empty(&queue->head)) {
		call = TIMER_CALL(queue_first(&queue->head));

		if (call->soft_deadline <= deadline) {
			timer_call_func_t	func;
			timer_call_param_t	param0, param1;

			if (!simple_lock_try(&call->lock)) {
				/*
				 * Entry lock held elsewhere (lock order is
				 * entry then queue): unlink here, let the
				 * holder finish the dequeue via the
				 * async_dequeue flag, and skip this callout.
				 */
				timer_queue_expire_lock_skips++;
				(void) remque(qe(call));
				call->async_dequeue = TRUE;
				continue;
			}

			timer_call_entry_dequeue(call);

			/* Snapshot the callout before dropping all locks. */
			func = CE(call)->func;
			param0 = CE(call)->param0;
			param1 = CE(call)->param1;

			simple_unlock(&call->lock);
			timer_call_unlock(queue);

			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
				DECR_TIMER_CALLOUT | DBG_FUNC_START,
				VM_KERNEL_UNSLIDE(func), param0, param1, 0, 0);

#if CONFIG_DTRACE && (DEVELOPMENT || DEBUG )
			DTRACE_TMR3(callout__start, timer_call_func_t, func,
				timer_call_param_t, param0,
				timer_call_param_t, param1);
#endif

			/* Invoke the callout with no locks held. */
			(*func)(param0, param1);

#if CONFIG_DTRACE && (DEVELOPMENT || DEBUG )
			DTRACE_TMR3(callout__end, timer_call_func_t, func,
				timer_call_param_t, param0,
				timer_call_param_t, param1);
#endif

			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
				DECR_TIMER_CALLOUT | DBG_FUNC_END,
				VM_KERNEL_UNSLIDE(func), param0, param1, 0, 0);

			timer_call_lock_spin(queue);
		}
		else
			break;
	}

	/*
	 * If the queue is non-empty here, the loop exited via the break
	 * above, so 'call' still references the first (future) entry.
	 */
	if (!queue_empty(&queue->head))
		deadline = CE(call)->deadline;
	else
		deadline = UINT64_MAX;

	timer_call_unlock(queue);

	return (deadline);
}
extern int serverperfmode;
uint32_t timer_queue_migrate_lock_skips;
/*
 * timer_queue_migrate:
 *
 * Move all timer calls from 'queue_from' to 'queue_to', provided no
 * entry is marked TIMER_CALL_LOCAL and the earliest migrating deadline
 * is no sooner than the head of 'queue_to'.
 *
 * Returns the number of timers migrated, or a negative code:
 *   -1  target queue is empty
 *   -2  source queue is empty, nothing to do
 *   -3  a TIMER_CALL_LOCAL timer is present; migration refused
 *   -4  migration disabled (serverperfmode)
 *
 * Called with interrupts disabled (asserted below).
 */
int
timer_queue_migrate(mpqueue_head_t *queue_from, mpqueue_head_t *queue_to)
{
	timer_call_t	call;
	timer_call_t	head_to;
	int		timers_migrated = 0;

	DBG("timer_queue_migrate(%p,%p)\n", queue_from, queue_to);

	assert(!ml_get_interrupts_enabled());
	assert(queue_from != queue_to);

	if (serverperfmode) {
		/* Migration disallowed in server performance mode. */
		return -4;
	}

	timer_call_lock_spin(queue_to);

	/* queue_first() of an empty queue is the head itself; check next. */
	head_to = TIMER_CALL(queue_first(&queue_to->head));
	if (queue_empty(&queue_to->head)) {
		timers_migrated = -1;
		goto abort1;
	}

	timer_call_lock_spin(queue_from);

	if (queue_empty(&queue_from->head)) {
		timers_migrated = -2;
		goto abort2;
	}

	call = TIMER_CALL(queue_first(&queue_from->head));
	if (CE(call)->deadline < CE(head_to)->deadline) {
		/* Source head fires before target head; abort with 0. */
		timers_migrated = 0;
		goto abort2;
	}

	/* First pass: refuse the whole migration if any timer is local. */
	do {
		if (call->flags & TIMER_CALL_LOCAL) {
			timers_migrated = -3;
			goto abort2;
		}
		call = TIMER_CALL(queue_next(qe(call)));
	} while (!queue_end(&queue_from->head, qe(call)));

	/* Second pass: move each entry onto the target queue. */
	while (!queue_empty(&queue_from->head)) {
		call = TIMER_CALL(queue_first(&queue_from->head));
		if (!simple_lock_try(&call->lock)) {
			/*
			 * Entry lock held elsewhere: unlink here, let the
			 * holder finish the dequeue via async_dequeue.
			 * This entry is not migrated or counted.
			 */
			timer_queue_migrate_lock_skips++;
			(void) remque(qe(call));
			call->async_dequeue = TRUE;
			continue;
		}
		timer_call_entry_dequeue(call);
		timer_call_entry_enqueue_deadline(
			call, queue_to, CE(call)->deadline);
		timers_migrated++;
		simple_unlock(&call->lock);
	}

abort2:
	timer_call_unlock(queue_from);
abort1:
	timer_call_unlock(queue_to);

	return timers_migrated;
}