#include "internal.h"
#pragma mark unote generic functions
static void _dispatch_timer_unote_register(dispatch_timer_source_refs_t dt,
dispatch_wlh_t wlh, dispatch_priority_t pri);
static void _dispatch_timer_unote_resume(dispatch_timer_source_refs_t dt);
static void _dispatch_timer_unote_unregister(dispatch_timer_source_refs_t dt);
DISPATCH_NOINLINE
static dispatch_unote_t
_dispatch_unote_create(dispatch_source_type_t dst,
		uintptr_t handle, unsigned long mask)
{
	// Allocate and initialize a unote of type `dst`.
	//
	// Returns DISPATCH_UNOTE_NULL when `mask` carries bits the type does
	// not support, or when the type requires a mask but none was given.
	dispatch_unote_class_t du;
	dispatch_unote_linkage_t dul;
	const bool is_direct = (dst->dst_flags & EV_UDATA_SPECIFIC) != 0;

	if (mask & ~dst->dst_mask) {
		return DISPATCH_UNOTE_NULL;
	}
	if (dst->dst_mask && !mask) {
		return DISPATCH_UNOTE_NULL;
	}
	if (is_direct) {
		// Direct knotes carry no muxing linkage.
		du = _dispatch_calloc(1u, dst->dst_size);
	} else {
		// Muxed unotes are allocated with their linkage prepended;
		// _dispatch_unote_dispose() frees from the linkage.
		dul = _dispatch_calloc(1u, sizeof(*dul) + dst->dst_size);
		du = _dispatch_unote_linkage_get_unote(dul)._du;
	}
	du->du_type = dst;
	du->du_can_be_wlh = dst->dst_per_trigger_qos;
	du->du_ident = (uint32_t)handle;
	du->du_filter = dst->dst_filter;
	du->du_fflags = (typeof(du->du_fflags))mask;
	if (is_direct) {
		du->du_is_direct = true;
	}
	return (dispatch_unote_t){ ._du = du };
}
DISPATCH_NOINLINE
dispatch_unote_t
_dispatch_unote_create_with_handle(dispatch_source_type_t dst,
		uintptr_t handle, unsigned long mask)
{
	// Creation wrapper for source types whose handle must be non-zero.
	if (handle != 0) {
		return _dispatch_unote_create(dst, handle, mask);
	}
	return DISPATCH_UNOTE_NULL;
}
DISPATCH_NOINLINE
dispatch_unote_t
_dispatch_unote_create_with_fd(dispatch_source_type_t dst,
uintptr_t handle, unsigned long mask)
{
// Creation wrapper for source types whose handle is a file descriptor.
#if !TARGET_OS_MAC // <rdar://problem/27756657>
// On non-Mac platforms, reject values that cannot be a valid fd
// (fds are ints; du_ident truncates to 32 bits below).
if (handle > INT_MAX) {
return DISPATCH_UNOTE_NULL;
}
#endif
return _dispatch_unote_create(dst, handle, mask);
}
DISPATCH_NOINLINE
dispatch_unote_t
_dispatch_unote_create_without_handle(dispatch_source_type_t dst,
		uintptr_t handle, unsigned long mask)
{
	// Creation wrapper for source types that take no handle:
	// any non-zero handle is a usage error.
	if (handle == 0) {
		return _dispatch_unote_create(dst, handle, mask);
	}
	return DISPATCH_UNOTE_NULL;
}
DISPATCH_NOINLINE
void
_dispatch_unote_dispose(dispatch_unote_t du)
{
// Free a unote's backing allocation (and, on mach, release a block
// handler context). The unote must already be unregistered.
void *ptr = du._du;
#if HAVE_MACH
if (du._du->dmrr_handler_is_block) {
Block_release(du._dmrr->dmrr_handler_ctxt);
}
#endif
if (du._du->du_is_timer) {
// A timer still linked into a heap would leave dangling slots behind.
if (unlikely(du._dt->dt_heap_entry[DTH_TARGET_ID] != DTH_INVALID_ID ||
du._dt->dt_heap_entry[DTH_DEADLINE_ID] != DTH_INVALID_ID)) {
DISPATCH_INTERNAL_CRASH(0, "Disposing of timer still in its heap");
}
// Drop any configuration that was queued but never applied.
if (unlikely(du._dt->dt_pending_config)) {
free(du._dt->dt_pending_config);
du._dt->dt_pending_config = NULL;
}
} else if (!du._du->du_is_direct) {
// Muxed unotes were allocated with their linkage prepended
// (see _dispatch_unote_create): free from the linkage.
ptr = _dispatch_unote_get_linkage(du);
}
free(ptr);
}
bool
_dispatch_unote_register(dispatch_unote_t du, dispatch_wlh_t wlh,
dispatch_priority_t pri)
{
// Register `du` with the event loop, binding it to `wlh` at priority
// `pri`. Returns true when the registration took effect immediately
// (always the case for custom filters and timers).
dispatch_assert(du._du->du_is_timer || !_dispatch_unote_registered(du));
dispatch_priority_t masked_pri;
// Only these bits are relevant to the sanity checks below; other bits
// (e.g. override bits) are ignored for this purpose.
masked_pri = pri & (DISPATCH_PRIORITY_FLAG_MANAGER |
DISPATCH_PRIORITY_FLAG_FALLBACK |
DISPATCH_PRIORITY_FLAG_FLOOR |
DISPATCH_PRIORITY_FALLBACK_QOS_MASK |
DISPATCH_PRIORITY_REQUESTED_MASK);
// A non-anonymous wlh must come with a usable priority.
dispatch_assert(wlh == DISPATCH_WLH_ANON || masked_pri);
// Perf-trace sources registered without any explicit QoS.
if (masked_pri == _dispatch_priority_make_fallback(DISPATCH_QOS_DEFAULT)) {
_dispatch_ktrace1(DISPATCH_PERF_source_registration_without_qos,
_dispatch_wref2ptr(du._du->du_owner_wref));
}
du._du->du_priority = pri;
switch (du._du->du_filter) {
case DISPATCH_EVFILT_CUSTOM_ADD:
case DISPATCH_EVFILT_CUSTOM_OR:
case DISPATCH_EVFILT_CUSTOM_REPLACE:
// Custom filters never reach the kernel: just mark them armed.
_dispatch_unote_state_set(du, DISPATCH_WLH_ANON, DU_STATE_ARMED);
return true;
}
if (du._du->du_is_timer) {
_dispatch_timer_unote_register(du._dt, wlh, pri);
return true;
}
#if DISPATCH_HAVE_DIRECT_KNOTES
if (du._du->du_is_direct) {
return _dispatch_unote_register_direct(du, wlh);
}
#endif
return _dispatch_unote_register_muxed(du);
}
void
_dispatch_unote_resume(dispatch_unote_t du)
{
// Re-arm a registered unote after its event was handled,
// dispatching on the unote kind (timer / direct / muxed).
dispatch_assert(du._du->du_is_timer || _dispatch_unote_needs_rearm(du));
if (du._du->du_is_timer) {
_dispatch_timer_unote_resume(du._dt);
#if DISPATCH_HAVE_DIRECT_KNOTES
} else if (du._du->du_is_direct) {
_dispatch_unote_resume_direct(du);
#endif
} else {
_dispatch_unote_resume_muxed(du);
}
}
bool
_dispatch_unote_unregister(dispatch_unote_t du, uint32_t flags)
{
// Tear down a registration. Returns true when the unote is fully
// unregistered on return (unregistering a direct knote can be
// asynchronous depending on `flags`).
if (!_dispatch_unote_registered(du)) {
return true;
}
switch (du._du->du_filter) {
case DISPATCH_EVFILT_CUSTOM_ADD:
case DISPATCH_EVFILT_CUSTOM_OR:
case DISPATCH_EVFILT_CUSTOM_REPLACE:
// Custom filters have no kernel state to delete.
_dispatch_unote_state_set(du, DU_STATE_UNREGISTERED);
return true;
}
if (du._du->du_is_timer) {
_dispatch_timer_unote_unregister(du._dt);
return true;
}
#if DISPATCH_HAVE_DIRECT_KNOTES
if (du._du->du_is_direct) {
return _dispatch_unote_unregister_direct(du, flags);
}
#endif
// Muxed unregistration always deletes; the caller must allow it.
dispatch_assert(flags & DUU_DELETE_ACK);
return _dispatch_unote_unregister_muxed(du);
}
#pragma mark data or / add
static dispatch_unote_t
_dispatch_source_data_create(dispatch_source_type_t dst, uintptr_t handle,
		unsigned long mask)
{
	// Data sources (DATA_ADD / DATA_OR / DATA_REPLACE) accept neither a
	// handle nor a mask.
	if (handle != 0 || mask != 0) {
		return DISPATCH_UNOTE_NULL;
	}
	// No linkage: data sources never go through the kernel muxer.
	dispatch_unote_class_t ref = _dispatch_calloc(1u, dst->dst_size);
	ref->du_type = dst;
	ref->du_filter = dst->dst_filter;
	ref->du_is_direct = true;
	return (dispatch_unote_t){ ._du = ref };
}
// dispatch_source_merge_data() accumulates with saturating addition.
const dispatch_source_type_s _dispatch_source_type_data_add = {
.dst_kind = "data-add",
.dst_filter = DISPATCH_EVFILT_CUSTOM_ADD,
.dst_flags = EV_UDATA_SPECIFIC|EV_CLEAR,
.dst_action = DISPATCH_UNOTE_ACTION_PASS_DATA,
.dst_size = sizeof(struct dispatch_source_refs_s),
.dst_strict = false,
.dst_create = _dispatch_source_data_create,
.dst_merge_evt = NULL,
};
// dispatch_source_merge_data() accumulates with bitwise OR.
const dispatch_source_type_s _dispatch_source_type_data_or = {
.dst_kind = "data-or",
.dst_filter = DISPATCH_EVFILT_CUSTOM_OR,
.dst_flags = EV_UDATA_SPECIFIC|EV_CLEAR,
.dst_action = DISPATCH_UNOTE_ACTION_PASS_DATA,
.dst_size = sizeof(struct dispatch_source_refs_s),
.dst_strict = false,
.dst_create = _dispatch_source_data_create,
.dst_merge_evt = NULL,
};
// dispatch_source_merge_data() replaces the pending value outright.
const dispatch_source_type_s _dispatch_source_type_data_replace = {
.dst_kind = "data-replace",
.dst_filter = DISPATCH_EVFILT_CUSTOM_REPLACE,
.dst_flags = EV_UDATA_SPECIFIC|EV_CLEAR,
.dst_action = DISPATCH_UNOTE_ACTION_PASS_DATA,
.dst_size = sizeof(struct dispatch_source_refs_s),
.dst_strict = false,
.dst_create = _dispatch_source_data_create,
.dst_merge_evt = NULL,
};
#pragma mark file descriptors
// DISPATCH_SOURCE_TYPE_READ: fd readability; data is the estimated number
// of readable bytes as reported by the event backend.
const dispatch_source_type_s _dispatch_source_type_read = {
.dst_kind = "read",
.dst_filter = EVFILT_READ,
.dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED,
#if DISPATCH_EVENT_BACKEND_KEVENT
#if HAVE_DECL_NOTE_LOWAT
.dst_fflags = NOTE_LOWAT,
#endif
.dst_data = 1, // low-water mark: deliver as soon as 1 byte is readable
#endif // DISPATCH_EVENT_BACKEND_KEVENT
.dst_action = DISPATCH_UNOTE_ACTION_SOURCE_SET_DATA,
.dst_size = sizeof(struct dispatch_source_refs_s),
.dst_strict = false,
.dst_create = _dispatch_unote_create_with_fd,
.dst_merge_evt = _dispatch_source_merge_evt,
};
// DISPATCH_SOURCE_TYPE_WRITE: fd writability; mirrors the read type.
const dispatch_source_type_s _dispatch_source_type_write = {
.dst_kind = "write",
.dst_filter = EVFILT_WRITE,
.dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED,
#if DISPATCH_EVENT_BACKEND_KEVENT
#if HAVE_DECL_NOTE_LOWAT
.dst_fflags = NOTE_LOWAT,
#endif
.dst_data = 1, // low-water mark: deliver as soon as 1 byte can be written
#endif // DISPATCH_EVENT_BACKEND_KEVENT
.dst_action = DISPATCH_UNOTE_ACTION_SOURCE_SET_DATA,
.dst_size = sizeof(struct dispatch_source_refs_s),
.dst_strict = false,
.dst_create = _dispatch_unote_create_with_fd,
.dst_merge_evt = _dispatch_source_merge_evt,
};
#pragma mark signals
static dispatch_unote_t
_dispatch_source_signal_create(dispatch_source_type_t dst, uintptr_t handle,
		unsigned long mask)
{
	// The handle is a signal number and must be a valid one (< NSIG);
	// signal 0 is further rejected by _dispatch_unote_create_with_handle.
	if (handle < NSIG) {
		return _dispatch_unote_create_with_handle(dst, handle, mask);
	}
	return DISPATCH_UNOTE_NULL;
}
// DISPATCH_SOURCE_TYPE_SIGNAL: data accumulates the number of signal
// deliveries since the last handler invocation (ADD_DATA action).
const dispatch_source_type_s _dispatch_source_type_signal = {
.dst_kind = "signal",
.dst_filter = EVFILT_SIGNAL,
.dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR,
.dst_action = DISPATCH_UNOTE_ACTION_SOURCE_ADD_DATA,
.dst_size = sizeof(struct dispatch_source_refs_s),
.dst_strict = false,
.dst_create = _dispatch_source_signal_create,
.dst_merge_evt = _dispatch_source_merge_evt,
};
#pragma mark -
#pragma mark timer globals
// Global timer heaps, one per (clock, qos) timer bucket. Used for timers
// bound to DISPATCH_WLH_ANON (see _dispatch_timer_unote_heap()).
DISPATCH_GLOBAL(struct dispatch_timer_heap_s
_dispatch_timers_heap[DISPATCH_TIMER_COUNT]);
#if DISPATCH_USE_DTRACE
// Last timer programmed per QoS bucket, kept only for dtrace wake tracing.
DISPATCH_STATIC_GLOBAL(dispatch_timer_source_refs_t
_dispatch_trace_next_timer[DISPATCH_TIMER_QOS_COUNT]);
#define _dispatch_trace_next_timer_set(x, q) \
_dispatch_trace_next_timer[(q)] = (x)
#define _dispatch_trace_next_timer_program(d, q) \
_dispatch_trace_timer_program(_dispatch_trace_next_timer[(q)], (d))
#else
// No-op stubs when dtrace support is compiled out.
#define _dispatch_trace_next_timer_set(x, q)
#define _dispatch_trace_next_timer_program(d, q)
#endif
#pragma mark timer heap
// Size of the first allocated heap segment, in slots.
#define DISPATCH_HEAP_INIT_SEGMENT_CAPACITY 8u
// Heap indices interleave DTH_ID_COUNT logical heaps (target/deadline):
// the low bits of an index select which heap, the remaining bits encode
// the position within that heap.
#define DTH_HEAP_ID_MASK (DTH_ID_COUNT - 1)
#define DTH_HEAP_ID(idx) ((idx) & DTH_HEAP_ID_MASK)
#define DTH_IDX_FOR_HEAP_ID(idx, heap_id) \
(((idx) & ~DTH_HEAP_ID_MASK) | (heap_id))
DISPATCH_ALWAYS_INLINE
static inline uint32_t
_dispatch_timer_heap_capacity(uint32_t segments)
{
// Number of slots usable with `segments` allocated segments.
// With no segments, only the inline dth_min slots exist. Segment sizes
// are INIT, INIT, 2*INIT, 4*INIT, ...; the newest segment reserves its
// last (segments - 1) slots as backpointers to the older segments
// (see _dispatch_timer_heap_grow), hence the `- seg_no` term.
if (segments == 0) return 2;
uint32_t seg_no = segments - 1;
return 2 + (DISPATCH_HEAP_INIT_SEGMENT_CAPACITY << seg_no) - seg_no;
}
static void
_dispatch_timer_heap_grow(dispatch_timer_heap_t dth)
{
// Append a new segment. Each new segment holds as many entries as all
// previous segments combined (sizes INIT, INIT, 2*INIT, 4*INIT, ...).
// Older segments remain live and in place; the tail of the newest
// segment stores backpointers to every older one so that
// _dispatch_timer_heap_get_slot() and _shrink() can reach them.
uint32_t seg_capacity = DISPATCH_HEAP_INIT_SEGMENT_CAPACITY;
uint32_t seg_no = dth->dth_segments++;
void **heap, **heap_prev = dth->dth_heap;
if (seg_no > 0) {
seg_capacity <<= (seg_no - 1);
}
heap = _dispatch_calloc(seg_capacity, sizeof(void *));
if (seg_no > 1) {
// Carry forward the backpointers to all segments before the
// previous one; they were stored in the previous segment's tail.
uint32_t prev_seg_no = seg_no - 1;
uint32_t prev_seg_capacity = seg_capacity >> 1;
memcpy(&heap[seg_capacity - prev_seg_no],
&heap_prev[prev_seg_capacity - prev_seg_no],
prev_seg_no * sizeof(void *));
}
if (seg_no > 0) {
// Record the previous segment itself as the newest backpointer.
heap[seg_capacity - seg_no] = heap_prev;
}
dth->dth_heap = heap;
}
static void
_dispatch_timer_heap_shrink(dispatch_timer_heap_t dth)
{
// Drop the newest segment (the inverse of _dispatch_timer_heap_grow).
// Only called once the live entries fit within the smaller capacity,
// so restoring backpointers into the previous segment's tail cannot
// clobber live slots.
uint32_t seg_capacity = DISPATCH_HEAP_INIT_SEGMENT_CAPACITY;
uint32_t seg_no = --dth->dth_segments;
void **heap = dth->dth_heap, **heap_prev = NULL;
if (seg_no > 0) {
seg_capacity <<= (seg_no - 1);
heap_prev = heap[seg_capacity - seg_no];
}
if (seg_no > 1) {
// Move the backpointers to the older segments back into the tail
// of the segment that becomes current again.
uint32_t prev_seg_no = seg_no - 1;
uint32_t prev_seg_capacity = seg_capacity >> 1;
memcpy(&heap_prev[prev_seg_capacity - prev_seg_no],
&heap[seg_capacity - prev_seg_no],
prev_seg_no * sizeof(void *));
}
dth->dth_heap = heap_prev;
free(heap);
}
DISPATCH_ALWAYS_INLINE
static inline dispatch_timer_source_refs_t *
_dispatch_timer_heap_get_slot(dispatch_timer_heap_t dth, uint32_t idx)
{
// Map a heap index to the address of its slot. The first DTH_ID_COUNT
// indices live in the inline dth_min array; the rest live in segments
// of sizes INIT, INIT, 2*INIT, 4*INIT, ...
uint32_t seg_no, segments = dth->dth_segments;
void **segment;
if (idx < DTH_ID_COUNT) {
return &dth->dth_min[idx];
}
idx -= DTH_ID_COUNT;
// Derive the segment number from the index's magnitude: indices
// [0, INIT) are in segment 0, [INIT, 2*INIT) in segment 1,
// [2*INIT, 4*INIT) in segment 2, etc.
seg_no = (uint32_t)(__builtin_clz(DISPATCH_HEAP_INIT_SEGMENT_CAPACITY - 1) -
__builtin_clz(idx | (DISPATCH_HEAP_INIT_SEGMENT_CAPACITY - 1)));
if (seg_no + 1 == segments) {
// The wanted segment is the current (newest) one.
segment = dth->dth_heap;
} else {
// Older segments are reached through the backpointers stored in
// the tail of the current segment (see _dispatch_timer_heap_grow).
uint32_t seg_capacity = DISPATCH_HEAP_INIT_SEGMENT_CAPACITY;
seg_capacity <<= (segments - 2);
segment = dth->dth_heap[seg_capacity - seg_no - 1];
}
if (seg_no) {
// Rebase the index to the start of its segment.
idx -= DISPATCH_HEAP_INIT_SEGMENT_CAPACITY << (seg_no - 1);
}
return (dispatch_timer_source_refs_t *)(segment + idx);
}
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_timer_heap_set(dispatch_timer_heap_t dth,
dispatch_timer_source_refs_t *slot,
dispatch_timer_source_refs_t dt, uint32_t idx)
{
// Store `dt` into `slot` and record `idx` in the unote's heap entry.
// Writing a root slot (dth_min) changes the earliest target/deadline,
// so the kernel timer will need reprogramming.
if (idx < DTH_ID_COUNT) {
dth->dth_needs_program = true;
}
*slot = dt;
dt->dt_heap_entry[DTH_HEAP_ID(idx)] = idx;
}
DISPATCH_ALWAYS_INLINE
static inline uint32_t
_dispatch_timer_heap_parent(uint32_t idx)
{
	// Indices interleave DTH_ID_COUNT heaps: strip the heap id, step to
	// the parent of the logical binary-heap position, re-apply the id.
	uint32_t heap_id = DTH_HEAP_ID(idx);
	uint32_t parent = (idx - DTH_ID_COUNT) / 2;
	return DTH_IDX_FOR_HEAP_ID(parent, heap_id);
}
DISPATCH_ALWAYS_INLINE
static inline uint32_t
_dispatch_timer_heap_left_child(uint32_t idx)
{
	// Inverse of _dispatch_timer_heap_parent: strip the heap id, go to
	// the left child of the logical position, re-apply the id.
	// 2 * (idx - heap_id) + DTH_ID_COUNT + heap_id
	//   == 2 * idx + DTH_ID_COUNT - heap_id (the original expression).
	uint32_t heap_id = DTH_HEAP_ID(idx);
	uint32_t base = idx - heap_id;
	return 2 * base + DTH_ID_COUNT + heap_id;
}
#if DISPATCH_HAVE_TIMER_COALESCING
DISPATCH_ALWAYS_INLINE
static inline uint32_t
_dispatch_timer_heap_walk_skip(uint32_t idx, uint32_t count)
{
// During a pre-order walk of one of the interleaved heaps, skip the
// subtree rooted at `idx` and return the next index to visit, or
// UINT32_MAX when the walk is done.
uint32_t heap_id = DTH_HEAP_ID(idx);
idx -= heap_id;
if (unlikely(idx + DTH_ID_COUNT == count)) {
// `idx` is the very last node: resume from its parent's successor.
idx = _dispatch_timer_heap_parent(idx);
}
dispatch_static_assert(powerof2(DTH_ID_COUNT));
// Bit trick: walk up while `idx` is a right child, then step to the
// sibling on the right; a result of 0 means the walk fell off the root.
idx += DTH_ID_COUNT + DTH_ID_COUNT - 1;
idx >>= __builtin_ctz(~idx);
return likely(idx) ? DTH_ID_COUNT * idx + heap_id : UINT32_MAX;
}
DISPATCH_ALWAYS_INLINE
static inline uint32_t
_dispatch_timer_heap_walk_next(uint32_t idx, uint32_t count)
{
// Pre-order successor of `idx`: descend into the left child when one
// exists, otherwise skip past the (leaf) subtree.
uint32_t lchild = _dispatch_timer_heap_left_child(idx);
if (lchild < count) {
return lchild;
}
return _dispatch_timer_heap_walk_skip(idx, count);
}
static uint64_t
_dispatch_timer_heap_max_target_before(dispatch_timer_heap_t dth, uint64_t limit)
{
// Return the latest timer target that is still <= `limit` (used to pick
// a coalesced fire time). Walks the target heap, pruning any subtree
// whose root is past `limit` — heap order guarantees its descendants
// are no earlier.
dispatch_timer_source_refs_t dri;
uint32_t idx = _dispatch_timer_heap_left_child(DTH_TARGET_ID);
uint32_t count = dth->dth_count;
// Start from the heap minimum, which is <= limit by construction at
// the call site.
uint64_t tmp, target = dth->dth_min[DTH_TARGET_ID]->dt_timer.target;
while (idx < count) {
dri = *_dispatch_timer_heap_get_slot(dth, idx);
tmp = dri->dt_timer.target;
if (tmp > limit) {
idx = _dispatch_timer_heap_walk_skip(idx, count);
} else {
target = tmp;
idx = _dispatch_timer_heap_walk_next(idx, count);
}
}
return target;
}
#endif // DISPATCH_HAVE_TIMER_COALESCING
static void
_dispatch_timer_heap_resift(dispatch_timer_heap_t dth,
dispatch_timer_source_refs_t dt, uint32_t idx)
{
// Restore the heap invariant for `dt`, whose key may have changed while
// logically occupying slot `idx`: first try to sift up toward the root;
// if it did not move up, sift down toward the leaves. The final
// position is written exactly once, at `done:`.
//
// These asserts let dt_timer.heap_key[heap_id] alias target/deadline so
// a single comparison macro serves both interleaved heaps.
dispatch_static_assert(offsetof(struct dispatch_timer_source_s, target) ==
offsetof(struct dispatch_timer_source_s, heap_key[DTH_TARGET_ID]));
dispatch_static_assert(offsetof(struct dispatch_timer_source_s, deadline) ==
offsetof(struct dispatch_timer_source_s, heap_key[DTH_DEADLINE_ID]));
#define dth_cmp(hid, dt1, op, dt2) \
(((dt1)->dt_timer.heap_key)[hid] op ((dt2)->dt_timer.heap_key)[hid])
dispatch_timer_source_refs_t *pslot, pdt;
dispatch_timer_source_refs_t *cslot, cdt;
dispatch_timer_source_refs_t *rslot, rdt;
uint32_t cidx, dth_count = dth->dth_count;
dispatch_timer_source_refs_t *slot;
int heap_id = DTH_HEAP_ID(idx);
bool sifted_up = false;
slot = _dispatch_timer_heap_get_slot(dth, idx);
// Sift up: pull parents down while they compare greater than `dt`.
while (idx >= DTH_ID_COUNT) {
uint32_t pidx = _dispatch_timer_heap_parent(idx);
pslot = _dispatch_timer_heap_get_slot(dth, pidx);
pdt = *pslot;
if (dth_cmp(heap_id, pdt, <=, dt)) {
break;
}
_dispatch_timer_heap_set(dth, slot, pdt, idx);
slot = pslot;
idx = pidx;
sifted_up = true;
}
if (sifted_up) {
goto done;
}
// Sift down: pull the smaller child up while it compares less than `dt`.
while ((cidx = _dispatch_timer_heap_left_child(idx)) < dth_count) {
uint32_t ridx = cidx + DTH_ID_COUNT;
cslot = _dispatch_timer_heap_get_slot(dth, cidx);
cdt = *cslot;
if (ridx < dth_count) {
rslot = _dispatch_timer_heap_get_slot(dth, ridx);
rdt = *rslot;
if (dth_cmp(heap_id, cdt, >, rdt)) {
cidx = ridx;
cdt = rdt;
cslot = rslot;
}
}
if (dth_cmp(heap_id, dt, <=, cdt)) {
break;
}
_dispatch_timer_heap_set(dth, slot, cdt, idx);
slot = cslot;
idx = cidx;
}
done:
_dispatch_timer_heap_set(dth, slot, dt, idx);
#undef dth_cmp
}
DISPATCH_ALWAYS_INLINE
static void
_dispatch_timer_heap_insert(dispatch_timer_heap_t dth,
dispatch_timer_source_refs_t dt)
{
// Insert `dt` into both interleaved heaps (target and deadline).
// The unote must not currently be in any heap.
uint32_t idx = (dth->dth_count += DTH_ID_COUNT) - DTH_ID_COUNT;
DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_TARGET_ID], ==,
DTH_INVALID_ID, "target idx");
DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_DEADLINE_ID], ==,
DTH_INVALID_ID, "deadline idx");
// Track the highest QoS of any timer in this heap; raising it requires
// reprogramming the kernel timer.
dispatch_qos_t qos = MAX(_dispatch_priority_qos(dt->du_priority),
_dispatch_priority_fallback_qos(dt->du_priority));
if (dth->dth_max_qos < qos) {
dth->dth_max_qos = (uint8_t)qos;
dth->dth_needs_program = true;
}
if (idx == 0) {
// Fast path: first timer becomes the root of both heaps.
dth->dth_needs_program = true;
dt->dt_heap_entry[DTH_TARGET_ID] = DTH_TARGET_ID;
dt->dt_heap_entry[DTH_DEADLINE_ID] = DTH_DEADLINE_ID;
dth->dth_min[DTH_TARGET_ID] = dth->dth_min[DTH_DEADLINE_ID] = dt;
return;
}
if (unlikely(idx + DTH_ID_COUNT >
_dispatch_timer_heap_capacity(dth->dth_segments))) {
_dispatch_timer_heap_grow(dth);
}
// Sift the new entry into place in each heap separately.
_dispatch_timer_heap_resift(dth, dt, idx + DTH_TARGET_ID);
_dispatch_timer_heap_resift(dth, dt, idx + DTH_DEADLINE_ID);
}
static void
_dispatch_timer_heap_remove(dispatch_timer_heap_t dth,
dispatch_timer_source_refs_t dt)
{
// Remove `dt` from both interleaved heaps using the classic scheme:
// take the last element out and resift it into the vacated position.
uint32_t idx = (dth->dth_count -= DTH_ID_COUNT);
DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_TARGET_ID], !=,
DTH_INVALID_ID, "target idx");
DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_DEADLINE_ID], !=,
DTH_INVALID_ID, "deadline idx");
if (idx == 0) {
// Fast path: `dt` was the only timer, so it is the root of both.
DISPATCH_TIMER_ASSERT(dth->dth_min[DTH_TARGET_ID], ==, dt,
"target slot");
DISPATCH_TIMER_ASSERT(dth->dth_min[DTH_DEADLINE_ID], ==, dt,
"deadline slot");
dth->dth_needs_program = true;
dth->dth_min[DTH_TARGET_ID] = dth->dth_min[DTH_DEADLINE_ID] = NULL;
goto clear_heap_entry;
}
for (uint32_t heap_id = 0; heap_id < DTH_ID_COUNT; heap_id++) {
dispatch_timer_source_refs_t *slot, last_dt;
slot = _dispatch_timer_heap_get_slot(dth, idx + heap_id);
last_dt = *slot; *slot = NULL;
if (last_dt != dt) {
// Move the (previously) last element into the hole left by `dt`.
uint32_t removed_idx = dt->dt_heap_entry[heap_id];
_dispatch_timer_heap_resift(dth, last_dt, removed_idx);
}
}
// Release the newest segment once the entries fit without it.
if (unlikely(idx <= _dispatch_timer_heap_capacity(dth->dth_segments - 1))) {
_dispatch_timer_heap_shrink(dth);
}
clear_heap_entry:
dt->dt_heap_entry[DTH_TARGET_ID] = DTH_INVALID_ID;
dt->dt_heap_entry[DTH_DEADLINE_ID] = DTH_INVALID_ID;
}
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_timer_heap_update(dispatch_timer_heap_t dth,
dispatch_timer_source_refs_t dt)
{
// Re-establish heap order after dt's target/deadline changed.
// The unote must already be in both heaps.
DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_TARGET_ID], !=,
DTH_INVALID_ID, "target idx");
DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_DEADLINE_ID], !=,
DTH_INVALID_ID, "deadline idx");
_dispatch_timer_heap_resift(dth, dt, dt->dt_heap_entry[DTH_TARGET_ID]);
_dispatch_timer_heap_resift(dth, dt, dt->dt_heap_entry[DTH_DEADLINE_ID]);
}
#pragma mark timer unote
// Debug-log helper identifying a timer unote by its owning source and ident.
#define _dispatch_timer_du_debug(what, du) \
_dispatch_debug("kevent-source[%p]: %s kevent[%p] { ident = 0x%x }", \
_dispatch_wref2ptr((du)->du_owner_wref), what, \
(du), (du)->du_ident)
DISPATCH_ALWAYS_INLINE
static inline unsigned int
_dispatch_timer_unote_idx(dispatch_timer_source_refs_t dt)
{
// Map the timer's (clock, qos) flags to its heap bucket index.
dispatch_clock_t clock = _dispatch_timer_flags_to_clock(dt->du_timer_flags);
uint32_t qos = 0;
#if DISPATCH_HAVE_TIMER_QOS
// The STRICT/BACKGROUND flag values double as QoS bucket indices.
dispatch_assert(DISPATCH_TIMER_STRICT == DISPATCH_TIMER_QOS_CRITICAL);
dispatch_assert(DISPATCH_TIMER_BACKGROUND == DISPATCH_TIMER_QOS_BACKGROUND);
qos = dt->du_timer_flags & (DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND);
dispatch_assert(qos < DISPATCH_TIMER_QOS_COUNT);
#endif
return DISPATCH_TIMER_INDEX(clock, qos);
}
static void
_dispatch_timer_unote_disarm(dispatch_timer_source_refs_t dt,
dispatch_timer_heap_t dth)
{
// Remove an armed timer from its heap bucket and clear its armed bit.
// Does NOT release the owner retain taken when arming; callers do that.
uint32_t tidx = dt->du_ident;
dispatch_assert(_dispatch_unote_armed(dt));
_dispatch_timer_heap_remove(&dth[tidx], dt);
_dispatch_timers_heap_dirty(dth, tidx);
_dispatch_unote_state_clear_bit(dt, DU_STATE_ARMED);
_dispatch_timer_du_debug("disarmed", dt);
}
static void
_dispatch_timer_unote_arm(dispatch_timer_source_refs_t dt,
dispatch_timer_heap_t dth, uint32_t tidx)
{
// Arm `dt` in the heap bucket `tidx`, or re-sort it if it is already
// armed there (its target/deadline may have changed).
if (_dispatch_unote_armed(dt)) {
DISPATCH_TIMER_ASSERT(dt->du_ident, ==, tidx, "tidx");
_dispatch_timer_heap_update(&dth[tidx], dt);
_dispatch_timer_du_debug("updated", dt);
} else {
dt->du_ident = tidx;
_dispatch_timer_heap_insert(&dth[tidx], dt);
_dispatch_unote_state_set_bit(dt, DU_STATE_ARMED);
_dispatch_timer_du_debug("armed", dt);
}
_dispatch_timers_heap_dirty(dth, tidx);
}
// Flag for _dispatch_timer_unote_needs_rearm: ktrace when a suspended
// source prevents a timer from firing.
#define DISPATCH_TIMER_UNOTE_TRACE_SUSPENSION 0x1
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_timer_unote_needs_rearm(dispatch_timer_source_refs_t dr, int flags)
{
// A timer should be (re)armed unless its source is suspended, it was
// canceled, or it has no finite target set.
dispatch_source_t ds = _dispatch_source_from_refs(dr);
if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(ds))) {
if (flags & DISPATCH_TIMER_UNOTE_TRACE_SUSPENSION) {
_dispatch_ktrace1(DISPATCH_PERF_suspended_timer_fire, ds);
}
return false;
}
return dr->du_ident != DISPATCH_TIMER_IDENT_CANCELED &&
dr->dt_timer.target < INT64_MAX;
}
DISPATCH_NOINLINE
static void
_dispatch_timer_unote_register(dispatch_timer_source_refs_t dt,
dispatch_wlh_t wlh, dispatch_priority_t pri)
{
// Timer-specific half of _dispatch_unote_register().
if (_dispatch_qos_is_background(_dispatch_priority_qos(pri))) {
if (dt->du_timer_flags & DISPATCH_TIMER_STRICT) {
// STRICT and BACKGROUND are incompatible: keep STRICT and
// just trace the conflict.
_dispatch_ktrace1(DISPATCH_PERF_strict_bg_timer,
_dispatch_source_from_refs(dt));
} else {
// Demote the timer to the background bucket to match its QoS.
dt->du_timer_flags |= DISPATCH_TIMER_BACKGROUND;
dt->du_ident = _dispatch_timer_unote_idx(dt);
}
}
if (_dispatch_unote_wlh(dt) != wlh) {
// First registration: timers are always bound to the anon wlh here.
dispatch_assert(_dispatch_unote_wlh(dt) == NULL);
_dispatch_unote_state_set(dt, DISPATCH_WLH_ANON, 0);
}
// Apply any dispatch_source_set_timer() configuration queued before
// registration; this also arms the timer if it has a finite target.
if (os_atomic_load2o(dt, dt_pending_config, relaxed)) {
_dispatch_timer_unote_configure(dt);
}
}
void
_dispatch_timer_unote_configure(dispatch_timer_source_refs_t dt)
{
// Consume the pending configuration installed by the setter and apply
// it: update the clock flags if the clock changed, install the new
// target/deadline/interval, and re-sort the heaps if currently armed.
dispatch_timer_config_t dtc;
dtc = os_atomic_xchg2o(dt, dt_pending_config, NULL, dependency);
if (dtc->dtc_clock != _dispatch_timer_flags_to_clock(dt->du_timer_flags)) {
dt->du_timer_flags &= ~_DISPATCH_TIMER_CLOCK_MASK;
dt->du_timer_flags |= _dispatch_timer_flags_from_clock(dtc->dtc_clock);
}
dt->dt_timer = dtc->dtc_timer;
free(dtc);
// A reconfigured timer starts over: discard any undelivered fire count.
os_atomic_store2o(dt, ds_pending_data, 0, relaxed);
if (_dispatch_unote_armed(dt)) {
return _dispatch_timer_unote_resume(dt);
}
}
static inline dispatch_timer_heap_t
_dispatch_timer_unote_heap(dispatch_timer_source_refs_t dt)
{
	// Timers bound to DISPATCH_WLH_ANON live in the global heaps;
	// all others live in their workloop's per-instance heap array.
	dispatch_wlh_t wlh = _dispatch_unote_wlh(dt);
	if (wlh != DISPATCH_WLH_ANON) {
		return ((dispatch_workloop_t)wlh)->dwl_timer_heap;
	}
	return _dispatch_timers_heap;
}
DISPATCH_NOINLINE
static void
_dispatch_timer_unote_resume(dispatch_timer_source_refs_t dt)
{
// Reconcile the timer's armed state with what its configuration says it
// should be, moving it between heap buckets if its (clock, qos) index
// changed. An armed timer holds a retain on its owner; the retain is
// taken/released on the arm/disarm transitions only.
bool will_arm = _dispatch_timer_unote_needs_rearm(dt, 0);
bool was_armed = _dispatch_unote_armed(dt);
uint32_t tidx = _dispatch_timer_unote_idx(dt);
dispatch_timer_heap_t dth = _dispatch_timer_unote_heap(dt);
if (unlikely(was_armed && (!will_arm || dt->du_ident != tidx))) {
// Disarming, or moving buckets: pull it out of the old bucket first.
_dispatch_timer_unote_disarm(dt, dth);
}
if (will_arm) {
if (!was_armed) _dispatch_retain_unote_owner(dt);
_dispatch_timer_unote_arm(dt, dth, tidx);
} else if (was_armed) {
_dispatch_release_unote_owner_tailcall(dt);
}
}
DISPATCH_NOINLINE
static void
_dispatch_timer_unote_unregister(dispatch_timer_source_refs_t dt)
{
// Timer-specific half of _dispatch_unote_unregister(): disarm, drop the
// arm-time retain, release the wlh binding, and mark the timer canceled
// so it can never be rearmed.
dispatch_timer_heap_t dth = _dispatch_timer_unote_heap(dt);
if (_dispatch_unote_armed(dt)) {
_dispatch_timer_unote_disarm(dt, dth);
_dispatch_release_2_no_dispose(_dispatch_source_from_refs(dt));
}
_dispatch_wlh_release(_dispatch_unote_wlh(dt));
_dispatch_unote_state_set(dt, DU_STATE_UNREGISTERED);
dt->du_ident = DISPATCH_TIMER_IDENT_CANCELED;
}
static dispatch_unote_t
_dispatch_source_timer_create(dispatch_source_type_t dst,
uintptr_t handle, unsigned long mask)
{
// Shared creation function for all four timer source types. The handle
// is an optional clock id (or the interval, for interval timers).
dispatch_timer_source_refs_t dt;
// STRICT wins over BACKGROUND when both are requested.
if (mask & DISPATCH_TIMER_STRICT) {
mask &= ~(unsigned long)DISPATCH_TIMER_BACKGROUND;
}
if (mask & ~dst->dst_mask) {
return DISPATCH_UNOTE_NULL;
}
if (dst->dst_timer_flags & DISPATCH_TIMER_INTERVAL) {
// Interval timers encode the interval in the handle; 0 is invalid.
if (!handle) return DISPATCH_UNOTE_NULL;
} else if (dst->dst_filter == DISPATCH_EVFILT_TIMER_WITH_CLOCK) {
// Fixed-clock timers take no handle at all.
if (handle) return DISPATCH_UNOTE_NULL;
} else switch (handle) {
// Plain timers: an optional clock id pins the timer to a clock and
// redirects it to the fixed-clock type.
case 0:
break;
case DISPATCH_CLOCKID_UPTIME:
dst = &_dispatch_source_type_timer_with_clock;
mask |= DISPATCH_TIMER_CLOCK_UPTIME;
break;
case DISPATCH_CLOCKID_MONOTONIC:
dst = &_dispatch_source_type_timer_with_clock;
mask |= DISPATCH_TIMER_CLOCK_MONOTONIC;
break;
case DISPATCH_CLOCKID_WALLTIME:
dst = &_dispatch_source_type_timer_with_clock;
mask |= DISPATCH_TIMER_CLOCK_WALL;
break;
default:
return DISPATCH_UNOTE_NULL;
}
dt = _dispatch_calloc(1u, dst->dst_size);
dt->du_type = dst;
dt->du_filter = dst->dst_filter;
dt->du_is_timer = true;
dt->du_timer_flags |= (uint8_t)(mask | dst->dst_timer_flags);
dt->du_ident = _dispatch_timer_unote_idx(dt);
// Sentinels: "never fires" until configured, and "not in any heap".
dt->dt_timer.target = UINT64_MAX;
dt->dt_timer.deadline = UINT64_MAX;
dt->dt_timer.interval = UINT64_MAX;
dt->dt_heap_entry[DTH_TARGET_ID] = DTH_INVALID_ID;
dt->dt_heap_entry[DTH_DEADLINE_ID] = DTH_INVALID_ID;
return (dispatch_unote_t){ ._dt = dt };
}
// DISPATCH_SOURCE_TYPE_TIMER: generic timer; the clock is chosen later by
// dispatch_source_set_timer() (or by a clock-id handle at creation).
const dispatch_source_type_s _dispatch_source_type_timer = {
.dst_kind = "timer",
.dst_filter = DISPATCH_EVFILT_TIMER,
.dst_flags = EV_DISPATCH,
.dst_mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND,
.dst_timer_flags = 0,
.dst_action = DISPATCH_UNOTE_ACTION_SOURCE_TIMER,
.dst_size = sizeof(struct dispatch_timer_source_refs_s),
.dst_strict = false,
.dst_create = _dispatch_source_timer_create,
.dst_merge_evt = _dispatch_source_merge_evt,
};
// Variant used when the timer was created with an explicit clock id;
// the clock can no longer be changed by reconfiguration.
const dispatch_source_type_s _dispatch_source_type_timer_with_clock = {
.dst_kind = "timer (fixed-clock)",
.dst_filter = DISPATCH_EVFILT_TIMER_WITH_CLOCK,
.dst_flags = EV_DISPATCH,
.dst_mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND,
.dst_timer_flags = 0,
.dst_action = DISPATCH_UNOTE_ACTION_SOURCE_TIMER,
.dst_size = sizeof(struct dispatch_timer_source_refs_s),
.dst_create = _dispatch_source_timer_create,
.dst_merge_evt = _dispatch_source_merge_evt,
};
// Internal one-shot timer backing dispatch_after(); takes no mask bits.
const dispatch_source_type_s _dispatch_source_type_after = {
.dst_kind = "timer (after)",
.dst_filter = DISPATCH_EVFILT_TIMER_WITH_CLOCK,
.dst_flags = EV_DISPATCH,
.dst_mask = 0,
.dst_timer_flags = DISPATCH_TIMER_AFTER,
.dst_action = DISPATCH_UNOTE_ACTION_SOURCE_TIMER,
.dst_size = sizeof(struct dispatch_timer_source_refs_s),
.dst_create = _dispatch_source_timer_create,
.dst_merge_evt = _dispatch_source_merge_evt,
};
// Interval timer on the uptime clock; the interval comes in via the
// creation handle (see _dispatch_source_timer_create).
const dispatch_source_type_s _dispatch_source_type_interval = {
.dst_kind = "timer (interval)",
.dst_filter = DISPATCH_EVFILT_TIMER_WITH_CLOCK,
.dst_flags = EV_DISPATCH,
.dst_mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND|
DISPATCH_INTERVAL_UI_ANIMATION,
.dst_timer_flags = DISPATCH_TIMER_INTERVAL|DISPATCH_TIMER_CLOCK_UPTIME,
.dst_action = DISPATCH_UNOTE_ACTION_SOURCE_TIMER,
.dst_size = sizeof(struct dispatch_timer_source_refs_s),
.dst_create = _dispatch_source_timer_create,
.dst_merge_evt = _dispatch_source_merge_evt,
};
#pragma mark timer draining
static void
_dispatch_timers_run(dispatch_timer_heap_t dth, uint32_t tidx,
dispatch_clock_now_cache_t nows)
{
// Fire every timer in bucket `tidx` whose target has passed, repeatedly
// popping the heap minimum until the earliest remaining target is in
// the future.
dispatch_timer_source_refs_t dr;
uint64_t pending, now;
while ((dr = dth[tidx].dth_min[DTH_TARGET_ID])) {
DISPATCH_TIMER_ASSERT(dr->du_ident, ==, tidx, "tidx");
DISPATCH_TIMER_ASSERT(dr->dt_timer.target, !=, 0, "missing target");
now = _dispatch_time_now_cached(DISPATCH_TIMER_CLOCK(tidx), nows);
if (dr->dt_timer.target > now) {
// Earliest timer is in the future: this bucket is drained.
break;
}
if (dr->du_timer_flags & DISPATCH_TIMER_AFTER) {
// dispatch_after() timers are one-shot: fully unregister inline.
_dispatch_timer_unote_disarm(dr, dth); _dispatch_wlh_release(_dispatch_unote_wlh(dr));
_dispatch_unote_state_set(dr, DU_STATE_UNREGISTERED);
os_atomic_store2o(dr, ds_pending_data, 2, relaxed);
_dispatch_trace_timer_fire(dr, 1, 1);
dux_merge_evt(dr, EV_ONESHOT, 0, 0);
continue;
}
if (os_atomic_load2o(dr, dt_pending_config, relaxed)) {
// Apply a pending reconfiguration; this re-sorts the heap,
// so restart evaluation from the (possibly new) minimum.
_dispatch_timer_unote_configure(dr);
continue;
}
if (unlikely(os_atomic_load2o(dr, ds_pending_data, relaxed))) {
// The previous fire hasn't been consumed by the handler yet:
// disarm and flag the undelivered data with the marker.
_dispatch_timer_unote_disarm(dr, dth);
pending = os_atomic_or_orig2o(dr, ds_pending_data,
DISPATCH_TIMER_DISARMED_MARKER, relaxed);
} else {
// Fire count is kept shifted left by one; bit 0 is the
// DISARMED marker.
pending = _dispatch_timer_unote_compute_missed(dr, now, 0) << 1;
if (_dispatch_timer_unote_needs_rearm(dr,
DISPATCH_TIMER_UNOTE_TRACE_SUSPENSION)) {
// Repeating timer: rearm for the next target.
_dispatch_retain_unote_owner(dr);
_dispatch_timer_unote_arm(dr, dth, tidx);
os_atomic_store2o(dr, ds_pending_data, pending, relaxed);
} else {
_dispatch_timer_unote_disarm(dr, dth);
pending |= DISPATCH_TIMER_DISARMED_MARKER;
os_atomic_store2o(dr, ds_pending_data, pending, release);
}
}
_dispatch_trace_timer_fire(dr, pending >> 1, pending >> 1);
dux_merge_evt(dr, EV_ONESHOT, 0, 0);
}
}
#if DISPATCH_HAVE_TIMER_COALESCING
// Per-QoS coalescing windows: a timer may be delayed up to its leeway, and
// within that slack we try to group fires that fall inside these windows.
#define DISPATCH_KEVENT_COALESCING_WINDOW_INIT(qos, ms) \
[DISPATCH_TIMER_QOS_##qos] = 2ull * (ms) * NSEC_PER_MSEC
static const uint64_t _dispatch_kevent_coalescing_window[] = {
DISPATCH_KEVENT_COALESCING_WINDOW_INIT(NORMAL, 75),
#if DISPATCH_HAVE_TIMER_QOS
DISPATCH_KEVENT_COALESCING_WINDOW_INIT(CRITICAL, 1),
DISPATCH_KEVENT_COALESCING_WINDOW_INIT(BACKGROUND, 100),
#endif
};
#endif // DISPATCH_HAVE_TIMER_COALESCING
DISPATCH_ALWAYS_INLINE
static inline dispatch_timer_delay_s
_dispatch_timers_get_delay(dispatch_timer_heap_t dth, uint32_t tidx,
uint32_t qos, dispatch_clock_now_cache_t nows)
{
// Compute how far in the future bucket `tidx` must fire (delay) and how
// much slack is acceptable (leeway). Returns INT64_MAX/INT64_MAX for an
// empty bucket and 0/0 when the earliest target has already passed.
uint64_t target, deadline;
dispatch_timer_delay_s rc;
if (!dth[tidx].dth_min[DTH_TARGET_ID]) {
rc.delay = rc.leeway = INT64_MAX;
return rc;
}
// Earliest target and earliest deadline, from the two interleaved heaps.
target = dth[tidx].dth_min[DTH_TARGET_ID]->dt_timer.target;
deadline = dth[tidx].dth_min[DTH_DEADLINE_ID]->dt_timer.deadline;
dispatch_assert(target <= deadline && target < INT64_MAX);
uint64_t now = _dispatch_time_now_cached(DISPATCH_TIMER_CLOCK(tidx), nows);
if (target <= now) {
rc.delay = rc.leeway = 0;
return rc;
}
// Coalescing only pays off with more than one timer in the bucket
// (dth_count is in DTH_ID_COUNT units per timer).
if (qos < DISPATCH_TIMER_QOS_COUNT && dth[tidx].dth_count > 2) {
#if DISPATCH_HAVE_TIMER_COALESCING
// Push the fire time as late as the deadline allows, so more timers
// fall into the same wakeup.
uint64_t window = _dispatch_kevent_coalescing_window[qos];
if (target + window < deadline) {
uint64_t latest = deadline - window;
target = _dispatch_timer_heap_max_target_before(&dth[tidx], latest);
}
#endif
}
rc.delay = MIN(target - now, INT64_MAX);
rc.leeway = MIN(deadline - target, INT64_MAX);
return rc;
}
static void
_dispatch_timers_program(dispatch_timer_heap_t dth, uint32_t tidx,
dispatch_clock_now_cache_t nows)
{
// Push bucket `tidx`'s computed delay down to the event loop: delete
// the kernel timer when the bucket is empty (or already due), otherwise
// (re)arm it with the new delay/leeway.
uint32_t qos = DISPATCH_TIMER_QOS(tidx);
dispatch_timer_delay_s range;
range = _dispatch_timers_get_delay(dth, tidx, qos, nows);
if (range.delay == 0) {
// Already due: mark dirty so the drain loop runs this bucket again.
_dispatch_timers_heap_dirty(dth, tidx);
}
if (range.delay == 0 || range.delay >= INT64_MAX) {
_dispatch_trace_next_timer_set(NULL, qos);
if (dth[tidx].dth_armed) {
_dispatch_event_loop_timer_delete(dth, tidx);
}
dth[tidx].dth_armed = false;
dth[tidx].dth_needs_program = false;
} else {
_dispatch_trace_next_timer_set(dth[tidx].dth_min[DTH_TARGET_ID], qos);
_dispatch_trace_next_timer_program(range.delay, qos);
_dispatch_event_loop_timer_arm(dth, tidx, range, nows);
dth[tidx].dth_armed = true;
dth[tidx].dth_needs_program = false;
}
}
void
_dispatch_event_loop_drain_timers(dispatch_timer_heap_t dth, uint32_t count)
{
// Drive all `count` timer buckets of `dth`: fire due timers, trace
// wakes, then reprogram the kernel timers — repeating while firing
// handlers dirtied any bucket. A single now-cache keeps all buckets on
// a consistent view of the clocks within one pass.
dispatch_clock_now_cache_s nows = { };
uint32_t tidx;
do {
for (tidx = 0; tidx < count; tidx++) {
_dispatch_timers_run(dth, tidx, &nows);
}
#if DISPATCH_USE_DTRACE
// Emit a wake probe for each QoS bucket that fired this pass.
uint32_t mask = dth[0].dth_dirty_bits & DTH_DIRTY_QOS_MASK;
while (mask && DISPATCH_TIMER_WAKE_ENABLED()) {
int qos = __builtin_ctz(mask);
mask -= 1 << qos;
_dispatch_trace_timer_wake(_dispatch_trace_next_timer[qos]);
}
#endif // DISPATCH_USE_DTRACE
dth[0].dth_dirty_bits = 0;
for (tidx = 0; tidx < count; tidx++) {
if (dth[tidx].dth_needs_program) {
_dispatch_timers_program(dth, tidx, &nows);
}
}
} while (unlikely(dth[0].dth_dirty_bits));
}