// Internal dispatch queue declarations. This header is private to the
// dispatch implementation and must only be reached via <dispatch/dispatch.h>,
// which defines __DISPATCH_INDIRECT__ before including it.
#ifndef __DISPATCH_QUEUE_INTERNAL__
#define __DISPATCH_QUEUE_INTERNAL__
#ifndef __DISPATCH_INDIRECT__
#error "Please #include <dispatch/dispatch.h> instead of this file directly."
#include <dispatch/base.h> // for HeaderDoc
#endif
#pragma mark -
#pragma mark dispatch_queue_flags, dq_state
// dq_atomic_flags word: the low 16 bits hold the queue width
// (DQF_WIDTH_MASK), the high 16 bits hold the DQF_*/DSF_* flags
// (DQF_FLAGS_MASK).
DISPATCH_OPTIONS(dispatch_queue_flags, uint32_t,
DQF_NONE = 0x00000000,
// Autorelease frequency for items drained on this queue; the two bits
// together form _DQF_AUTORELEASE_MASK.
DQF_AUTORELEASE_ALWAYS = 0x00010000,
DQF_AUTORELEASE_NEVER = 0x00020000,
#define _DQF_AUTORELEASE_MASK 0x00030000
DQF_THREAD_BOUND = 0x00040000, DQF_BARRIER_BIT = 0x00080000, DQF_TARGETED = 0x00100000, DQF_LABEL_NEEDS_FREE = 0x00200000, DQF_MUTABLE = 0x00400000,
DQF_RELEASED = 0x00800000,
// DSF_* values share this flags word; the DSF prefix suggests they apply
// to dispatch sources rather than plain queues -- confirm against the
// source implementation.
DSF_STRICT = 0x04000000,
DSF_WLH_CHANGED = 0x08000000,
DSF_CANCELED = 0x10000000,
DSF_CANCEL_WAITER = 0x20000000,
DSF_NEEDS_EVENT = 0x40000000,
DSF_DELETED = 0x80000000,
#define DQF_FLAGS_MASK ((dispatch_queue_flags_t)0xffff0000)
#define DQF_WIDTH_MASK ((dispatch_queue_flags_t)0x0000ffff)
#define DQF_WIDTH(n) ((dispatch_queue_flags_t)(uint16_t)(n))
);
// dq_state: 64-bit atomic queue state word. From high to low bits (per the
// masks below): suspend count, side-suspend-count flag, inactive/activating
// bits, in-barrier bit, width-full bit, width field (shifted by
// DISPATCH_QUEUE_WIDTH_SHIFT), pending-barrier, dirty, enqueued-on-manager,
// role (2 bits), received override/sync-wait, max QoS (3 bits), and the
// 32-bit drain lock (DLOCK_*) occupying the low word.
#define DISPATCH_QUEUE_SUSPEND_INTERVAL 0x0400000000000000ull
#define DISPATCH_QUEUE_SUSPEND_HALF 0x20u
// Set when part of the suspend count has overflowed into dq_side_suspend_cnt.
#define DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT 0x0200000000000000ull
// INACTIVE is both inactivity bits set; ACTIVATED / ACTIVATING are the
// individual bits within DISPATCH_QUEUE_INACTIVE_BITS_MASK.
#define DISPATCH_QUEUE_INACTIVE 0x0180000000000000ull
#define DISPATCH_QUEUE_ACTIVATED 0x0100000000000000ull
#define DISPATCH_QUEUE_ACTIVATING 0x0080000000000000ull
#define DISPATCH_QUEUE_INACTIVE_BITS_MASK 0x0180000000000000ull
#define DISPATCH_QUEUE_SUSPEND_BITS_MASK 0xff80000000000000ull
#define DISPATCH_QUEUE_IN_BARRIER 0x0040000000000000ull
#define DISPATCH_QUEUE_WIDTH_FULL_BIT 0x0020000000000000ull
// Width is stored as (WIDTH_FULL - available slots); POOL and MAX are
// sentinel widths (FULL - 1 and FULL - 2 respectively).
#define DISPATCH_QUEUE_WIDTH_FULL 0x1000ull
#define DISPATCH_QUEUE_WIDTH_POOL (DISPATCH_QUEUE_WIDTH_FULL - 1)
#define DISPATCH_QUEUE_WIDTH_MAX (DISPATCH_QUEUE_WIDTH_FULL - 2)
// True for widths strictly between 1 (serial) and the pool sentinel,
// i.e. genuinely concurrent but bounded queues.
#define DISPATCH_QUEUE_USES_REDIRECTION(width) \
({ uint16_t _width = (width); \
_width > 1 && _width < DISPATCH_QUEUE_WIDTH_POOL; })
#define DISPATCH_QUEUE_WIDTH_INTERVAL 0x0000020000000000ull
#define DISPATCH_QUEUE_WIDTH_MASK 0x003ffe0000000000ull
#define DISPATCH_QUEUE_WIDTH_SHIFT 41
#define DISPATCH_QUEUE_PENDING_BARRIER 0x0000010000000000ull
#define DISPATCH_QUEUE_DIRTY 0x0000008000000000ull
#define DISPATCH_QUEUE_ENQUEUED_ON_MGR 0x0000004000000000ull
// Queue role: base-with-workloop-handle, base-anonymous, or inner queue.
#define DISPATCH_QUEUE_ROLE_MASK 0x0000003000000000ull
#define DISPATCH_QUEUE_ROLE_BASE_WLH 0x0000002000000000ull
#define DISPATCH_QUEUE_ROLE_BASE_ANON 0x0000001000000000ull
#define DISPATCH_QUEUE_ROLE_INNER 0x0000000000000000ull
// NOTE: RECEIVED_OVERRIDE and RECEIVED_SYNC_WAIT intentionally share the
// same bit (bit 35); presumably which meaning applies is determined by the
// queue's role bits (BASE_ANON vs BASE_WLH) -- confirm against the dq_state
// commentary in the implementation before changing either value.
#define DISPATCH_QUEUE_RECEIVED_OVERRIDE 0x0000000800000000ull
#define DISPATCH_QUEUE_RECEIVED_SYNC_WAIT 0x0000000800000000ull
// Max QoS of pending work: 3 bits at bit 32.
#define DISPATCH_QUEUE_MAX_QOS_MASK 0x0000000700000000ull
#define DISPATCH_QUEUE_MAX_QOS_SHIFT 32
// The low 32 bits alias the drain lock; these reuse DLOCK_* bit definitions.
#define DISPATCH_QUEUE_DRAIN_OWNER_MASK ((uint64_t)DLOCK_OWNER_MASK)
#define DISPATCH_QUEUE_SYNC_TRANSFER ((uint64_t)DLOCK_FAILED_TRYLOCK_BIT)
#define DISPATCH_QUEUE_ENQUEUED ((uint64_t)DLOCK_WAITERS_BIT)
// Bits that survive a drain-unlock vs bits cleared by it.
#define DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK \
(DISPATCH_QUEUE_ENQUEUED_ON_MGR | DISPATCH_QUEUE_ENQUEUED | \
DISPATCH_QUEUE_ROLE_MASK | DISPATCH_QUEUE_MAX_QOS_MASK)
#define DISPATCH_QUEUE_DRAIN_UNLOCK_MASK \
(DISPATCH_QUEUE_DRAIN_OWNER_MASK | DISPATCH_QUEUE_RECEIVED_OVERRIDE | \
DISPATCH_QUEUE_RECEIVED_SYNC_WAIT | DISPATCH_QUEUE_SYNC_TRANSFER)
// Initial dq_state for a queue of the given width (width stored inverted).
#define DISPATCH_QUEUE_STATE_INIT_VALUE(width) \
((DISPATCH_QUEUE_WIDTH_FULL - (width)) << DISPATCH_QUEUE_WIDTH_SHIFT)
#define DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE \
(DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER)
// State owned by a serial queue's drainer: the barrier bit plus one width slot.
#define DISPATCH_QUEUE_SERIAL_DRAIN_OWNED \
(DISPATCH_QUEUE_IN_BARRIER | DISPATCH_QUEUE_WIDTH_INTERVAL)
#pragma mark -
#pragma mark dispatch_queue_t
// One queue-specific (key, context, destructor) entry; entries are linked
// into the per-queue TAILQ below.
typedef struct dispatch_queue_specific_s {
const void *dqs_key;
void *dqs_ctxt;
dispatch_function_t dqs_destructor;
TAILQ_ENTRY(dispatch_queue_specific_s) dqs_entry;
} *dispatch_queue_specific_t;
// Head of a queue's specific-data list, guarded by dqsh_lock.
typedef struct dispatch_queue_specific_head_s {
dispatch_unfair_lock_s dqsh_lock;
TAILQ_HEAD(, dispatch_queue_specific_s) dqsh_entries;
} *dispatch_queue_specific_head_t;
// dwla_flags bits: which of the optional workloop attributes below are set.
#define DISPATCH_WORKLOOP_ATTR_HAS_SCHED 0x0001u
#define DISPATCH_WORKLOOP_ATTR_HAS_POLICY 0x0002u
#define DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT 0x0004u
#define DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS 0x0008u
#define DISPATCH_WORKLOOP_ATTR_NEEDS_DESTROY 0x0010u
#define DISPATCH_WORKLOOP_ATTR_HAS_OBSERVERS 0x0020u
typedef struct dispatch_workloop_attr_s *dispatch_workloop_attr_t;
// Optional scheduling attributes attached to a dispatch workloop; validity
// of each field is indicated by the DISPATCH_WORKLOOP_ATTR_* flag bits.
// NOTE(review): dwla_observers embeds
// dispatch_pthread_root_queue_observer_hooks_s by value, but the only
// visible definition of that struct appears later in this file -- verify
// that the complete definition is in scope here in the full build.
typedef struct dispatch_workloop_attr_s {
uint32_t dwla_flags;
dispatch_priority_t dwla_pri;
struct sched_param dwla_sched;
int dwla_policy;
struct {
uint8_t percent;
uint32_t refillms;
} dwla_cpupercent;
dispatch_pthread_root_queue_observer_hooks_s dwla_observers;
} dispatch_workloop_attr_s;
// Common leading fields for every queue-like class. The two variants differ
// only in field order: under legacy ObjC1 the 8-byte dq_state must come
// before the pointer-sized field, otherwise after it, so that dq_state lands
// at the same (8-byte aligned) offset in both ABIs.
#if OS_OBJECT_HAVE_OBJC1
#define _DISPATCH_QUEUE_CLASS_HEADER(x, __pointer_sized_field__) \
DISPATCH_OBJECT_HEADER(x); \
DISPATCH_UNION_LE(uint64_t volatile dq_state, \
dispatch_lock dq_state_lock, \
uint32_t dq_state_bits \
); \
__pointer_sized_field__
#else
#define _DISPATCH_QUEUE_CLASS_HEADER(x, __pointer_sized_field__) \
DISPATCH_OBJECT_HEADER(x); \
__pointer_sized_field__; \
DISPATCH_UNION_LE(uint64_t volatile dq_state, \
dispatch_lock dq_state_lock, \
uint32_t dq_state_bits \
)
#endif
// Full queue class header: object header + dq_state + label, width/flags,
// priority, a per-class refs pointer union, and the internal side refcount.
#define DISPATCH_QUEUE_CLASS_HEADER(x, __pointer_sized_field__) \
_DISPATCH_QUEUE_CLASS_HEADER(x, __pointer_sized_field__); \
\
unsigned long dq_serialnum; \
const char *dq_label; \
DISPATCH_UNION_LE(uint32_t volatile dq_atomic_flags, \
const uint16_t dq_width, \
const uint16_t __dq_opaque2 \
); \
dispatch_priority_t dq_priority; \
union { \
struct dispatch_queue_specific_head_s *dq_specific_head; \
struct dispatch_source_refs_s *ds_refs; \
struct dispatch_timer_source_refs_s *ds_timer_refs; \
struct dispatch_mach_recv_refs_s *dm_recv_refs; \
struct dispatch_channel_callbacks_s const *dch_callbacks; \
}; \
int volatile dq_sref_cnt
// Base queue type; the opaque pointer slot is specialized by subclasses.
struct dispatch_queue_s {
DISPATCH_QUEUE_CLASS_HEADER(queue, void *__dq_opaque1);
} DISPATCH_ATOMIC64_ALIGN;
// Workloop: a queue subclass with per-QoS-bucket item lists and optional
// scheduling attributes. _as_dq[0] is a zero-length overlay allowing cheap
// upcasts to dispatch_queue_s (GNU zero-length array extension).
struct dispatch_workloop_s {
struct dispatch_queue_s _as_dq[0];
DISPATCH_QUEUE_CLASS_HEADER(workloop, dispatch_timer_heap_t dwl_timer_heap);
uint8_t dwl_drained_qos;
struct dispatch_object_s *dwl_heads[DISPATCH_QOS_NBUCKETS];
struct dispatch_object_s *dwl_tails[DISPATCH_QOS_NBUCKETS];
dispatch_workloop_attr_t dwl_attr;
} DISPATCH_ATOMIC64_ALIGN;
// Lane class header: a queue with an intrusive FIFO of items (head/tail),
// a side lock, and a side suspend count for suspend-count overflow.
#define DISPATCH_LANE_CLASS_HEADER(x) \
struct dispatch_queue_s _as_dq[0]; \
DISPATCH_QUEUE_CLASS_HEADER(x, \
struct dispatch_object_s *volatile dq_items_tail); \
dispatch_unfair_lock_s dq_sidelock; \
struct dispatch_object_s *volatile dq_items_head; \
uint32_t dq_side_suspend_cnt
// A "lane": the concrete serial/concurrent queue type.
typedef struct dispatch_lane_s {
DISPATCH_LANE_CLASS_HEADER(lane);
} DISPATCH_ATOMIC64_ALIGN *dispatch_lane_t;
// Statically-allocated lane (e.g. the main/manager queues); cacheline
// aligned. _as_dl[0] is a zero-length overlay for upcasting to a lane.
struct dispatch_queue_static_s {
struct dispatch_lane_s _as_dl[0];
DISPATCH_LANE_CLASS_HEADER(lane);
} DISPATCH_CACHELINE_ALIGN;
// Root (global) queue header: like a lane but with a thread-pool size and a
// pending-thread-request counter instead of the side lock/suspend count.
#define DISPATCH_QUEUE_ROOT_CLASS_HEADER(x) \
struct dispatch_queue_s _as_dq[0]; \
DISPATCH_QUEUE_CLASS_HEADER(x, \
struct dispatch_object_s *volatile dq_items_tail); \
int volatile dgq_thread_pool_size; \
struct dispatch_object_s *volatile dq_items_head; \
int volatile dgq_pending
struct dispatch_queue_global_s {
DISPATCH_QUEUE_ROOT_CLASS_HEADER(lane);
} DISPATCH_CACHELINE_ALIGN;
// Hooks invoked around item execution on pthread root queues (IOHID SPI).
// NOTE(review): dispatch_workloop_attr_s earlier in this file embeds this
// struct by value; the complete definition must be visible before that use
// -- verify ordering in the full build.
typedef struct dispatch_pthread_root_queue_observer_hooks_s {
void (*queue_will_execute)(dispatch_queue_t queue);
void (*queue_did_execute)(dispatch_queue_t queue);
} dispatch_pthread_root_queue_observer_hooks_s;
typedef dispatch_pthread_root_queue_observer_hooks_s
*dispatch_pthread_root_queue_observer_hooks_t;
// Private SPI for IOHID: pthread root queues with execution observer hooks.
#ifdef __APPLE__
#define DISPATCH_IOHID_SPI 1
DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
DISPATCH_NOTHROW DISPATCH_NONNULL4
dispatch_queue_global_t
_dispatch_pthread_root_queue_create_with_observer_hooks_4IOHID(
const char *label, unsigned long flags, const pthread_attr_t *attr,
dispatch_pthread_root_queue_observer_hooks_t observer_hooks,
dispatch_block_t configure);
DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW
bool
_dispatch_queue_is_exclusively_owned_by_current_thread_4IOHID(
dispatch_queue_t queue);
DISPATCH_EXPORT DISPATCH_NOTHROW
void
_dispatch_workloop_set_observer_hooks_4IOHID(dispatch_workloop_t workloop,
dispatch_pthread_root_queue_observer_hooks_t observer_hooks);
#endif // __APPLE__
// Per-root-queue context when threads come from a manually managed pthread
// pool rather than the kernel workqueue.
#if DISPATCH_USE_PTHREAD_POOL
typedef struct dispatch_pthread_root_queue_context_s {
#if !defined(_WIN32)
pthread_attr_t dpq_thread_attr;
#endif
dispatch_block_t dpq_thread_configure;
struct dispatch_semaphore_s dpq_thread_mediator;
dispatch_pthread_root_queue_observer_hooks_s dpq_observer_hooks;
} *dispatch_pthread_root_queue_context_t;
#endif // DISPATCH_USE_PTHREAD_POOL
// A root queue backed by a pthread pool: global queue header + pool context.
#if DISPATCH_USE_PTHREAD_ROOT_QUEUES
typedef struct dispatch_queue_pthread_root_s {
struct dispatch_queue_global_s _as_dgq[0];
DISPATCH_QUEUE_ROOT_CLASS_HEADER(lane);
struct dispatch_pthread_root_queue_context_s dpq_ctxt;
} *dispatch_queue_pthread_root_t;
#endif // DISPATCH_USE_PTHREAD_ROOT_QUEUES
// Compile-time layout checks: queue types stay within two cachelines and
// dq_state keeps 8-byte alignment (required for 64-bit atomics on it).
dispatch_static_assert(sizeof(struct dispatch_queue_s) <= 128);
dispatch_static_assert(sizeof(struct dispatch_lane_s) <= 128);
dispatch_static_assert(sizeof(struct dispatch_queue_global_s) <= 128);
dispatch_static_assert(offsetof(struct dispatch_queue_s, dq_state) %
sizeof(uint64_t) == 0, "dq_state must be 8-byte aligned");
// Every queue subclass must be layout-compatible with dispatch_queue_s for
// the fields the generic code pokes at (so pointer upcasts are safe).
#define dispatch_assert_valid_queue_type(type) \
dispatch_static_assert(sizeof(struct dispatch_queue_s) <= \
sizeof(struct type), #type " smaller than dispatch_queue_s"); \
dispatch_static_assert(_Alignof(struct type) >= sizeof(uint64_t), \
#type " is not 8-byte aligned"); \
dispatch_assert_aliases(dispatch_queue_s, type, dq_state); \
dispatch_assert_aliases(dispatch_queue_s, type, dq_serialnum); \
dispatch_assert_aliases(dispatch_queue_s, type, dq_label); \
dispatch_assert_aliases(dispatch_queue_s, type, dq_atomic_flags); \
dispatch_assert_aliases(dispatch_queue_s, type, dq_sref_cnt); \
dispatch_assert_aliases(dispatch_queue_s, type, dq_specific_head); \
dispatch_assert_aliases(dispatch_queue_s, type, dq_priority)
// Lane subclasses must additionally alias the item list head/tail.
#define dispatch_assert_valid_lane_type(type) \
dispatch_assert_valid_queue_type(type); \
dispatch_assert_aliases(dispatch_lane_s, type, dq_items_head); \
dispatch_assert_aliases(dispatch_lane_s, type, dq_items_tail)
dispatch_assert_valid_queue_type(dispatch_lane_s);
dispatch_assert_valid_lane_type(dispatch_queue_static_s);
dispatch_assert_valid_lane_type(dispatch_queue_global_s);
#if DISPATCH_USE_PTHREAD_ROOT_QUEUES
dispatch_assert_valid_lane_type(dispatch_queue_pthread_root_s);
#endif
// Class/vtable declarations for the queue class hierarchy.
DISPATCH_CLASS_DECL(queue, QUEUE);
DISPATCH_CLASS_DECL_BARE(lane, QUEUE);
DISPATCH_CLASS_DECL(workloop, QUEUE);
DISPATCH_SUBCLASS_DECL(queue_serial, queue, lane);
DISPATCH_SUBCLASS_DECL(queue_main, queue_serial, lane);
DISPATCH_SUBCLASS_DECL(queue_concurrent, queue, lane);
DISPATCH_SUBCLASS_DECL(queue_global, queue, lane);
#if DISPATCH_USE_PTHREAD_ROOT_QUEUES
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_pthread_root, queue, lane);
#endif
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_runloop, queue_serial, lane);
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_mgr, queue_serial, lane);
struct firehose_client_s;
// Per-thread, stack-linked context records keyed by dtc_key; the union
// carries the payload for whichever subsystem pushed the context.
typedef struct dispatch_thread_context_s *dispatch_thread_context_t;
typedef struct dispatch_thread_context_s {
dispatch_thread_context_t dtc_prev;
const void *dtc_key;
union {
size_t dtc_apply_nesting;
dispatch_io_t dtc_io_in_barrier;
union firehose_buffer_u *dtc_fb;
void *dtc_mig_demux_ctx;
dispatch_mach_msg_t dtc_dmsg;
struct dispatch_ipc_handoff_s *dtc_dih;
};
} dispatch_thread_context_s;
// A (queue, previous frame) pair tracking which queue the current thread is
// draining; dtf_pair allows storing/clearing both pointers at once.
typedef union dispatch_thread_frame_s *dispatch_thread_frame_t;
typedef union dispatch_thread_frame_s {
struct {
dispatch_queue_t dtf_queue;
dispatch_thread_frame_t dtf_prev;
};
void *dtf_pair[2];
} dispatch_thread_frame_s;
// Wakeup target: either a real queue pointer or one of the sentinel values
// below (NONE / TARGET / WAIT_FOR_EVENT, plus the manager queue).
typedef dispatch_queue_t dispatch_queue_wakeup_target_t;
#define DISPATCH_QUEUE_WAKEUP_NONE ((dispatch_queue_wakeup_target_t)0)
#define DISPATCH_QUEUE_WAKEUP_TARGET ((dispatch_queue_wakeup_target_t)1)
#define DISPATCH_QUEUE_WAKEUP_MGR (_dispatch_mgr_q._as_dq)
#define DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT ((dispatch_queue_wakeup_target_t)-1)
// --- Generic queue operations ---
void _dispatch_queue_xref_dispose(dispatch_queue_class_t dq);
void _dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos,
dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target);
void _dispatch_queue_invoke_finish(dispatch_queue_t dq,
dispatch_invoke_context_t dic, dispatch_queue_t tq, uint64_t owned);
dispatch_priority_t _dispatch_queue_compute_priority_and_wlh(
dispatch_queue_class_t dq, dispatch_wlh_t *wlh_out);
// How a resume transition is being driven (plain resume vs. activation).
DISPATCH_ENUM(dispatch_resume_op, int,
DISPATCH_RESUME,
DISPATCH_ACTIVATE,
DISPATCH_ACTIVATION_DONE,
);
// --- Lane (serial/concurrent queue) operations ---
void _dispatch_lane_resume(dispatch_lane_class_t dq, dispatch_resume_op_t how);
void _dispatch_lane_set_target_queue(dispatch_lane_t dq, dispatch_queue_t tq);
void _dispatch_lane_class_dispose(dispatch_queue_class_t dq, bool *allow_free);
void _dispatch_lane_dispose(dispatch_lane_class_t dq, bool *allow_free);
void _dispatch_lane_suspend(dispatch_lane_class_t dq);
void _dispatch_lane_activate(dispatch_lane_class_t dq);
void _dispatch_lane_invoke(dispatch_lane_class_t dq,
dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags);
void _dispatch_lane_push(dispatch_lane_class_t dq, dispatch_object_t dou,
dispatch_qos_t qos);
void _dispatch_lane_concurrent_push(dispatch_lane_class_t dq,
dispatch_object_t dou, dispatch_qos_t qos);
void _dispatch_lane_wakeup(dispatch_lane_class_t dq, dispatch_qos_t qos,
dispatch_wakeup_flags_t flags);
dispatch_queue_wakeup_target_t _dispatch_lane_serial_drain(
dispatch_lane_class_t dq, dispatch_invoke_context_t dic,
dispatch_invoke_flags_t flags, uint64_t *owned);
// --- Workloop operations ---
void _dispatch_workloop_dispose(dispatch_workloop_t dwl, bool *allow_free);
void _dispatch_workloop_activate(dispatch_workloop_t dwl);
void _dispatch_workloop_invoke(dispatch_workloop_t dwl,
dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags);
void _dispatch_workloop_push(dispatch_workloop_t dwl, dispatch_object_t dou,
dispatch_qos_t qos);
void _dispatch_workloop_wakeup(dispatch_workloop_t dwl, dispatch_qos_t qos,
dispatch_wakeup_flags_t flags);
// --- Root (global) queue operations ---
void _dispatch_root_queue_poke(dispatch_queue_global_t dq, int n, int floor);
void _dispatch_root_queue_wakeup(dispatch_queue_global_t dq, dispatch_qos_t qos,
dispatch_wakeup_flags_t flags);
void _dispatch_root_queue_push(dispatch_queue_global_t dq,
dispatch_object_t dou, dispatch_qos_t qos);
#if DISPATCH_USE_KEVENT_WORKQUEUE
void _dispatch_kevent_workqueue_init(void);
#endif
#if DISPATCH_USE_PTHREAD_ROOT_QUEUES
void _dispatch_pthread_root_queue_dispose(dispatch_lane_class_t dq,
bool *allow_free);
#endif // DISPATCH_USE_PTHREAD_ROOT_QUEUES
// --- Main / runloop / manager queue operations ---
void _dispatch_main_queue_push(dispatch_queue_main_t dq, dispatch_object_t dou,
dispatch_qos_t qos);
void _dispatch_main_queue_wakeup(dispatch_queue_main_t dq, dispatch_qos_t qos,
dispatch_wakeup_flags_t flags);
#if DISPATCH_COCOA_COMPAT
void _dispatch_runloop_queue_wakeup(dispatch_lane_t dq,
dispatch_qos_t qos, dispatch_wakeup_flags_t flags);
void _dispatch_runloop_queue_xref_dispose(dispatch_lane_t dq);
void _dispatch_runloop_queue_dispose(dispatch_lane_t dq, bool *allow_free);
#endif // DISPATCH_COCOA_COMPAT
void _dispatch_mgr_queue_push(dispatch_lane_t dq, dispatch_object_t dou,
dispatch_qos_t qos);
void _dispatch_mgr_queue_wakeup(dispatch_lane_t dq, dispatch_qos_t qos,
dispatch_wakeup_flags_t flags);
#if DISPATCH_USE_MGR_THREAD
void _dispatch_mgr_thread(dispatch_lane_t dq, dispatch_invoke_context_t dic,
dispatch_invoke_flags_t flags);
#endif
// --- Misc: apply, detached barriers, fork handling, debug ---
void _dispatch_apply_invoke(void *ctxt);
void _dispatch_apply_redirect_invoke(void *ctxt);
void _dispatch_barrier_async_detached_f(dispatch_queue_class_t dq, void *ctxt,
dispatch_function_t func);
#define DISPATCH_BARRIER_TRYSYNC_SUSPEND 0x1
void _dispatch_barrier_trysync_or_async_f(dispatch_lane_class_t dq, void *ctxt,
dispatch_function_t func, uint32_t flags);
void _dispatch_queue_atfork_child(void);
DISPATCH_COLD
size_t _dispatch_queue_debug(dispatch_queue_class_t dq,
char *buf, size_t bufsiz);
DISPATCH_COLD
size_t _dispatch_queue_debug_attr(dispatch_queue_t dq,
char *buf, size_t bufsiz);
// Root queue indexes: one (plain, overcommit) pair per QoS bucket, ordered
// from lowest to highest QoS. The enum's final count must equal
// DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QOS_NBUCKETS * 2).
#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QOS_NBUCKETS * 2)
enum {
DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS = 0,
DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT,
DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS,
DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT,
DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS,
DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT,
DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS,
DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT,
DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS,
DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT,
DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS,
DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT,
_DISPATCH_ROOT_QUEUE_IDX_COUNT,
};
// Queue serial numbers: values below DISPATCH_QUEUE_SERIAL_NUMBER_INIT are
// apparently reserved for well-known queues (the workloop fallback queue
// uses 16); dynamically created queues draw from the counter below.
#define DISPATCH_QUEUE_SERIAL_NUMBER_INIT 17
extern unsigned long volatile _dispatch_queue_serial_numbers;
#define DISPATCH_QUEUE_SERIAL_NUMBER_WLF 16
// Well-known global queues.
// Fix: the #if/#endif below were fused onto the ends of the extern lines;
// a preprocessor directive must begin its own line, so as written the
// header failed to preprocess and the conditional was never honored.
extern struct dispatch_queue_static_s _dispatch_mgr_q;
#if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES
extern struct dispatch_queue_global_s _dispatch_mgr_root_queue;
#endif
extern struct dispatch_queue_global_s _dispatch_root_queues[];
// Debug-only assertion that the caller runs on the manager queue.
#if DISPATCH_DEBUG
#define DISPATCH_ASSERT_ON_MANAGER_QUEUE() \
dispatch_assert_queue(_dispatch_mgr_q._as_dq)
#else
#define DISPATCH_ASSERT_ON_MANAGER_QUEUE()
#endif
#pragma mark -
#pragma mark dispatch_queue_attr_t
DISPATCH_CLASS_DECL(queue_attr, OBJECT);
// Queue attributes are interned: each distinct combination maps to one
// entry of the constant _dispatch_queue_attrs table below, so the object
// itself carries no fields beyond the object header.
struct dispatch_queue_attr_s {
OS_OBJECT_STRUCT_HEADER(dispatch_queue_attr);
};
// Unpacked view of an attribute: QoS, relative priority, overcommit and
// autorelease-frequency modes, concurrency, and initial (in)activity.
typedef struct dispatch_queue_attr_info_s {
dispatch_qos_t dqai_qos : 8;
int dqai_relpri : 8;
uint16_t dqai_overcommit:2;
uint16_t dqai_autorelease_frequency:2;
uint16_t dqai_concurrent:1;
uint16_t dqai_inactive:1;
} dispatch_queue_attr_info_t;
typedef enum {
_dispatch_queue_attr_overcommit_unspecified = 0,
_dispatch_queue_attr_overcommit_enabled,
_dispatch_queue_attr_overcommit_disabled,
} _dispatch_queue_attr_overcommit_t;
// Cardinality of each attribute dimension; their product sizes the interned
// attribute table.
#define DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT 3
#define DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY_COUNT 3
#define DISPATCH_QUEUE_ATTR_QOS_COUNT (DISPATCH_QOS_MAX + 1)
#define DISPATCH_QUEUE_ATTR_PRIO_COUNT (1 - QOS_MIN_RELATIVE_PRIORITY)
#define DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT 2
#define DISPATCH_QUEUE_ATTR_INACTIVE_COUNT 2
#define DISPATCH_QUEUE_ATTR_COUNT ( \
DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT * \
DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY_COUNT * \
DISPATCH_QUEUE_ATTR_QOS_COUNT * \
DISPATCH_QUEUE_ATTR_PRIO_COUNT * \
DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT * \
DISPATCH_QUEUE_ATTR_INACTIVE_COUNT )
extern const struct dispatch_queue_attr_s
_dispatch_queue_attrs[DISPATCH_QUEUE_ATTR_COUNT];
// Decode an interned attribute object back into its component fields.
dispatch_queue_attr_info_t _dispatch_queue_attr_to_info(dispatch_queue_attr_t);
#pragma mark -
#pragma mark dispatch_continuation_t
// Continuation header, in three ABI-dependent field orders. In every
// variant do_vtable unions with dc_flags (low bits of the vtable pointer
// double as flags) and do_next must alias dispatch_object_s's do_next (see
// the dispatch_assert_aliases checks after dispatch_continuation_s).
#if __LP64__
#define DISPATCH_CONTINUATION_HEADER(x) \
union { \
const void *do_vtable; \
uintptr_t dc_flags; \
}; \
union { \
pthread_priority_t dc_priority; \
int dc_cache_cnt; \
uintptr_t dc_pad; \
}; \
struct dispatch_##x##_s *volatile do_next; \
struct voucher_s *dc_voucher; \
dispatch_function_t dc_func; \
void *dc_ctxt; \
void *dc_data; \
void *dc_other
#elif OS_OBJECT_HAVE_OBJC1
// Legacy ObjC1: dc_func first so do_vtable lands at the required offset.
#define DISPATCH_CONTINUATION_HEADER(x) \
dispatch_function_t dc_func; \
union { \
pthread_priority_t dc_priority; \
int dc_cache_cnt; \
uintptr_t dc_pad; \
}; \
struct voucher_s *dc_voucher; \
union { \
const void *do_vtable; \
uintptr_t dc_flags; \
}; \
struct dispatch_##x##_s *volatile do_next; \
void *dc_ctxt; \
void *dc_data; \
void *dc_other
#else
#define DISPATCH_CONTINUATION_HEADER(x) \
union { \
const void *do_vtable; \
uintptr_t dc_flags; \
}; \
union { \
pthread_priority_t dc_priority; \
int dc_cache_cnt; \
uintptr_t dc_pad; \
}; \
struct voucher_s *dc_voucher; \
struct dispatch_##x##_s *volatile do_next; \
dispatch_function_t dc_func; \
void *dc_ctxt; \
void *dc_data; \
void *dc_other
#endif
// Continuations are 8 pointers; on SMP, rounded up to a full cacheline to
// avoid false sharing between adjacent cached continuations.
#define _DISPATCH_CONTINUATION_PTRS 8
#if DISPATCH_HW_CONFIG_UP
#define DISPATCH_CONTINUATION_SIZE \
(_DISPATCH_CONTINUATION_PTRS * DISPATCH_SIZEOF_PTR)
#else
#define DISPATCH_CONTINUATION_SIZE ROUND_UP_TO_CACHELINE_SIZE( \
(_DISPATCH_CONTINUATION_PTRS * DISPATCH_SIZEOF_PTR))
#endif
#define ROUND_UP_TO_CONTINUATION_SIZE(x) \
(((x) + (DISPATCH_CONTINUATION_SIZE - 1u)) & \
~(DISPATCH_CONTINUATION_SIZE - 1u))
// dc_flags bits, stored in the low bits of the do_vtable/dc_flags union.
#define DC_FLAG_SYNC_WAITER 0x001ul
#define DC_FLAG_BARRIER 0x002ul
#define DC_FLAG_CONSUME 0x004ul
#define DC_FLAG_GROUP_ASYNC 0x008ul
#define DC_FLAG_BLOCK 0x010ul
#define DC_FLAG_BLOCK_WITH_PRIVATE_DATA 0x020ul
#define DC_FLAG_FETCH_CONTEXT 0x040ul
#define DC_FLAG_ASYNC_AND_WAIT 0x080ul
#define DC_FLAG_ALLOCATED 0x100ul
#define DC_FLAG_NO_INTROSPECTION 0x200ul
#define DC_FLAG_CHANNEL_ITEM 0x400ul
typedef struct dispatch_continuation_s {
DISPATCH_CONTINUATION_HEADER(continuation);
} *dispatch_continuation_t;
// Continuations must be traversable as generic dispatch objects.
dispatch_assert_aliases(dispatch_continuation_s, dispatch_object_s, do_next);
dispatch_assert_aliases(dispatch_continuation_s, dispatch_object_s, do_vtable);
// Extended continuation used by sync waiters: carries the waiter's thread
// id, wake-up event, saved thread frame, and override bookkeeping.
// _as_dc[0] is a zero-length overlay for upcasting to a continuation.
typedef struct dispatch_sync_context_s {
struct dispatch_continuation_s _as_dc[0];
DISPATCH_CONTINUATION_HEADER(continuation);
dispatch_function_t dsc_func;
void *dsc_ctxt;
dispatch_thread_frame_s dsc_dtf;
dispatch_thread_event_s dsc_event;
dispatch_tid dsc_waiter;
uint8_t dsc_override_qos_floor;
uint8_t dsc_override_qos;
uint16_t dsc_autorelease : 2;
uint16_t dsc_wlh_was_first : 1;
uint16_t dsc_wlh_is_workloop : 1;
uint16_t dsc_waiter_needs_cancel : 1;
uint16_t dsc_release_storage : 1;
#if DISPATCH_INTROSPECTION
uint16_t dsc_from_async : 1;
#endif
} *dispatch_sync_context_t;
// Vtable layout for the internal continuation types enumerated below.
typedef struct dispatch_continuation_vtable_s {
_OS_OBJECT_CLASS_HEADER();
DISPATCH_OBJECT_VTABLE_HEADER(dispatch_continuation);
} const *dispatch_continuation_vtable_t;
// Per-thread continuation cache sizing; the smaller _MEMORYPRESSURE limit is
// applied under memory-pressure warnings (iOS devices use tighter bounds).
#ifndef DISPATCH_CONTINUATION_CACHE_LIMIT
#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
#define DISPATCH_CONTINUATION_CACHE_LIMIT 112 // one 256k heap for 64 threads
#define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYPRESSURE_PRESSURE_WARN 16
#else
#define DISPATCH_CONTINUATION_CACHE_LIMIT 1024
#define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYPRESSURE_PRESSURE_WARN 128
#endif
#endif
// Slow-path continuation allocation/free against the magazine heap.
dispatch_continuation_t _dispatch_continuation_alloc_from_heap(void);
void _dispatch_continuation_free_to_heap(dispatch_continuation_t c);
void _dispatch_continuation_pop(dispatch_object_t dou,
dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
dispatch_queue_class_t dqu);
// With a memory-pressure source the cache limit is adjusted at runtime;
// otherwise it is the compile-time constant and frees go straight to heap.
#if DISPATCH_USE_MEMORYPRESSURE_SOURCE
extern int _dispatch_continuation_cache_limit;
void _dispatch_continuation_free_to_cache_limit(dispatch_continuation_t c);
#else
#define _dispatch_continuation_cache_limit DISPATCH_CONTINUATION_CACHE_LIMIT
#define _dispatch_continuation_free_to_cache_limit(c) \
_dispatch_continuation_free_to_heap(c)
#endif
#pragma mark -
#pragma mark dispatch_continuation vtables
// Internal continuation type indexes into _dispatch_continuation_vtables;
// _DC_USER_TYPE (0) means a plain user continuation with no vtable entry.
enum {
_DC_USER_TYPE = 0,
DC_ASYNC_REDIRECT_TYPE,
DC_MACH_SEND_BARRRIER_DRAIN_TYPE,
DC_MACH_SEND_BARRIER_TYPE,
DC_MACH_RECV_BARRIER_TYPE,
DC_MACH_ASYNC_REPLY_TYPE,
#if HAVE_PTHREAD_WORKQUEUE_QOS
DC_WORKLOOP_STEALING_TYPE,
DC_OVERRIDE_STEALING_TYPE,
DC_OVERRIDE_OWNING_TYPE,
#endif
#if HAVE_MACH
DC_MACH_IPC_HANDOFF_TYPE,
#endif
_DC_MAX_TYPE,
};
// Returns the continuation's type code by reading it through the generic
// dispatch object view (dx_type on the object header).
DISPATCH_ALWAYS_INLINE
static inline unsigned long
dc_type(dispatch_continuation_t dc)
{
	struct dispatch_object_s *dou = (struct dispatch_object_s *)dc;
	return dx_type(dou);
}
// Table of vtables for the internal continuation types; DC_VTABLE(name)
// fetches an entry and DC_VTABLE_ENTRY(name, ...) defines one by index.
extern const struct dispatch_continuation_vtable_s
_dispatch_continuation_vtables[_DC_MAX_TYPE];
#define DC_VTABLE(name) (&_dispatch_continuation_vtables[DC_##name##_TYPE])
#define DC_VTABLE_ENTRY(name, ...) \
[DC_##name##_TYPE] = { \
.do_type = DISPATCH_CONTINUATION_TYPE(name), \
__VA_ARGS__ \
}
#pragma mark -
#pragma mark _dispatch_set_priority_and_voucher
// Slow paths for adopting a pthread priority and/or voucher on the current
// thread; without QoS support the mach-voucher variant is a no-op stub.
#if HAVE_PTHREAD_WORKQUEUE_QOS
void _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pri,
mach_voucher_t kv);
voucher_t _dispatch_set_priority_and_voucher_slow(pthread_priority_t pri,
voucher_t voucher, dispatch_thread_set_self_t flags);
#else
static inline void
_dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pri,
mach_voucher_t kv)
{
(void)pri; (void)kv;
}
#endif
#pragma mark -
#pragma mark dispatch_apply_t
// Shared state for dispatch_apply: da_index is the next iteration to claim,
// da_todo the number not yet completed. The position of da_dc depends on
// OS_OBJECT_HAVE_OBJC1 so that it lands at the dc_flags offset of a
// continuation (enforced by the static assert below, for leaks tooling).
struct dispatch_apply_s {
#if !OS_OBJECT_HAVE_OBJC1
dispatch_continuation_t da_dc;
#endif
size_t volatile da_index, da_todo;
size_t da_iterations;
#if OS_OBJECT_HAVE_OBJC1
dispatch_continuation_t da_dc;
#endif
size_t da_nested;
dispatch_thread_event_s da_event;
dispatch_invoke_flags_t da_flags;
int32_t da_thr_cnt;
};
dispatch_static_assert(offsetof(struct dispatch_continuation_s, dc_flags) ==
offsetof(struct dispatch_apply_s, da_dc),
"These fields must alias so that leaks instruments work");
typedef struct dispatch_apply_s *dispatch_apply_t;
#pragma mark -
#pragma mark dispatch_block_t
#ifdef __BLOCKS__
// dispatch_block_flags_t encoding: low byte holds API-visible flags, the
// top two bits record whether a voucher/priority was captured.
#define DISPATCH_BLOCK_API_MASK (0x100u - 1)
#define DISPATCH_BLOCK_HAS_VOUCHER (1u << 31)
#define DISPATCH_BLOCK_HAS_PRIORITY (1u << 30)
// Private data captured by dispatch_block_create: flags, captured
// priority/voucher, the wrapped block, and the group/queue/thread used to
// implement dispatch_block_wait / dispatch_block_cancel.
#define DISPATCH_BLOCK_PRIVATE_DATA_HEADER() \
unsigned long dbpd_magic; \
dispatch_block_flags_t dbpd_flags; \
unsigned int volatile dbpd_atomic_flags; \
int volatile dbpd_performed; \
pthread_priority_t dbpd_priority; \
voucher_t dbpd_voucher; \
dispatch_block_t dbpd_block; \
dispatch_group_t dbpd_group; \
dispatch_queue_t dbpd_queue; \
mach_port_t dbpd_thread;
#if !defined(__cplusplus)
struct dispatch_block_private_data_s {
DISPATCH_BLOCK_PRIVATE_DATA_HEADER();
};
#endif
typedef struct dispatch_block_private_data_s *dispatch_block_private_data_t;
// dbpd_atomic_flags bits.
#define DBF_CANCELED 1u // block has been cancelled
#define DBF_WAITING 2u // dispatch_block_wait has begun
#define DBF_WAITED 4u // dispatch_block_wait has finished without timeout
#define DBF_PERFORM 8u // dispatch_block_perform: don't group_leave
#define DISPATCH_BLOCK_PRIVATE_DATA_MAGIC 0xD159B10C // 0xDISPatch_BLOCk
// Stack initializer used by dispatch_block_perform (no heap allocation).
#define DISPATCH_BLOCK_PRIVATE_DATA_PERFORM_INITIALIZER(flags, block, voucher) \
{ \
.dbpd_magic = DISPATCH_BLOCK_PRIVATE_DATA_MAGIC, \
.dbpd_flags = (flags), \
.dbpd_atomic_flags = DBF_PERFORM, \
.dbpd_block = (block), \
.dbpd_voucher = (voucher), \
}
extern void (*const _dispatch_block_special_invoke)(void*);
dispatch_block_t _dispatch_block_create(dispatch_block_flags_t flags,
voucher_t voucher, pthread_priority_t priority, dispatch_block_t block);
void _dispatch_block_invoke_direct(const struct dispatch_block_private_data_s *dbcpd);
void _dispatch_block_sync_invoke(void *block);
void *_dispatch_continuation_get_function_symbol(dispatch_continuation_t dc);
dispatch_qos_t _dispatch_continuation_init_slow(dispatch_continuation_t dc,
dispatch_queue_class_t dqu, dispatch_block_flags_t flags);
#endif
#endif