// Internal header: dispatch queue implementation details (queue state bit
// layout, flags, continuation layout, and private function prototypes).
#ifndef __DISPATCH_QUEUE_INTERNAL__
#define __DISPATCH_QUEUE_INTERNAL__
// Must be included indirectly through the dispatch internal headers.
#ifndef __DISPATCH_INDIRECT__
#error "Please #include <dispatch/dispatch.h> instead of this file directly."
#include <dispatch/base.h> // for HeaderDoc
#endif
// Blocks runtime availability implies pthread root queue support.
#if defined(__BLOCKS__) && !defined(DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES)
#define DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES 1 // <rdar://problem/10719357>
#endif
// Cache line size assumed for padding/alignment below -- TODO confirm per-arch.
#define DISPATCH_CACHELINE_SIZE 64u
// Round (x) up to the next multiple of the cache line size.
// Relies on DISPATCH_CACHELINE_SIZE being a power of two.
#define ROUND_UP_TO_CACHELINE_SIZE(x) \
(((x) + (DISPATCH_CACHELINE_SIZE - 1u)) & \
~(DISPATCH_CACHELINE_SIZE - 1u))
// Align a declaration on a cache line boundary.
#define DISPATCH_CACHELINE_ALIGN \
__attribute__((__aligned__(DISPATCH_CACHELINE_SIZE)))
#pragma mark -
#pragma mark dispatch_queue_t
// Atomic per-queue flags stored in dq_atomic_flags. The low 16 bits hold
// boolean flags (DQF_* shared by queues, DSF_* specific to sources); the
// high 16 bits hold the queue width (DQF_WIDTH_MASK / DQF_WIDTH_SHIFT).
//
// Fix: a #define is a preprocessing directive and must begin its own source
// line. DSF_STATE_MASK was previously defined mid-line after DSF_DELETED,
// which swallows the directive into the macro argument list and breaks the
// build; it now sits on its own line like _DQF_AUTORELEASE_MASK and
// DQF_WIDTH_SHIFT. (Directives inside a macro invocation's arguments are
// technically undefined behavior, but this matches the file's existing
// convention on the surrounding lines.)
DISPATCH_ENUM(dispatch_queue_flags, uint32_t,
	DQF_NONE                = 0x0000,
	DQF_AUTORELEASE_ALWAYS  = 0x0001,
	DQF_AUTORELEASE_NEVER   = 0x0002,
#define _DQF_AUTORELEASE_MASK 0x0003
	DQF_THREAD_BOUND        = 0x0004,
	DQF_BARRIER_BIT         = 0x0008,
	DQF_TARGETED            = 0x0010,
	DQF_LABEL_NEEDS_FREE    = 0x0020,
	DQF_CANNOT_TRYSYNC      = 0x0040,
	DQF_RELEASED            = 0x0080,
	// Source-only state flags (DSF_*); the three below form DSF_STATE_MASK.
	DSF_CANCEL_WAITER       = 0x0800,
	DSF_CANCELED            = 0x1000,
	DSF_ARMED               = 0x2000,
	DSF_DEFERRED_DELETE     = 0x4000,
	DSF_DELETED             = 0x8000,
#define DSF_STATE_MASK (DSF_ARMED | DSF_DEFERRED_DELETE | DSF_DELETED)
	DQF_WIDTH_MASK          = 0xffff0000,
#define DQF_WIDTH_SHIFT 16
);
// Common field layout for every dispatch queue. Field order defines the
// in-memory ABI shared with the os_mpsc_queue machinery; do not reorder.
#define _DISPATCH_QUEUE_HEADER(x) \
struct os_mpsc_queue_s _as_oq[0]; \
DISPATCH_OBJECT_HEADER(x); \
_OS_MPSC_QUEUE_FIELDS(dq, dq_state); \
dispatch_queue_t dq_specific_q; \
union { \
uint32_t volatile dq_atomic_flags; \
DISPATCH_STRUCT_LITTLE_ENDIAN_2( \
uint16_t dq_atomic_bits, \
uint16_t dq_width \
); \
}; \
uint32_t dq_side_suspend_cnt; \
DISPATCH_INTROSPECTION_QUEUE_HEADER; \
dispatch_unfair_lock_s dq_sidelock
// Queue header for subclasses: adds the zero-length _as_dq alias so a
// subclass pointer can be viewed as a struct dispatch_queue_s *.
#define DISPATCH_QUEUE_HEADER(x) \
struct dispatch_queue_s _as_dq[0]; \
_DISPATCH_QUEUE_HEADER(x)
#define DISPATCH_QUEUE_ALIGN __attribute__((aligned(8)))
// Width value reserved for (root) pool queues; max configurable width is
// one less.
#define DISPATCH_QUEUE_WIDTH_POOL 0x7fff
#define DISPATCH_QUEUE_WIDTH_MAX 0x7ffe
// True for concurrent (width > 1) non-pool queues, which redirect work to
// their target queue. Statement expression evaluates `width` exactly once.
#define DISPATCH_QUEUE_USES_REDIRECTION(width) \
({ uint16_t _width = (width); \
_width > 1 && _width < DISPATCH_QUEUE_WIDTH_POOL; })
// Trailing pad bringing struct dispatch_queue_s up to a cache line multiple.
#define DISPATCH_QUEUE_CACHELINE_PADDING \
char _dq_pad[DISPATCH_QUEUE_CACHELINE_PAD]
// Per-ABI pad size so sizeof(struct dispatch_queue_s) rounds to a cache
// line. The "+ DISPATCH_CACHELINE_SIZE" keeps the modulus operand positive.
// NOTE(review): the __LP64__ branch uses sizeof(uint32_t) while the 32-bit
// branches use 11/12 pointers -- presumably tuned to each ABI's header
// size; verify against the actual struct layout if fields change.
#ifdef __LP64__
#define DISPATCH_QUEUE_CACHELINE_PAD (( \
(sizeof(uint32_t) - DISPATCH_INTROSPECTION_QUEUE_HEADER_SIZE) \
+ DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE)
#elif OS_OBJECT_HAVE_OBJC1
#define DISPATCH_QUEUE_CACHELINE_PAD (( \
(11*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_HEADER_SIZE) \
+ DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE)
#else
#define DISPATCH_QUEUE_CACHELINE_PAD (( \
(12*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_HEADER_SIZE) \
+ DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE)
#endif
// Layout of the 64-bit dq_state word (bit positions derived from the
// constants below):
//   bits 54-63  suspend count (SUSPEND_BITS_MASK; one suspension is
//               SUSPEND_INTERVAL, i.e. bit 57, so the count saturates at
//               SUSPEND_HALF before spilling into dq_side_suspend_cnt)
//   bit  56     has side suspend count
//   bit  55     inactive
//   bit  54     needs activation
//   bit  53     in barrier
//   bit  52     width "full" bit
//   bits 37-51  available width (15 bits; WIDTH_INTERVAL is bit 37)
//   bit  36     pending barrier
//   bit  35     dirty
//   bit  34     has override
//   bit  33     drain pended
//   bit  32     enqueued on target queue
//   bits 0-31   drain lock (owner + lock bits, see DLOCK_* elsewhere)
#define DISPATCH_QUEUE_SUSPEND_INTERVAL 0x0200000000000000ull
#define DISPATCH_QUEUE_SUSPEND_HALF 0x40u
#define DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT 0x0100000000000000ull
#define DISPATCH_QUEUE_INACTIVE 0x0080000000000000ull
#define DISPATCH_QUEUE_NEEDS_ACTIVATION 0x0040000000000000ull
#define DISPATCH_QUEUE_SUSPEND_BITS_MASK 0xffc0000000000000ull
#define DISPATCH_QUEUE_IN_BARRIER 0x0020000000000000ull
#define DISPATCH_QUEUE_WIDTH_FULL_BIT 0x0010000000000000ull
#define DISPATCH_QUEUE_WIDTH_FULL 0x8000ull
#define DISPATCH_QUEUE_WIDTH_INTERVAL 0x0000002000000000ull
#define DISPATCH_QUEUE_WIDTH_MASK 0x001fffe000000000ull
#define DISPATCH_QUEUE_WIDTH_SHIFT 37
#define DISPATCH_QUEUE_PENDING_BARRIER 0x0000001000000000ull
#define DISPATCH_QUEUE_DIRTY 0x0000000800000000ull
#define DISPATCH_QUEUE_HAS_OVERRIDE 0x0000000400000000ull
#define DISPATCH_QUEUE_DRAIN_PENDED 0x0000000200000000ull
#define DISPATCH_QUEUE_ENQUEUED 0x0000000100000000ull
#define DISPATCH_QUEUE_DRAIN_UNLOCK_MASK 0x00000002ffffffffull
// Drain-lock helpers; the two variants mirror whether the platform lock
// encodes "no waiters" (DLOCK_NOWAITERS_BIT) or "waiters" (DLOCK_WAITERS_BIT).
#ifdef DLOCK_NOWAITERS_BIT
#define DISPATCH_QUEUE_DRAIN_OWNER_MASK \
((uint64_t)(DLOCK_OWNER_MASK | DLOCK_NOFAILED_TRYLOCK_BIT))
#define DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(v) \
(((v) & ~(DISPATCH_QUEUE_DRAIN_PENDED|DISPATCH_QUEUE_DRAIN_OWNER_MASK))\
^ DLOCK_NOWAITERS_BIT)
#define DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK \
(DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_HAS_OVERRIDE | \
DLOCK_NOWAITERS_BIT)
#else
#define DISPATCH_QUEUE_DRAIN_OWNER_MASK \
((uint64_t)(DLOCK_OWNER_MASK | DLOCK_FAILED_TRYLOCK_BIT))
#define DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(v) \
((v) & ~(DISPATCH_QUEUE_DRAIN_PENDED|DISPATCH_QUEUE_DRAIN_OWNER_MASK))
#define DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK \
(DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_HAS_OVERRIDE | \
DLOCK_WAITERS_BIT)
#endif
// Initial dq_state: full width minus the configured width, pre-shifted.
#define DISPATCH_QUEUE_STATE_INIT_VALUE(width) \
((DISPATCH_QUEUE_WIDTH_FULL - (width)) << DISPATCH_QUEUE_WIDTH_SHIFT)
// Root queues start "full + in barrier" so they are never drained directly.
#define DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE \
(DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER)
// State owned by a serial drain: the barrier bit plus one width slot.
#define DISPATCH_QUEUE_SERIAL_DRAIN_OWNED \
(DISPATCH_QUEUE_IN_BARRIER | DISPATCH_QUEUE_WIDTH_INTERVAL)
DISPATCH_CLASS_DECL(queue);
// The concrete queue struct; skipped under C++ introspection builds where
// an alternate definition is presumably provided elsewhere.
#if !(defined(__cplusplus) && DISPATCH_INTROSPECTION)
struct dispatch_queue_s {
_DISPATCH_QUEUE_HEADER(queue);
DISPATCH_QUEUE_CACHELINE_PADDING; } DISPATCH_QUEUE_ALIGN;
#endif // !(defined(__cplusplus) && DISPATCH_INTROSPECTION)
// Internal queue subclasses sharing the dispatch_queue_s layout.
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_serial, queue);
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_concurrent, queue);
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_main, queue);
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_root, queue);
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_runloop, queue);
DISPATCH_INTERNAL_SUBCLASS_DECL(queue_mgr, queue);
// Hidden queue type backing dispatch_queue_set_specific() storage.
OS_OBJECT_INTERNAL_CLASS_DECL(dispatch_queue_specific_queue, dispatch_queue,
DISPATCH_OBJECT_VTABLE_HEADER(dispatch_queue_specific_queue));
// Transparent union accepted wherever "any queue-like object" is expected,
// so internal functions can take any of these pointer types unchanged.
typedef union {
struct os_mpsc_queue_s *_oq;
struct dispatch_queue_s *_dq;
struct dispatch_source_s *_ds;
struct dispatch_mach_s *_dm;
struct dispatch_queue_specific_queue_s *_dqsq;
struct dispatch_timer_aggregate_s *_dta;
#if USE_OBJC
// ObjC-typed aliases of the same pointers.
// NOTE(review): "_ojbc_oq" spelling kept as-is; renaming the member would
// break any existing references to it.
os_mpsc_queue_t _ojbc_oq;
dispatch_queue_t _objc_dq;
dispatch_source_t _objc_ds;
dispatch_mach_t _objc_dm;
dispatch_queue_specific_queue_t _objc_dqsq;
dispatch_timer_aggregate_t _objc_dta;
#endif
} dispatch_queue_class_t __attribute__((__transparent_union__));
// Intrusive per-thread context stack node, keyed by dtc_key; dtc_prev links
// to the next-outer context.
typedef struct dispatch_thread_context_s *dispatch_thread_context_t;
typedef struct dispatch_thread_context_s {
dispatch_thread_context_t dtc_prev;
const void *dtc_key;
union {
size_t dtc_apply_nesting;
dispatch_io_t dtc_io_in_barrier;
};
} dispatch_thread_context_s;
// Per-thread frame recording which queue is currently being drained;
// dtf_deferred holds a work item whose invocation was deferred.
typedef struct dispatch_thread_frame_s *dispatch_thread_frame_t;
typedef struct dispatch_thread_frame_s {
dispatch_queue_t dtf_queue;
dispatch_thread_frame_t dtf_prev;
struct dispatch_object_s *dtf_deferred;
} dispatch_thread_frame_s;
// Where a wakeup should be directed: nowhere, the target queue, or the
// manager queue.
DISPATCH_ENUM(dispatch_queue_wakeup_target, long,
DISPATCH_QUEUE_WAKEUP_NONE = 0,
DISPATCH_QUEUE_WAKEUP_TARGET,
DISPATCH_QUEUE_WAKEUP_MGR,
);
// Private queue entry points; implementations live in the corresponding .c
// files. Comments below state only what the signatures establish.
void _dispatch_queue_class_override_drainer(dispatch_queue_t dqu,
pthread_priority_t pp, dispatch_wakeup_flags_t flags);
void _dispatch_queue_class_wakeup(dispatch_queue_t dqu, pthread_priority_t pp,
dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target);
// Lifecycle.
void _dispatch_queue_destroy(dispatch_queue_t dq);
void _dispatch_queue_dispose(dispatch_queue_t dq);
void _dispatch_queue_set_target_queue(dispatch_queue_t dq, dispatch_queue_t tq);
void _dispatch_queue_suspend(dispatch_queue_t dq);
void _dispatch_queue_resume(dispatch_queue_t dq, bool activate);
void _dispatch_queue_finalize_activation(dispatch_queue_t dq);
// Invocation / push / wakeup.
void _dispatch_queue_invoke(dispatch_queue_t dq, dispatch_invoke_flags_t flags);
void _dispatch_queue_push_list_slow(dispatch_queue_t dq, unsigned int n);
void _dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t dou,
pthread_priority_t pp);
void _dispatch_try_lock_transfer_or_wakeup(dispatch_queue_t dq);
void _dispatch_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
dispatch_wakeup_flags_t flags);
// Drain a serial queue; *owned is the dq_state the caller owns, *dc_ptr
// receives a deferred item (see _dispatch_queue_drain_deferred_invoke).
dispatch_queue_t _dispatch_queue_serial_drain(dispatch_queue_t dq,
dispatch_invoke_flags_t flags, uint64_t *owned,
struct dispatch_object_s **dc_ptr);
void _dispatch_queue_drain_deferred_invoke(dispatch_queue_t dq,
dispatch_invoke_flags_t flags, uint64_t to_unlock,
struct dispatch_object_s *dc);
void _dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t
dqsq);
// Root / main / runloop / mgr queue variants.
void _dispatch_root_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
dispatch_wakeup_flags_t flags);
void _dispatch_root_queue_drain_deferred_item(dispatch_queue_t dq,
struct dispatch_object_s *dou, pthread_priority_t pp);
void _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq);
void _dispatch_main_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
dispatch_wakeup_flags_t flags);
void _dispatch_runloop_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp,
dispatch_wakeup_flags_t flags);
void _dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq);
void _dispatch_runloop_queue_dispose(dispatch_queue_t dq);
void _dispatch_mgr_queue_drain(void);
// No-op stubs when the corresponding subsystem is compiled out.
#if DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES
void _dispatch_mgr_priority_init(void);
#else
static inline void _dispatch_mgr_priority_init(void) {}
#endif
#if DISPATCH_USE_KEVENT_WORKQUEUE
void _dispatch_kevent_workqueue_init(void);
#else
static inline void _dispatch_kevent_workqueue_init(void) {}
#endif
// Continuation invoke helpers taking an opaque context.
void _dispatch_sync_recurse_invoke(void *ctxt);
void _dispatch_apply_invoke(void *ctxt);
void _dispatch_apply_redirect_invoke(void *ctxt);
void _dispatch_barrier_async_detached_f(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func);
void _dispatch_barrier_trysync_or_async_f(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func);
// Debug formatting; stubbed out in non-debug builds.
#if DISPATCH_DEBUG
void dispatch_debug_queue(dispatch_queue_t dq, const char* str);
#else
static inline void dispatch_debug_queue(dispatch_queue_t dq DISPATCH_UNUSED,
const char* str DISPATCH_UNUSED) {}
#endif
size_t dispatch_queue_debug(dispatch_queue_t dq, char* buf, size_t bufsiz);
size_t _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf,
size_t bufsiz);
// Six QoS classes, each with a plain and an overcommit root queue, giving
// the 12 entries enumerated below (_DISPATCH_ROOT_QUEUE_IDX_COUNT == 12 ==
// DISPATCH_ROOT_QUEUE_COUNT).
#define DISPATCH_QUEUE_QOS_COUNT 6
#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QUEUE_QOS_COUNT * 2)
// Indices into _dispatch_root_queues[], ordered lowest to highest QoS;
// overcommit variant is always plain index + 1.
enum {
DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS = 0,
DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT,
DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS,
DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT,
DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS,
DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT,
DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS,
DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT,
DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS,
DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT,
DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS,
DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT,
_DISPATCH_ROOT_QUEUE_IDX_COUNT,
};
// Globals defined in the implementation files.
extern unsigned long volatile _dispatch_queue_serial_numbers;
extern struct dispatch_queue_s _dispatch_root_queues[];
extern struct dispatch_queue_s _dispatch_mgr_q;
void _dispatch_root_queues_init(void);
#if HAVE_PTHREAD_WORKQUEUE_QOS
extern pthread_priority_t _dispatch_background_priority;
extern pthread_priority_t _dispatch_user_initiated_priority;
#endif
// Compact QoS class representation used in dispatch_queue_attr_s.
typedef uint8_t _dispatch_qos_class_t;
#pragma mark -
#pragma mark dispatch_queue_attr_t
// Tri-state overcommit setting carried by a queue attribute.
typedef enum {
_dispatch_queue_attr_overcommit_unspecified = 0,
_dispatch_queue_attr_overcommit_enabled,
_dispatch_queue_attr_overcommit_disabled,
} _dispatch_queue_attr_overcommit_t;
DISPATCH_CLASS_DECL(queue_attr);
// Immutable queue attribute object; instances live in the static
// _dispatch_queue_attrs table below, one per combination of fields.
struct dispatch_queue_attr_s {
OS_OBJECT_STRUCT_HEADER(dispatch_queue_attr);
_dispatch_qos_class_t dqa_qos_class;
int8_t dqa_relative_priority;
uint16_t dqa_overcommit:2;
uint16_t dqa_autorelease_frequency:2;
uint16_t dqa_concurrent:1;
uint16_t dqa_inactive:1;
};
// Index enums for each dimension of the attribute table; each *_COUNT must
// match the number of enumerators above it.
enum {
DQA_INDEX_UNSPECIFIED_OVERCOMMIT = 0,
DQA_INDEX_NON_OVERCOMMIT,
DQA_INDEX_OVERCOMMIT,
};
#define DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT 3
enum {
DQA_INDEX_AUTORELEASE_FREQUENCY_INHERIT =
DISPATCH_AUTORELEASE_FREQUENCY_INHERIT,
DQA_INDEX_AUTORELEASE_FREQUENCY_WORK_ITEM =
DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM,
DQA_INDEX_AUTORELEASE_FREQUENCY_NEVER =
DISPATCH_AUTORELEASE_FREQUENCY_NEVER,
};
#define DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY_COUNT 3
enum {
DQA_INDEX_CONCURRENT = 0,
DQA_INDEX_SERIAL,
};
#define DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT 2
enum {
DQA_INDEX_ACTIVE = 0,
DQA_INDEX_INACTIVE,
};
#define DISPATCH_QUEUE_ATTR_INACTIVE_COUNT 2
typedef enum {
DQA_INDEX_QOS_CLASS_UNSPECIFIED = 0,
DQA_INDEX_QOS_CLASS_MAINTENANCE,
DQA_INDEX_QOS_CLASS_BACKGROUND,
DQA_INDEX_QOS_CLASS_UTILITY,
DQA_INDEX_QOS_CLASS_DEFAULT,
DQA_INDEX_QOS_CLASS_USER_INITIATED,
DQA_INDEX_QOS_CLASS_USER_INTERACTIVE,
} _dispatch_queue_attr_index_qos_class_t;
// Relative priorities span QOS_MIN_RELATIVE_PRIORITY..0 inclusive.
#define DISPATCH_QUEUE_ATTR_PRIO_COUNT (1 - QOS_MIN_RELATIVE_PRIORITY)
// Static table of every possible attribute combination, indexed by the
// dimensions above (QoS class first, via the extern's first dimension).
extern const struct dispatch_queue_attr_s _dispatch_queue_attrs[]
[DISPATCH_QUEUE_ATTR_PRIO_COUNT]
[DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT]
[DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY_COUNT]
[DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT]
[DISPATCH_QUEUE_ATTR_INACTIVE_COUNT];
dispatch_queue_attr_t _dispatch_get_default_queue_attr(void);
#pragma mark -
#pragma mark dispatch_continuation_t
// Continuation field layout. Three ABI variants follow; field ORDER differs
// per variant (do not reorder). In each, do_vtable overlays dc_flags
// (pointer-aligned vtables leave low bits free for DISPATCH_OBJ_* flags),
// and dc_priority/dc_cache_cnt/dc_pad share storage.
#if __LP64__
// 64-bit: vtable/flags word first.
#define DISPATCH_CONTINUATION_HEADER(x) \
union { \
const void *do_vtable; \
uintptr_t dc_flags; \
}; \
union { \
pthread_priority_t dc_priority; \
int dc_cache_cnt; \
uintptr_t dc_pad; \
}; \
struct dispatch_##x##_s *volatile do_next; \
struct voucher_s *dc_voucher; \
dispatch_function_t dc_func; \
void *dc_ctxt; \
void *dc_data; \
void *dc_other
#define _DISPATCH_SIZEOF_PTR 8
#elif OS_OBJECT_HAVE_OBJC1
// 32-bit legacy ObjC1 runtime: dc_func leads instead of the vtable.
#define DISPATCH_CONTINUATION_HEADER(x) \
dispatch_function_t dc_func; \
union { \
pthread_priority_t dc_priority; \
int dc_cache_cnt; \
uintptr_t dc_pad; \
}; \
struct voucher_s *dc_voucher; \
union { \
const void *do_vtable; \
uintptr_t dc_flags; \
}; \
struct dispatch_##x##_s *volatile do_next; \
void *dc_ctxt; \
void *dc_data; \
void *dc_other
#define _DISPATCH_SIZEOF_PTR 4
#else
// 32-bit modern runtime.
#define DISPATCH_CONTINUATION_HEADER(x) \
union { \
const void *do_vtable; \
uintptr_t dc_flags; \
}; \
union { \
pthread_priority_t dc_priority; \
int dc_cache_cnt; \
uintptr_t dc_pad; \
}; \
struct voucher_s *dc_voucher; \
struct dispatch_##x##_s *volatile do_next; \
dispatch_function_t dc_func; \
void *dc_ctxt; \
void *dc_data; \
void *dc_other
#define _DISPATCH_SIZEOF_PTR 4
#endif
// A continuation occupies 8 pointer-sized slots; on SMP it is additionally
// rounded to a cache line to avoid false sharing between CPUs.
#define _DISPATCH_CONTINUATION_PTRS 8
#if DISPATCH_HW_CONFIG_UP
#define DISPATCH_CONTINUATION_SIZE \
(_DISPATCH_CONTINUATION_PTRS * _DISPATCH_SIZEOF_PTR)
#else
#define DISPATCH_CONTINUATION_SIZE ROUND_UP_TO_CACHELINE_SIZE( \
(_DISPATCH_CONTINUATION_PTRS * _DISPATCH_SIZEOF_PTR))
#endif
#define ROUND_UP_TO_CONTINUATION_SIZE(x) \
(((x) + (DISPATCH_CONTINUATION_SIZE - 1u)) & \
~(DISPATCH_CONTINUATION_SIZE - 1u))
// Flag bits stored in the low bits of dc_flags (free because do_vtable is
// pointer-aligned; see DISPATCH_CONTINUATION_HEADER above).
#define DISPATCH_OBJ_SYNC_SLOW_BIT 0x001ul
#define DISPATCH_OBJ_BARRIER_BIT 0x002ul
#define DISPATCH_OBJ_CONSUME_BIT 0x004ul
#define DISPATCH_OBJ_GROUP_BIT 0x008ul
#define DISPATCH_OBJ_BLOCK_BIT 0x010ul
#define DISPATCH_OBJ_BLOCK_PRIVATE_DATA_BIT 0x020ul
#define DISPATCH_OBJ_CTXT_FETCH_BIT 0x040ul
#define DISPATCH_OBJ_ENFORCE_VOUCHER 0x080ul
// A continuation; _as_do lets it be viewed as a generic dispatch object.
struct dispatch_continuation_s {
struct dispatch_object_s _as_do[0];
DISPATCH_CONTINUATION_HEADER(continuation);
};
typedef struct dispatch_continuation_s *dispatch_continuation_t;
typedef struct dispatch_continuation_vtable_s {
_OS_OBJECT_CLASS_HEADER();
DISPATCH_INVOKABLE_VTABLE_HEADER(dispatch_continuation);
} *dispatch_continuation_vtable_t;
// Per-thread continuation cache limits (smaller on embedded targets).
#ifndef DISPATCH_CONTINUATION_CACHE_LIMIT
#if TARGET_OS_EMBEDDED
#define DISPATCH_CONTINUATION_CACHE_LIMIT 112 // one 256k heap for 64 threads
#define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYPRESSURE_PRESSURE_WARN 16
#else
#define DISPATCH_CONTINUATION_CACHE_LIMIT 1024
#define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYPRESSURE_PRESSURE_WARN 128
#endif
#endif
// Continuation alloc/free and invocation entry points.
dispatch_continuation_t _dispatch_continuation_alloc_from_heap(void);
void _dispatch_continuation_free_to_heap(dispatch_continuation_t c);
void _dispatch_continuation_async(dispatch_queue_t dq,
dispatch_continuation_t dc);
void _dispatch_continuation_pop(dispatch_object_t dou, dispatch_queue_t dq,
dispatch_invoke_flags_t flags);
void _dispatch_continuation_invoke(dispatch_object_t dou,
voucher_t override_voucher, dispatch_invoke_flags_t flags);
// Under memory pressure the cache limit is dynamic; otherwise it degrades
// to the static limit and a plain heap free.
#if DISPATCH_USE_MEMORYPRESSURE_SOURCE
extern int _dispatch_continuation_cache_limit;
void _dispatch_continuation_free_to_cache_limit(dispatch_continuation_t c);
#else
#define _dispatch_continuation_cache_limit DISPATCH_CONTINUATION_CACHE_LIMIT
#define _dispatch_continuation_free_to_cache_limit(c) \
_dispatch_continuation_free_to_heap(c)
#endif
#pragma mark -
#pragma mark dispatch_continuation vtables
// Indices into _dispatch_continuation_vtables[]; _DC_USER_TYPE (0) marks a
// plain user continuation with no vtable entry.
// NOTE(review): "BARRRIER" (triple R) spelling kept as-is -- renaming would
// break other references using the same spelling.
enum {
_DC_USER_TYPE = 0,
DC_ASYNC_REDIRECT_TYPE,
DC_MACH_SEND_BARRRIER_DRAIN_TYPE,
DC_MACH_SEND_BARRIER_TYPE,
DC_MACH_RECV_BARRIER_TYPE,
#if HAVE_PTHREAD_WORKQUEUE_QOS
DC_OVERRIDE_STEALING_TYPE,
DC_OVERRIDE_OWNING_TYPE,
#endif
_DC_MAX_TYPE,
};
// Read a continuation's type via its generic dispatch object view.
DISPATCH_ALWAYS_INLINE
static inline unsigned long
dc_type(dispatch_continuation_t dc)
{
return dx_type(dc->_as_do);
}
// Read a continuation's subtype via its generic dispatch object view.
DISPATCH_ALWAYS_INLINE
static inline unsigned long
dc_subtype(dispatch_continuation_t dc)
{
return dx_subtype(dc->_as_do);
}
extern const struct dispatch_continuation_vtable_s
_dispatch_continuation_vtables[_DC_MAX_TYPE];
void
_dispatch_async_redirect_invoke(dispatch_continuation_t dc,
dispatch_invoke_flags_t flags);
#if HAVE_PTHREAD_WORKQUEUE_QOS
void
_dispatch_queue_override_invoke(dispatch_continuation_t dc,
dispatch_invoke_flags_t flags);
#endif
// Convenience accessors for vtable lookup and designated-initializer
// construction of vtable entries.
#define DC_VTABLE(name) (&_dispatch_continuation_vtables[DC_##name##_TYPE])
#define DC_VTABLE_ENTRY(name, ...) \
[DC_##name##_TYPE] = { \
.do_type = DISPATCH_CONTINUATION_TYPE(name), \
__VA_ARGS__ \
}
#pragma mark -
#pragma mark _dispatch_set_priority_and_voucher
// Slow paths for adopting a priority and/or voucher on the current thread.
#if HAVE_PTHREAD_WORKQUEUE_QOS
void _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pri,
mach_voucher_t kv);
voucher_t _dispatch_set_priority_and_voucher_slow(pthread_priority_t pri,
voucher_t voucher, _dispatch_thread_set_self_t flags);
#endif
#pragma mark -
#pragma mark dispatch_apply_t
// Shared state for one dispatch_apply() operation; da_index/da_todo are
// updated atomically by the worker threads (da_thr_cnt of them).
struct dispatch_apply_s {
size_t volatile da_index, da_todo;
size_t da_iterations, da_nested;
dispatch_continuation_t da_dc;
dispatch_thread_event_s da_event;
dispatch_invoke_flags_t da_flags;
uint32_t da_thr_cnt;
};
typedef struct dispatch_apply_s *dispatch_apply_t;
#pragma mark -
#pragma mark dispatch_block_t
#ifdef __BLOCKS__
// dispatch_block_flags_t encoding: low 7 bits are API flags; the top two
// bits record whether a voucher/priority was captured at creation.
#define DISPATCH_BLOCK_API_MASK (0x80u - 1)
#define DISPATCH_BLOCK_HAS_VOUCHER (1u << 31)
#define DISPATCH_BLOCK_HAS_PRIORITY (1u << 30)
// Private state carried alongside a block created with
// dispatch_block_create(); dbpd_magic guards against misuse.
#define DISPATCH_BLOCK_PRIVATE_DATA_HEADER() \
unsigned long dbpd_magic; \
dispatch_block_flags_t dbpd_flags; \
unsigned int volatile dbpd_atomic_flags; \
int volatile dbpd_performed; \
pthread_priority_t dbpd_priority; \
voucher_t dbpd_voucher; \
dispatch_block_t dbpd_block; \
dispatch_group_t dbpd_group; \
os_mpsc_queue_t volatile dbpd_queue; \
mach_port_t dbpd_thread;
#if !defined(__cplusplus)
struct dispatch_block_private_data_s {
DISPATCH_BLOCK_PRIVATE_DATA_HEADER();
};
#endif
typedef struct dispatch_block_private_data_s *dispatch_block_private_data_t;
// dbpd_atomic_flags bits.
#define DBF_CANCELED 1u // block has been cancelled
#define DBF_WAITING 2u // dispatch_block_wait has begun
#define DBF_WAITED 4u // dispatch_block_wait has finished without timeout
#define DBF_PERFORM 8u // dispatch_block_perform: don't group_leave
#define DISPATCH_BLOCK_PRIVATE_DATA_MAGIC 0xD159B10C // 0xDISPatch_BLOCk
// Stack initializer used by dispatch_block_perform(); unset fields zero.
#define DISPATCH_BLOCK_PRIVATE_DATA_PERFORM_INITIALIZER(flags, block) \
{ \
.dbpd_magic = DISPATCH_BLOCK_PRIVATE_DATA_MAGIC, \
.dbpd_flags = (flags), \
.dbpd_atomic_flags = DBF_PERFORM, \
.dbpd_block = (block), \
}
// Block creation/invocation internals.
dispatch_block_t _dispatch_block_create(dispatch_block_flags_t flags,
voucher_t voucher, pthread_priority_t priority, dispatch_block_t block);
void _dispatch_block_invoke_direct(const struct dispatch_block_private_data_s *dbcpd);
void _dispatch_block_sync_invoke(void *block);
void _dispatch_continuation_init_slow(dispatch_continuation_t dc,
dispatch_queue_class_t dqu, dispatch_block_flags_t flags);
void _dispatch_continuation_update_bits(dispatch_continuation_t dc,
uintptr_t dc_flags);
// trysync variants return false when the synchronous fast path could not
// be taken.
bool _dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func);
DISPATCH_EXPORT DISPATCH_NOTHROW
bool _dispatch_trysync_f(dispatch_queue_t dq, void *ctxt,
dispatch_function_t f);
#endif
// Callbacks invoked around work execution on a pthread root queue.
typedef struct dispatch_pthread_root_queue_observer_hooks_s {
void (*queue_will_execute)(dispatch_queue_t queue);
void (*queue_did_execute)(dispatch_queue_t queue);
} dispatch_pthread_root_queue_observer_hooks_s;
typedef dispatch_pthread_root_queue_observer_hooks_s
*dispatch_pthread_root_queue_observer_hooks_t;
// SPI exported for IOHID only (Apple platforms).
#ifdef __APPLE__
#define DISPATCH_IOHID_SPI 1
DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
DISPATCH_NOTHROW DISPATCH_NONNULL4
dispatch_queue_t
_dispatch_pthread_root_queue_create_with_observer_hooks_4IOHID(
const char *label, unsigned long flags, const pthread_attr_t *attr,
dispatch_pthread_root_queue_observer_hooks_t observer_hooks,
dispatch_block_t configure);
DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW
bool
_dispatch_queue_is_exclusively_owned_by_current_thread_4IOHID(
dispatch_queue_t queue);
#endif // __APPLE__
#endif