#include "internal.h"
#include "allocator_internal.h"
#if DISPATCH_ALLOCATOR
#ifndef VM_MEMORY_LIBDISPATCH
#define VM_MEMORY_LIBDISPATCH 74
#endif
// Root of the allocator's heap list; lazily created by
// _dispatch_alloc_try_create_heap() and extended via header.dh_next when a
// heap is exhausted (see _dispatch_alloc_continuation_from_heap_slow).
static dispatch_heap_t _dispatch_main_heap;
// Record, in the current CPU's magazine header, the page bitmap where an
// allocation most recently succeeded; the allocator probes that page first
// on the next allocation (see _dispatch_alloc_continuation_alloc).
DISPATCH_ALWAYS_INLINE
static void
set_last_found_page(bitmap_t *val)
{
	dispatch_assert(_dispatch_main_heap);
	unsigned int this_cpu = _dispatch_cpu_number();
	_dispatch_main_heap[this_cpu].header.last_found_page = val;
}
// Fetch the per-CPU "last successful page" hint stored by
// set_last_found_page(); may be NULL before the first successful allocation.
DISPATCH_ALWAYS_INLINE
static bitmap_t *
last_found_page(void)
{
	dispatch_assert(_dispatch_main_heap);
	return _dispatch_main_heap[_dispatch_cpu_number()].header.last_found_page;
}
#pragma mark -
#pragma mark dispatch_alloc_bitmaps
// Address of the supermap'th supermap word within a magazine.
DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST
static bitmap_t *
supermap_address(struct dispatch_magazine_s *magazine, unsigned int supermap)
{
	return magazine->supermaps + supermap;
}
// Address of the bitmap at (supermap, map) within a magazine's maps array.
DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST
static bitmap_t *
bitmap_address(struct dispatch_magazine_s *magazine, unsigned int supermap,
		unsigned int map)
{
	return magazine->maps[supermap] + map;
}
// Translate (supermap, map, index) coordinates into the address of the
// backing continuation slot inside the magazine's conts[][][] storage.
DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST
static dispatch_continuation_t
continuation_address(struct dispatch_magazine_s *magazine,
		unsigned int supermap, unsigned int map, unsigned int index)
{
#if DISPATCH_DEBUG
	// Debug builds range-check each coordinate before indexing.
	dispatch_assert(supermap < SUPERMAPS_PER_MAGAZINE);
	dispatch_assert(map < BITMAPS_PER_SUPERMAP);
	dispatch_assert(index < CONTINUATIONS_PER_BITMAP);
#endif
	return (dispatch_continuation_t)&magazine->conts[supermap][map][index];
}
// Round a continuation's address down to the base of its enclosing magazine
// (magazines are aligned such that masking with MAGAZINE_MASK suffices).
DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST
static struct dispatch_magazine_s *
magazine_for_continuation(dispatch_continuation_t c)
{
	uintptr_t magazine_base = (uintptr_t)c & MAGAZINE_MASK;
	return (struct dispatch_magazine_s *)magazine_base;
}
// Given a bitmap (from a magazine's maps[][] array) and a bit index within
// it, compute the corresponding continuation address, the bitmap's supermap,
// and its index within that supermap. All out-parameters are optional; pass
// NULL to skip any of them.
DISPATCH_ALWAYS_INLINE_NDEBUG
static void
get_cont_and_indices_for_bitmap_and_index(bitmap_t *bitmap,
		unsigned int index, dispatch_continuation_t *continuation_out,
		bitmap_t **supermap_out, unsigned int *bitmap_index_out)
{
	// maps[][] is contiguous, so the offset from the first map is a flat
	// index that splits into (supermap, bitmap-within-supermap) coordinates.
	struct dispatch_magazine_s *m = magazine_for_continuation((void *)bitmap);
	unsigned int mindex = (unsigned int)(bitmap - m->maps[0]);
	unsigned int bindex = mindex % BITMAPS_PER_SUPERMAP;
	unsigned int sindex = mindex / BITMAPS_PER_SUPERMAP;
	dispatch_assert(&m->maps[sindex][bindex] == bitmap);
	if (fastpath(continuation_out)) {
		*continuation_out = continuation_address(m, sindex, bindex, index);
	}
	if (fastpath(supermap_out)) *supermap_out = supermap_address(m, sindex);
	if (fastpath(bitmap_index_out)) *bitmap_index_out = bindex;
}
// A continuation lives in its magazine's first page iff rounding its address
// down to the magazine boundary and down to the allocator-page boundary give
// the same result. Always false when the first page is not packed with
// continuations.
DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST
static bool
continuation_is_in_first_page(dispatch_continuation_t c)
{
#if PACK_FIRST_PAGE_WITH_CONTINUATIONS
	// (magazine base == page base) implies "in the first page"
	return (((uintptr_t)c & MAGAZINE_MASK) ==
			((uintptr_t)c & ~(uintptr_t)DISPATCH_ALLOCATOR_PAGE_MASK));
#else
	(void)c;
	return false;
#endif
}
// Inverse of continuation_address(): compute, for a continuation, its
// supermap/bitmap addresses and bit indices within its magazine. All
// out-parameters are optional (NULL to skip). Continuations in the packed
// first page have no supermap; *supermap_out is set to NULL for them.
DISPATCH_ALWAYS_INLINE_NDEBUG
static void
get_maps_and_indices_for_continuation(dispatch_continuation_t c,
		bitmap_t **supermap_out, unsigned int *bitmap_index_out,
		bitmap_t **bitmap_out, unsigned int *index_out)
{
	unsigned int cindex, sindex, index, mindex;
	padded_continuation *p = (padded_continuation *)c;
	struct dispatch_magazine_s *m = magazine_for_continuation(c);
#if PACK_FIRST_PAGE_WITH_CONTINUATIONS
	if (fastpath(continuation_is_in_first_page(c))) {
		// Offset from fp_conts is a flat index; split it into
		// (first-page bitmap, bit) coordinates.
		cindex = (unsigned int)(p - m->fp_conts);
		index = cindex % CONTINUATIONS_PER_BITMAP;
		mindex = cindex / CONTINUATIONS_PER_BITMAP;
		if (fastpath(supermap_out)) *supermap_out = NULL;
		if (fastpath(bitmap_index_out)) *bitmap_index_out = mindex;
		if (fastpath(bitmap_out)) *bitmap_out = &m->fp_maps[mindex];
		if (fastpath(index_out)) *index_out = index;
		return;
	}
#endif // PACK_FIRST_PAGE_WITH_CONTINUATIONS
	// Offset from conts is a flat index; split it into
	// (supermap, bitmap, bit) coordinates.
	cindex = (unsigned int)(p - (padded_continuation *)m->conts);
	sindex = cindex / (BITMAPS_PER_SUPERMAP * CONTINUATIONS_PER_BITMAP);
	mindex = (cindex / CONTINUATIONS_PER_BITMAP) % BITMAPS_PER_SUPERMAP;
	index = cindex % CONTINUATIONS_PER_BITMAP;
	if (fastpath(supermap_out)) *supermap_out = &m->supermaps[sindex];
	if (fastpath(bitmap_index_out)) *bitmap_index_out = mindex;
	if (fastpath(bitmap_out)) *bitmap_out = &m->maps[sindex][mindex];
	if (fastpath(index_out)) *index_out = index;
}
// Return the base of the allocator page containing the continuation, or NULL
// if the continuation is in the magazine's first page (which holds the
// header/maps and must never be madvised away).
DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST
static void *
madvisable_page_base_for_continuation(dispatch_continuation_t c)
{
	if (fastpath(continuation_is_in_first_page(c))) {
		return NULL;
	}
	void *page_base = (void *)((uintptr_t)c &
			~(uintptr_t)DISPATCH_ALLOCATOR_PAGE_MASK);
#if DISPATCH_DEBUG
	// Debug builds verify the page lies within the magazine's conts storage.
	struct dispatch_magazine_s *m = magazine_for_continuation(c);
	if (slowpath(page_base < (void *)&m->conts)) {
		DISPATCH_INTERNAL_CRASH(page_base, "madvisable continuation too low");
	}
	if (slowpath(page_base > (void *)&m->conts[SUPERMAPS_PER_MAGAZINE-1]
			[BITMAPS_PER_SUPERMAP-1][CONTINUATIONS_PER_BITMAP-1])) {
		DISPATCH_INTERNAL_CRASH(page_base, "madvisable continuation too high");
	}
#endif
	return page_base;
}
// Round a bitmap pointer down to the first bitmap covering the same
// allocator page (the maps for one page occupy BITMAPS_PER_PAGE consecutive
// bitmap_t words, so masking the low bits suffices).
DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST
static bitmap_t *
first_bitmap_in_same_page(bitmap_t *b)
{
#if DISPATCH_DEBUG
	// Debug builds check that b actually points into some magazine's maps.
	struct dispatch_magazine_s *m;
	m = magazine_for_continuation((void*)b);
	dispatch_assert(b >= &m->maps[0][0]);
	dispatch_assert(b < &m->maps[SUPERMAPS_PER_MAGAZINE]
			[BITMAPS_PER_SUPERMAP]);
#endif
	const uintptr_t PAGE_BITMAP_MASK = (BITMAPS_PER_PAGE *
			BYTES_PER_BITMAP) - 1;
	return (bitmap_t *)((uintptr_t)b & ~PAGE_BITMAP_MASK);
}
// A bitmap with every bit set has no free slots left.
DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST
static bool
bitmap_is_full(bitmap_t bits)
{
	return BITMAP_ALL_ONES == bits;
}
// Sentinel returned when no unset bit could be claimed.
#define NO_BITS_WERE_UNSET (UINT_MAX)

// Atomically find and set the first unset bit at position <= max_index.
// Returns the bit's index, or NO_BITS_WERE_UNSET if all eligible bits were
// already set.
DISPATCH_ALWAYS_INLINE_NDEBUG
static unsigned int
bitmap_set_first_unset_bit_upto_index(volatile bitmap_t *bitmap,
		unsigned int max_index)
{
	// os_atomic_set_first_bit operates on unsigned long words.
	dispatch_assert(sizeof(*bitmap) == sizeof(unsigned long));
	return os_atomic_set_first_bit(bitmap, max_index);
}
// Unbounded variant: atomically claim the first unset bit anywhere in the
// bitmap (UINT_MAX means "no index limit").
DISPATCH_ALWAYS_INLINE
static unsigned int
bitmap_set_first_unset_bit(volatile bitmap_t *bitmap)
{
	return bitmap_set_first_unset_bit_upto_index(bitmap, UINT_MAX);
}
// "Exclusively" means the caller owns the bit and it must currently be set;
// "nonexclusively" tolerates the bit already being clear (racing clearers).
#define CLEAR_EXCLUSIVELY true
#define CLEAR_NONEXCLUSIVELY false

// Atomically clear one bit. Returns true if the whole bitmap became (or
// already was) zero after the clear. The release ordering on the AND
// publishes the freed continuation's contents before the slot is reusable.
DISPATCH_ALWAYS_INLINE_NDEBUG
static bool
bitmap_clear_bit(volatile bitmap_t *bitmap, unsigned int index,
		bool exclusively)
{
#if DISPATCH_DEBUG
	dispatch_assert(index < CONTINUATIONS_PER_BITMAP);
#endif
	const bitmap_t mask = BITMAP_C(1) << index;
	bitmap_t b;
	if (exclusively == CLEAR_EXCLUSIVELY) {
		// An exclusive clear of an already-clear bit means a double free
		// or heap corruption; crash rather than continue.
		if (slowpath((*bitmap & mask) == 0)) {
			DISPATCH_CLIENT_CRASH(*bitmap,
					"Corruption: failed to clear bit exclusively");
		}
	}
	b = os_atomic_and(bitmap, ~mask, release);
	return b == 0;
}
// After filling a bitmap, set its bit in the supermap so allocators can skip
// it — but only while the bitmap is still observed full. If a concurrent
// free clears a bit mid-loop, give up: the bitmap is usable again and must
// not be marked full.
DISPATCH_ALWAYS_INLINE_NDEBUG
static void
mark_bitmap_as_full_if_still_full(volatile bitmap_t *supermap,
		unsigned int bitmap_index, volatile bitmap_t *bitmap)
{
#if DISPATCH_DEBUG
	dispatch_assert(bitmap_index < BITMAPS_PER_SUPERMAP);
#endif
	const bitmap_t mask = BITMAP_C(1) << bitmap_index;
	bitmap_t s, s_new;
	// Re-check fullness on every CAS retry; bail out as soon as it fails.
	os_atomic_rmw_loop(supermap, s, s_new, relaxed, {
		if (!bitmap_is_full(*bitmap)) {
			os_atomic_rmw_loop_give_up(return);
		}
		s_new = s | mask;
	});
}
#pragma mark -
#pragma mark dispatch_alloc_continuation_alloc
#if PACK_FIRST_PAGE_WITH_CONTINUATIONS
// Try to claim a slot among the continuations packed into the magazine's
// first page. Returns the continuation, or NULL if the first page is full.
DISPATCH_ALWAYS_INLINE_NDEBUG
static dispatch_continuation_t
alloc_continuation_from_first_page(struct dispatch_magazine_s *magazine)
{
	unsigned int i, index, continuation_index;
	// Scan the fully-populated first-page bitmaps first...
	for (i = 0; i < FULL_BITMAPS_IN_FIRST_PAGE; i++) {
		index = bitmap_set_first_unset_bit(&magazine->fp_maps[i]);
		if (fastpath(index != NO_BITS_WERE_UNSET)) goto found;
	}
	// ...then the final, partially-populated bitmap (i is left pointing at
	// it by the loop above), bounded to its actual slot count.
	if (REMAINDERED_CONTINUATIONS_IN_FIRST_PAGE) {
		index = bitmap_set_first_unset_bit_upto_index(&magazine->fp_maps[i],
				REMAINDERED_CONTINUATIONS_IN_FIRST_PAGE - 1);
		if (fastpath(index != NO_BITS_WERE_UNSET)) goto found;
	}
	return NULL;

found:
	continuation_index = (i * CONTINUATIONS_PER_BITMAP) + index;
	return (dispatch_continuation_t)&magazine->fp_conts[continuation_index];
}
#endif // PACK_FIRST_PAGE_WITH_CONTINUATIONS
// Scan the magazine's supermaps/bitmaps for a free slot. On success, record
// the page as the per-CPU "last found" hint and propagate fullness into the
// supermap. Returns NULL if the whole magazine is full.
DISPATCH_ALWAYS_INLINE_NDEBUG
static dispatch_continuation_t
alloc_continuation_from_magazine(struct dispatch_magazine_s *magazine)
{
	unsigned int s, b, index;
	for (s = 0; s < SUPERMAPS_PER_MAGAZINE; s++) {
		volatile bitmap_t *supermap = supermap_address(magazine, s);
		// A full supermap means every bitmap under it is full; skip it.
		if (bitmap_is_full(*supermap)) {
			continue;
		}
		for (b = 0; b < BITMAPS_PER_SUPERMAP; b++) {
			volatile bitmap_t *bitmap = bitmap_address(magazine, s, b);
			index = bitmap_set_first_unset_bit(bitmap);
			if (index != NO_BITS_WERE_UNSET) {
				set_last_found_page(
						first_bitmap_in_same_page((bitmap_t *)bitmap));
				mark_bitmap_as_full_if_still_full(supermap, b, bitmap);
				return continuation_address(magazine, s, b, index);
			}
		}
	}
	return NULL;
}
// Map the backing memory for one heap (MAGAZINES_PER_HEAP magazines),
// aligned to the magazine size, and publish it into *heap_ptr. Safe to race:
// the loser of the final cmpxchg discards its mapping.
DISPATCH_NOINLINE
static void
_dispatch_alloc_try_create_heap(dispatch_heap_t *heap_ptr)
{
#if HAVE_MACH
	kern_return_t kr;
	mach_vm_size_t vm_size = MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE;
	mach_vm_offset_t vm_mask = ~MAGAZINE_MASK;
	mach_vm_address_t vm_addr = vm_page_size;
	// mach_vm_map honors the alignment mask directly. Retry while the
	// address space is only temporarily exhausted; crash on other errors.
	while (slowpath(kr = mach_vm_map(mach_task_self(), &vm_addr, vm_size,
			vm_mask, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_LIBDISPATCH),
			MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
			VM_INHERIT_DEFAULT))) {
		if (kr != KERN_NO_SPACE) {
			DISPATCH_CLIENT_CRASH(kr, "Could not allocate heap");
		}
		_dispatch_temporary_resource_shortage();
		vm_addr = vm_page_size;
	}
	uintptr_t aligned_region = (uintptr_t)vm_addr;
#else // HAVE_MACH
	// mmap cannot request alignment: over-allocate by one magazine, then
	// trim the unaligned "slop" from both ends of the region.
	const size_t region_sz = (1 + MAGAZINES_PER_HEAP) * BYTES_PER_MAGAZINE;
	void *region_p;
	while (!dispatch_assume((region_p = mmap(NULL, region_sz,
			PROT_READ|PROT_WRITE, MAP_ANON | MAP_PRIVATE,
			VM_MAKE_TAG(VM_MEMORY_LIBDISPATCH), 0)) != MAP_FAILED)) {
		_dispatch_temporary_resource_shortage();
	}
	uintptr_t region = (uintptr_t)region_p;
	uintptr_t region_end = region + region_sz;
	uintptr_t aligned_region, aligned_region_end;
	uintptr_t bottom_slop_len, top_slop_len;
	if ((region & ~(MAGAZINE_MASK)) == 0) {
		// Already aligned: the entire extra magazine is top slop.
		bottom_slop_len = 0;
		aligned_region = region;
		aligned_region_end = region_end - BYTES_PER_MAGAZINE;
		top_slop_len = BYTES_PER_MAGAZINE;
	} else {
		// Unaligned: round up to the next magazine boundary; the slop is
		// split between the bottom and the top.
		aligned_region = (region & MAGAZINE_MASK) + BYTES_PER_MAGAZINE;
		aligned_region_end = aligned_region +
				(MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE);
		bottom_slop_len = aligned_region - region;
		top_slop_len = BYTES_PER_MAGAZINE - bottom_slop_len;
	}
#if DISPATCH_DEBUG
	// Sanity-check the slop arithmetic, then keep the slop mapped but
	// inaccessible so stray accesses fault immediately.
	dispatch_assert(aligned_region % DISPATCH_ALLOCATOR_PAGE_SIZE == 0);
	dispatch_assert(aligned_region % vm_kernel_page_size == 0);
	dispatch_assert(aligned_region_end % DISPATCH_ALLOCATOR_PAGE_SIZE == 0);
	dispatch_assert(aligned_region_end % vm_kernel_page_size == 0);
	dispatch_assert(aligned_region_end > aligned_region);
	dispatch_assert(top_slop_len % DISPATCH_ALLOCATOR_PAGE_SIZE == 0);
	dispatch_assert(bottom_slop_len % DISPATCH_ALLOCATOR_PAGE_SIZE == 0);
	dispatch_assert(aligned_region_end + top_slop_len == region_end);
	dispatch_assert(region + bottom_slop_len == aligned_region);
	dispatch_assert(region_sz == bottom_slop_len + top_slop_len +
			MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE);
	if (bottom_slop_len) {
		(void)dispatch_assume_zero(mprotect((void *)region, bottom_slop_len,
				PROT_NONE));
	}
	if (top_slop_len) {
		(void)dispatch_assume_zero(mprotect((void *)aligned_region_end,
				top_slop_len, PROT_NONE));
	}
#else
	// Release builds return the slop to the system.
	if (bottom_slop_len) {
		(void)dispatch_assume_zero(munmap((void *)region, bottom_slop_len));
	}
	if (top_slop_len) {
		(void)dispatch_assume_zero(munmap((void *)aligned_region_end,
				top_slop_len));
	}
#endif // DISPATCH_DEBUG
#endif // HAVE_MACH
	// Publish the heap. If another thread won the race, discard ours
	// (debug builds poison it instead so dangling uses crash).
	if (!os_atomic_cmpxchg(heap_ptr, NULL, (void *)aligned_region,
			relaxed)) {
#if DISPATCH_DEBUG
		(void)dispatch_assume_zero(mprotect((void *)aligned_region,
				MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE, PROT_NONE));
#else
		(void)dispatch_assume_zero(munmap((void *)aligned_region,
				MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE));
#endif
	}
}
// Allocate a continuation from the current CPU's magazine of the given heap:
// first page (when packed with continuations), then the rest of the
// magazine. Returns NULL if the magazine is exhausted.
DISPATCH_NOINLINE
static dispatch_continuation_t
_dispatch_alloc_continuation_from_heap(dispatch_heap_t heap)
{
	dispatch_continuation_t cont;
	unsigned int cpu_number = _dispatch_cpu_number();
	// Use "#if" (not "#ifdef"): DISPATCH_DEBUG is always defined (0 or 1),
	// matching every other debug guard in this file.
#if DISPATCH_DEBUG
	dispatch_assert(cpu_number < NUM_CPU);
#endif
#if PACK_FIRST_PAGE_WITH_CONTINUATIONS
	// First try the packed continuations in this CPU's first page.
	cont = alloc_continuation_from_first_page(&(heap[cpu_number]));
	if (fastpath(cont)) {
		return cont;
	}
#endif
	// Fall back to scanning the rest of the magazine.
	cont = alloc_continuation_from_magazine(&(heap[cpu_number]));
	return cont;
}
// Slow path: walk the chain of heaps starting at _dispatch_main_heap,
// creating each heap on demand, until an allocation succeeds. The loop is
// unbounded by design — heap creation either succeeds or crashes.
DISPATCH_NOINLINE
static dispatch_continuation_t
_dispatch_alloc_continuation_from_heap_slow(void)
{
	dispatch_heap_t *heap = &_dispatch_main_heap;
	dispatch_continuation_t cont;
	for (;;) {
		if (!fastpath(*heap)) {
			_dispatch_alloc_try_create_heap(heap);
		}
		cont = _dispatch_alloc_continuation_from_heap(*heap);
		if (fastpath(cont)) {
			return cont;
		}
		// This heap is exhausted; advance to (or create) the next one.
		heap = &(*heap)->header.dh_next;
	}
}
// Allocation entry point for the magazine allocator. Fast path: retry the
// page where this CPU last allocated successfully; then scan this CPU's
// magazine in the main heap; finally fall back to the heap-chain slow path.
DISPATCH_ALLOC_NOINLINE
static dispatch_continuation_t
_dispatch_alloc_continuation_alloc(void)
{
	dispatch_continuation_t cont;
	if (fastpath(_dispatch_main_heap)) {
		// Start at the last successful page for this CPU.
		bitmap_t *last = last_found_page();
		if (fastpath(last)) {
			unsigned int i;
			for (i = 0; i < BITMAPS_PER_PAGE; i++) {
				bitmap_t *cur = last + i;
				unsigned int index = bitmap_set_first_unset_bit(cur);
				if (fastpath(index != NO_BITS_WERE_UNSET)) {
					bitmap_t *supermap;
					unsigned int bindex;
					get_cont_and_indices_for_bitmap_and_index(cur,
							index, &cont, &supermap, &bindex);
					// Keep the supermap's full-bit in sync with the bitmap.
					mark_bitmap_as_full_if_still_full(supermap, bindex,
							cur);
					return cont;
				}
			}
		}
		cont = _dispatch_alloc_continuation_from_heap(_dispatch_main_heap);
		if (fastpath(cont)) {
			return cont;
		}
	}
	return _dispatch_alloc_continuation_from_heap_slow();
}
#pragma mark -
#pragma mark dispatch_alloc_continuation_free
// If the allocator page containing c is completely free, return it to the
// system with madvise(MADV_FREE). The page is "locked" against concurrent
// allocation by atomically flipping each of its bitmaps from 0 to all-ones;
// if any bitmap is no longer 0, another thread allocated from the page and
// we back out.
DISPATCH_NOINLINE
static void
_dispatch_alloc_maybe_madvise_page(dispatch_continuation_t c)
{
	void *page = madvisable_page_base_for_continuation(c);
	if (!page) {
		// page can't be madvised; maybe it contains non-continuations
		return;
	}
	// Cheap unsynchronized pre-scan: bail early if any slot is in use.
	volatile bitmap_t *page_bitmaps;
	get_maps_and_indices_for_continuation((dispatch_continuation_t)page, NULL,
			NULL, (bitmap_t **)&page_bitmaps, NULL);
	unsigned int i;
	for (i = 0; i < BITMAPS_PER_PAGE; i++) {
		if (page_bitmaps[i] != 0) {
			return;
		}
	}
	// Lock the page: CAS each bitmap 0 -> all-ones. A failed CAS means a
	// racing allocation claimed a slot; roll back whatever we locked.
	int last_locked = 0;
	do {
		if (!os_atomic_cmpxchg(&page_bitmaps[last_locked], BITMAP_C(0),
				BITMAP_ALL_ONES, relaxed)) {
			goto unlock;
		}
	} while (++last_locked < (signed)BITMAPS_PER_PAGE);
#if DISPATCH_DEBUG
	// Scribble the page so stale pointers into it are caught.
	memset(page, DISPATCH_ALLOCATOR_SCRIBBLE, DISPATCH_ALLOCATOR_PAGE_SIZE);
#endif
	(void)dispatch_assume_zero(madvise(page, DISPATCH_ALLOCATOR_PAGE_SIZE,
			MADV_FREE));
unlock:
	// Unlock in reverse order; bitmap 0 is released last, with an atomic
	// store, making the whole page allocatable again.
	while (last_locked > 1) {
		page_bitmaps[--last_locked] = BITMAP_C(0);
	}
	if (last_locked) {
		os_atomic_store(&page_bitmaps[0], BITMAP_C(0), relaxed);
	}
	return;
}
// Free a continuation back to its magazine: clear its bit (exclusively — a
// double free crashes), clear the supermap's full-bit, and if the bitmap
// just became empty, consider madvising the whole page. First-page
// continuations have s == NULL and are never madvised.
DISPATCH_ALLOC_NOINLINE
static void
_dispatch_alloc_continuation_free(dispatch_continuation_t c)
{
	bitmap_t *b, *s;
	unsigned int b_idx, idx;
	get_maps_and_indices_for_continuation(c, &s, &b_idx, &b, &idx);
	bool bitmap_now_empty = bitmap_clear_bit(b, idx, CLEAR_EXCLUSIVELY);
	if (slowpath(s)) {
		// The bitmap is no longer full; unmark it in the supermap.
		(void)bitmap_clear_bit(s, b_idx, CLEAR_NONEXCLUSIVELY);
	}
	if (slowpath(bitmap_now_empty) && slowpath(s)) {
		return _dispatch_alloc_maybe_madvise_page(c);
	}
}
#pragma mark -
#pragma mark dispatch_alloc_init
#if DISPATCH_DEBUG
// Debug-only startup checks that the magazine layout constants agree with
// the actual struct layout; no runtime state is initialized here.
static void
_dispatch_alloc_init(void)
{
	// Bitmaps and supermaps are single machine words.
	dispatch_assert(sizeof(bitmap_t) == BYTES_PER_BITMAP);
	dispatch_assert(sizeof(bitmap_t) == BYTES_PER_SUPERMAP);
	dispatch_assert(sizeof(struct dispatch_magazine_header_s) ==
			SIZEOF_HEADER);
	dispatch_assert(sizeof(struct dispatch_continuation_s) <=
			DISPATCH_CONTINUATION_SIZE);
	dispatch_assert(sizeof(struct dispatch_magazine_s) == BYTES_PER_MAGAZINE);
	dispatch_assert(SIZEOF_HEADER ==
			sizeof(((struct dispatch_magazine_s *)0x0)->header));
	dispatch_assert(SIZEOF_MAPS ==
			sizeof(((struct dispatch_magazine_s *)0x0)->maps));
	// The continuation storage must begin exactly at the second allocator
	// page and be aligned so the bitmap index math holds.
	dispatch_assert(offsetof(struct dispatch_magazine_s, conts) %
			(CONTINUATIONS_PER_BITMAP * DISPATCH_CONTINUATION_SIZE) == 0);
	dispatch_assert(offsetof(struct dispatch_magazine_s, conts) ==
			DISPATCH_ALLOCATOR_PAGE_SIZE);
#if PACK_FIRST_PAGE_WITH_CONTINUATIONS
	// Packed first-page continuations must fill the first page exactly.
	dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) <
			DISPATCH_ALLOCATOR_PAGE_SIZE);
	dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) %
			DISPATCH_CONTINUATION_SIZE == 0);
	dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) +
			sizeof(((struct dispatch_magazine_s *)0x0)->fp_conts) ==
			DISPATCH_ALLOCATOR_PAGE_SIZE);
#endif // PACK_FIRST_PAGE_WITH_CONTINUATIONS
	// The combined rounding macro must be idempotent under each rounding.
	dispatch_assert(ROUND_UP_TO_BITMAP_ALIGNMENT(ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1)) ==
			ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1));
	dispatch_assert(ROUND_UP_TO_CONTINUATION_SIZE(ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1)) ==
			ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1));
}
#elif (DISPATCH_ALLOCATOR && DISPATCH_CONTINUATION_MALLOC) \
		|| (DISPATCH_CONTINUATION_MALLOC && DISPATCH_USE_MALLOCZONE)
static inline void _dispatch_alloc_init(void) {}
#endif
#endif // DISPATCH_ALLOCATOR
#pragma mark -
#pragma mark dispatch_malloc
#if DISPATCH_CONTINUATION_MALLOC
#if DISPATCH_USE_MALLOCZONE
// Dedicated malloc zone for continuations; the calloc/free macros below
// redirect the malloc-based allocator into it for the rest of this file.
static malloc_zone_t *_dispatch_ccache_zone;

#define calloc(n, s) malloc_zone_calloc(_dispatch_ccache_zone, (n), (s))
#define free(c) malloc_zone_free(_dispatch_ccache_zone, (c))

static void
_dispatch_malloc_init(void)
{
	_dispatch_ccache_zone = malloc_create_zone(0, 0);
	dispatch_assert(_dispatch_ccache_zone);
	malloc_set_zone_name(_dispatch_ccache_zone, "DispatchContinuations");
}
#else
#define _dispatch_malloc_init() ((void)0)
#endif // DISPATCH_USE_MALLOCZONE
// malloc-based allocation path: zeroed, cacheline-rounded continuation.
// Retries (after signaling resource shortage) instead of returning NULL.
// Note: calloc may be the zone-redirecting macro defined above.
static dispatch_continuation_t
_dispatch_malloc_continuation_alloc(void)
{
	dispatch_continuation_t dc;
	while (!(dc = fastpath(calloc(1,
			ROUND_UP_TO_CACHELINE_SIZE(sizeof(*dc)))))) {
		_dispatch_temporary_resource_shortage();
	}
	return dc;
}
// malloc-based free path (free may be the zone-redirecting macro above).
static inline void
_dispatch_malloc_continuation_free(dispatch_continuation_t c)
{
	free(c);
}
#endif // DISPATCH_CONTINUATION_MALLOC
#pragma mark -
#pragma mark dispatch_continuation_alloc
// Runtime allocator selection: when both the magazine allocator and
// malloc-based continuations are compiled in, _dispatch_use_dispatch_alloc
// decides between them (set in _dispatch_continuation_alloc_init);
// otherwise the choice is fixed at compile time.
#if DISPATCH_ALLOCATOR
#if DISPATCH_CONTINUATION_MALLOC
#if DISPATCH_USE_NANOZONE
extern boolean_t malloc_engaged_nano(void);
#else
#define malloc_engaged_nano() false
#endif // DISPATCH_USE_NANOZONE
static int _dispatch_use_dispatch_alloc;
#else
#define _dispatch_use_dispatch_alloc 1
#endif // DISPATCH_CONTINUATION_MALLOC
#endif // DISPATCH_ALLOCATOR
#if (DISPATCH_ALLOCATOR && (DISPATCH_CONTINUATION_MALLOC || DISPATCH_DEBUG)) \
		|| (DISPATCH_CONTINUATION_MALLOC && DISPATCH_USE_MALLOCZONE)
// dispatch_once callback: choose the continuation allocator and run its
// one-time initialization.
static void
_dispatch_continuation_alloc_init(void *ctxt DISPATCH_UNUSED)
{
#if DISPATCH_ALLOCATOR
#if DISPATCH_CONTINUATION_MALLOC
	// Default: prefer the magazine allocator unless malloc's nano zone is
	// engaged; LIBDISPATCH_CONTINUATION_ALLOCATOR overrides (0 = malloc).
	bool use_dispatch_alloc = !malloc_engaged_nano();
	char *e = getenv("LIBDISPATCH_CONTINUATION_ALLOCATOR");
	if (e) {
		use_dispatch_alloc = atoi(e);
	}
	_dispatch_use_dispatch_alloc = use_dispatch_alloc;
#endif // DISPATCH_CONTINUATION_MALLOC
	if (_dispatch_use_dispatch_alloc)
		return _dispatch_alloc_init();
#endif // DISPATCH_ALLOCATOR
#if DISPATCH_CONTINUATION_MALLOC
	return _dispatch_malloc_init();
#endif // DISPATCH_ALLOCATOR
}
// One-time initialization trampoline for the continuation allocator.
// Declared (void): the empty parameter list "()" is an unprototyped
// declaration in C, and the stub definition in the #else branch below
// already uses (void) — keep the two in agreement.
static void
_dispatch_continuation_alloc_once(void)
{
	static dispatch_once_t pred;
	dispatch_once_f(&pred, NULL, _dispatch_continuation_alloc_init);
}
#else
static inline void _dispatch_continuation_alloc_once(void) {}
#endif // DISPATCH_ALLOCATOR ... || DISPATCH_CONTINUATION_MALLOC ...
// Public allocation entry point: initialize once, then dispatch to the
// selected allocator (magazine or malloc-based).
dispatch_continuation_t
_dispatch_continuation_alloc_from_heap(void)
{
	_dispatch_continuation_alloc_once();
#if DISPATCH_ALLOCATOR
	if (_dispatch_use_dispatch_alloc)
		return _dispatch_alloc_continuation_alloc();
#endif
#if DISPATCH_CONTINUATION_MALLOC
	return _dispatch_malloc_continuation_alloc();
#endif
}
// Public free entry point: route to whichever allocator produced the
// continuation (same selection as _dispatch_continuation_alloc_from_heap).
void
_dispatch_continuation_free_to_heap(dispatch_continuation_t c)
{
#if DISPATCH_ALLOCATOR
	if (_dispatch_use_dispatch_alloc)
		return _dispatch_alloc_continuation_free(c);
#endif
#if DISPATCH_CONTINUATION_MALLOC
	return _dispatch_malloc_continuation_free(c);
#endif
}