#include "internal.h"
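/*
 * ASLR support for allocate_pages_securely(): allocations are placed within a
 * sliding "entropic" window located below the stack. entropic_address is the
 * current placement hint and entropic_limit its upper bound; when the window
 * is exhausted, the hint is ratcheted downward by ENTROPIC_KABILLION bytes.
 */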
static volatile uintptr_t entropic_address = 0;
static volatile uintptr_t entropic_limit = 0;
#define ENTROPIC_KABILLION 0x10000000
#if MALLOC_TARGET_IOS && MALLOC_TARGET_64BIT
#define ENTROPIC_SHIFT 25
#else // MALLOC_TARGET_IOS && MALLOC_TARGET_64BIT
#define ENTROPIC_SHIFT SMALL_BLOCKS_ALIGN
#endif
#if DEBUG_MALLOC
#define LOG(szone, ptr) (szone->log_address && (((uintptr_t)szone->log_address == -1) || (szone->log_address == (void *)(ptr))))
#else
#define LOG(szone, ptr) 0
#endif
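/*
 * Debugging aid: if MallocErrorStop or MallocErrorSleep is set in the
 * environment, stop or stall the process after an allocator error so a
 * debugger can attach.
 */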
static void
szone_sleep(void)
{
if (getenv("MallocErrorStop")) {
_malloc_printf(ASL_LEVEL_NOTICE, "*** sending SIGSTOP to help debug\n");
kill(getpid(), SIGSTOP);
} else if (getenv("MallocErrorSleep")) {
_malloc_printf(ASL_LEVEL_NOTICE, "*** sleeping to help debug\n");
sleep(3600);
}
}
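/*
 * Report an allocator error. The message is logged (and handed to the crash
 * reporter when we abort), malloc_error_break() is called as a breakpoint
 * hook, and, depending on MALLOC_ABORT_ON_CORRUPTION / MALLOC_ABORT_ON_ERROR,
 * the process may abort.
 */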
void
szone_error(szone_t *szone, int is_corruption, const char *msg, const void *ptr, const char *fmt, ...)
{
va_list ap;
_SIMPLE_STRING b = _simple_salloc();
if (b) {
if (fmt) {
va_start(ap, fmt);
_simple_vsprintf(b, fmt, ap);
va_end(ap);
}
if (ptr) {
_simple_sprintf(b, "*** error for object %p: %s\n", ptr, msg);
} else {
_simple_sprintf(b, "*** error: %s\n", msg);
}
malloc_printf("%s*** set a breakpoint in malloc_error_break to debug\n", _simple_string(b));
} else {
if (fmt) {
va_start(ap, fmt);
_malloc_vprintf(MALLOC_PRINTF_NOLOG, fmt, ap);
va_end(ap);
}
if (ptr) {
_malloc_printf(MALLOC_PRINTF_NOLOG, "*** error for object %p: %s\n", ptr, msg);
} else {
_malloc_printf(MALLOC_PRINTF_NOLOG, "*** error: %s\n", msg);
}
_malloc_printf(MALLOC_PRINTF_NOLOG, "*** set a breakpoint in malloc_error_break to debug\n");
}
malloc_error_break();
#if DEBUG_MALLOC
szone_print(szone, 1);
#endif
szone_sleep();
if ((is_corruption && (szone->debug_flags & MALLOC_ABORT_ON_CORRUPTION)) ||
(szone->debug_flags & MALLOC_ABORT_ON_ERROR)) {
_os_set_crash_log_message_dynamic(b ? _simple_string(b) : msg);
abort();
} else if (b) {
_simple_sfree(b);
}
}
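/*
 * Apply 'protection' to the guard pages immediately before and after
 * [address, address + size), unless the corresponding MALLOC_DONT_PROTECT_*
 * debug flag suppresses one side.
 */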
void
protect(void *address, size_t size, unsigned protection, unsigned debug_flags)
{
kern_return_t err;
if (!(debug_flags & MALLOC_DONT_PROTECT_PRELUDE)) {
err = mprotect((void *)((uintptr_t)address - vm_page_quanta_size), vm_page_quanta_size, protection);
if (err) {
malloc_printf("*** can't protect(%p) region for prelude guard page at %p\n", protection,
(uintptr_t)address - vm_page_quanta_size);
}
}
if (!(debug_flags & MALLOC_DONT_PROTECT_POSTLUDE)) {
err = mprotect((void *)(round_page_quanta(((uintptr_t)address + size))), vm_page_quanta_size, protection);
if (err) {
malloc_printf("*** can't protect(%p) region for postlude guard page at %p\n", protection, (uintptr_t)address + size);
}
}
}
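/*
 * Allocate page-rounded VM, optionally bracketed by guard pages
 * (MALLOC_ADD_GUARD_PAGES) and optionally purgeable (MALLOC_PURGEABLE).
 * For alignments larger than a page, extra space is mapped and the excess
 * leading/trailing pages are unmapped to leave an aligned block.
 */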
void *
allocate_pages(szone_t *szone, size_t size, unsigned char align, unsigned debug_flags, int vm_page_label)
{
boolean_t add_guard_pages = debug_flags & MALLOC_ADD_GUARD_PAGES;
boolean_t purgeable = debug_flags & MALLOC_PURGEABLE;
mach_vm_address_t vm_addr;
uintptr_t addr;
mach_vm_size_t allocation_size = round_page_quanta(size);
mach_vm_offset_t allocation_mask = ((mach_vm_offset_t)1 << align) - 1;
int alloc_flags = VM_FLAGS_ANYWHERE | VM_MAKE_TAG(vm_page_label);
kern_return_t kr;
if (!allocation_size) {
allocation_size = vm_page_quanta_size;
}
if (add_guard_pages) {
if (align > vm_page_quanta_shift) {
allocation_size += (1 << align) + vm_page_quanta_size;
} else {
allocation_size += 2 * vm_page_quanta_size;
}
}
if (purgeable) {
alloc_flags |= VM_FLAGS_PURGABLE;
}
if (allocation_size < size) {
return NULL;
}
vm_addr = vm_page_quanta_size;
kr = mach_vm_map(mach_task_self(), &vm_addr, allocation_size, allocation_mask, alloc_flags, MEMORY_OBJECT_NULL, 0, FALSE,
VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
if (kr) {
szone_error(szone, 0, "can't allocate region", NULL, "*** mach_vm_map(size=%lu) failed (error code=%d)\n", size, kr);
return NULL;
}
addr = (uintptr_t)vm_addr;
if (add_guard_pages) {
if (align > vm_page_quanta_shift) {
uintptr_t alignaddr = ((addr + vm_page_quanta_size) + (1 << align) - 1) & ~((1 << align) - 1);
size_t leading = alignaddr - addr - vm_page_quanta_size;
size_t trailing = (1 << align) - vm_page_quanta_size - leading;
kr = mach_vm_deallocate(mach_task_self(), addr, leading);
if (kr) {
szone_error(szone, 0, "can't unmap excess guard region", NULL,
"*** mach_vm_deallocate(addr=%p, size=%lu) failed (code=%d)", (void *)addr, leading, kr);
return NULL;
}
kr = mach_vm_deallocate(mach_task_self(), addr + allocation_size - trailing, trailing);
if (kr) {
szone_error(szone, 0, "can't unmap excess trailing guard region", NULL,
"*** mach_vm_deallocate(addr=%p, size=%lu) failed (code=%d)", (void *)(addr + allocation_size - trailing),
trailing, kr);
return NULL;
}
addr = alignaddr;
} else {
addr += vm_page_quanta_size;
}
protect((void *)addr, size, PROT_NONE, debug_flags);
}
return (void *)addr;
}
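/*
 * Like allocate_pages(), but asks the kernel to place the mapping at the
 * randomized entropic_address hint so heap placement is harder to predict.
 * Falls back to allocate_pages() when ASLR is disabled for this zone.
 */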
void *
allocate_pages_securely(szone_t *szone, size_t size, unsigned char align, int vm_page_label)
{
mach_vm_address_t vm_addr;
uintptr_t addr;
mach_vm_size_t allocation_size = round_page_quanta(size);
mach_vm_offset_t allocation_mask = ((mach_vm_offset_t)1 << align) - 1;
int alloc_flags = VM_FLAGS_ANYWHERE | VM_MAKE_TAG(vm_page_label);
kern_return_t kr;
if (szone->debug_flags & DISABLE_ASLR) {
return allocate_pages(szone, size, align, 0, vm_page_label);
}
if (!allocation_size) {
allocation_size = vm_page_quanta_size;
}
if (allocation_size < size) {
return NULL;
}
retry:
vm_addr = entropic_address;
kr = mach_vm_map(mach_task_self(), &vm_addr, allocation_size, allocation_mask, alloc_flags, MEMORY_OBJECT_NULL, 0, FALSE,
VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
if (kr == KERN_NO_SPACE) {
vm_addr = vm_page_quanta_size;
kr = mach_vm_map(mach_task_self(), &vm_addr, allocation_size, allocation_mask, alloc_flags, MEMORY_OBJECT_NULL, 0, FALSE,
VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
}
if (kr) {
szone_error(
szone, 0, "can't allocate region securely", NULL, "*** mach_vm_map(size=%lu) failed (error code=%d)\n", size, kr);
return NULL;
}
addr = (uintptr_t)vm_addr;
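// Maintain the entropic window: if this allocation ran past entropic_limit,
// lower the hint (growing the window downward) and retry; if the kernel placed
// us below the current hint, slide the hint down to follow.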
if (addr + allocation_size > entropic_limit) {
uintptr_t t = entropic_address;
uintptr_t u = t - ENTROPIC_KABILLION;
if (u < t) {
mach_vm_deallocate(mach_task_self(), vm_addr, allocation_size);
(void)__sync_bool_compare_and_swap(&entropic_address, t, u);
goto retry;
}
}
if (addr < entropic_address) {
uintptr_t t = entropic_address;
uintptr_t u = t - ENTROPIC_KABILLION;
if (u < t) {
(void)__sync_bool_compare_and_swap(&entropic_address, t, u);
}
}
return (void *)addr;
}
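/*
 * Return pages to the kernel, widening the range to include the guard pages
 * if the allocation was made with MALLOC_ADD_GUARD_PAGES.
 */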
void
deallocate_pages(szone_t *szone, void *addr, size_t size, unsigned debug_flags)
{
boolean_t add_guard_pages = debug_flags & MALLOC_ADD_GUARD_PAGES;
mach_vm_address_t vm_addr = (mach_vm_address_t)addr;
mach_vm_size_t allocation_size = size;
kern_return_t kr;
if (add_guard_pages) {
vm_addr -= vm_page_quanta_size;
allocation_size += 2 * vm_page_quanta_size;
}
kr = mach_vm_deallocate(mach_task_self(), vm_addr, allocation_size);
if (kr && szone) {
szone_error(szone, 0, "Can't deallocate_pages region", addr, NULL);
}
}
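/*
 * Hint to the kernel that the page range [pgLo, pgHi) inside region 'r' can be
 * reclaimed, scribbling it first when MALLOC_DO_SCRIBBLE is set.
 */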
int
madvise_free_range(szone_t *szone, region_t r, uintptr_t pgLo, uintptr_t pgHi, uintptr_t *last)
{
if (pgHi > pgLo) {
size_t len = pgHi - pgLo;
if (szone->debug_flags & MALLOC_DO_SCRIBBLE) {
memset((void *)pgLo, SCRUBBLE_BYTE, len);
}
#if TARGET_OS_EMBEDDED
if (last) {
if (*last == pgLo) {
return 0;
}
*last = pgLo;
}
#endif
MAGMALLOC_MADVFREEREGION((void *)szone, (void *)r, (void *)pgLo, (int)len);
if (-1 == madvise((void *)pgLo, len, CONFIG_MADVISE_STYLE)) {
#if DEBUG_MADVISE
szone_error(szone, 0, "madvise_free_range madvise(..., MADV_FREE_REUSABLE) failed", (void *)pgLo, "length=%d\n", len);
#endif
}
}
return 0;
}
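/*
 * Tell the kernel the previously madvised range is in use again
 * (MADV_FREE_REUSE). Returns 1 if the madvise call fails.
 */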
int
madvise_reuse_range(szone_t *szone, region_t r, uintptr_t pgLo, uintptr_t pgHi)
{
if (pgHi > pgLo) {
size_t len = pgHi - pgLo;
if (madvise((void *)pgLo, len, MADV_FREE_REUSE) == -1) {
#if DEBUG_MADVISE
szone_error(szone, 0, "madvise_reuse_range madvise(..., MADV_FREE_REUSE) failed", (void *)pgLo, "length=%d\n", len);
#endif
return 1;
}
}
return 0;
}
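/*
 * Free a pointer whose size class is unknown: classify it by looking up the
 * tiny and small region tables, falling back to the large allocator.
 * Misaligned or metadata pointers are reported as corruption.
 */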
void
szone_free(szone_t *szone, void *ptr)
{
region_t tiny_region;
region_t small_region;
#if DEBUG_MALLOC
if (LOG(szone, ptr)) {
malloc_printf("in szone_free with %p\n", ptr);
}
#endif
if (!ptr) {
return;
}
if ((uintptr_t)ptr & (TINY_QUANTUM - 1)) {
szone_error(szone, 1, "Non-aligned pointer being freed", ptr, NULL);
return;
}
if ((tiny_region = tiny_region_for_ptr_no_lock(szone, ptr)) != NULL) {
if (TINY_INDEX_FOR_PTR(ptr) >= NUM_TINY_BLOCKS) {
szone_error(szone, 1, "Pointer to metadata being freed", ptr, NULL);
return;
}
free_tiny(szone, ptr, tiny_region, 0);
return;
}
if ((uintptr_t)ptr & (SMALL_QUANTUM - 1)) {
szone_error(szone, 1, "Non-aligned pointer being freed (2)", ptr, NULL);
return;
}
if ((small_region = small_region_for_ptr_no_lock(szone, ptr)) != NULL) {
if (SMALL_META_INDEX_FOR_PTR(ptr) >= NUM_SMALL_BLOCKS) {
szone_error(szone, 1, "Pointer to metadata being freed (2)", ptr, NULL);
return;
}
free_small(szone, ptr, small_region, 0);
return;
}
if ((uintptr_t)ptr & (vm_page_quanta_size - 1)) {
szone_error(szone, 1, "non-page-aligned, non-allocated pointer being freed", ptr, NULL);
return;
}
free_large(szone, ptr);
}
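/*
 * Free a pointer whose size is supplied by the caller, which lets us pick the
 * tiny/small/large path without a region lookup.
 */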
void
szone_free_definite_size(szone_t *szone, void *ptr, size_t size)
{
#if DEBUG_MALLOC
if (LOG(szone, ptr)) {
malloc_printf("in szone_free_definite_size with %p\n", ptr);
}
if (0 == size) {
szone_error(szone, 1, "pointer of size zero being freed", ptr, NULL);
return;
}
#endif
if (!ptr) {
return;
}
if ((uintptr_t)ptr & (TINY_QUANTUM - 1)) {
szone_error(szone, 1, "Non-aligned pointer being freed", ptr, NULL);
return;
}
if (size <= SMALL_THRESHOLD) {
if (TINY_INDEX_FOR_PTR(ptr) >= NUM_TINY_BLOCKS) {
szone_error(szone, 1, "Pointer to metadata being freed", ptr, NULL);
return;
}
free_tiny(szone, ptr, TINY_REGION_FOR_PTR(ptr), size);
return;
}
if ((uintptr_t)ptr & (SMALL_QUANTUM - 1)) {
szone_error(szone, 1, "Non-aligned pointer being freed (2)", ptr, NULL);
return;
}
if (size <= szone->large_threshold) {
if (SMALL_META_INDEX_FOR_PTR(ptr) >= NUM_SMALL_BLOCKS) {
szone_error(szone, 1, "Pointer to metadata being freed (2)", ptr, NULL);
return;
}
free_small(szone, ptr, SMALL_REGION_FOR_PTR(ptr), size);
return;
}
if ((uintptr_t)ptr & (vm_page_quanta_size - 1)) {
szone_error(szone, 1, "non-page-aligned, non-allocated pointer being freed", ptr, NULL);
return;
}
free_large(szone, ptr);
}
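/*
 * Central allocation entry point: route the request to the tiny, small, or
 * large allocator based on size, optionally zeroing the returned memory.
 */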
MALLOC_NOINLINE void *
szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested)
{
void *ptr;
msize_t msize;
if (size <= SMALL_THRESHOLD) {
msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1);
if (!msize) {
msize = 1;
}
ptr = tiny_malloc_should_clear(szone, msize, cleared_requested);
} else if (size <= szone->large_threshold) {
msize = SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1);
if (!msize) {
msize = 1;
}
ptr = small_malloc_should_clear(szone, msize, cleared_requested);
} else {
size_t num_kernel_pages = round_page_quanta(size) >> vm_page_quanta_shift;
if (num_kernel_pages == 0) {
ptr = 0;
} else {
ptr = large_malloc(szone, num_kernel_pages, 0, cleared_requested);
}
}
#if DEBUG_MALLOC
if (LOG(szone, ptr)) {
malloc_printf("szone_malloc returned %p\n", ptr);
}
#endif
if ((szone->debug_flags & MALLOC_DO_SCRIBBLE) && ptr && !cleared_requested && size) {
memset(ptr, SCRIBBLE_BYTE, szone_size(szone, ptr));
}
return ptr;
}
void *
szone_malloc(szone_t *szone, size_t size)
{
return szone_malloc_should_clear(szone, size, 0);
}
void *
szone_calloc(szone_t *szone, size_t num_items, size_t size)
{
size_t total_bytes = num_items * size;
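// Guard against num_items * size overflowing size_t: if either operand has bits
// set in its upper half, redo the multiply at double width and reject on overflow.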
if (num_items > 1) {
#if __LP64__
if ((num_items | size) & 0xffffffff00000000ul) {
__uint128_t product = ((__uint128_t)num_items) * ((__uint128_t)size);
if ((uint64_t)(product >> 64)) {
return NULL;
}
}
#else
if ((num_items | size) & 0xffff0000ul) {
uint64_t product = ((uint64_t)num_items) * ((uint64_t)size);
if ((uint32_t)(product >> 32)) {
return NULL;
}
}
#endif
}
return szone_malloc_should_clear(szone, total_bytes, 1);
}
void *
szone_valloc(szone_t *szone, size_t size)
{
void *ptr;
if (size <= szone->large_threshold) {
ptr = szone_memalign(szone, vm_page_quanta_size, size);
} else {
size_t num_kernel_pages;
num_kernel_pages = round_page_quanta(size) >> vm_page_quanta_shift;
ptr = large_malloc(szone, num_kernel_pages, 0, 0);
}
#if DEBUG_MALLOC
if (LOG(szone, ptr)) {
malloc_printf("szone_valloc returned %p\n", ptr);
}
#endif
return ptr;
}
size_t
szone_size_try_large(szone_t *szone, const void *ptr)
{
size_t size = 0;
large_entry_t *entry;
SZONE_LOCK(szone);
entry = large_entry_for_pointer_no_lock(szone, ptr);
if (entry) {
size = entry->size;
}
SZONE_UNLOCK(szone);
#if DEBUG_MALLOC
if (LOG(szone, ptr)) {
malloc_printf("szone_size for %p returned %d\n", ptr, (unsigned)size);
}
#endif
return size;
}
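/*
 * Return the allocated size of ptr, or 0 if ptr does not belong to this zone,
 * is currently free, or is cached in a magazine's mag_last_free slot.
 */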
size_t
szone_size(szone_t *szone, const void *ptr)
{
boolean_t is_free;
msize_t msize, msize_and_free;
if (!ptr) {
return 0;
}
#if DEBUG_MALLOC
if (LOG(szone, ptr)) {
malloc_printf("in szone_size for %p (szone=%p)\n", ptr, szone);
}
#endif
if ((uintptr_t)ptr & (TINY_QUANTUM - 1)) {
return 0;
}
if (tiny_region_for_ptr_no_lock(szone, ptr)) {
if (TINY_INDEX_FOR_PTR(ptr) >= NUM_TINY_BLOCKS) {
return 0;
}
msize = get_tiny_meta_header(ptr, &is_free);
if (is_free) {
return 0;
}
#if CONFIG_TINY_CACHE
{
mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr));
if (DEPOT_MAGAZINE_INDEX != mag_index) {
magazine_t *tiny_mag_ptr = &(szone->tiny_magazines[mag_index]);
if (msize < TINY_QUANTUM && ptr == (void *)((uintptr_t)(tiny_mag_ptr->mag_last_free) & ~(TINY_QUANTUM - 1))) {
return 0;
}
} else {
for (mag_index = 0; mag_index < szone->num_tiny_magazines; mag_index++) {
magazine_t *tiny_mag_ptr = &(szone->tiny_magazines[mag_index]);
if (msize < TINY_QUANTUM && ptr == (void *)((uintptr_t)(tiny_mag_ptr->mag_last_free) & ~(TINY_QUANTUM - 1))) {
return 0;
}
}
}
}
#endif
return TINY_BYTES_FOR_MSIZE(msize);
}
if ((uintptr_t)ptr & (SMALL_QUANTUM - 1)) {
return 0;
}
if (small_region_for_ptr_no_lock(szone, ptr)) {
if (SMALL_META_INDEX_FOR_PTR(ptr) >= NUM_SMALL_BLOCKS) {
return 0;
}
msize_and_free = *SMALL_METADATA_FOR_PTR(ptr);
if (msize_and_free & SMALL_IS_FREE) {
return 0;
}
#if CONFIG_SMALL_CACHE
{
mag_index_t mag_index = MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr));
if (DEPOT_MAGAZINE_INDEX != mag_index) {
magazine_t *small_mag_ptr = &(szone->small_magazines[mag_index]);
if (ptr == (void *)((uintptr_t)(small_mag_ptr->mag_last_free) & ~(SMALL_QUANTUM - 1))) {
return 0;
}
} else {
for (mag_index = 0; mag_index < szone->num_small_magazines; mag_index++) {
magazine_t *small_mag_ptr = &(szone->small_magazines[mag_index]);
if (ptr == (void *)((uintptr_t)(small_mag_ptr->mag_last_free) & ~(SMALL_QUANTUM - 1))) {
return 0;
}
}
}
}
#endif
return SMALL_BYTES_FOR_MSIZE(msize_and_free);
}
if ((uintptr_t)ptr & (vm_page_quanta_size - 1)) {
return 0;
}
return szone_size_try_large(szone, ptr);
}
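/*
 * Resize in place when the size class allows it (shrinking, or growing within
 * the same region); otherwise allocate a new block, copy (via vm_copy for
 * large copies, falling back to memcpy), and free the original.
 */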
void *
szone_realloc(szone_t *szone, void *ptr, size_t new_size)
{
size_t old_size, new_good_size, valid_size;
void *new_ptr;
#if DEBUG_MALLOC
if (LOG(szone, ptr)) {
malloc_printf("in szone_realloc for %p, %d\n", ptr, (unsigned)new_size);
}
#endif
if (NULL == ptr) {
return szone_malloc(szone, new_size);
} else if (0 == new_size) {
szone_free(szone, ptr);
return szone_malloc(szone, 1);
}
old_size = szone_size(szone, ptr);
if (!old_size) {
szone_error(szone, 1, "pointer being reallocated was not allocated", ptr, NULL);
return NULL;
}
new_good_size = szone_good_size(szone, new_size);
if (new_good_size == old_size) {
return ptr;
}
if (new_good_size <= SMALL_THRESHOLD) {
if (old_size <= SMALL_THRESHOLD) {
if (new_good_size <= (old_size >> 1)) {
return tiny_try_shrink_in_place(szone, ptr, old_size, new_good_size);
} else if (new_good_size <= old_size) {
if (szone->debug_flags & MALLOC_DO_SCRIBBLE) {
memset(ptr + new_size, SCRIBBLE_BYTE, old_size - new_size);
}
return ptr;
} else if (tiny_try_realloc_in_place(szone, ptr, old_size, new_good_size)) {
if (szone->debug_flags & MALLOC_DO_SCRIBBLE) {
memset(ptr + old_size, SCRIBBLE_BYTE, new_good_size - old_size);
}
return ptr;
}
}
} else if (new_good_size <= szone->large_threshold) {
if (SMALL_THRESHOLD < old_size && old_size <= szone->large_threshold) {
if (new_good_size <= (old_size >> 1)) {
return small_try_shrink_in_place(szone, ptr, old_size, new_good_size);
} else if (new_good_size <= old_size) {
if (szone->debug_flags & MALLOC_DO_SCRIBBLE) {
memset(ptr + new_size, SCRIBBLE_BYTE, old_size - new_size);
}
return ptr;
} else if (small_try_realloc_in_place(szone, ptr, old_size, new_good_size)) {
if (szone->debug_flags & MALLOC_DO_SCRIBBLE) {
memset(ptr + old_size, SCRIBBLE_BYTE, new_good_size - old_size);
}
return ptr;
}
}
} else if (!(szone->debug_flags & MALLOC_PURGEABLE) && (old_size > szone->large_threshold) && (new_good_size > szone->large_threshold)) {
if (new_good_size <= (old_size >> 1)) {
return large_try_shrink_in_place(szone, ptr, old_size, new_good_size);
} else if (new_good_size <= old_size) {
if (szone->debug_flags & MALLOC_DO_SCRIBBLE) {
memset(ptr + new_size, SCRIBBLE_BYTE, old_size - new_size);
}
return ptr;
} else if (large_try_realloc_in_place(szone, ptr, old_size, new_good_size)) {
if (szone->debug_flags & MALLOC_DO_SCRIBBLE) {
memset(ptr + old_size, SCRIBBLE_BYTE, new_good_size - old_size);
}
return ptr;
}
}
if (new_good_size <= (old_size >> 1)) {
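// Serious shrinkage (more than half): fall through to the allocate-copy-free path below.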
} else if (new_good_size <= old_size) {
if (szone->debug_flags & MALLOC_DO_SCRIBBLE) {
memset(ptr + new_size, SCRIBBLE_BYTE, old_size - new_size);
}
return ptr;
}
new_ptr = szone_malloc(szone, new_size);
if (new_ptr == NULL) {
return NULL;
}
valid_size = MIN(old_size, new_size);
if ((valid_size < szone->vm_copy_threshold) ||
vm_copy(mach_task_self(), (vm_address_t)ptr, valid_size, (vm_address_t)new_ptr)) {
memcpy(new_ptr, ptr, valid_size);
}
szone_free(szone, ptr);
#if DEBUG_MALLOC
if (LOG(szone, ptr)) {
malloc_printf("szone_realloc returned %p for %d\n", new_ptr, (unsigned)new_size);
}
#endif
return new_ptr;
}
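/*
 * Allocate 'size' bytes with the given alignment (expected by callers such as
 * posix_memalign to be a power of two). Alignments no stricter than a size
 * class's natural quantum go straight to szone_malloc(); otherwise an
 * over-sized span is carved to the requested alignment.
 */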
void *
szone_memalign(szone_t *szone, size_t alignment, size_t size)
{
if (size == 0) {
size = 1;
}
if ((size + alignment) < size) {
return NULL;
}
size_t span = size + alignment - 1;
if (alignment <= TINY_QUANTUM) {
return szone_malloc(szone, size);
} else if (span <= SMALL_THRESHOLD) {
return tiny_memalign(szone, alignment, size, span);
} else if (SMALL_THRESHOLD < size && alignment <= SMALL_QUANTUM) {
return szone_malloc(szone, size);
} else if (span <= szone->large_threshold) {
return small_memalign(szone, alignment, size, span);
} else if (szone->large_threshold < size && alignment <= vm_page_quanta_size) {
return szone_malloc(szone, size);
} else {
size_t num_kernel_pages = round_page_quanta(MAX(szone->large_threshold + 1, size)) >> vm_page_quanta_shift;
void *p;
if (num_kernel_pages == 0) {
p = NULL;
} else {
p = large_malloc(szone, num_kernel_pages, MAX(vm_page_quanta_shift, __builtin_ctz((unsigned)alignment)), 0);
}
return p;
}
}
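/*
 * Allocate up to 'count' tiny blocks of the same size directly from the
 * calling thread's magazine free list, taking its lock only once.
 * Returns the number of slots actually filled.
 */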
unsigned
szone_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count)
{
msize_t msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1);
unsigned found = 0;
mag_index_t mag_index = mag_get_thread_index(szone);
magazine_t *tiny_mag_ptr = &(szone->tiny_magazines[mag_index]);
if (size > SMALL_THRESHOLD) {
return 0;
}
if (!msize) {
msize = 1;
}
CHECK(szone, __PRETTY_FUNCTION__);
SZONE_MAGAZINE_PTR_LOCK(szone, tiny_mag_ptr);
while (found < count) {
void *ptr = tiny_malloc_from_free_list(szone, tiny_mag_ptr, mag_index, msize);
if (!ptr) {
break;
}
*results++ = ptr;
found++;
}
SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
return found;
}
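/*
 * Free a batch of pointers, keeping a tiny magazine locked across consecutive
 * pointers from the same region; anything that cannot be handled on the fast
 * path is left in the array and freed via szone_free() afterwards.
 */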
void
szone_batch_free(szone_t *szone, void **to_be_freed, unsigned count)
{
unsigned cc = 0;
void *ptr;
region_t tiny_region = NULL;
boolean_t is_free;
msize_t msize;
magazine_t *tiny_mag_ptr = NULL;
mag_index_t mag_index = -1;
if (!count) {
return;
}
CHECK(szone, __PRETTY_FUNCTION__);
while (cc < count) {
ptr = to_be_freed[cc];
if (ptr) {
if (NULL == tiny_region || tiny_region != TINY_REGION_FOR_PTR(ptr)) {
if (tiny_mag_ptr) {
SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
tiny_mag_ptr = NULL;
}
tiny_region = tiny_region_for_ptr_no_lock(szone, ptr);
if (tiny_region) {
tiny_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->tiny_magazines,
REGION_TRAILER_FOR_TINY_REGION(tiny_region), MAGAZINE_INDEX_FOR_TINY_REGION(tiny_region));
mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(tiny_region);
}
}
if (tiny_region) {
if (TINY_INDEX_FOR_PTR(ptr) >= NUM_TINY_BLOCKS) {
break;
}
msize = get_tiny_meta_header(ptr, &is_free);
if (is_free) {
break;
}
if (!tiny_free_no_lock(szone, tiny_mag_ptr, mag_index, tiny_region, ptr, msize)) {
tiny_mag_ptr = NULL;
tiny_region = NULL;
}
to_be_freed[cc] = NULL;
} else {
break;
}
}
cc++;
}
if (tiny_mag_ptr) {
SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
tiny_mag_ptr = NULL;
}
CHECK(szone, __PRETTY_FUNCTION__);
while (count--) {
ptr = to_be_freed[count];
if (ptr) {
szone_free(szone, ptr);
}
}
}
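/*
 * Tear down the zone: drain the large-entry cache, unmap all large
 * allocations, tiny/small regions and region hash tables, the magazines,
 * and finally the szone structure itself.
 */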
static void
szone_destroy(szone_t *szone)
{
size_t index;
large_entry_t *large;
vm_range_t range_to_deallocate;
#if CONFIG_LARGE_CACHE
SZONE_LOCK(szone);
szone->flotsam_enabled = FALSE;
int idx = szone->large_entry_cache_oldest, idx_max = szone->large_entry_cache_newest;
large_entry_t local_entry_cache[LARGE_ENTRY_CACHE_SIZE];
memcpy((void *)local_entry_cache, (void *)szone->large_entry_cache, sizeof(local_entry_cache));
szone->large_entry_cache_oldest = szone->large_entry_cache_newest = 0;
szone->large_entry_cache[0].address = 0x0;
szone->large_entry_cache[0].size = 0;
szone->large_entry_cache_bytes = 0;
szone->large_entry_cache_reserve_bytes = 0;
SZONE_UNLOCK(szone);
while (idx != idx_max) {
deallocate_pages(szone, (void *)local_entry_cache[idx].address, local_entry_cache[idx].size, 0);
if (++idx == LARGE_ENTRY_CACHE_SIZE) {
idx = 0;
}
}
if (0 != local_entry_cache[idx].address && 0 != local_entry_cache[idx].size) {
deallocate_pages(szone, (void *)local_entry_cache[idx].address, local_entry_cache[idx].size, 0);
}
#endif
index = szone->num_large_entries;
while (index--) {
large = szone->large_entries + index;
if (large->address) {
deallocate_pages(szone, (void *)(large->address), large->size, szone->debug_flags);
}
}
large_entries_free_no_lock(szone, szone->large_entries, szone->num_large_entries, &range_to_deallocate);
if (range_to_deallocate.size) {
deallocate_pages(szone, (void *)range_to_deallocate.address, (size_t)range_to_deallocate.size, 0);
}
for (index = 0; index < szone->tiny_region_generation->num_regions_allocated; ++index) {
if ((HASHRING_OPEN_ENTRY != szone->tiny_region_generation->hashed_regions[index]) &&
(HASHRING_REGION_DEALLOCATED != szone->tiny_region_generation->hashed_regions[index])) {
deallocate_pages(szone, szone->tiny_region_generation->hashed_regions[index], TINY_REGION_SIZE, 0);
}
}
for (index = 0; index < szone->small_region_generation->num_regions_allocated; ++index) {
if ((HASHRING_OPEN_ENTRY != szone->small_region_generation->hashed_regions[index]) &&
(HASHRING_REGION_DEALLOCATED != szone->small_region_generation->hashed_regions[index])) {
deallocate_pages(szone, szone->small_region_generation->hashed_regions[index], SMALL_REGION_SIZE, 0);
}
}
if (szone->tiny_region_generation->hashed_regions != szone->initial_tiny_regions) {
size_t size = round_page_quanta(szone->tiny_region_generation->num_regions_allocated * sizeof(region_t));
deallocate_pages(szone, szone->tiny_region_generation->hashed_regions, size, 0);
}
if (szone->small_region_generation->hashed_regions != szone->initial_small_regions) {
size_t size = round_page_quanta(szone->small_region_generation->num_regions_allocated * sizeof(region_t));
deallocate_pages(szone, szone->small_region_generation->hashed_regions, size, 0);
}
deallocate_pages(szone, (void *)&(szone->tiny_magazines[-1]), TINY_MAGAZINE_PAGED_SIZE, MALLOC_ADD_GUARD_PAGES);
deallocate_pages(szone, (void *)&(szone->small_magazines[-1]), SMALL_MAGAZINE_PAGED_SIZE, MALLOC_ADD_GUARD_PAGES);
deallocate_pages(szone, (void *)szone, SZONE_PAGED_SIZE, 0);
}
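/*
 * Round a request up to the size that would actually be allocated for it
 * (tiny/small quantum multiples, or whole pages for large requests).
 */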
size_t
szone_good_size(szone_t *szone, size_t size)
{
msize_t msize;
if (size <= SMALL_THRESHOLD) {
msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1);
if (!msize) {
msize = 1;
}
return TINY_BYTES_FOR_MSIZE(msize);
}
if (size <= szone->large_threshold) {
msize = SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1);
if (!msize) {
msize = 1;
}
return SMALL_BYTES_FOR_MSIZE(msize);
}
if (size > round_page_quanta(size)) {
return (size_t)(-1LL);
}
#if DEBUG_MALLOC
if (size == 0) {
malloc_printf("szone_good_size() invariant broken %y\n", size);
}
#endif
return round_page_quanta(size);
}
unsigned szone_check_counter = 0;
unsigned szone_check_start = 0;
unsigned szone_check_modulo = 1;
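/*
 * Walk every tiny/small region and free list, validating their metadata.
 * On failure, region checking is disabled and the problem is reported.
 */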
static MALLOC_NOINLINE boolean_t
szone_check_all(szone_t *szone, const char *function)
{
size_t index;
for (index = 0; index < szone->tiny_region_generation->num_regions_allocated; ++index) {
region_t tiny = szone->tiny_region_generation->hashed_regions[index];
if (HASHRING_REGION_DEALLOCATED == tiny) {
continue;
}
if (tiny) {
magazine_t *tiny_mag_ptr = mag_lock_zine_for_region_trailer(
szone, szone->tiny_magazines, REGION_TRAILER_FOR_TINY_REGION(tiny), MAGAZINE_INDEX_FOR_TINY_REGION(tiny));
if (!tiny_check_region(szone, tiny)) {
SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
szone->debug_flags &= ~CHECK_REGIONS;
szone_error(szone, 1, "check: tiny region incorrect", NULL,
"*** tiny region %ld incorrect szone_check_all(%s) counter=%d\n", index, function, szone_check_counter);
return 0;
}
SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
}
}
for (index = 0; index < NUM_TINY_SLOTS; ++index) {
if (!tiny_free_list_check(szone, (grain_t)index)) {
szone->debug_flags &= ~CHECK_REGIONS;
szone_error(szone, 1, "check: tiny free list incorrect", NULL,
"*** tiny free list incorrect (slot=%ld) szone_check_all(%s) counter=%d\n", index, function,
szone_check_counter);
return 0;
}
}
for (index = 0; index < szone->small_region_generation->num_regions_allocated; ++index) {
region_t small = szone->small_region_generation->hashed_regions[index];
if (HASHRING_REGION_DEALLOCATED == small) {
continue;
}
if (small) {
magazine_t *small_mag_ptr = mag_lock_zine_for_region_trailer(
szone, szone->small_magazines, REGION_TRAILER_FOR_SMALL_REGION(small), MAGAZINE_INDEX_FOR_SMALL_REGION(small));
if (!small_check_region(szone, small)) {
SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
szone->debug_flags &= ~CHECK_REGIONS;
szone_error(szone, 1, "check: small region incorrect", NULL,
"*** small region %ld incorrect szone_check_all(%s) counter=%d\n", index, function, szone_check_counter);
return 0;
}
SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
}
}
for (index = 0; index < szone->num_small_slots; ++index) {
if (!small_free_list_check(szone, (grain_t)index)) {
szone->debug_flags &= ~CHECK_REGIONS;
szone_error(szone, 1, "check: small free list incorrect", NULL,
"*** small free list incorrect (slot=%ld) szone_check_all(%s) counter=%d\n", index, function,
szone_check_counter);
return 0;
}
}
return 1;
}
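/*
 * Rate-limited wrapper around szone_check_all(), controlled by the
 * szone_check_start and szone_check_modulo globals.
 */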
static boolean_t
szone_check(szone_t *szone)
{
if ((++szone_check_counter % 10000) == 0) {
_malloc_printf(ASL_LEVEL_NOTICE, "at szone_check counter=%d\n", szone_check_counter);
}
if (szone_check_counter < szone_check_start) {
return 1;
}
if (szone_check_counter % szone_check_modulo) {
return 1;
}
return szone_check_all(szone, "");
}
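/*
 * Introspection entry point: enumerate allocated ranges in a (possibly remote)
 * task by reading the zone through 'reader' and delegating to the tiny, small,
 * and large enumerators.
 */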
static kern_return_t
szone_ptr_in_use_enumerator(task_t task,
void *context,
unsigned type_mask,
vm_address_t zone_address,
memory_reader_t reader,
vm_range_recorder_t recorder)
{
szone_t *szone;
kern_return_t err;
if (!reader) {
reader = _szone_default_reader;
}
err = reader(task, zone_address, sizeof(szone_t), (void **)&szone);
if (err) {
return err;
}
err = tiny_in_use_enumerator(task, context, type_mask, szone, reader, recorder);
if (err) {
return err;
}
err = small_in_use_enumerator(task, context, type_mask, szone, reader, recorder);
if (err) {
return err;
}
err = large_in_use_enumerator(
task, context, type_mask, (vm_address_t)szone->large_entries, szone->num_large_entries, reader, recorder);
return err;
}
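/*
 * Fill the info[] array with zone counters. Layout: [0] blocks in use,
 * [1] bytes in use, [2] bytes "touched" (allocated minus the magazines'
 * free-at-start/end slack), [3] bytes allocated, [4]/[5] tiny count/bytes,
 * [6]/[7] small count/bytes, [8]/[9] large count/bytes, [10]/[11] huge
 * count/bytes (always 0 here), [12] debug flags.
 */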
void
scalable_zone_info(malloc_zone_t *zone, unsigned *info_to_fill, unsigned count)
{
szone_t *szone = (void *)zone;
unsigned info[13];
size_t s = 0;
unsigned t = 0;
size_t u = 0;
mag_index_t mag_index;
for (mag_index = -1; mag_index < szone->num_tiny_magazines; mag_index++) {
s += szone->tiny_magazines[mag_index].mag_bytes_free_at_start;
s += szone->tiny_magazines[mag_index].mag_bytes_free_at_end;
t += szone->tiny_magazines[mag_index].mag_num_objects;
u += szone->tiny_magazines[mag_index].mag_num_bytes_in_objects;
}
info[4] = (unsigned)t;
info[5] = (unsigned)u;
for (t = 0, u = 0, mag_index = -1; mag_index < szone->num_small_magazines; mag_index++) {
s += szone->small_magazines[mag_index].mag_bytes_free_at_start;
s += szone->small_magazines[mag_index].mag_bytes_free_at_end;
t += szone->small_magazines[mag_index].mag_num_objects;
u += szone->small_magazines[mag_index].mag_num_bytes_in_objects;
}
info[6] = (unsigned)t;
info[7] = (unsigned)u;
info[8] = (unsigned)szone->num_large_objects_in_use;
info[9] = (unsigned)szone->num_bytes_in_large_objects;
info[10] = 0;
info[11] = 0;
info[12] = szone->debug_flags;
info[0] = info[4] + info[6] + info[8] + info[10];
info[1] = info[5] + info[7] + info[9] + info[11];
info[3] = (unsigned)(szone->num_tiny_regions - szone->num_tiny_regions_dealloc) * TINY_REGION_SIZE +
(unsigned)(szone->num_small_regions - szone->num_small_regions_dealloc) * SMALL_REGION_SIZE + info[9] + info[11];
info[2] = info[3] - (unsigned)s;
memcpy(info_to_fill, info, sizeof(unsigned) * count);
}
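/* Print a summary of the zone, with per-region and free-list detail when 'verbose' is set. */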
static MALLOC_NOINLINE void
szone_print(szone_t *szone, boolean_t verbose)
{
unsigned info[13];
size_t index;
region_t region;
scalable_zone_info((void *)szone, info, 13);
_malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX,
"Scalable zone %p: inUse=%u(%y) touched=%y allocated=%y flags=%d\n", szone, info[0], info[1], info[2], info[3],
info[12]);
_malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "\ttiny=%u(%y) small=%u(%y) large=%u(%y) huge=%u(%y)\n", info[4],
info[5], info[6], info[7], info[8], info[9], info[10], info[11]);
_malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%lu tiny regions:\n", szone->num_tiny_regions);
if (szone->num_tiny_regions_dealloc) {
_malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "[%lu tiny regions have been vm_deallocate'd]\n",
szone->num_tiny_regions_dealloc);
}
for (index = 0; index < szone->tiny_region_generation->num_regions_allocated; ++index) {
region = szone->tiny_region_generation->hashed_regions[index];
if (HASHRING_OPEN_ENTRY != region && HASHRING_REGION_DEALLOCATED != region) {
mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(region);
print_tiny_region(verbose, region, (region == szone->tiny_magazines[mag_index].mag_last_region)
? szone->tiny_magazines[mag_index].mag_bytes_free_at_start
: 0,
(region == szone->tiny_magazines[mag_index].mag_last_region)
? szone->tiny_magazines[mag_index].mag_bytes_free_at_end
: 0);
}
}
if (verbose) {
print_tiny_free_list(szone);
}
_malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%lu small regions:\n", szone->num_small_regions);
if (szone->num_small_regions_dealloc) {
_malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "[%lu small regions have been vm_deallocate'd]\n",
szone->num_small_regions_dealloc);
}
for (index = 0; index < szone->small_region_generation->num_regions_allocated; ++index) {
region = szone->small_region_generation->hashed_regions[index];
if (HASHRING_OPEN_ENTRY != region && HASHRING_REGION_DEALLOCATED != region) {
mag_index_t mag_index = MAGAZINE_INDEX_FOR_SMALL_REGION(region);
print_small_region(szone, verbose, region, (region == szone->small_magazines[mag_index].mag_last_region)
? szone->small_magazines[mag_index].mag_bytes_free_at_start
: 0,
(region == szone->small_magazines[mag_index].mag_last_region)
? szone->small_magazines[mag_index].mag_bytes_free_at_end
: 0);
}
}
if (verbose) {
print_small_free_list(szone);
}
}
static void
szone_log(malloc_zone_t *zone, void *log_address)
{
szone_t *szone = (szone_t *)zone;
szone->log_address = log_address;
}
static MALLOC_INLINE void
szone_force_lock_magazine(szone_t *szone, magazine_t *mag)
{
while (1) {
SZONE_MAGAZINE_PTR_LOCK(szone, mag);
if (!mag->alloc_underway) {
return;
}
SZONE_MAGAZINE_PTR_UNLOCK(szone, mag);
yield();
}
}
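/*
 * force_lock / force_unlock / reinit_lock are the fork hooks: take every
 * magazine lock plus the zone lock before fork, and release or reinitialize
 * them afterwards.
 */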
static void
szone_force_lock(szone_t *szone)
{
mag_index_t i;
for (i = 0; i < szone->num_tiny_magazines; ++i) {
szone_force_lock_magazine(szone, &szone->tiny_magazines[i]);
}
szone_force_lock_magazine(szone, &szone->tiny_magazines[DEPOT_MAGAZINE_INDEX]);
for (i = 0; i < szone->num_small_magazines; ++i) {
szone_force_lock_magazine(szone, &szone->small_magazines[i]);
}
szone_force_lock_magazine(szone, &szone->small_magazines[DEPOT_MAGAZINE_INDEX]);
SZONE_LOCK(szone);
}
static void
szone_force_unlock(szone_t *szone)
{
mag_index_t i;
SZONE_UNLOCK(szone);
for (i = -1; i < szone->num_small_magazines; ++i) {
SZONE_MAGAZINE_PTR_UNLOCK(szone, (&(szone->small_magazines[i])));
}
for (i = -1; i < szone->num_tiny_magazines; ++i) {
SZONE_MAGAZINE_PTR_UNLOCK(szone, (&(szone->tiny_magazines[i])));
}
}
static void
szone_reinit_lock(szone_t *szone)
{
mag_index_t i;
SZONE_REINIT_LOCK(szone);
for (i = -1; i < szone->num_small_magazines; ++i) {
SZONE_MAGAZINE_PTR_REINIT_LOCK(szone, (&(szone->small_magazines[i])));
}
for (i = -1; i < szone->num_tiny_magazines; ++i) {
SZONE_MAGAZINE_PTR_REINIT_LOCK(szone, (&(szone->tiny_magazines[i])));
}
}
static boolean_t
szone_locked(szone_t *szone)
{
mag_index_t i;
int tookLock;
tookLock = SZONE_TRY_LOCK(szone);
if (tookLock == 0) {
return 1;
}
SZONE_UNLOCK(szone);
for (i = -1; i < szone->num_small_magazines; ++i) {
tookLock = SZONE_MAGAZINE_PTR_TRY_LOCK(szone, (&(szone->small_magazines[i])));
if (tookLock == 0) {
return 1;
}
SZONE_MAGAZINE_PTR_UNLOCK(szone, (&(szone->small_magazines[i])));
}
for (i = -1; i < szone->num_tiny_magazines; ++i) {
tookLock = SZONE_MAGAZINE_PTR_TRY_LOCK(szone, (&(szone->tiny_magazines[i])));
if (tookLock == 0) {
return 1;
}
SZONE_MAGAZINE_PTR_UNLOCK(szone, (&(szone->tiny_magazines[i])));
}
return 0;
}
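/*
 * Memory pressure hook: move every populated tiny and small region to its
 * depot magazine and madvise its free pages back to the kernel, then flush
 * the large-entry cache. The returned byte count reflects the flushed large
 * cache entries.
 */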
size_t
szone_pressure_relief(szone_t *szone, size_t goal)
{
size_t total = 0;
#if CONFIG_MADVISE_PRESSURE_RELIEF
mag_index_t mag_index;
magazine_t *tiny_depot_ptr = (&szone->tiny_magazines[DEPOT_MAGAZINE_INDEX]);
magazine_t *small_depot_ptr = (&szone->small_magazines[DEPOT_MAGAZINE_INDEX]);
for (mag_index = 0; mag_index < szone->num_tiny_magazines; mag_index++) {
size_t index;
for (index = 0; index < szone->tiny_region_generation->num_regions_allocated; ++index) {
SZONE_LOCK(szone);
region_t tiny = szone->tiny_region_generation->hashed_regions[index];
if (!tiny || tiny == HASHRING_REGION_DEALLOCATED) {
SZONE_UNLOCK(szone);
continue;
}
magazine_t *mag_ptr = mag_lock_zine_for_region_trailer(
szone, szone->tiny_magazines, REGION_TRAILER_FOR_TINY_REGION(tiny), MAGAZINE_INDEX_FOR_TINY_REGION(tiny));
SZONE_UNLOCK(szone);
mag_index_t src_mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(tiny);
if (src_mag_index == DEPOT_MAGAZINE_INDEX) {
SZONE_MAGAZINE_PTR_UNLOCK(szone, mag_ptr);
continue;
}
if (tiny == mag_ptr->mag_last_region && (mag_ptr->mag_bytes_free_at_end || mag_ptr->mag_bytes_free_at_start)) {
tiny_finalize_region(szone, mag_ptr);
}
recirc_list_extract(szone, mag_ptr, REGION_TRAILER_FOR_TINY_REGION(tiny));
int objects_in_use = tiny_free_detach_region(szone, mag_ptr, tiny);
SZONE_MAGAZINE_PTR_LOCK(szone, tiny_depot_ptr);
MAGAZINE_INDEX_FOR_TINY_REGION(tiny) = DEPOT_MAGAZINE_INDEX;
REGION_TRAILER_FOR_TINY_REGION(tiny)->pinned_to_depot = 0;
size_t bytes_inplay = tiny_free_reattach_region(szone, tiny_depot_ptr, tiny);
mag_ptr->mag_num_bytes_in_objects -= bytes_inplay;
mag_ptr->num_bytes_in_magazine -= TINY_REGION_PAYLOAD_BYTES;
mag_ptr->mag_num_objects -= objects_in_use;
SZONE_MAGAZINE_PTR_UNLOCK(szone, mag_ptr);
tiny_depot_ptr->mag_num_bytes_in_objects += bytes_inplay;
tiny_depot_ptr->num_bytes_in_magazine += TINY_REGION_PAYLOAD_BYTES;
tiny_depot_ptr->mag_num_objects += objects_in_use; // the depot gains what the source magazine gave up
recirc_list_splice_last(szone, tiny_depot_ptr, REGION_TRAILER_FOR_TINY_REGION(tiny));
tiny_free_scan_madvise_free(szone, tiny_depot_ptr, tiny);
SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_depot_ptr);
}
}
for (mag_index = 0; mag_index < szone->num_small_magazines; mag_index++) {
size_t index;
for (index = 0; index < szone->small_region_generation->num_regions_allocated; ++index) {
SZONE_LOCK(szone);
region_t small = szone->small_region_generation->hashed_regions[index];
if (!small || small == HASHRING_REGION_DEALLOCATED) {
SZONE_UNLOCK(szone);
continue;
}
magazine_t *mag_ptr = mag_lock_zine_for_region_trailer(
szone, szone->small_magazines, REGION_TRAILER_FOR_SMALL_REGION(small), MAGAZINE_INDEX_FOR_SMALL_REGION(small));
SZONE_UNLOCK(szone);
mag_index_t src_mag_index = MAGAZINE_INDEX_FOR_SMALL_REGION(small);
if (src_mag_index == DEPOT_MAGAZINE_INDEX) {
SZONE_MAGAZINE_PTR_UNLOCK(szone, mag_ptr);
continue;
}
if (small == mag_ptr->mag_last_region && (mag_ptr->mag_bytes_free_at_end || mag_ptr->mag_bytes_free_at_start)) {
small_finalize_region(szone, mag_ptr);
}
recirc_list_extract(szone, mag_ptr, REGION_TRAILER_FOR_SMALL_REGION(small));
int objects_in_use = small_free_detach_region(szone, mag_ptr, small);
SZONE_MAGAZINE_PTR_LOCK(szone, small_depot_ptr);
MAGAZINE_INDEX_FOR_SMALL_REGION(small) = DEPOT_MAGAZINE_INDEX;
REGION_TRAILER_FOR_SMALL_REGION(small)->pinned_to_depot = 0;
size_t bytes_inplay = small_free_reattach_region(szone, small_depot_ptr, small);
mag_ptr->mag_num_bytes_in_objects -= bytes_inplay;
mag_ptr->num_bytes_in_magazine -= SMALL_REGION_PAYLOAD_BYTES;
mag_ptr->mag_num_objects -= objects_in_use;
SZONE_MAGAZINE_PTR_UNLOCK(szone, mag_ptr);
small_depot_ptr->mag_num_bytes_in_objects += bytes_inplay;
small_depot_ptr->num_bytes_in_magazine += SMALL_REGION_PAYLOAD_BYTES;
small_depot_ptr->mag_num_objects += objects_in_use; // the depot gains what the source magazine gave up
recirc_list_splice_last(szone, small_depot_ptr, REGION_TRAILER_FOR_SMALL_REGION(small));
small_free_scan_madvise_free(szone, small_depot_ptr, small);
SZONE_MAGAZINE_PTR_UNLOCK(szone, small_depot_ptr);
}
}
#endif
#if CONFIG_LARGE_CACHE
if (szone->flotsam_enabled) {
SZONE_LOCK(szone);
int idx = szone->large_entry_cache_oldest, idx_max = szone->large_entry_cache_newest;
large_entry_t local_entry_cache[LARGE_ENTRY_CACHE_SIZE];
memcpy((void *)local_entry_cache, (void *)szone->large_entry_cache, sizeof(local_entry_cache));
szone->large_entry_cache_oldest = szone->large_entry_cache_newest = 0;
szone->large_entry_cache[0].address = 0x0;
szone->large_entry_cache[0].size = 0;
szone->large_entry_cache_bytes = 0;
szone->large_entry_cache_reserve_bytes = 0;
szone->flotsam_enabled = FALSE;
SZONE_UNLOCK(szone);
while (idx != idx_max) {
deallocate_pages(szone, (void *)local_entry_cache[idx].address, local_entry_cache[idx].size, 0);
total += local_entry_cache[idx].size;
if (++idx == LARGE_ENTRY_CACHE_SIZE) {
idx = 0;
}
}
if (0 != local_entry_cache[idx].address && 0 != local_entry_cache[idx].size) {
deallocate_pages(szone, (void *)local_entry_cache[idx].address, local_entry_cache[idx].size, 0);
total += local_entry_cache[idx].size;
}
}
#endif
MAGMALLOC_PRESSURERELIEF((void *)szone, (int)goal, (int)total);
return total;
}
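/*
 * Fill malloc_statistics_t for one subzone: 0 = tiny, 1 = small, 2 = large,
 * 3 = huge (always empty). Returns 0 for any other subzone index.
 */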
boolean_t
scalable_zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats, unsigned subzone)
{
szone_t *szone = (szone_t *)zone;
switch (subzone) {
case 0: {
size_t s = 0;
unsigned t = 0;
size_t u = 0;
mag_index_t mag_index;
for (mag_index = -1; mag_index < szone->num_tiny_magazines; mag_index++) {
s += szone->tiny_magazines[mag_index].mag_bytes_free_at_start;
s += szone->tiny_magazines[mag_index].mag_bytes_free_at_end;
t += szone->tiny_magazines[mag_index].mag_num_objects;
u += szone->tiny_magazines[mag_index].mag_num_bytes_in_objects;
}
stats->blocks_in_use = t;
stats->size_in_use = u;
stats->size_allocated = (szone->num_tiny_regions - szone->num_tiny_regions_dealloc) * TINY_REGION_SIZE;
stats->max_size_in_use = stats->size_allocated - s;
return 1;
}
case 1: {
size_t s = 0;
unsigned t = 0;
size_t u = 0;
mag_index_t mag_index;
for (mag_index = -1; mag_index < szone->num_small_magazines; mag_index++) {
s += szone->small_magazines[mag_index].mag_bytes_free_at_start;
s += szone->small_magazines[mag_index].mag_bytes_free_at_end;
t += szone->small_magazines[mag_index].mag_num_objects;
u += szone->small_magazines[mag_index].mag_num_bytes_in_objects;
}
stats->blocks_in_use = t;
stats->size_in_use = u;
stats->size_allocated = (szone->num_small_regions - szone->num_small_regions_dealloc) * SMALL_REGION_SIZE;
stats->max_size_in_use = stats->size_allocated - s;
return 1;
}
case 2:
stats->blocks_in_use = szone->num_large_objects_in_use;
stats->size_in_use = szone->num_bytes_in_large_objects;
stats->max_size_in_use = stats->size_allocated = stats->size_in_use;
return 1;
case 3:
stats->blocks_in_use = 0;
stats->size_in_use = 0;
stats->max_size_in_use = stats->size_allocated = 0;
return 1;
}
return 0;
}
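/*
 * Aggregate statistics across all tiny and small magazines (including the
 * depot at index -1) plus large allocations.
 */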
static void
szone_statistics(szone_t *szone, malloc_statistics_t *stats)
{
size_t large;
size_t s = 0;
unsigned t = 0;
size_t u = 0;
mag_index_t mag_index;
for (mag_index = -1; mag_index < szone->num_tiny_magazines; mag_index++) {
s += szone->tiny_magazines[mag_index].mag_bytes_free_at_start;
s += szone->tiny_magazines[mag_index].mag_bytes_free_at_end;
t += szone->tiny_magazines[mag_index].mag_num_objects;
u += szone->tiny_magazines[mag_index].mag_num_bytes_in_objects;
}
for (mag_index = -1; mag_index < szone->num_small_magazines; mag_index++) {
s += szone->small_magazines[mag_index].mag_bytes_free_at_start;
s += szone->small_magazines[mag_index].mag_bytes_free_at_end;
t += szone->small_magazines[mag_index].mag_num_objects;
u += szone->small_magazines[mag_index].mag_num_bytes_in_objects;
}
large = szone->num_bytes_in_large_objects + 0;
stats->blocks_in_use = t + szone->num_large_objects_in_use + 0;
stats->size_in_use = u + large;
stats->max_size_in_use = stats->size_allocated =
(szone->num_tiny_regions - szone->num_tiny_regions_dealloc) * TINY_REGION_SIZE +
(szone->num_small_regions - szone->num_small_regions_dealloc) * SMALL_REGION_SIZE + large;
stats->max_size_in_use -= s;
}
const struct malloc_introspection_t szone_introspect = {
(void *)szone_ptr_in_use_enumerator, (void *)szone_good_size, (void *)szone_check, (void *)szone_print, szone_log,
(void *)szone_force_lock, (void *)szone_force_unlock, (void *)szone_statistics, (void *)szone_locked, NULL, NULL, NULL,
NULL,
(void *)szone_reinit_lock,
};
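/*
 * Construct and initialize a scalable zone: set up the region hash
 * generations, size thresholds, the large-entry cache, the ASLR entropic
 * window, per-CPU tiny and small magazines, and the malloc_zone_t function
 * table.
 */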
szone_t *
create_scalable_szone(size_t initial_size, unsigned debug_flags)
{
szone_t *szone;
#if defined(__i386__) || defined(__x86_64__)
if (_COMM_PAGE_VERSION_REQD > (*((uint16_t *)_COMM_PAGE_VERSION))) {
MALLOC_PRINTF_FATAL_ERROR((*((uint16_t *)_COMM_PAGE_VERSION)), "comm page version mismatch");
}
#endif
szone = allocate_pages(NULL, SZONE_PAGED_SIZE, 0, 0, VM_MEMORY_MALLOC);
if (!szone) {
return NULL;
}
#if 0
#warning CHECK_REGIONS enabled
debug_flags |= CHECK_REGIONS;
#endif
#if 0
#warning LOG enabled
szone->log_address = ~0;
#endif
szone->trg[0].nextgen = &(szone->trg[1]);
szone->trg[1].nextgen = &(szone->trg[0]);
szone->tiny_region_generation = &(szone->trg[0]);
szone->tiny_region_generation->hashed_regions = szone->initial_tiny_regions;
szone->tiny_region_generation->num_regions_allocated = INITIAL_NUM_REGIONS;
szone->tiny_region_generation->num_regions_allocated_shift = INITIAL_NUM_REGIONS_SHIFT;
szone->srg[0].nextgen = &(szone->srg[1]);
szone->srg[1].nextgen = &(szone->srg[0]);
szone->small_region_generation = &(szone->srg[0]);
szone->small_region_generation->hashed_regions = szone->initial_small_regions;
szone->small_region_generation->num_regions_allocated = INITIAL_NUM_REGIONS;
szone->small_region_generation->num_regions_allocated_shift = INITIAL_NUM_REGIONS_SHIFT;
#if CONFIG_SMALL_CUTTOFF_127KB
szone->is_largemem = 1;
szone->num_small_slots = NUM_SMALL_SLOTS_LARGEMEM;
szone->large_threshold = LARGE_THRESHOLD_LARGEMEM;
szone->vm_copy_threshold = VM_COPY_THRESHOLD_LARGEMEM;
#else // CONFIG_SMALL_CUTTOFF_127KB
szone->is_largemem = 0;
szone->num_small_slots = NUM_SMALL_SLOTS;
szone->large_threshold = LARGE_THRESHOLD;
szone->vm_copy_threshold = VM_COPY_THRESHOLD;
#endif // CONFIG_SMALL_CUTTOFF_127KB
#if CONFIG_LARGE_CACHE
uint64_t hw_memsize = 0;
#if CONFIG_HAS_COMMPAGE_MEMSIZE
hw_memsize = *(uint64_t *)(uintptr_t)_COMM_PAGE_MEMORY_SIZE;
#else
size_t uint64_t_size = sizeof(hw_memsize);
(void)sysctlbyname("hw.memsize", &hw_memsize, &uint64_t_size, 0, 0);
#endif
szone->large_entry_cache_reserve_limit = hw_memsize >> 10;
int32_t libSystemVersion = NSVersionOfLinkTimeLibrary("System");
if ((-1 != libSystemVersion) && ((libSystemVersion >> 16) < 112) ) {
szone->large_legacy_reset_mprotect = TRUE;
} else {
szone->large_legacy_reset_mprotect = FALSE;
}
#endif
#if __i386__ || __x86_64__ || __arm64__ || TARGET_OS_EMBEDDED
#if __i386__
uintptr_t stackbase = 0x8fe00000;
int entropic_bits = 3;
#elif __x86_64__
uintptr_t stackbase = USRSTACK64;
int entropic_bits = 16;
#elif __arm64__
uintptr_t stackbase = USRSTACK64;
int entropic_bits = 7;
#else
uintptr_t stackbase = USRSTACK;
int entropic_bits = 3;
#endif
if (0 != _dyld_get_image_slide((const struct mach_header *)_NSGetMachExecuteHeader())) {
if (0 == entropic_address) {
uintptr_t t = stackbase - MAXSSIZ - ((uintptr_t)(malloc_entropy[1] & ((1 << entropic_bits) - 1)) << ENTROPIC_SHIFT);
(void)__sync_bool_compare_and_swap(&entropic_limit, 0, t);
(void)__sync_bool_compare_and_swap(&entropic_address, 0, t - ENTROPIC_KABILLION);
}
debug_flags &= ~DISABLE_ASLR;
} else {
malloc_entropy[0] = 0;
malloc_entropy[1] = 0;
debug_flags |= DISABLE_ASLR;
}
#else
malloc_entropy[0] = 0;
malloc_entropy[1] = 0;
debug_flags |= DISABLE_ASLR;
#endif
szone->cookie = (uintptr_t)malloc_entropy[0];
szone->basic_zone.version = 9;
szone->basic_zone.size = (void *)szone_size;
szone->basic_zone.malloc = (void *)szone_malloc;
szone->basic_zone.calloc = (void *)szone_calloc;
szone->basic_zone.valloc = (void *)szone_valloc;
szone->basic_zone.free = (void *)szone_free;
szone->basic_zone.realloc = (void *)szone_realloc;
szone->basic_zone.destroy = (void *)szone_destroy;
szone->basic_zone.batch_malloc = (void *)szone_batch_malloc;
szone->basic_zone.batch_free = (void *)szone_batch_free;
szone->basic_zone.introspect = (struct malloc_introspection_t *)&szone_introspect;
szone->basic_zone.memalign = (void *)szone_memalign;
szone->basic_zone.free_definite_size = (void *)szone_free_definite_size;
szone->basic_zone.pressure_relief = (void *)szone_pressure_relief;
szone->basic_zone.reserved1 = 0;
szone->basic_zone.reserved2 = 0;
mprotect(szone, sizeof(szone->basic_zone), PROT_READ);
szone->debug_flags = debug_flags;
_malloc_lock_init(&szone->large_szone_lock);
szone->cpu_id_key = -1UL;
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__arm64__)
int nproc = *(uint8_t *)(uintptr_t)_COMM_PAGE_NCPUS;
#else
int nproc = sysconf(_SC_NPROCESSORS_CONF);
#endif
szone->num_tiny_magazines = (nproc > 1) ? MIN(nproc, TINY_MAX_MAGAZINES) : 1;
magazine_t *tiny_magazines =
allocate_pages(NULL, TINY_MAGAZINE_PAGED_SIZE, 0, MALLOC_ADD_GUARD_PAGES, VM_MEMORY_MALLOC);
if (NULL == tiny_magazines) {
return NULL;
}
szone->tiny_magazines = &(tiny_magazines[1]);
szone->num_tiny_magazines_mask_shift = 0;
int i = 1;
while (i <= (szone->num_tiny_magazines - 1)) {
szone->num_tiny_magazines_mask_shift++;
i <<= 1;
}
if (i > TINY_MAX_MAGAZINES) {
MALLOC_PRINTF_FATAL_ERROR(i, "magazine mask exceeds allocated magazines");
}
szone->num_tiny_magazines_mask = i - 1;
szone->last_tiny_advise = 0;
_malloc_lock_init(&szone->tiny_regions_lock);
_malloc_lock_init(&szone->tiny_magazines[DEPOT_MAGAZINE_INDEX].magazine_lock);
for (i = 0; i < szone->num_tiny_magazines; ++i) {
_malloc_lock_init(&szone->tiny_magazines[i].magazine_lock);
}
szone->num_small_magazines = (nproc > 1) ? MIN(nproc, SMALL_MAX_MAGAZINES) : 1;
magazine_t *small_magazines =
allocate_pages(NULL, SMALL_MAGAZINE_PAGED_SIZE, 0, MALLOC_ADD_GUARD_PAGES, VM_MEMORY_MALLOC);
if (NULL == small_magazines) {
return NULL;
}
szone->small_magazines = &(small_magazines[1]);
szone->num_small_magazines_mask_shift = 0;
while (i <= (szone->num_small_magazines - 1)) {
szone->num_small_magazines_mask_shift++;
i <<= 1;
}
if (i > SMALL_MAX_MAGAZINES) {
MALLOC_PRINTF_FATAL_ERROR(i, "magazine mask exceeds allocated magazines");
}
szone->num_small_magazines_mask = i - 1;
szone->last_small_advise = 0;
_malloc_lock_init(&szone->small_regions_lock);
_malloc_lock_init(&szone->small_magazines[DEPOT_MAGAZINE_INDEX].magazine_lock);
for (i = 0; i < szone->num_small_magazines; ++i) {
_malloc_lock_init(&szone->small_magazines[i].magazine_lock);
}
CHECK(szone, __PRETTY_FUNCTION__);
return szone;
}
malloc_zone_t *
create_scalable_zone(size_t initial_size, unsigned debug_flags)
{
return (malloc_zone_t *)create_scalable_szone(initial_size, debug_flags);
}