// Core malloc zone registry: includes, the global zone-array lock, and
// process-wide configuration globals.
#include "internal.h"
// On iOS, promote INFO messages to NOTICE so they are not dropped.
#if TARGET_OS_IPHONE
#undef ASL_LEVEL_INFO
#define ASL_LEVEL_INFO ASL_LEVEL_NOTICE
#endif // TARGET_OS_IPHONE
#define USE_SLEEP_RATHER_THAN_ABORT 0
// Lock protecting the malloc_zones array and zone registration state.
static _malloc_lock_s _malloc_lock = _MALLOC_LOCK_INIT;
#define MALLOC_LOCK() _malloc_lock_lock(&_malloc_lock)
#define MALLOC_TRY_LOCK() _malloc_lock_trylock(&_malloc_lock)
#define MALLOC_UNLOCK() _malloc_lock_unlock(&_malloc_lock)
#define MALLOC_REINIT_LOCK() _malloc_lock_init(&_malloc_lock)
// Registered-zone table; poisoned with a sentinel until initialization
// replaces it (see malloc_zone_register_while_locked).
int32_t malloc_num_zones = 0;
int32_t malloc_num_zones_allocated = 0;
malloc_zone_t **malloc_zones = (malloc_zone_t **)0xdeaddeaddeaddead;
// Optional allocation-event hook (installed by stack-logging tooling).
malloc_logger_t *malloc_logger = NULL;
// The zones created by _malloc_initialize(); initial_default_zone is
// whichever of them ended up as zone 0.
static malloc_zone_t *initial_scalable_zone;
static malloc_zone_t *initial_nano_zone;
static malloc_zone_t *initial_default_zone = NULL;
unsigned malloc_debug_flags = 0;
bool malloc_tracing_enabled = false;
bool malloc_space_efficient_enabled = false;
bool malloc_medium_space_efficient_enabled = false;
// MallocCheckHeap bookkeeping (see internal_check()).
unsigned malloc_check_start = 0; unsigned malloc_check_counter = 0;
unsigned malloc_check_each = 1000;
static int malloc_check_sleep = 100; static int malloc_check_abort = 0;
// Interface to the malloc stack logging (MSL) machinery; the function
// pointers stay NULL until the MSL library is resolved.
// NOTE(review): resolution presumably happens via _dlopen/_dlsym declared
// below — the loading code is outside this chunk; confirm before relying.
static
struct msl {
void *dylib;
void (*handle_memory_event) (unsigned long event);
boolean_t (*stack_logging_locked) (void);
// fork hooks, mirroring pthread_atfork semantics.
void (*fork_prepare) (void);
void (*fork_parent) (void);
void (*fork_child) (void);
kern_return_t (*get_frames_for_address)(task_t task,
mach_vm_address_t address,
mach_vm_address_t *stack_frames_buffer,
uint32_t max_stack_frames,
uint32_t *count);
uint64_t (*stackid_for_vm_region) (task_t task, mach_vm_address_t address);
kern_return_t (*get_frames_for_stackid) (task_t task,
uint64_t stack_identifier,
mach_vm_address_t *stack_frames_buffer,
uint32_t max_stack_frames,
uint32_t *count,
bool *last_frame_is_threadid);
kern_return_t (*uniquing_table_read_stack) (struct backtrace_uniquing_table *uniquing_table,
uint64_t stackid,
mach_vm_address_t *out_frames_buffer,
uint32_t *out_frames_count,
uint32_t max_frames);
} msl = {};
// Ping-pong reader counters used by find_registered_zone(); presumably
// drained by zone unregistration (outside this chunk) — confirm there.
static int32_t volatile counterAlice = 0, counterBob = 0;
static int32_t volatile * volatile pFRZCounterLive = &counterAlice;
static int32_t volatile * volatile pFRZCounterDrain = &counterBob;
// Test override for the reported CPU number; -1 means "not overridden".
unsigned int _os_cpu_number_override = -1;
static inline malloc_zone_t *inline_malloc_default_zone(void) __attribute__((always_inline));
#define MALLOC_LOG_TYPE_ALLOCATE stack_logging_type_alloc
#define MALLOC_LOG_TYPE_DEALLOCATE stack_logging_type_dealloc
#define MALLOC_LOG_TYPE_HAS_ZONE stack_logging_flag_zone
#define MALLOC_LOG_TYPE_CLEARED stack_logging_flag_cleared
// Canonical zone names, assigned in _malloc_initialize().
#define DEFAULT_MALLOC_ZONE_STRING "DefaultMallocZone"
#define DEFAULT_PUREGEABLE_ZONE_STRING "DefaultPurgeableMallocZone"
#define MALLOC_HELPER_ZONE_STRING "MallocHelperZone"
#define MALLOC_PGUARD_ZONE_STRING "PGuardMallocZone"
// CPU topology, filled in by _malloc_initialize() from the commpage.
MALLOC_NOEXPORT
unsigned int phys_ncpus;
MALLOC_NOEXPORT
unsigned int logical_ncpus;
MALLOC_NOEXPORT
unsigned int hyper_shift;
// boot-args keys parsed by __malloc_init_from_bootargs().
static const char max_magazines_boot_arg[] = "malloc_max_magazines";
static const char large_expanded_cache_threshold_boot_arg[] = "malloc_large_expanded_cache_threshold";
#if CONFIG_MEDIUM_ALLOCATOR
static const char medium_enabled_boot_arg[] = "malloc_medium_zone";
static const char max_medium_magazines_boot_arg[] = "malloc_max_medium_magazines";
static const char medium_activation_threshold_boot_arg[] = "malloc_medium_activation_threshold";
static const char medium_space_efficient_boot_arg[] = "malloc_medium_space_efficient";
#endif // CONFIG_MEDIUM_ALLOCATOR
static bool _malloc_entropy_initialized;
// DriverKit has no dyld APIs; stub out dlopen/dlsym there.
#if !TARGET_OS_DRIVERKIT
#include <dlfcn.h>
typedef void * (*dlopen_t) (const char * __path, int __mode);
typedef void * (*dlsym_t) (void * __handle, const char * __symbol);
static dlopen_t _dlopen = NULL;
static dlsym_t _dlsym = NULL;
#else
#define _dlopen(...) NULL
#define _dlsym(...) NULL
#endif // TARGET_OS_DRIVERKIT
void __malloc_init(const char *apple[]);
static void _malloc_initialize(const char *apple[], const char *bootargs);
// Parse "malloc_entropy=<v0>,<v1>" from an apple[] boot string into the
// global malloc_entropy[] array. Returns the number of values parsed
// (0 when no '=' is present).
static int
__entropy_from_kernel(const char *str)
{
	char field[20];
	int filled = 0;

	const char *cursor = strchr(str, '=');
	if (cursor == NULL) {
		return 0;
	}
	cursor++;

	// Walk the comma-separated list, one bounded copy per entry.
	while (cursor && filled < sizeof(malloc_entropy) / sizeof(malloc_entropy[0])) {
		strlcpy(field, cursor, 20);
		char *comma = strchr(field, ',');
		if (comma) {
			*comma = '\0';
		}
		malloc_entropy[filled] = (uint64_t)strtoull_l(field, NULL, 0, NULL);
		filled++;
		cursor = strchr(cursor, ',');
		if (cursor) {
			cursor++;
		}
	}
	return filled;
}
#if TARGET_OS_OSX && defined(__x86_64__)
// Non-zero when this x86_64 process is running under Rosetta translation,
// per the commpage CPU-capability bits.
static uint64_t
__is_translated(void)
{
return (*(uint64_t*)_COMM_PAGE_CPU_CAPABILITIES64) & kIsTranslated;
}
#endif
// Apply malloc-related boot-args (already copied into `bootargs`) to the
// allocator's global tuning knobs. Malformed or negative values are
// reported and ignored.
static void
__malloc_init_from_bootargs(const char *bootargs)
{
char value_buf[256];
// malloc_max_magazines=<n>
const char *flag = malloc_common_value_for_key_copy(bootargs,
max_magazines_boot_arg, value_buf, sizeof(value_buf));
if (flag) {
const char *endp;
long value = malloc_common_convert_to_long(flag, &endp);
if (!*endp && value >= 0) {
max_magazines = (unsigned int)value;
} else {
malloc_report(ASL_LEVEL_ERR,
"malloc_max_magazines must be positive - ignored.\n");
}
}
// malloc_large_expanded_cache_threshold=<n>
flag = malloc_common_value_for_key_copy(bootargs,
large_expanded_cache_threshold_boot_arg, value_buf, sizeof(value_buf));
if (flag) {
const char *endp;
long value = malloc_common_convert_to_long(flag, &endp);
if (!*endp && value >= 0) {
magazine_large_expanded_cache_threshold = (unsigned int)value;
} else {
malloc_report(ASL_LEVEL_ERR,
"malloc_large_expanded_cache_threshold must be positive - ignored.\n");
}
}
#if CONFIG_MEDIUM_ALLOCATOR
// On macOS the medium activation threshold is forced to 0 under Rosetta
// and on arm64. NOTE(review): confirm whether 0 means "always active" or
// "inactive" in the magazine code before relying on this comment.
#if TARGET_OS_OSX
#if defined(__x86_64__)
if (__is_translated()) {
magazine_medium_active_threshold = 0;
}
#elif defined(__arm64__)
magazine_medium_active_threshold = 0;
#endif
#endif
// malloc_medium_zone=<0|1>
flag = malloc_common_value_for_key_copy(bootargs, medium_enabled_boot_arg,
value_buf, sizeof(value_buf));
if (flag) {
const char *endp;
long value = malloc_common_convert_to_long(flag, &endp);
if (!*endp) {
magazine_medium_enabled = (value != 0);
}
}
// malloc_medium_activation_threshold=<n>
flag = malloc_common_value_for_key_copy(bootargs,
medium_activation_threshold_boot_arg, value_buf, sizeof(value_buf));
if (flag) {
const char *endp;
long value = malloc_common_convert_to_long(flag, &endp);
if (!*endp && value >= 0) {
magazine_medium_active_threshold = (uint64_t)value;
} else {
malloc_report(ASL_LEVEL_ERR,
"malloc_medium_activation_threshold must be positive - ignored.\n");
}
}
// malloc_max_medium_magazines=<n>
flag = malloc_common_value_for_key_copy(bootargs,
max_medium_magazines_boot_arg, value_buf, sizeof(value_buf));
if (flag) {
const char *endp;
long value = malloc_common_convert_to_long(flag, &endp);
if (!*endp && value >= 0) {
max_medium_magazines = (int)value;
} else {
malloc_report(ASL_LEVEL_ERR,
"malloc_max_medium_magazines must be positive - ignored.\n");
}
}
// malloc_medium_space_efficient=<0|1>
flag = malloc_common_value_for_key_copy(bootargs,
medium_space_efficient_boot_arg, value_buf, sizeof(value_buf));
if (flag) {
const char *endp;
long value = malloc_common_convert_to_long(flag, &endp);
if (!*endp) {
malloc_medium_space_efficient_enabled = (value != 0);
}
}
#endif // CONFIG_MEDIUM_ALLOCATOR
}
// Bound to the exported _malloc_default_zone symbol: if a sanitizer (or
// other interposer) overrides that symbol, calling this forces its
// runtime to initialize early.
extern malloc_zone_t *force_asan_init_if_present(void)
asm("_malloc_default_zone");
// Process-level malloc bootstrap, called once by libSystem with the
// kernel-supplied apple[] strings: gathers boot-args (when permitted),
// seeds malloc_entropy, then runs full allocator initialization.
void
__malloc_init(const char *apple[])
{
	char bootargs[1024] = { '\0' };
	bool allow_bootargs = true;

#if CONFIG_FEATUREFLAGS_SIMPLE
	allow_bootargs &= os_feature_enabled_simple(libmalloc, EnableBootArgs, false);
#endif

#if defined(_COMM_PAGE_DEV_FIRM)
	// Only honor boot-args on development/internal firmware.
	allow_bootargs &= !!*((uint32_t *)_COMM_PAGE_DEV_FIRM);
#endif // _COMM_PAGE_DEV_FIRM

	size_t len = sizeof(bootargs) - 1;
	if (allow_bootargs &&
			!sysctlbyname("kern.bootargs", bootargs, &len, NULL, 0) &&
			len > 0) {
		// Bug fix: terminate at bootargs[len]. The previous
		// bootargs[len + 1] wrote one byte past the end of the buffer
		// when the sysctl filled it completely (len == 1023).
		bootargs[len] = '\0';
	}

	// Prefer kernel-provided entropy from the apple[] strings; scrub the
	// string afterwards so the values don't linger in memory.
	const char **p;
	for (p = apple; p && *p; p++) {
		if (strstr(*p, "malloc_entropy") == *p) {
			int count = __entropy_from_kernel(*p);
			bzero((void *)*p, strlen(*p));
			if (sizeof(malloc_entropy) / sizeof(malloc_entropy[0]) == count) {
				_malloc_entropy_initialized = true;
			}
			break;
		}
	}
	if (!_malloc_entropy_initialized) {
		// Fallback: 16 bytes from getentropy(); failure is not expected
		// for requests this small.
		getentropy((void*)malloc_entropy, sizeof(malloc_entropy));
		_malloc_entropy_initialized = true;
	}

	__malloc_init_from_bootargs(bootargs);
	mvm_aslr_init();
	// Resolve _malloc_default_zone so an interposing sanitizer runtime
	// initializes before we build the real zones.
	force_asan_init_if_present();
	_malloc_initialize(apple, bootargs);
}
MALLOC_NOEXPORT malloc_zone_t* lite_zone = NULL;

// Resolve the zone that should currently service "default zone" calls:
// the MallocStackLogging lite zone takes precedence when installed.
MALLOC_ALWAYS_INLINE
static inline malloc_zone_t *
runtime_default_zone() {
	if (lite_zone) {
		return lite_zone;
	}
	return inline_malloc_default_zone();
}
// Virtual-zone trampoline: forward size() to the live default zone.
static size_t
default_zone_size(malloc_zone_t *zone, const void *ptr)
{
	malloc_zone_t *dz = runtime_default_zone();
	return dz->size(dz, ptr);
}
// Virtual-zone trampoline: forward malloc() to the live default zone.
static void *
default_zone_malloc(malloc_zone_t *zone, size_t size)
{
	malloc_zone_t *dz = runtime_default_zone();
	return dz->malloc(dz, size);
}
// Virtual-zone trampoline: forward calloc() to the live default zone.
static void *
default_zone_calloc(malloc_zone_t *zone, size_t num_items, size_t size)
{
	malloc_zone_t *dz = runtime_default_zone();
	return dz->calloc(dz, num_items, size);
}
// Virtual-zone trampoline: forward valloc() to the live default zone.
static void *
default_zone_valloc(malloc_zone_t *zone, size_t size)
{
	malloc_zone_t *dz = runtime_default_zone();
	return dz->valloc(dz, size);
}
// Virtual-zone trampoline: forward free() to the live default zone.
static void
default_zone_free(malloc_zone_t *zone, void *ptr)
{
	malloc_zone_t *dz = runtime_default_zone();
	dz->free(dz, ptr);
}
// Virtual-zone trampoline: forward realloc() to the live default zone.
static void *
default_zone_realloc(malloc_zone_t *zone, void *ptr, size_t new_size)
{
	malloc_zone_t *dz = runtime_default_zone();
	return dz->realloc(dz, ptr, new_size);
}
// Virtual-zone trampoline: forward destroy() to the live default zone.
static void
default_zone_destroy(malloc_zone_t *zone)
{
	malloc_zone_t *dz = runtime_default_zone();
	dz->destroy(dz);
}
// Virtual-zone trampoline: forward batch_malloc() to the live default zone.
static unsigned
default_zone_batch_malloc(malloc_zone_t *zone, size_t size, void **results, unsigned count)
{
	malloc_zone_t *dz = runtime_default_zone();
	return dz->batch_malloc(dz, size, results, count);
}
// Virtual-zone trampoline: forward batch_free() to the live default zone.
static void
default_zone_batch_free(malloc_zone_t *zone, void **to_be_freed, unsigned count)
{
	malloc_zone_t *dz = runtime_default_zone();
	dz->batch_free(dz, to_be_freed, count);
}
// Virtual-zone trampoline: forward memalign() to the live default zone.
static void *
default_zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
{
	malloc_zone_t *dz = runtime_default_zone();
	return dz->memalign(dz, alignment, size);
}
// Virtual-zone trampoline: forward free_definite_size() to the live
// default zone.
static void
default_zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
{
	malloc_zone_t *dz = runtime_default_zone();
	dz->free_definite_size(dz, ptr, size);
}
// Virtual-zone trampoline: forward pressure_relief() to the live default
// zone.
static size_t
default_zone_pressure_relief(malloc_zone_t *zone, size_t goal)
{
	malloc_zone_t *dz = runtime_default_zone();
	return dz->pressure_relief(dz, goal);
}
// Virtual-zone trampoline: ask the live default zone whether it claims
// ownership of ptr (via the public claimed-address API).
static boolean_t
default_zone_malloc_claimed_address(malloc_zone_t *zone, void *ptr)
{
	malloc_zone_t *dz = runtime_default_zone();
	return malloc_zone_claimed_address(dz, ptr);
}
// Virtual-zone trampoline: enumerate the real default zone, substituting
// its address for the virtual zone's when calling the introspector.
static kern_return_t
default_zone_ptr_in_use_enumerator(task_t task,
		void *context,
		unsigned type_mask,
		vm_address_t zone_address,
		memory_reader_t reader,
		vm_range_recorder_t recorder)
{
	malloc_zone_t *dz = runtime_default_zone();
	return dz->introspect->enumerator(task, context, type_mask, (vm_address_t)dz, reader, recorder);
}
// Introspection trampoline: good_size() of the live default zone.
static size_t
default_zone_good_size(malloc_zone_t *zone, size_t size)
{
	malloc_zone_t *dz = runtime_default_zone();
	return dz->introspect->good_size(dz, size);
}
// Introspection trampoline: check() of the live default zone.
static boolean_t
default_zone_check(malloc_zone_t *zone)
{
	malloc_zone_t *dz = runtime_default_zone();
	return dz->introspect->check(dz);
}
// Introspection trampoline: print() of the live default zone.
static void
default_zone_print(malloc_zone_t *zone, boolean_t verbose)
{
	malloc_zone_t *dz = runtime_default_zone();
	dz->introspect->print(dz, verbose);
}
// Introspection trampoline: log() of the live default zone.
static void
default_zone_log(malloc_zone_t *zone, void *log_address)
{
	malloc_zone_t *dz = runtime_default_zone();
	dz->introspect->log(dz, log_address);
}
// Introspection trampoline: force_lock() of the live default zone.
static void
default_zone_force_lock(malloc_zone_t *zone)
{
	malloc_zone_t *dz = runtime_default_zone();
	dz->introspect->force_lock(dz);
}
// Introspection trampoline: force_unlock() of the live default zone.
static void
default_zone_force_unlock(malloc_zone_t *zone)
{
	malloc_zone_t *dz = runtime_default_zone();
	dz->introspect->force_unlock(dz);
}
// Introspection trampoline: statistics() of the live default zone.
static void
default_zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats)
{
	malloc_zone_t *dz = runtime_default_zone();
	dz->introspect->statistics(dz, stats);
}
// Introspection trampoline: zone_locked() of the live default zone.
static boolean_t
default_zone_locked(malloc_zone_t *zone)
{
	malloc_zone_t *dz = runtime_default_zone();
	return dz->introspect->zone_locked(dz);
}
// Introspection trampoline: reinit_lock() of the live default zone
// (used after fork).
static void
default_zone_reinit_lock(malloc_zone_t *zone)
{
	malloc_zone_t *dz = runtime_default_zone();
	dz->introspect->reinit_lock(dz);
}
// Introspection table for the virtual default zone; every entry forwards
// to whichever zone is currently the default. Entries are positional
// (malloc_introspection_t order); the NULL slots are optional hooks this
// zone does not implement — NOTE(review): confirm slot names against
// malloc/malloc.h for the struct version in use.
static struct malloc_introspection_t default_zone_introspect = {
default_zone_ptr_in_use_enumerator,
default_zone_good_size,
default_zone_check,
default_zone_print,
default_zone_log,
default_zone_force_lock,
default_zone_force_unlock,
default_zone_statistics,
default_zone_locked,
NULL,
NULL,
NULL,
NULL,
default_zone_reinit_lock
};
// The "virtual" default zone: a page-sized, page-aligned malloc_zone_t
// whose every entry point resolves the real default zone at call time
// (see runtime_default_zone()). Padding to PAGE_MAX_SIZE lets it live in
// its own mapping.
typedef struct {
malloc_zone_t malloc_zone;
uint8_t pad[PAGE_MAX_SIZE - sizeof(malloc_zone_t)];
} virtual_default_zone_t;
// Positional initializer follows the public malloc_zone_t layout
// (malloc/malloc.h): reserved1, reserved2, size, malloc, calloc, valloc,
// free, realloc, destroy, zone_name, batch_malloc, batch_free,
// introspect, version, memalign, free_definite_size, pressure_relief,
// claimed_address.
static virtual_default_zone_t virtual_default_zone
__attribute__((section("__DATA,__v_zone")))
__attribute__((aligned(PAGE_MAX_SIZE))) = {
NULL,
NULL,
default_zone_size,
default_zone_malloc,
default_zone_calloc,
default_zone_valloc,
default_zone_free,
default_zone_realloc,
default_zone_destroy,
DEFAULT_MALLOC_ZONE_STRING,
default_zone_batch_malloc,
default_zone_batch_free,
&default_zone_introspect,
10, // zone interface version
default_zone_memalign,
default_zone_free_definite_size,
default_zone_pressure_relief,
default_zone_malloc_claimed_address,
};
// The pointer handed out by malloc_default_zone().
static malloc_zone_t *default_zone = &virtual_default_zone.malloc_zone;
// True when zone 0 is still the zone installed as the initial default by
// _malloc_initialize(); false before the zone table exists.
MALLOC_NOEXPORT
boolean_t
has_default_zone0(void)
{
	return malloc_zones ? (initial_default_zone == malloc_zones[0]) : false;
}
static inline malloc_zone_t *find_registered_zone(const void *, size_t *) __attribute__((always_inline));
// Find the zone that owns `ptr` by asking each registered zone for its
// size; returns NULL (and *returned_size = 0) when no zone claims it.
// Lock-free: slow-path readers are counted via the pFRZCounter ping-pong
// counters so concurrent zone unregistration can wait for them to drain.
static inline malloc_zone_t *
find_registered_zone(const void *ptr, size_t *returned_size)
{
// No zones yet: nothing can own ptr.
if (0 == malloc_num_zones) {
if (returned_size) {
*returned_size = 0;
}
return NULL;
}
// When the stack-logging lite zone is installed it fronts allocation;
// report ownership as the virtual default zone.
if (lite_zone) {
malloc_zone_t *zone = lite_zone;
size_t size = zone->size(zone, ptr);
if (size) { if (returned_size) {
*returned_size = size;
}
return default_zone;
}
}
// Fast path: zone 0 handles the vast majority of pointers.
malloc_zone_t *zone = malloc_zones[0];
size_t size = zone->size(zone, ptr);
if (size) { if (returned_size) {
*returned_size = size;
}
// If zone 0 is the initial default, hand back the virtual default
// zone so callers keep funneling through it.
if (!has_default_zone0()) {
return zone;
} else {
return default_zone;
}
}
// Slow path: scan the remaining zones under a reader count.
int32_t volatile *pFRZCounter = pFRZCounterLive; OSAtomicIncrement32Barrier(pFRZCounter);
unsigned index;
// Snapshot the count once; new zones appended after this are not seen.
int32_t limit = *(int32_t volatile *)&malloc_num_zones;
malloc_zone_t **zones = &malloc_zones[1];
for (index = 1; index < limit; ++index, ++zones) {
zone = *zones;
size = zone->size(zone, ptr);
if (size) { goto out;
}
}
zone = NULL;
size = 0;
out:
if (returned_size) {
*returned_size = size;
}
OSAtomicDecrement32Barrier(pFRZCounter); return zone;
}
// Debugger hook: set a breakpoint on this function to stop when malloc
// detects an error; fires the MAGMALLOC probe.
void
malloc_error_break(void)
{
MAGMALLOC_MALLOCERRORBREAK(); }
// Tell the debugger whether calling description methods (po) is unsafe
// right now: returns 1 if the stack-logging lock or any zone lock is
// held, 0 otherwise. Zones older than version 5 have no zone_locked hook.
int
malloc_gdb_po_unsafe(void)
{
	if (msl.stack_logging_locked && msl.stack_logging_locked()) {
		return 1;
	}

	unsigned count = malloc_num_zones;
	for (unsigned idx = 0; idx != count; idx++) {
		malloc_zone_t *z = malloc_zones[idx];
		if (z->version >= 5 && z->introspect->zone_locked &&
				z->introspect->zone_locked(z)) {
			return 1;
		}
	}
	return 0;
}
static void set_flags_from_environment(void);
// Append `zone` to the global malloc_zones table, growing the read-only
// table via mach_vm_allocate when full. Caller must hold MALLOC_LOCK.
MALLOC_NOEXPORT void
malloc_zone_register_while_locked(malloc_zone_t *zone)
{
size_t protect_size;
unsigned i;
// Refuse duplicate registration.
for (i = 0; i != malloc_num_zones; ++i) {
if (zone == malloc_zones[i]) {
malloc_report(ASL_LEVEL_ERR, "Attempted to register zone more than once: %p\n", zone);
return;
}
}
if (malloc_num_zones == malloc_num_zones_allocated) {
// Grow: allocate a fresh VM region one page larger than the current
// data, copy the old table over, then release the old region.
size_t malloc_zones_size = malloc_num_zones * sizeof(malloc_zone_t *);
mach_vm_size_t alloc_size = round_page(malloc_zones_size + vm_page_size);
mach_vm_address_t vm_addr;
int alloc_flags = VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_MALLOC);
// Hint past page 0 so the table never lands on the NULL page.
vm_addr = vm_page_size;
kern_return_t kr = mach_vm_allocate(mach_task_self(), &vm_addr, alloc_size, alloc_flags);
if (kr) {
malloc_report(ASL_LEVEL_ERR, "malloc_zone_register allocation failed: %d\n", kr);
return;
}
malloc_zone_t **new_zones = (malloc_zone_t **)vm_addr;
if (malloc_zones) {
memcpy(new_zones, malloc_zones, malloc_zones_size);
vm_addr = (mach_vm_address_t)malloc_zones;
mach_vm_size_t dealloc_size = round_page(malloc_zones_size);
mach_vm_deallocate(mach_task_self(), vm_addr, dealloc_size);
}
protect_size = (size_t)alloc_size;
malloc_zones = new_zones;
malloc_num_zones_allocated = (int32_t)(alloc_size / sizeof(malloc_zone_t *));
} else {
// Room available: temporarily make the table writable.
protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *);
mprotect(malloc_zones, protect_size, PROT_READ | PROT_WRITE);
}
// Publish the pointer first, then bump the count with a barrier so
// lock-free readers never see the new count before the new entry.
malloc_zones[malloc_num_zones] = zone;
OSAtomicIncrement32Barrier(&malloc_num_zones);
// Re-protect the table against stray writes.
mprotect(malloc_zones, protect_size, PROT_READ);
}
// One-time allocator initialization: read CPU topology from the commpage,
// size the magazine pools, apply environment flags, then create and
// register the initial zone stack (scalable + optional nano + pguard).
static void
_malloc_initialize(const char *apple[], const char *bootargs)
{
phys_ncpus = *(uint8_t *)(uintptr_t)_COMM_PAGE_PHYSICAL_CPUS;
logical_ncpus = *(uint8_t *)(uintptr_t)_COMM_PAGE_LOGICAL_CPUS;
if (0 != (logical_ncpus % phys_ncpus)) {
MALLOC_REPORT_FATAL_ERROR(logical_ncpus % phys_ncpus,
"logical_ncpus %% phys_ncpus != 0\n");
}
// hyper_shift = log2(SMT ways); only 1-, 2- and 4-way SMT is supported.
switch (logical_ncpus / phys_ncpus) {
case 1:
hyper_shift = 0;
break;
case 2:
hyper_shift = 1;
break;
case 4:
hyper_shift = 2;
break;
default:
MALLOC_REPORT_FATAL_ERROR(logical_ncpus / phys_ncpus, "logical_ncpus / phys_ncpus not 1, 2, or 4");
}
// Clamp any pre-configured magazine counts to the logical CPU count;
// default to one magazine per logical CPU.
if (max_magazines) {
max_magazines = MIN(max_magazines, logical_ncpus);
} else {
max_magazines = logical_ncpus;
}
if (max_medium_magazines) {
max_medium_magazines = MIN(max_medium_magazines, logical_ncpus);
} else {
max_medium_magazines = max_magazines;
}
set_flags_from_environment();
#if CONFIG_NANOZONE
const char **envp = (const char **)*_NSGetEnviron();
nano_common_init(envp, apple, bootargs);
#endif
// Build the zone stack bottom-up; the top of the stack becomes zone 0
// (the default) because registration below runs in reverse order.
const uint32_t k_max_zones = 3;
malloc_zone_t *zone_stack[k_max_zones];
const char *name_stack[k_max_zones];
uint32_t num_zones = 0;
initial_scalable_zone = create_scalable_zone(0, malloc_debug_flags);
zone_stack[num_zones] = initial_scalable_zone;
name_stack[num_zones] = DEFAULT_MALLOC_ZONE_STRING;
num_zones++;
#if CONFIG_NANOZONE
nano_common_configure();
// Nano (v1 or v2) fronts the scalable zone, which becomes its helper.
malloc_zone_t *helper_zone = zone_stack[num_zones - 1];
malloc_zone_t *nano_zone = NULL;
if (_malloc_engaged_nano == NANO_V2) {
nano_zone = nanov2_create_zone(helper_zone, malloc_debug_flags);
} else if (_malloc_engaged_nano == NANO_V1) {
nano_zone = nano_create_zone(helper_zone, malloc_debug_flags);
}
if (nano_zone) {
initial_nano_zone = nano_zone;
zone_stack[num_zones] = nano_zone;
name_stack[num_zones] = DEFAULT_MALLOC_ZONE_STRING;
name_stack[num_zones - 1] = MALLOC_HELPER_ZONE_STRING;
num_zones++;
}
#endif
if (pguard_enabled()) {
// PGuard wraps whatever zone is currently on top of the stack.
malloc_zone_t *wrapped_zone = zone_stack[num_zones - 1];
zone_stack[num_zones] = pguard_create_zone(wrapped_zone, malloc_debug_flags);
name_stack[num_zones] = MALLOC_PGUARD_ZONE_STRING;
num_zones++;
}
MALLOC_ASSERT(num_zones <= k_max_zones);
initial_default_zone = zone_stack[num_zones - 1];
// Register in reverse so zone_stack[num_zones - 1] lands at index 0.
for (int i = num_zones - 1; i >= 0; i--) malloc_zone_register_while_locked(zone_stack[i]);
for (int i = num_zones - 1; i >= 0; i--) malloc_set_zone_name(zone_stack[i], name_stack[i]);
}
// The true default zone (zone 0); only valid once initialization has
// replaced the poisoned malloc_zones sentinel.
static inline malloc_zone_t *
inline_malloc_default_zone(void)
{
return malloc_zones[0];
}
// Public API: always returns the stable virtual default zone, which
// forwards each call to the real default zone at call time.
malloc_zone_t *
malloc_default_zone(void)
{
return default_zone;
}
// Legacy-compat malloc for zeroified default zones: requests above
// LEGACY_ZEROING_THRESHOLD are zeroed (via calloc), smaller ones are not.
static void *
legacy_zeroing_large_malloc(malloc_zone_t *zone, size_t size)
{
	return (size > LEGACY_ZEROING_THRESHOLD)
			? default_zone_calloc(zone, 1, size)
			: default_zone_malloc(zone, size);
}
// Legacy-compat valloc for zeroified default zones: returns page-aligned,
// zero-filled memory.
static void *
legacy_zeroing_large_valloc(malloc_zone_t *zone, size_t size)
{
	void *p = default_zone_valloc(zone, size);
	// Fix: guard against allocation failure before zeroing; the original
	// unconditionally memset() the result and crashed on NULL.
	if (p) {
		memset(p, 0, size);
	}
	return p;
}
// Retro-fit legacy zeroing semantics onto the default zone: swap in
// malloc/valloc variants that return zeroed memory for large requests
// (see legacy_zeroing_large_malloc/valloc). No-op for any other zone.
void
zeroify_scalable_zone(malloc_zone_t *zone)
{
if (zone == default_zone) {
zone->malloc = (void *)legacy_zeroing_large_malloc;
zone->valloc = (void *)legacy_zeroing_large_valloc;
}
}
// Report which nano allocator (if any) is engaged; always 0 when the
// nano zone is compiled out.
int
malloc_engaged_nano(void)
{
#if CONFIG_NANOZONE
return _malloc_engaged_nano;
#else
return 0;
#endif
}
// Lazily create the process-wide purgeable zone. Racing threads may each
// create one; the CAS picks a single winner and losers destroy their copy.
malloc_zone_t *
malloc_default_purgeable_zone(void)
{
static malloc_zone_t *dpz;
if (!dpz) {
// Backed by the initial scalable zone for metadata allocations.
malloc_zone_t *tmp = create_purgeable_zone(0, initial_scalable_zone, malloc_debug_flags);
malloc_zone_register(tmp);
malloc_set_zone_name(tmp, DEFAULT_PUREGEABLE_ZONE_STRING);
if (!OSAtomicCompareAndSwapPtrBarrier(NULL, tmp, (void**)&dpz)) {
malloc_destroy_zone(tmp);
}
}
return dpz;
}
// Read the Malloc* environment variables and fold them into the global
// configuration. Bails out entirely for setugid processes when any
// Malloc* variable is present, so privileged binaries cannot be
// influenced by inherited debug settings.
static void
set_flags_from_environment(void)
{
	const char *flag;
	const char **env = (const char **)*_NSGetEnviron();
	const char **p;
	const char *c;

#if __LP64__
	// 64-bit: corruption aborts are unconditionally on.
	malloc_debug_flags = MALLOC_ABORT_ON_CORRUPTION;
#else
	// 32-bit: only abort on corruption when linked against a new enough
	// libSystem (major version >= 126), preserving legacy behavior.
	int libSystemVersion = NSVersionOfLinkTimeLibrary("System");
	if ((-1 != libSystemVersion) && ((libSystemVersion >> 16) < 126) ) {
		malloc_debug_flags = 0;
	} else {
		malloc_debug_flags = MALLOC_ABORT_ON_CORRUPTION;
	}
#endif

	// Look for any Malloc*-prefixed variable.
	for (p = env; (c = *p) != NULL; ++p) {
#if RDAR_48993662
		// Bug fix: "_Malloc" is seven characters; the previous length of
		// 6 compared only "_Mallo" and so matched unintended variables.
		if (!strncmp(c, "Malloc", 6) || !strncmp(c, "_Malloc", 7)) {
#else // RDAR_48993662
		if (!strncmp(c, "Malloc", 6)) {
#endif // RDAR_48993662
			if (issetugid()) {
				return;
			}
			break;
		}
	}
	bool restricted = dyld_process_is_restricted();
	malloc_print_configure(restricted);
	// No Malloc* variable present at all: nothing further to parse.
	if (c == NULL) {
		return;
	}

	flag = getenv("MallocGuardEdges");
	if (flag) {
		if (!strcmp(flag, "all")) {
#if MALLOC_TARGET_64BIT
			malloc_debug_flags |= MALLOC_GUARD_ALL | MALLOC_ADD_GUARD_PAGE_FLAGS;
			malloc_debug_flags &= ~(MALLOC_DONT_PROTECT_PRELUDE|MALLOC_DONT_PROTECT_POSTLUDE);
			malloc_report(ASL_LEVEL_INFO, "adding guard pages to all regions\n");
#endif // MALLOC_TARGET_64BIT
		} else {
			malloc_debug_flags |= MALLOC_ADD_GUARD_PAGE_FLAGS;
			malloc_debug_flags &= ~MALLOC_GUARD_ALL;
			malloc_report(ASL_LEVEL_INFO, "adding guard pages for large allocator blocks\n");
			if (getenv("MallocDoNotProtectPrelude")) {
				malloc_debug_flags |= MALLOC_DONT_PROTECT_PRELUDE;
				malloc_report(ASL_LEVEL_INFO, "... but not protecting prelude guard page\n");
			}
			if (getenv("MallocDoNotProtectPostlude")) {
				malloc_debug_flags |= MALLOC_DONT_PROTECT_POSTLUDE;
				malloc_report(ASL_LEVEL_INFO, "... but not protecting postlude guard page\n");
			}
		}
	}
	if (getenv("MallocScribble")) {
		malloc_debug_flags |= MALLOC_DO_SCRIBBLE;
		malloc_report(ASL_LEVEL_INFO, "enabling scribbling to detect mods to free blocks\n");
	}
	if (getenv("MallocErrorAbort")) {
		malloc_debug_flags |= MALLOC_ABORT_ON_ERROR;
		malloc_report(ASL_LEVEL_INFO, "enabling abort() on bad malloc or free\n");
	}
	if (getenv("MallocTracing")) {
		malloc_tracing_enabled = true;
	}

#if __LP64__
	// 64-bit: MallocCorruptionAbort cannot be disabled.
#else
	// Restricted processes may not opt out of corruption aborts.
	flag = getenv("MallocCorruptionAbort");
	if (!restricted && flag && (flag[0] == '0')) {
		malloc_debug_flags &= ~MALLOC_ABORT_ON_CORRUPTION;
	} else if (flag) {
		malloc_debug_flags |= MALLOC_ABORT_ON_CORRUPTION;
	}
#endif

	// MallocCheckHeap* cluster: only meaningful when a start is given.
	flag = getenv("MallocCheckHeapStart");
	if (flag) {
		malloc_check_start = (unsigned)strtoul(flag, NULL, 0);
		// 0 and (unsigned)-1 both normalize to "start immediately".
		if (malloc_check_start == 0) {
			malloc_check_start = 1;
		}
		if (malloc_check_start == -1) {
			malloc_check_start = 1;
		}
		flag = getenv("MallocCheckHeapEach");
		if (flag) {
			malloc_check_each = (unsigned)strtoul(flag, NULL, 0);
			if (malloc_check_each == 0) {
				malloc_check_each = 1;
			}
			if (malloc_check_each == -1) {
				malloc_check_each = 1;
			}
		}
		malloc_report(ASL_LEVEL_INFO, "checks heap after operation #%d and each %d operations\n", malloc_check_start, malloc_check_each);
		flag = getenv("MallocCheckHeapAbort");
		if (flag) {
			malloc_check_abort = (unsigned)strtol(flag, NULL, 0);
		}
		if (malloc_check_abort) {
			malloc_report(ASL_LEVEL_INFO, "will abort on heap corruption\n");
		} else {
			// Negative sleep means "sleep once" (see malloc_zone_check_fail).
			flag = getenv("MallocCheckHeapSleep");
			if (flag) {
				malloc_check_sleep = (unsigned)strtol(flag, NULL, 0);
			}
			if (malloc_check_sleep > 0) {
				malloc_report(ASL_LEVEL_INFO, "will sleep for %d seconds on heap corruption\n", malloc_check_sleep);
			} else if (malloc_check_sleep < 0) {
				malloc_report(ASL_LEVEL_INFO, "will sleep once for %d seconds on heap corruption\n", -malloc_check_sleep);
			} else {
				malloc_report(ASL_LEVEL_INFO, "no sleep on heap corruption\n");
			}
		}
	}

	flag = getenv("MallocMaxMagazines");
#if RDAR_48993662
	if (!flag) {
		flag = getenv("_MallocMaxMagazines");
	}
#endif // RDAR_48993662
	if (flag) {
		int value = (unsigned)strtol(flag, NULL, 0);
		if (value == 0) {
			malloc_report(ASL_LEVEL_INFO, "Maximum magazines defaulted to %d\n", max_magazines);
		} else if (value < 0) {
			malloc_report(ASL_LEVEL_ERR, "Maximum magazines must be positive - ignored.\n");
		} else if (value > logical_ncpus) {
			max_magazines = logical_ncpus;
			malloc_report(ASL_LEVEL_INFO, "Maximum magazines limited to number of logical CPUs (%d)\n", max_magazines);
		} else {
			max_magazines = value;
			malloc_report(ASL_LEVEL_INFO, "Maximum magazines set to %d\n", max_magazines);
		}
	}

	flag = getenv("MallocLargeExpandedCacheThreshold");
	if (flag) {
		// NOTE(review): value is uint64_t, so the (value < 0) branch is
		// unreachable; kept for parity with the other clusters. The
		// "%lly" conversion also looks suspect — confirm against the
		// malloc_report/_simple formatter before changing it.
		uint64_t value = (uint64_t)strtoull(flag, NULL, 0);
		if (value == 0) {
			malloc_report(ASL_LEVEL_INFO, "Large expanded cache threshold defaulted to %lly\n", magazine_large_expanded_cache_threshold);
		} else if (value < 0) {
			malloc_report(ASL_LEVEL_ERR, "MallocLargeExpandedCacheThreshold must be positive - ignored.\n");
		} else {
			magazine_large_expanded_cache_threshold = value;
			malloc_report(ASL_LEVEL_INFO, "Large expanded cache threshold set to %lly\n", magazine_large_expanded_cache_threshold);
		}
	}

	flag = getenv("MallocLargeDisableASLR");
	if (flag) {
		uint64_t value = (uint64_t)strtoull(flag, NULL, 0);
		if (value == 0) {
			malloc_report(ASL_LEVEL_INFO, "Enabling ASLR slide on large allocations\n");
			malloc_debug_flags &= ~DISABLE_LARGE_ASLR;
		} else if (value != 0) {
			malloc_report(ASL_LEVEL_INFO, "Disabling ASLR slide on large allocations\n");
			malloc_debug_flags |= DISABLE_LARGE_ASLR;
		}
	}

#if CONFIG_AGGRESSIVE_MADVISE || CONFIG_LARGE_CACHE
	// MallocSpaceEfficient=1 trades throughput for footprint: aggressive
	// madvise on, large-entry death-row cache off.
	flag = getenv("MallocSpaceEfficient");
	if (flag) {
		const char *endp;
		long value = malloc_common_convert_to_long(flag, &endp);
		if (!*endp && endp != flag && (value == 0 || value == 1)) {
#if CONFIG_AGGRESSIVE_MADVISE
			aggressive_madvise_enabled = (value == 1);
#endif // CONFIG_AGGRESSIVE_MADVISE
#if CONFIG_LARGE_CACHE
			large_cache_enabled = (value == 0);
#endif // CONFIG_LARGE_CACHE
			malloc_space_efficient_enabled = (value == 1);
		} else {
			malloc_report(ASL_LEVEL_ERR, "MallocSpaceEfficient must be 0 or 1.\n");
		}
	}
#endif // CONFIG_AGGRESSIVE_MADVISE || CONFIG_LARGE_CACHE

#if CONFIG_MEDIUM_ALLOCATOR
	flag = getenv("MallocMediumZone");
	if (flag) {
		int value = (unsigned)strtol(flag, NULL, 0);
		if (value == 0) {
			magazine_medium_enabled = false;
		} else if (value == 1) {
			magazine_medium_enabled = true;
		}
	}
	flag = getenv("MallocMediumActivationThreshold");
	if (flag) {
		// NOTE(review): unsigned value — the (value < 0) branch is dead,
		// as above.
		uint64_t value = (uint64_t)strtoull(flag, NULL, 0);
		if (value == 0) {
			malloc_report(ASL_LEVEL_INFO, "Medium activation threshold defaulted to %lly\n", magazine_medium_active_threshold);
		} else if (value < 0) {
			malloc_report(ASL_LEVEL_ERR, "MallocMediumActivationThreshold must be positive - ignored.\n");
		} else {
			magazine_medium_active_threshold = value;
			malloc_report(ASL_LEVEL_INFO, "Medium activation threshold set to %lly\n", magazine_medium_active_threshold);
		}
	}
	flag = getenv("MallocMediumSpaceEfficient");
	if (flag) {
		uint64_t value = (uint64_t)strtoull(flag, NULL, 0);
		if (value == 0) {
			malloc_medium_space_efficient_enabled = false;
		} else if (value == 1) {
			malloc_medium_space_efficient_enabled = true;
		}
	}
	// Full space-efficient mode collapses medium to a single magazine.
	if (malloc_medium_space_efficient_enabled && malloc_space_efficient_enabled) {
		max_medium_magazines = 1;
	}
	flag = getenv("MallocMaxMediumMagazines");
#if RDAR_48993662
	if (!flag) {
		flag = getenv("_MallocMaxMediumMagazines");
	}
#endif // RDAR_48993662
	if (flag) {
		int value = (unsigned)strtol(flag, NULL, 0);
		if (value == 0) {
			malloc_report(ASL_LEVEL_INFO, "Maximum medium magazines defaulted to %d\n", max_magazines);
		} else if (value < 0) {
			malloc_report(ASL_LEVEL_ERR, "Maximum medium magazines must be positive - ignored.\n");
		} else if (value > logical_ncpus) {
			max_medium_magazines = logical_ncpus;
			malloc_report(ASL_LEVEL_INFO, "Maximum medium magazines limited to number of logical CPUs (%d)\n", max_medium_magazines);
		} else {
			max_medium_magazines = value;
			malloc_report(ASL_LEVEL_INFO, "Maximum medium magazines set to %d\n", max_medium_magazines);
		}
	}
#endif // CONFIG_MEDIUM_ALLOCATOR

#if CONFIG_AGGRESSIVE_MADVISE
	// Individual override; may undo what MallocSpaceEfficient set above.
	flag = getenv("MallocAggressiveMadvise");
	if (flag) {
		const char *endp;
		long value = malloc_common_convert_to_long(flag, &endp);
		if (!*endp && endp != flag && (value == 0 || value == 1)) {
			aggressive_madvise_enabled = (value == 1);
		} else {
			malloc_report(ASL_LEVEL_ERR, "MallocAggressiveMadvise must be 0 or 1.\n");
		}
	}
#endif // CONFIG_AGGRESSIVE_MADVISE

#if CONFIG_LARGE_CACHE
	flag = getenv("MallocLargeCache");
	if (flag) {
		const char *endp;
		long value = malloc_common_convert_to_long(flag, &endp);
		if (!*endp && endp != flag && (value == 0 || value == 1)) {
			large_cache_enabled = (value == 1);
		} else {
			malloc_report(ASL_LEVEL_ERR, "MallocLargeCache must be 0 or 1.\n");
		}
	}
#endif // CONFIG_LARGE_CACHE

#if CONFIG_RECIRC_DEPOT
	flag = getenv("MallocRecircRetainedRegions");
	if (flag) {
		int value = (int)strtol(flag, NULL, 0);
		if (value > 0) {
			recirc_retained_regions = value;
		} else {
			malloc_report(ASL_LEVEL_ERR, "MallocRecircRetainedRegions must be positive - ignored.\n");
		}
	}
#endif // CONFIG_RECIRC_DEPOT

	if (getenv("MallocHelp")) {
		malloc_report(ASL_LEVEL_INFO,
				"environment variables that can be set for debug:\n"
				"- MallocLogFile <f> to create/append messages to file <f> instead of stderr\n"
				"- MallocGuardEdges to add 2 guard pages for each large block\n"
				"- MallocDoNotProtectPrelude to disable protection (when previous flag set)\n"
				"- MallocDoNotProtectPostlude to disable protection (when previous flag set)\n"
				"- MallocStackLogging to record all stacks. Tools like leaks can then be applied\n"
				"- MallocStackLoggingNoCompact to record all stacks. Needed for malloc_history\n"
				"- MallocStackLoggingDirectory to set location of stack logs, which can grow large; default is /tmp\n"
				"- MallocScribble to detect writing on free blocks and missing initializers:\n"
				" 0x55 is written upon free and 0xaa is written on allocation\n"
				"- MallocCheckHeapStart <n> to start checking the heap after <n> operations\n"
				"- MallocCheckHeapEach <s> to repeat the checking of the heap after <s> operations\n"
				"- MallocCheckHeapSleep <t> to sleep <t> seconds on heap corruption\n"
				"- MallocCheckHeapAbort <b> to abort on heap corruption if <b> is non-zero\n"
				"- MallocCorruptionAbort to abort on malloc errors, but not on out of memory for 32-bit processes\n"
				" MallocCorruptionAbort is always set on 64-bit processes\n"
				"- MallocErrorAbort to abort on any malloc error, including out of memory\n"
				"- MallocTracing to emit kdebug trace points on malloc entry points\n"
				"- MallocHelp - this help!\n");
	}
}
// Public API: create and register a new scalable zone, folding the
// process-wide debug flags into the caller's flags. Returns NULL when the
// requested initial size is out of range.
malloc_zone_t *
malloc_create_zone(vm_size_t start_size, unsigned flags)
{
	if (start_size > MALLOC_ABSOLUTE_MAX_SIZE) {
		return NULL;
	}

	malloc_zone_t *new_zone = create_scalable_zone(start_size, flags | malloc_debug_flags);
	malloc_zone_register(new_zone);
	return new_zone;
}
// Create a legacy scalable zone and promote it to zone 0 (the default),
// shifting the existing zones up one slot.
void
malloc_create_legacy_default_zone(void)
{
malloc_zone_t *zone;
int i;
zone = create_legacy_scalable_zone(0, malloc_debug_flags);
MALLOC_LOCK();
malloc_zone_register_while_locked(zone);
// Only one zone may carry the default name; strip it from the current
// holder before naming the new default.
malloc_zone_t *hold = malloc_zones[0];
if (hold->zone_name && strcmp(hold->zone_name, DEFAULT_MALLOC_ZONE_STRING) == 0) {
malloc_set_zone_name(hold, NULL);
}
malloc_set_zone_name(zone, DEFAULT_MALLOC_ZONE_STRING);
// Unprotect the zone table, rotate everything up, install at slot 0.
unsigned protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *);
mprotect(malloc_zones, protect_size, PROT_READ | PROT_WRITE);
for (i = malloc_num_zones - 1; i > 0; --i) {
malloc_zones[i] = malloc_zones[i - 1];
}
malloc_zones[0] = zone;
mprotect(malloc_zones, protect_size, PROT_READ);
MALLOC_UNLOCK();
}
// Public API: tear down a zone — release its name allocation,
// de-register it from the global table, then let the zone destroy itself.
void
malloc_destroy_zone(malloc_zone_t *zone)
{
	malloc_set_zone_name(zone, NULL);
	malloc_zone_unregister(zone);
	zone->destroy(zone);
}
// Page of stack frames captured at the last successful heap check (see
// internal_check() and malloc_zone_check_fail()).
static vm_address_t *frames = NULL;
static unsigned num_frames;
// Report a MallocCheckHeap failure: print the stack recorded at the last
// successful check, suggest narrowed-down env settings, then either crash
// (MallocCheckHeapAbort) or optionally sleep to allow a debugger attach.
MALLOC_NOINLINE
void
malloc_zone_check_fail(const char *msg, const char *fmt, ...)
{
// Build the message in a _simple string when possible; fall back to
// direct reporting if that allocation fails.
_SIMPLE_STRING b = _simple_salloc();
if (b) {
_simple_sprintf(b, "*** MallocCheckHeap: FAILED check at operation #%d\n", malloc_check_counter - 1);
} else {
malloc_report(MALLOC_REPORT_NOLOG, "*** MallocCheckHeap: FAILED check at operation #%d\n", malloc_check_counter - 1);
}
if (frames) {
// Frame 0 is the capture site itself; start at 1.
unsigned index = 1;
if (b) {
_simple_sappend(b, "Stack for last operation where the malloc check succeeded: ");
while (index < num_frames)
_simple_sprintf(b, "%p ", (void*)frames[index++]);
malloc_report(MALLOC_REPORT_NOLOG, "%s\n(Use 'atos' for a symbolic stack)\n", _simple_string(b));
} else {
malloc_report(MALLOC_REPORT_NOLOG, "Stack for last operation where the malloc check succeeded: ");
while (index < num_frames) {
malloc_report(MALLOC_REPORT_NOLOG, "%p ", (void *)frames[index++]);
}
malloc_report(MALLOC_REPORT_NOLOG, "\n(Use 'atos' for a symbolic stack)\n");
}
}
// Suggest a tighter start/each window around the failing operation.
if (malloc_check_each > 1) {
unsigned recomm_each = (malloc_check_each > 10) ? malloc_check_each / 10 : 1;
unsigned recomm_start =
(malloc_check_counter > malloc_check_each + 1) ? malloc_check_counter - 1 - malloc_check_each : 1;
malloc_report(MALLOC_REPORT_NOLOG,
"*** Recommend using 'setenv MallocCheckHeapStart %d; setenv MallocCheckHeapEach %d' to narrow down failure\n",
recomm_start, recomm_each);
}
if (b) {
_simple_sfree(b);
}
unsigned sleep_time = 0;
uint32_t report_flags = ASL_LEVEL_ERR | MALLOC_REPORT_DEBUG | MALLOC_REPORT_NOLOG;
if (malloc_check_abort) {
report_flags |= MALLOC_REPORT_CRASH;
} else {
// Negative sleep means "sleep only once": it is cleared after use.
if (malloc_check_sleep > 0) {
malloc_report(ASL_LEVEL_NOTICE, "*** Will sleep for %d seconds to leave time to attach\n", malloc_check_sleep);
sleep_time = malloc_check_sleep;
} else if (malloc_check_sleep < 0) {
malloc_report(ASL_LEVEL_NOTICE, "*** Will sleep once for %d seconds to leave time to attach\n", -malloc_check_sleep);
sleep_time = -malloc_check_sleep;
malloc_check_sleep = 0;
}
}
va_list ap;
va_start(ap, fmt);
malloc_vreport(report_flags, sleep_time, msg, NULL, fmt, ap);
va_end(ap);
}
/*
 * Periodic heap verification driven by the malloc_check_* settings: once
 * the per-operation counter reaches malloc_check_start, run
 * malloc_zone_check() over all zones and, on success, record the current
 * thread's stack so a later failure can report the last operation that
 * passed.  The next check is scheduled malloc_check_each operations out.
 */
__attribute__((cold, noinline))
static void
internal_check(void)
{
if (malloc_check_counter++ < malloc_check_start) {
return;
}
if (malloc_zone_check(NULL)) {
// Check passed: capture this thread's PCs into a lazily-allocated page.
if (!frames) {
vm_allocate(mach_task_self(), (void *)&frames, vm_page_size, 1);
}
thread_stack_pcs(frames, (unsigned)(vm_page_size / sizeof(vm_address_t) - 1), &num_frames);
}
malloc_check_start += malloc_check_each;
}
// Flags describing which standard's error-reporting contract a malloc entry
// point promises its caller.
__options_decl(malloc_zone_options_t, unsigned, {
MZ_NONE = 0x0,
MZ_POSIX = 0x1, // POSIX semantics: set errno on failure
MZ_C11 = 0x2, // C11 aligned_alloc: size must be a multiple of alignment
});
/*
 * Set errno, but only for entry points with POSIX semantics (MZ_POSIX).
 * Off-simulator, errno is written through the direct pthread errno address
 * rather than the errno macro.
 */
static inline void
malloc_set_errno_fast(malloc_zone_options_t mzo, int err)
{
if (mzo & MZ_POSIX) {
#if TARGET_OS_SIMULATOR
errno = err;
#else
(*_pthread_errno_address_direct()) = err;
#endif
}
}
/*
 * Common malloc implementation behind all zone-malloc entry points.
 * Returns NULL for requests above MALLOC_ABSOLUTE_MAX_SIZE or when the
 * zone cannot satisfy the request; errno is set to ENOMEM on failure only
 * for MZ_POSIX callers.
 */
MALLOC_NOINLINE
static void *
_malloc_zone_malloc(malloc_zone_t *zone, size_t size, malloc_zone_options_t mzo)
{
MALLOC_TRACE(TRACE_malloc | DBG_FUNC_START, (uintptr_t)zone, size, 0, 0);
void *ptr = NULL;
// Periodic heap verification (MallocCheckHeap support).
if (malloc_check_start) {
internal_check();
}
if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
goto out;
}
ptr = zone->malloc(zone, size);
// Stack-logging / malloc-history hook, recorded after the allocation.
if (os_unlikely(malloc_logger)) {
malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)ptr, 0);
}
MALLOC_TRACE(TRACE_malloc | DBG_FUNC_END, (uintptr_t)zone, size, (uintptr_t)ptr, 0);
out:
if (os_unlikely(ptr == NULL)) {
malloc_set_errno_fast(mzo, ENOMEM);
}
return ptr;
}
/*
 * Public zone malloc: no POSIX errno semantics requested.
 */
void *
malloc_zone_malloc(malloc_zone_t *zone, size_t size)
{
	void *block = _malloc_zone_malloc(zone, size, MZ_NONE);
	return block;
}
/*
 * Common calloc implementation: zeroed allocation of num_items * size
 * bytes from zone, with errno set to ENOMEM on failure for MZ_POSIX
 * callers.
 * NOTE(review): unlike _malloc_zone_malloc there is no absolute-size guard
 * here; num_items * size overflow is presumably rejected inside
 * zone->calloc — confirm.  The logged size (num_items * size) may wrap on
 * overflow, affecting only the log record.
 */
MALLOC_NOINLINE
static void *
_malloc_zone_calloc(malloc_zone_t *zone, size_t num_items, size_t size,
malloc_zone_options_t mzo)
{
MALLOC_TRACE(TRACE_calloc | DBG_FUNC_START, (uintptr_t)zone, num_items, size, 0);
void *ptr;
// Periodic heap verification (MallocCheckHeap support).
if (malloc_check_start) {
internal_check();
}
ptr = zone->calloc(zone, num_items, size);
if (os_unlikely(malloc_logger)) {
malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE | MALLOC_LOG_TYPE_CLEARED, (uintptr_t)zone,
(uintptr_t)(num_items * size), 0, (uintptr_t)ptr, 0);
}
MALLOC_TRACE(TRACE_calloc | DBG_FUNC_END, (uintptr_t)zone, num_items, size, (uintptr_t)ptr);
if (os_unlikely(ptr == NULL)) {
malloc_set_errno_fast(mzo, ENOMEM);
}
return ptr;
}
/*
 * Public zone calloc: no POSIX errno semantics requested.
 */
void *
malloc_zone_calloc(malloc_zone_t *zone, size_t num_items, size_t size)
{
	void *block = _malloc_zone_calloc(zone, num_items, size, MZ_NONE);
	return block;
}
/*
 * Common valloc implementation: allocation via the zone's valloc hook,
 * rejecting requests above MALLOC_ABSOLUTE_MAX_SIZE.  errno is set to
 * ENOMEM on failure for MZ_POSIX callers.
 */
MALLOC_NOINLINE
static void *
_malloc_zone_valloc(malloc_zone_t *zone, size_t size, malloc_zone_options_t mzo)
{
MALLOC_TRACE(TRACE_valloc | DBG_FUNC_START, (uintptr_t)zone, size, 0, 0);
void *ptr = NULL;
// Periodic heap verification (MallocCheckHeap support).
if (malloc_check_start) {
internal_check();
}
if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
goto out;
}
ptr = zone->valloc(zone, size);
// Stack-logging / malloc-history hook.
if (os_unlikely(malloc_logger)) {
malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)ptr, 0);
}
MALLOC_TRACE(TRACE_valloc | DBG_FUNC_END, (uintptr_t)zone, size, (uintptr_t)ptr, 0);
out:
if (os_unlikely(ptr == NULL)) {
malloc_set_errno_fast(mzo, ENOMEM);
}
return ptr;
}
/*
 * Public zone valloc: no POSIX errno semantics requested.
 */
void *
malloc_zone_valloc(malloc_zone_t *zone, size_t size)
{
	void *block = _malloc_zone_valloc(zone, size, MZ_NONE);
	return block;
}
/*
 * Reallocate ptr within zone.  Requests above MALLOC_ABSOLUTE_MAX_SIZE
 * return NULL (the original block is left intact).  errno is not set here;
 * the POSIX realloc() wrapper handles that.
 */
void *
malloc_zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
{
MALLOC_TRACE(TRACE_realloc | DBG_FUNC_START, (uintptr_t)zone, (uintptr_t)ptr, size, 0);
void *new_ptr;
// Periodic heap verification (MallocCheckHeap support).
if (malloc_check_start) {
internal_check();
}
if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
return NULL;
}
new_ptr = zone->realloc(zone, ptr, size);
// Logged as a combined allocate+deallocate record.
if (os_unlikely(malloc_logger)) {
malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone,
(uintptr_t)ptr, (uintptr_t)size, (uintptr_t)new_ptr, 0);
}
MALLOC_TRACE(TRACE_realloc | DBG_FUNC_END, (uintptr_t)zone, (uintptr_t)ptr, size, (uintptr_t)new_ptr);
return new_ptr;
}
/*
 * Free ptr back to zone.  The trace records the first word of the block's
 * contents.  The deallocation is logged (and the heap optionally checked)
 * before the zone's free runs.
 */
void
malloc_zone_free(malloc_zone_t *zone, void *ptr)
{
MALLOC_TRACE(TRACE_free, (uintptr_t)zone, (uintptr_t)ptr, (ptr) ? *(uintptr_t*)ptr : 0, 0);
if (os_unlikely(malloc_logger)) {
malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)ptr, 0, 0, 0);
}
// Periodic heap verification (MallocCheckHeap support).
if (malloc_check_start) {
internal_check();
}
zone->free(zone, ptr);
}
/*
 * Size-aware free path: used when the caller already knows the block size,
 * letting the zone skip its own size lookup.  The zone must provide
 * free_definite_size (callers check version >= 6 and the pointer).
 */
static void
malloc_zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
{
MALLOC_TRACE(TRACE_free, (uintptr_t)zone, (uintptr_t)ptr, size, (ptr && size) ? *(uintptr_t*)ptr : 0);
if (os_unlikely(malloc_logger)) {
malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)ptr, 0, 0, 0);
}
// Periodic heap verification (MallocCheckHeap support).
if (malloc_check_start) {
internal_check();
}
zone->free_definite_size(zone, ptr, size);
}
/*
 * Return the registered zone owning ptr, or NULL when ptr is NULL or not
 * claimed by any registered zone.
 */
malloc_zone_t *
malloc_zone_from_ptr(const void *ptr)
{
	return ptr ? find_registered_zone(ptr, NULL) : NULL;
}
/*
 * Common memalign implementation for posix_memalign/aligned_alloc/
 * malloc_zone_memalign.  Validates the POSIX alignment rules (power of
 * two, at least sizeof(void *)) and, for MZ_C11 callers, the C11 rule that
 * size be a multiple of alignment.  Returns NULL on failure; MZ_POSIX
 * callers get errno set to EINVAL (bad alignment/size) or ENOMEM.
 */
MALLOC_NOINLINE
static void *
_malloc_zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size,
		malloc_zone_options_t mzo)
{
	MALLOC_TRACE(TRACE_memalign | DBG_FUNC_START, (uintptr_t)zone, alignment, size, 0);
	void *ptr = NULL;
	int err = ENOMEM;
	// The memalign zone hook only exists from zone version 5 on.
	if (zone->version < 5) {
		goto out;
	}
	// Periodic heap verification (MallocCheckHeap support).
	if (malloc_check_start) {
		internal_check();
	}
	if (size > MALLOC_ABSOLUTE_MAX_SIZE) {
		goto out;
	}
	// POSIX: alignment must be a power of two and at least sizeof(void *).
	if (alignment < sizeof(void *) || 0 != (alignment & (alignment - 1))) {
		err = EINVAL;
		goto out;
	}
	// C11 aligned_alloc additionally requires size % alignment == 0.
	if ((mzo & MZ_C11) && (size & (alignment - 1)) != 0) {
		err = EINVAL;
		goto out;
	}
	if (!(zone->memalign)) {
		goto out;
	}
	ptr = zone->memalign(zone, alignment, size);
	// Stack-logging / malloc-history hook.
	if (os_unlikely(malloc_logger)) {
		malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0, (uintptr_t)ptr, 0);
	}
	MALLOC_TRACE(TRACE_memalign | DBG_FUNC_END, (uintptr_t)zone, alignment, size, (uintptr_t)ptr);
out:
	if (os_unlikely(ptr == NULL)) {
		// malloc_set_errno_fast is itself gated on MZ_POSIX, so the extra
		// explicit (mzo & MZ_POSIX) test previously wrapped around this
		// call was redundant and has been removed.
		malloc_set_errno_fast(mzo, err);
	}
	return ptr;
}
/*
 * Public zone memalign: no POSIX errno semantics requested.
 */
void *
malloc_zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
{
	void *block = _malloc_zone_memalign(zone, alignment, size, MZ_NONE);
	return block;
}
/*
 * Ask zone whether it could have allocated ptr.  Zones predating version
 * 10, or zones without a claimed_address hook, must conservatively answer
 * "yes"; NULL is never claimed.
 */
boolean_t
malloc_zone_claimed_address(malloc_zone_t *zone, void *ptr)
{
	if (ptr == NULL) {
		return false;
	}
	// Periodic heap verification (MallocCheckHeap support).
	if (malloc_check_start) {
		internal_check();
	}
	boolean_t supported = (zone->version >= 10) && (zone->claimed_address != NULL);
	return supported ? zone->claimed_address(zone, ptr) : true;
}
/*
 * Add zone to the global registry under the malloc lock.
 */
void
malloc_zone_register(malloc_zone_t *zone)
{
MALLOC_LOCK();
malloc_zone_register_while_locked(zone);
MALLOC_UNLOCK();
}
/*
 * Remove z from the registry.  The unregistered slot is backfilled with
 * the last zone, so registration order beyond slot 0 is not preserved.
 * After the swap, the live/drain reader counters are exchanged and we spin
 * until the drain counter reaches zero, so lock-free readers that were
 * iterating the zone array (see malloc_claimed_address) finish before the
 * caller may tear the zone down.
 */
void
malloc_zone_unregister(malloc_zone_t *z)
{
unsigned index;
if (malloc_num_zones == 0) {
return;
}
MALLOC_LOCK();
for (index = 0; index < malloc_num_zones; ++index) {
if (z != malloc_zones[index]) {
continue;
}
// Zone array is kept read-only between updates.
size_t protect_size = malloc_num_zones_allocated * sizeof(malloc_zone_t *);
mprotect(malloc_zones, protect_size, PROT_READ | PROT_WRITE);
malloc_zones[index] = malloc_zones[malloc_num_zones - 1];
--malloc_num_zones;
mprotect(malloc_zones, protect_size, PROT_READ);
// Swap the reader counters and wait for in-flight readers to drain.
int32_t volatile *p = pFRZCounterLive;
pFRZCounterLive = pFRZCounterDrain;
pFRZCounterDrain = p;
OSMemoryBarrier();
while (0 != *pFRZCounterDrain) {
yield();
}
MALLOC_UNLOCK();
return;
}
MALLOC_UNLOCK();
malloc_report(ASL_LEVEL_ERR, "*** malloc_zone_unregister() failed for %p\n", z);
}
/*
 * Replace z's name.  Zone structs are kept read-only, so the struct is
 * unprotected for the update and re-protected afterwards.  The old name
 * buffer is released with free(); the new name is copied into a buffer
 * allocated from z itself.  On allocation failure the zone is left
 * unnamed.
 */
void
malloc_set_zone_name(malloc_zone_t *z, const char *name)
{
char *newName;
mprotect(z, sizeof(malloc_zone_t), PROT_READ | PROT_WRITE);
if (z->zone_name) {
free((char *)z->zone_name);
z->zone_name = NULL;
}
if (name) {
size_t buflen = strlen(name) + 1;
newName = _malloc_zone_malloc(z, buflen, MZ_NONE);
if (newName) {
strlcpy(newName, name, buflen);
z->zone_name = (const char *)newName;
} else {
z->zone_name = NULL;
}
}
mprotect(z, sizeof(malloc_zone_t), PROT_READ);
}
/*
 * Accessor for the name installed by malloc_set_zone_name(); may be NULL.
 */
const char *
malloc_get_zone_name(malloc_zone_t *zone)
{
	const char *name = zone->zone_name;
	return name;
}
/*
 * POSIX malloc: allocate from the default zone, setting errno on failure.
 */
void *
malloc(size_t size)
{
	malloc_zone_t *zone = default_zone;
	return _malloc_zone_malloc(zone, size, MZ_POSIX);
}
/*
 * C11 aligned_alloc: sets errno (MZ_POSIX) and enforces the C11 rule that
 * size be a multiple of alignment (MZ_C11).
 */
void *
aligned_alloc(size_t alignment, size_t size)
{
	return _malloc_zone_memalign(default_zone, alignment, size, MZ_C11 | MZ_POSIX);
}
/*
 * POSIX calloc: zeroed allocation from the default zone, errno on failure.
 */
void *
calloc(size_t num_items, size_t size)
{
	malloc_zone_t *zone = default_zone;
	return _malloc_zone_calloc(zone, num_items, size, MZ_POSIX);
}
/*
 * Standard free().  Looks up the owning zone; a pointer no registered
 * zone claims is reported (and crashes the process when abort-on-error
 * debug flags are set).  Prefers the size-aware free path when the zone
 * offers one (version >= 6), since the lookup already produced the size.
 */
void
free(void *ptr)
{
malloc_zone_t *zone;
size_t size;
if (!ptr) {
return;
}
zone = find_registered_zone(ptr, &size);
if (!zone) {
int flags = MALLOC_REPORT_DEBUG | MALLOC_REPORT_NOLOG;
if ((malloc_debug_flags & (MALLOC_ABORT_ON_CORRUPTION | MALLOC_ABORT_ON_ERROR))) {
flags = MALLOC_REPORT_CRASH | MALLOC_REPORT_NOLOG;
}
malloc_report(flags,
"*** error for object %p: pointer being freed was not allocated\n", ptr);
} else if (zone->version >= 6 && zone->free_definite_size) {
malloc_zone_free_definite_size(zone, ptr, size);
} else {
malloc_zone_free(zone, ptr);
}
}
/*
 * Standard realloc().  realloc(NULL, n) behaves like malloc(n).
 * realloc(p, 0) allocates a fresh zero-size block and frees p only if the
 * new allocation succeeded.  A pointer no registered zone claims is
 * reported (and may crash per the abort-on-error debug flags).  errno is
 * set to ENOMEM whenever NULL is returned.
 */
void *
realloc(void *in_ptr, size_t new_size)
{
void *retval = NULL;
void *old_ptr;
malloc_zone_t *zone;
// Size zero is treated as a fresh allocation; the old pointer is released
// at the bottom, after the new block exists.
old_ptr = (new_size == 0) ? NULL : in_ptr;
if (!old_ptr) {
retval = malloc_zone_malloc(default_zone, new_size);
} else {
zone = find_registered_zone(old_ptr, NULL);
if (!zone) {
int flags = MALLOC_REPORT_DEBUG | MALLOC_REPORT_NOLOG;
if (malloc_debug_flags & (MALLOC_ABORT_ON_CORRUPTION | MALLOC_ABORT_ON_ERROR)) {
flags = MALLOC_REPORT_CRASH | MALLOC_REPORT_NOLOG;
}
malloc_report(flags, "*** error for object %p: pointer being realloc'd was not allocated\n", in_ptr);
} else {
retval = malloc_zone_realloc(zone, old_ptr, new_size);
}
}
if (retval == NULL) {
malloc_set_errno_fast(MZ_POSIX, ENOMEM);
} else if (new_size == 0) {
free(in_ptr);
}
return retval;
}
/*
 * POSIX valloc: allocate from the default zone, setting errno on failure.
 */
void *
valloc(size_t size)
{
	malloc_zone_t *zone = default_zone;
	return _malloc_zone_valloc(zone, size, MZ_POSIX);
}
/*
 * Historical alias for free().
 */
extern void
vfree(void *ptr)
{
	free(ptr);
}
/*
 * Return the allocated size of ptr, or 0 when ptr is NULL or is not
 * claimed by any registered zone.
 */
size_t
malloc_size(const void *ptr)
{
	size_t size = 0;
	if (ptr != NULL) {
		(void)find_registered_zone(ptr, &size);
	}
	return size;
}
/*
 * Round size up to the default zone's actual allocation granularity.
 */
size_t
malloc_good_size(size_t size)
{
	malloc_zone_t *z = default_zone;
	return z->introspect->good_size(z, size);
}
int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
void *retval;
retval = malloc_zone_memalign(default_zone, alignment, size);
if (retval == NULL) {
if (alignment < sizeof(void *) || 0 != (alignment & (alignment - 1))) { return EINVAL;
}
return ENOMEM;
} else {
*memptr = retval; return 0;
}
}
/*
 * Returns whether any registered zone claims ptr.  The lite zone (when
 * present) and zone 0 are checked before taking the reader counter; the
 * remaining zones are scanned while holding a count on the live FRZ
 * counter, which malloc_zone_unregister() drains before letting a zone be
 * torn down.
 */
boolean_t
malloc_claimed_address(void *ptr)
{
if (malloc_num_zones == 0) {
return false;
}
if (lite_zone && malloc_zone_claimed_address(lite_zone, ptr)) {
return true;
}
if (malloc_zone_claimed_address(malloc_zones[0], ptr)) {
return true;
}
// Announce ourselves as a lock-free reader of the zone array.
int32_t volatile *pFRZCounter = pFRZCounterLive;
OSAtomicIncrement32Barrier(pFRZCounter);
// Snapshot the zone count once for this scan.
int32_t limit = *(int32_t volatile *)&malloc_num_zones;
malloc_zone_t **zones = &malloc_zones[1];
boolean_t result = false;
for (unsigned index = 1; index < limit; ++index, ++zones) {
malloc_zone_t *zone = *zones;
if (malloc_zone_claimed_address(zone, ptr)) {
result = true;
break;
}
}
OSAtomicDecrement32Barrier(pFRZCounter);
return result;
}
/*
 * reallocarray: realloc to nmemb * size bytes, failing with ENOMEM (and
 * NULL) when the multiplication would overflow.
 */
void *
reallocarray(void * in_ptr, size_t nmemb, size_t size)
{
	size_t total;
	if (os_mul_overflow(nmemb, size, &total)) {
		malloc_set_errno_fast(MZ_POSIX, ENOMEM);
		return NULL;
	}
	return realloc(in_ptr, total);
}
/*
 * reallocarrayf: like reallocarray, but via reallocf, which frees the
 * original block on failure.
 */
void *
reallocarrayf(void * in_ptr, size_t nmemb, size_t size)
{
	size_t total;
	if (os_mul_overflow(nmemb, size, &total)) {
		malloc_set_errno_fast(MZ_POSIX, ENOMEM);
		return NULL;
	}
	return reallocf(in_ptr, total);
}
static malloc_zone_t *
find_registered_purgeable_zone(void *ptr)
{
if (!ptr) {
return NULL;
}
size_t size = 0;
malloc_zone_t *zone = find_registered_zone(ptr, &size);
if (!zone) {
return NULL;
}
if ((size < vm_page_size) || ((size % vm_page_size) != 0)) {
return NULL;
}
return zone;
}
void
malloc_make_purgeable(void *ptr)
{
malloc_zone_t *zone = find_registered_purgeable_zone(ptr);
if (!zone) {
return;
}
int state = VM_PURGABLE_VOLATILE;
vm_purgable_control(mach_task_self(), (vm_address_t)ptr, VM_PURGABLE_SET_STATE, &state);
return;
}
int
malloc_make_nonpurgeable(void *ptr)
{
malloc_zone_t *zone = find_registered_purgeable_zone(ptr);
if (!zone) {
return 0;
}
int state = VM_PURGABLE_NONVOLATILE;
vm_purgable_control(mach_task_self(), (vm_address_t)ptr, VM_PURGABLE_SET_STATE, &state);
if (state == VM_PURGABLE_EMPTY) {
return EFAULT;
}
return 0;
}
/*
 * Intentionally a no-op in this build; kept so callers link.
 */
void
malloc_enter_process_memory_limit_warn_mode(void)
{
}
/*
 * Dispatch kernel memorystatus notifications: relieve zone pressure on a
 * pressure warning, load MallocStackLogging when explicitly requested (or,
 * in builds with memory-resource-exception handling, when nearing process
 * limits), and forward the raw event to MSL's handler when loaded.
 */
void
malloc_memory_event_handler(unsigned long event)
{
if (event & NOTE_MEMORYSTATUS_PRESSURE_WARN) {
malloc_zone_pressure_relief(0, 0);
}
// Load MSL only when MSL_STATUS is the sole bit set in the event.
if ((event & NOTE_MEMORYSTATUS_MSL_STATUS) != 0 && (event & ~NOTE_MEMORYSTATUS_MSL_STATUS) == 0) {
malloc_register_stack_logger();
}
#if ENABLE_MEMORY_RESOURCE_EXCEPTION_HANDLING
if (event & (NOTE_MEMORYSTATUS_PROC_LIMIT_WARN | NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL | NOTE_MEMORYSTATUS_PRESSURE_CRITICAL)) {
malloc_register_stack_logger();
}
#endif // ENABLE_MEMORY_RESOURCE_EXCEPTION_HANDLING
if (msl.handle_memory_event) {
msl.handle_memory_event(event);
}
}
/*
 * Ask zones to return memory to the system.  With a NULL zone, iterate all
 * registered zones under the malloc lock, accumulating relief until goal
 * bytes are reached; goal 0 means ask every zone for as much as possible.
 * Zones older than version 8 or lacking a pressure_relief hook are
 * skipped.  Returns the total bytes reported relieved.
 */
size_t
malloc_zone_pressure_relief(malloc_zone_t *zone, size_t goal)
{
if (!zone) {
unsigned index = 0;
size_t total = 0;
MALLOC_LOCK();
while (index < malloc_num_zones) {
zone = malloc_zones[index++];
if (zone->version < 8) {
continue;
}
if (NULL == zone->pressure_relief) {
continue;
}
if (0 == goal) {
total += zone->pressure_relief(zone, 0);
} else if (goal > total) {
// Only ask for the remainder still needed to reach the goal.
total += zone->pressure_relief(zone, goal - total);
} else {
break;
}
}
MALLOC_UNLOCK();
return total;
} else {
if (zone->version < 8) {
return 0;
}
if (NULL == zone->pressure_relief) {
return 0;
}
return zone->pressure_relief(zone, goal);
}
}
/*
 * Allocate up to num_requested blocks of the same size in one call.
 * Returns the number of blocks actually allocated; 0 when the zone has no
 * batch_malloc support.  Each allocated block is reported to the malloc
 * logger individually.
 */
unsigned
malloc_zone_batch_malloc(malloc_zone_t *zone, size_t size, void **results, unsigned num_requested)
{
if (!zone->batch_malloc) {
return 0;
}
// Periodic heap verification (MallocCheckHeap support).
if (malloc_check_start) {
internal_check();
}
unsigned batched = zone->batch_malloc(zone, size, results, num_requested);
if (os_unlikely(malloc_logger)) {
unsigned index = 0;
while (index < batched) {
malloc_logger(MALLOC_LOG_TYPE_ALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)size, 0,
(uintptr_t)results[index], 0);
index++;
}
}
return batched;
}
/*
 * Free num blocks in one call.  Every deallocation is logged before any
 * memory is released.  Falls back to per-pointer zone->free when the zone
 * has no batch_free support.
 */
void
malloc_zone_batch_free(malloc_zone_t *zone, void **to_be_freed, unsigned num)
{
// Periodic heap verification (MallocCheckHeap support).
if (malloc_check_start) {
internal_check();
}
if (os_unlikely(malloc_logger)) {
unsigned index = 0;
while (index < num) {
malloc_logger(
MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, (uintptr_t)zone, (uintptr_t)to_be_freed[index], 0, 0, 0);
index++;
}
}
if (zone->batch_free) {
zone->batch_free(zone, to_be_freed, num);
} else {
void (*free_fun)(malloc_zone_t *, void *) = zone->free;
while (num--) {
void *ptr = *to_be_freed++;
free_fun(zone, ptr);
}
}
}
/*
 * Out-of-process introspection: read the target task's zone pointer array
 * through the supplied memory reader (defaulting to
 * _malloc_default_reader).  The addresses of this process's own globals
 * are used as the remote addresses, which assumes the target runs the same
 * libmalloc layout.  On success, *addresses points at reader-owned memory
 * holding *count zone pointers.
 */
kern_return_t
malloc_get_all_zones(task_t task, memory_reader_t reader, vm_address_t **addresses, unsigned *count)
{
vm_address_t remote_malloc_zones = (vm_address_t)&malloc_zones;
vm_address_t remote_malloc_num_zones = (vm_address_t)&malloc_num_zones;
kern_return_t err;
vm_address_t zones_address;
vm_address_t *zones_address_ref;
unsigned num_zones;
unsigned *num_zones_ref;
if (!reader) {
reader = _malloc_default_reader;
}
// Read the remote malloc_zones pointer.
err = reader(task, remote_malloc_zones, sizeof(void *), (void **)&zones_address_ref);
if (err) {
malloc_report(ASL_LEVEL_ERR, "*** malloc_get_all_zones: error reading zones_address at %p\n", (void *)remote_malloc_zones);
return err;
}
zones_address = *zones_address_ref;
// Read the remote zone count.
err = reader(task, remote_malloc_num_zones, sizeof(unsigned), (void **)&num_zones_ref);
if (err) {
malloc_report(ASL_LEVEL_ERR, "*** malloc_get_all_zones: error reading num_zones at %p\n", (void *)remote_malloc_num_zones);
return err;
}
num_zones = *num_zones_ref;
*count = num_zones;
// Read the zone pointer array itself.
err = reader(task, zones_address, sizeof(malloc_zone_t *) * num_zones, (void **)addresses);
if (err) {
malloc_report(ASL_LEVEL_ERR, "*** malloc_get_all_zones: error reading zones at %p\n", &zones_address);
return err;
}
return err;
}
/*
 * Debug helper: print which registered zone (if any) owns ptr.
 */
void
malloc_zone_print_ptr_info(void *ptr)
{
	if (ptr == NULL) {
		return;
	}
	malloc_zone_t *owner = malloc_zone_from_ptr(ptr);
	if (owner != NULL) {
		printf("ptr %p in registered zone %p\n", ptr, owner);
	} else {
		printf("ptr %p not in heap\n", ptr);
	}
}
/*
 * Run a zone's introspection consistency check.  With a NULL zone, every
 * registered zone is checked (all of them, even after a failure) and the
 * result is false if any check failed.
 */
boolean_t
malloc_zone_check(malloc_zone_t *zone)
{
	if (zone) {
		return zone->introspect->check(zone);
	}
	boolean_t all_ok = 1;
	for (unsigned i = 0; i < malloc_num_zones; i++) {
		malloc_zone_t *z = malloc_zones[i];
		if (!z->introspect->check(z)) {
			all_ok = 0;
		}
	}
	return all_ok;
}
/*
 * Print a zone's introspection report; with a NULL zone, print every
 * registered zone.
 */
void
malloc_zone_print(malloc_zone_t *zone, boolean_t verbose)
{
	if (zone) {
		zone->introspect->print(zone, verbose);
		return;
	}
	for (unsigned i = 0; i < malloc_num_zones; i++) {
		malloc_zone_t *z = malloc_zones[i];
		z->introspect->print(z, verbose);
	}
}
/*
 * Fill *stats for one zone, or — with a NULL zone — the sum over all
 * registered zones.
 */
void
malloc_zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats)
{
	if (zone) {
		zone->introspect->statistics(zone, stats);
		return;
	}
	memset(stats, 0, sizeof(*stats));
	for (unsigned i = 0; i < malloc_num_zones; i++) {
		malloc_zone_t *z = malloc_zones[i];
		malloc_statistics_t cur;
		z->introspect->statistics(z, &cur);
		stats->blocks_in_use += cur.blocks_in_use;
		stats->size_in_use += cur.size_in_use;
		stats->max_size_in_use += cur.max_size_in_use;
		stats->size_allocated += cur.size_allocated;
	}
}
/*
 * Invoke a zone's introspection log hook for address; with a NULL zone,
 * every registered zone is asked.
 */
void
malloc_zone_log(malloc_zone_t *zone, void *address)
{
	if (zone) {
		zone->introspect->log(zone, address);
		return;
	}
	for (unsigned i = 0; i < malloc_num_zones; i++) {
		malloc_zone_t *z = malloc_zones[i];
		z->introspect->log(z, address);
	}
}
/*
 * Test/debug hook: install a fixed CPU-number override (consulted in place
 * of the real CPU number) and propagate it to the nano allocator when
 * built in.
 */
void
mag_set_thread_index(unsigned int index)
{
_os_cpu_number_override = index;
#if CONFIG_NANOZONE
nano_common_cpu_number_override_set();
#endif // CONFIG_NANOZONE
}
/*
 * Handler returned by malloc_error(): reports the error code and aborts
 * with a crash-log message, or — when USE_SLEEP_RATHER_THAN_ABORT is set —
 * sleeps for an hour so a debugger can attach instead.
 */
static void
DefaultMallocError(int x)
{
#if USE_SLEEP_RATHER_THAN_ABORT
malloc_report(ASL_LEVEL_ERR, "*** error %d\n", x);
sleep(3600);
#else
_SIMPLE_STRING b = _simple_salloc();
if (b) {
_simple_sprintf(b, "*** error %d", x);
malloc_report(MALLOC_REPORT_NOLOG, "%s\n", _simple_string(b));
_os_set_crash_log_message_dynamic(_simple_string(b));
} else {
// Allocation failed: fall back to a static crash message.
malloc_report(MALLOC_REPORT_NOLOG, "*** error %d\n", x);
_os_set_crash_log_message("*** DefaultMallocError called");
}
abort();
#endif
}
/*
 * Historical API: the supplied handler is ignored; the built-in default
 * handler is always returned.
 */
void (*malloc_error(void (*func)(int)))(int)
{
return DefaultMallocError;
}
/*
 * Pre-fork: take the registry lock, force-lock every zone, then run the
 * optional callout (the MSL fork-prepare hook).  _malloc_unlock_all and
 * _malloc_reinit_lock_all undo this in the opposite order.
 */
static void
_malloc_lock_all(void (*callout)(void))
{
unsigned index = 0;
MALLOC_LOCK();
while (index < malloc_num_zones) {
malloc_zone_t *zone = malloc_zones[index++];
zone->introspect->force_lock(zone);
}
if (callout) {
callout();
}
}
/*
 * Post-fork (parent): run the callout first, then force-unlock every zone
 * and finally release the registry lock taken by _malloc_lock_all().
 */
static void
_malloc_unlock_all(void (*callout)(void))
{
unsigned index = 0;
if (callout) {
callout();
}
while (index < malloc_num_zones) {
malloc_zone_t *zone = malloc_zones[index++];
zone->introspect->force_unlock(zone);
}
MALLOC_UNLOCK();
}
/*
 * Post-fork (child): run the callout, then restore every zone's locks.
 * Zones at version >= 9 reinitialize their locks; older zones only offer
 * force_unlock.  The registry lock itself is reinitialized at the end.
 */
static void
_malloc_reinit_lock_all(void (*callout)(void))
{
unsigned index = 0;
if (callout) {
callout();
}
while (index < malloc_num_zones) {
malloc_zone_t *zone = malloc_zones[index++];
if (zone->version < 9) { zone->introspect->force_unlock(zone);
} else {
zone->introspect->reinit_lock(zone);
}
}
MALLOC_REINIT_LOCK();
}
/*
 * atfork prepare hook: lock everything, then run MSL's prepare hook if the
 * framework is loaded.
 */
void
_malloc_fork_prepare(void)
{
	_malloc_lock_all(msl.fork_prepare);
}
/*
 * atfork parent hook: run MSL's parent hook if loaded, then unlock
 * everything locked in _malloc_fork_prepare().
 */
void
_malloc_fork_parent(void)
{
	_malloc_unlock_all(msl.fork_parent);
}
/*
 * atfork child hook: let the nano zone repair its post-fork state first
 * (only when pre-fork initialization completed), then reinitialize all
 * zone locks and run the MSL child hook.
 */
void
_malloc_fork_child(void)
{
#if CONFIG_NANOZONE
if (_malloc_entropy_initialized) {
if (_malloc_engaged_nano == NANO_V2) {
nanov2_forked_zone((nanozonev2_t *)initial_nano_zone);
} else if (_malloc_engaged_nano == NANO_V1) {
nano_forked_zone((nanozone_t *)initial_nano_zone);
}
}
#endif
return _malloc_reinit_lock_all(msl.fork_child);
}
struct mstats
mstats(void)
{
malloc_statistics_t s;
struct mstats m;
malloc_zone_statistics(NULL, &s);
m.bytes_total = s.size_allocated;
m.chunks_used = s.blocks_in_use;
m.bytes_used = s.size_in_use;
m.chunks_free = 0;
m.bytes_free = m.bytes_total - m.bytes_used;
return (m);
}
/*
 * Turn on discharge checking for zone.  Requires zone version >= 7 and an
 * enable_discharge_checking hook; returns FALSE otherwise.
 */
boolean_t
malloc_zone_enable_discharge_checking(malloc_zone_t *zone)
{
	if (zone->version >= 7 && zone->introspect->enable_discharge_checking != NULL) {
		return zone->introspect->enable_discharge_checking(zone);
	}
	return FALSE;
}
/*
 * Turn off discharge checking for zone; a no-op for zones predating
 * version 7 or lacking the hook.
 */
void
malloc_zone_disable_discharge_checking(malloc_zone_t *zone)
{
	if (zone->version >= 7 && zone->introspect->disable_discharge_checking != NULL) {
		zone->introspect->disable_discharge_checking(zone);
	}
}
/*
 * Mark memory as discharged.  With a NULL zone the owning zone is looked
 * up from the pointer; a no-op when no zone is found, the zone predates
 * version 7, or it lacks the discharge hook.
 */
void
malloc_zone_discharge(malloc_zone_t *zone, void *memory)
{
	if (zone == NULL) {
		zone = malloc_zone_from_ptr(memory);
	}
	if (zone == NULL) {
		return;
	}
	if (zone->version >= 7 && zone->introspect->discharge != NULL) {
		zone->introspect->discharge(zone, memory);
	}
}
/*
 * Enumerate discharged pointers via the report_discharged block.  With a
 * NULL zone, every registered zone that supports the hook (version >= 7)
 * is visited.
 */
void
malloc_zone_enumerate_discharged_pointers(malloc_zone_t *zone, void (^report_discharged)(void *memory, void *info))
{
	if (zone) {
		if (zone->version >= 7 && zone->introspect->enumerate_discharged_pointers != NULL) {
			zone->introspect->enumerate_discharged_pointers(zone, report_discharged);
		}
		return;
	}
	for (unsigned i = 0; i < malloc_num_zones; i++) {
		malloc_zone_t *z = malloc_zones[i];
		if (z->version >= 7 && z->introspect->enumerate_discharged_pointers != NULL) {
			z->introspect->enumerate_discharged_pointers(z, report_discharged);
		}
	}
}
#if PHASE_OUT_OLD_MALLOC
#error PHASE OUT THE FOLLOWING FUNCTIONS
#endif
/*
 * Obsolete API retained for compatibility: warns at most once (and only in
 * PHASE_OUT_OLD_MALLOC builds), otherwise does nothing.
 */
void
set_malloc_singlethreaded(boolean_t single)
{
static boolean_t warned = 0;
if (!warned) {
#if PHASE_OUT_OLD_MALLOC
malloc_report(ASL_LEVEL_ERR, "*** OBSOLETE: set_malloc_singlethreaded(%d)\n", single);
#endif
warned = 1;
}
}
/*
 * Obsolete API: warn once, do nothing.
 */
void
malloc_singlethreaded(void)
{
	static boolean_t warned = 0;
	if (warned) {
		return;
	}
	malloc_report(ASL_LEVEL_ERR, "*** OBSOLETE: malloc_singlethreaded()\n");
	warned = 1;
}
/*
 * Obsolete API: reports on every call and always returns 0; level ignored.
 */
int
malloc_debug(int level)
{
	malloc_report(ASL_LEVEL_ERR, "*** OBSOLETE: malloc_debug()\n");
	return 0;
}
#pragma mark -
#pragma mark Malloc Stack Logging
/*
 * Called once early initialization is complete and dlopen/dlsym may be
 * used.  Captures the dynamic-loader entry points, loads the
 * MallocStackLogging framework if any MallocStackLogging* environment
 * variable is present (letting MSL parse its own flags from the
 * environment), and runs MSL's initializer if the framework is loaded.
 */
void
__stack_logging_early_finished(const struct _malloc_functions *funcs)
{
#if !TARGET_OS_DRIVERKIT
_dlopen = funcs->dlopen;
_dlsym = funcs->dlsym;
#endif
// Scan the environment for any variable whose name starts with
// "MallocStackLogging".
const char **env = (const char**) *_NSGetEnviron();
for (const char **e = env; *e; e++) {
if (0==strncmp(*e, "MallocStackLogging", 18)) {
malloc_register_stack_logger();
void (*msl_set_flags_from_environment) (const char **env);
msl_set_flags_from_environment = _dlsym(msl.dylib, "msl_set_flags_from_environment");
if (msl_set_flags_from_environment) {
msl_set_flags_from_environment(env);
}
break;
}
}
if (msl.dylib) {
void (*initialize) () = _dlsym(msl.dylib, "msl_initialize");
if (initialize) {
initialize();
}
}
}
// os_once guard ensuring the msl vtable below is populated exactly once.
static os_once_t _register_msl_dylib_pred;
/*
 * os_once callback: bind the MallocStackLogging framework's entry points
 * into the msl vtable, and install its lite-zone hooks when exported.
 * Missing symbols simply leave the corresponding slot NULL.
 */
static void
register_msl_dylib(void *dylib)
{
if (!dylib) {
return;
}
msl.dylib = dylib;
msl.handle_memory_event = _dlsym(dylib, "msl_handle_memory_event");
msl.stack_logging_locked = _dlsym(dylib, "msl_stack_logging_locked");
msl.fork_prepare = _dlsym(dylib, "msl_fork_prepare");
msl.fork_child = _dlsym(dylib, "msl_fork_child");
msl.fork_parent = _dlsym(dylib, "msl_fork_parent");
msl.get_frames_for_address = _dlsym(dylib, "msl_get_frames_for_address");
msl.stackid_for_vm_region = _dlsym(dylib, "msl_stackid_for_vm_region");
msl.get_frames_for_stackid = _dlsym(dylib, "msl_get_frames_for_stackid");
msl.uniquing_table_read_stack = _dlsym(dylib, "msl_uniquing_table_read_stack");
void (*msl_copy_msl_lite_hooks) (struct _malloc_msl_lite_hooks_s *hooksp, size_t size);
msl_copy_msl_lite_hooks = _dlsym(dylib, "msl_copy_msl_lite_hooks");
if (msl_copy_msl_lite_hooks) {
set_msl_lite_hooks(msl_copy_msl_lite_hooks);
}
}
/*
 * Load MallocStackLogging.framework on demand and bind its entry points
 * (exactly once, via os_once).  Returns true when the msl vtable ends up
 * bound to the dylib handle obtained here.  NOTE(review): under a race,
 * dlopen may run more than once; os_once still populates the vtable only
 * once, and a loser whose handle differs returns false — confirm intended.
 */
MALLOC_EXPORT
boolean_t
malloc_register_stack_logger(void)
{
if (msl.dylib != NULL) {
return true;
}
void *dylib = _dlopen("/System/Library/PrivateFrameworks/MallocStackLogging.framework/MallocStackLogging", RTLD_GLOBAL);
if (dylib == NULL) {
return false;
}
os_once(&_register_msl_dylib_pred, dylib, register_msl_dylib);
if (!msl.dylib) {
malloc_report(ASL_LEVEL_WARNING, "failed to load MallocStackLogging.framework\n");
}
return msl.dylib == dylib;
}
// Shared-memory address for stack-logging readers.  NOTE(review): never
// written in the code visible here — presumably set by MSL or external
// tooling; confirm.
uint64_t __mach_stack_logging_shared_memory_address = 0;
#pragma mark -
#pragma mark Malloc Stack Logging - Legacy stubs
/*
 * Legacy stub: enable stack logging by forwarding to the MSL framework.
 * Returns false when the framework or its entry point is unavailable.
 */
MALLOC_EXPORT
boolean_t
turn_on_stack_logging(stack_logging_mode_type mode)
{
	malloc_register_stack_logger();
	if (msl.dylib == NULL) {
		return false;
	}
	boolean_t (*turn_on)(stack_logging_mode_type) =
			_dlsym(msl.dylib, "msl_turn_on_stack_logging");
	return turn_on ? turn_on(mode) : false;
}
MALLOC_EXPORT
void turn_off_stack_logging(void)
{
malloc_register_stack_logger();
if (!msl.dylib) {
return;
}
void (*msl_turn_off_stack_logging) ();
msl_turn_off_stack_logging = _dlsym(msl.dylib, "msl_turn_off_stack_logging");
if (msl_turn_off_stack_logging) {
msl_turn_off_stack_logging();
}
}
/*
 * Legacy stub: forwards to msl_start_reading in the MSL framework.
 */
kern_return_t
__mach_stack_logging_start_reading(task_t task, vm_address_t shared_memory_address, boolean_t *uses_lite_mode)
{
	typedef kern_return_t (*start_reading_fn_t)(task_t, vm_address_t, boolean_t *);

	malloc_register_stack_logger();
	if (msl.dylib == NULL) {
		return KERN_FAILURE;
	}
	start_reading_fn_t fn = _dlsym(msl.dylib, "msl_start_reading");
	return fn ? fn(task, shared_memory_address, uses_lite_mode) : KERN_FAILURE;
}
/*
 * Legacy stub: forwards to msl_stop_reading in the MSL framework.
 */
kern_return_t
__mach_stack_logging_stop_reading(task_t task)
{
	typedef kern_return_t (*stop_reading_fn_t)(task_t);

	malloc_register_stack_logger();
	if (msl.dylib == NULL) {
		return KERN_FAILURE;
	}
	stop_reading_fn_t fn = _dlsym(msl.dylib, "msl_stop_reading");
	return fn ? fn(task) : KERN_FAILURE;
}
/*
 * Legacy stub: fetch the recorded stack for an address via the MSL vtable
 * entry bound by register_msl_dylib(); fails when MSL is unavailable.
 */
kern_return_t
__mach_stack_logging_get_frames(task_t task,
		mach_vm_address_t address,
		mach_vm_address_t *stack_frames_buffer,
		uint32_t max_stack_frames,
		uint32_t *count)
{
	malloc_register_stack_logger();
	if (msl.get_frames_for_address == NULL) {
		return KERN_FAILURE;
	}
	return msl.get_frames_for_address(task, address, stack_frames_buffer,
			max_stack_frames, count);
}
/*
 * Legacy stub: look up the stack id recorded for a VM region.  Returns an
 * all-ones sentinel when MSL is unavailable.
 */
uint64_t
__mach_stack_logging_stackid_for_vm_region(task_t task, mach_vm_address_t address)
{
	malloc_register_stack_logger();
	return msl.stackid_for_vm_region
			? msl.stackid_for_vm_region(task, address)
			: -1ull;
}
/*
 * Legacy stub: expand a uniqued stack id into frames.  Older callers do
 * not care whether the last frame is a thread id, so NULL is passed.
 */
kern_return_t
__mach_stack_logging_frames_for_uniqued_stack(task_t task,
		uint64_t stack_identifier,
		mach_vm_address_t *stack_frames_buffer,
		uint32_t max_stack_frames,
		uint32_t *count)
{
	malloc_register_stack_logger();
	if (msl.get_frames_for_stackid == NULL) {
		return KERN_FAILURE;
	}
	return msl.get_frames_for_stackid(task, stack_identifier,
			stack_frames_buffer, max_stack_frames, count, NULL);
}
/*
 * Legacy stub: expand a uniqued stack id into frames, also reporting
 * whether the last frame encodes a thread id.
 */
kern_return_t
__mach_stack_logging_get_frames_for_stackid(task_t task,
		uint64_t stack_identifier,
		mach_vm_address_t *stack_frames_buffer,
		uint32_t max_stack_frames,
		uint32_t *count,
		bool *last_frame_is_threadid)
{
	malloc_register_stack_logger();
	if (msl.get_frames_for_stackid == NULL) {
		return KERN_FAILURE;
	}
	return msl.get_frames_for_stackid(task, stack_identifier,
			stack_frames_buffer, max_stack_frames, count, last_frame_is_threadid);
}
/*
 * Legacy stub: read a stack out of a uniquing table via the MSL vtable.
 */
kern_return_t
__mach_stack_logging_uniquing_table_read_stack(struct backtrace_uniquing_table *uniquing_table,
		uint64_t stackid,
		mach_vm_address_t *out_frames_buffer,
		uint32_t *out_frames_count,
		uint32_t max_frames)
{
	malloc_register_stack_logger();
	return msl.uniquing_table_read_stack
			? msl.uniquing_table_read_stack(uniquing_table, stackid,
					out_frames_buffer, out_frames_count, max_frames)
			: KERN_FAILURE;
}
/*
 * Legacy stub: enumerate on-disk stack-log records via MSL's
 * msl_disk_stack_logs_enumerate_from_task entry point.
 */
kern_return_t
__mach_stack_logging_enumerate_records(task_t task,
		mach_vm_address_t address,
		void enumerator(mach_stack_logging_record_t, void *),
		void *context)
{
	typedef kern_return_t (*enumerate_fn_t)(task_t,
			mach_vm_address_t,
			void (*)(mach_stack_logging_record_t, void *),
			void *);

	malloc_register_stack_logger();
	if (msl.dylib == NULL) {
		return KERN_FAILURE;
	}
	enumerate_fn_t fn = _dlsym(msl.dylib, "msl_disk_stack_logs_enumerate_from_task");
	return fn ? fn(task, address, enumerator, context) : KERN_FAILURE;
}
/*
 * Legacy stub: copy a task's uniquing table via MSL; NULL when the
 * framework or its entry point is unavailable.
 */
struct backtrace_uniquing_table *
__mach_stack_logging_copy_uniquing_table(task_t task)
{
	malloc_register_stack_logger();
	if (msl.dylib == NULL) {
		return NULL;
	}
	struct backtrace_uniquing_table *(*copy_fn)(task_t) =
			_dlsym(msl.dylib, "msl_uniquing_table_copy_from_task");
	return copy_fn ? copy_fn(task) : NULL;
}
/*
 * Legacy stub: reconstruct a uniquing table from a serialized buffer via
 * MSL; NULL when the framework or its entry point is unavailable.
 */
struct backtrace_uniquing_table *
__mach_stack_logging_uniquing_table_copy_from_serialized(void *buffer, size_t size)
{
	malloc_register_stack_logger();
	if (msl.dylib == NULL) {
		return NULL;
	}
	struct backtrace_uniquing_table *(*copy_fn)(void *, size_t) =
			_dlsym(msl.dylib, "msl_uniquing_table_copy_from_serialized");
	return copy_fn ? copy_fn(buffer, size) : NULL;
}
void
__mach_stack_logging_uniquing_table_release(struct backtrace_uniquing_table *table)
{
malloc_register_stack_logger();
if (!msl.dylib) {
return;
}
void (*f) (struct backtrace_uniquing_table *table);
f = _dlsym(msl.dylib, "msl_uniquing_table_release");
if (f) {
f(table);
}
}
void
__mach_stack_logging_uniquing_table_retain(struct backtrace_uniquing_table *table)
{
malloc_register_stack_logger();
if (!msl.dylib) {
return;
}
void (*f) (struct backtrace_uniquing_table *table);
f = _dlsym(msl.dylib, "msl_uniquing_table_retain");
if (f) {
f(table);
}
}
/*
 * Legacy stub: report the size of a uniquing table via MSL.  Returns 0
 * when the framework or its entry point is unavailable.
 *
 * Bug fixes versus the previous revision: this looked up
 * "msl_uniquing_table_retain" (a void-returning function — the wrong
 * symbol entirely) instead of the sizeof entry point, and it called the
 * resolved pointer without checking either msl.dylib or the dlsym result
 * for NULL, crashing whenever MSL could not be loaded.
 */
extern
size_t
__mach_stack_logging_uniquing_table_sizeof(struct backtrace_uniquing_table *table)
{
	malloc_register_stack_logger();
	if (msl.dylib == NULL) {
		return 0;
	}
	size_t (*f) (struct backtrace_uniquing_table *table);
	f = _dlsym(msl.dylib, "msl_uniquing_table_sizeof");
	if (!f) {
		return 0;
	}
	return f(table);
}
/*
 * Legacy stub: serialize a uniquing table via MSL; NULL when the framework
 * or its entry point is unavailable.
 */
void *
__mach_stack_logging_uniquing_table_serialize(struct backtrace_uniquing_table *table, mach_vm_size_t *size)
{
	malloc_register_stack_logger();
	if (msl.dylib == NULL) {
		return NULL;
	}
	void *(*serialize_fn)(struct backtrace_uniquing_table *, mach_vm_size_t *) =
			_dlsym(msl.dylib, "msl_uniquing_table_serialize");
	return serialize_fn ? serialize_fn(table, size) : NULL;
}
/*
 * No-op stub: accepts and ignores its arguments, always reporting success.
 * NOTE(review): presumably retained for binary compatibility — confirm.
 */
kern_return_t
__mach_stack_logging_set_file_path(task_t task, char* file_path)
{
return KERN_SUCCESS;
}
// Legacy global flag; not consulted by the code visible here — presumably
// exported for binary compatibility with old stack-logging clients.
int stack_logging_enable_logging = 0;