#include "resolver.h"
#include "internal.h"
#if CONFIG_NANOZONE
#pragma mark -
#pragma mark Forward Declarations
#if OS_VARIANT_NOTRESOLVED
static void nanov2_statistics(nanozonev2_t *nanozone, malloc_statistics_t *stats);
#endif // OS_VARIANT_NOTRESOLVED
#pragma mark -
#pragma mark Externals for resolved functions
extern void *nanov2_allocate(nanozonev2_t *nanozone, size_t rounded_size,
boolean_t clear);
extern void nanov2_free_to_block(nanozonev2_t *nanozone, void *ptr,
nanov2_size_class_t size_class);
extern boolean_t nanov2_madvise_block(nanozonev2_t *nanozone,
nanov2_block_meta_t *block_metap, nanov2_block_t *blockp,
nanov2_size_class_t size_class);
extern size_t nanov2_pointer_size(nanozonev2_t *nanozone, void *ptr,
boolean_t allow_inner);
extern size_t nanov2_pressure_relief(nanozonev2_t *nanozone, size_t goal);
#if OS_VARIANT_RESOLVED
extern boolean_t nanov2_allocate_new_region(nanozonev2_t *nanozone);
#endif // OS_VARIANT_RESOLVED
#pragma mark -
#pragma mark Global Allocator State
// How the allocator chooses which block to allocate from when scanning
// block metadata.
typedef enum {
	NANO_SCAN_FIRST_FIT = 0,	// Stop at the first usable block found.
	NANO_SCAN_CAPACITY_BASED,	// Prefer blocks within a capacity band.
} nanov2_block_scan_policy_t;

// Defaults for the capacity-based scan. Min/max are validated as 0-100 in
// nanov2_set_block_scan_policy(), so they are occupancy percentages; the
// limit bounds how many candidate blocks one scan examines.
#define DEFAULT_SCAN_MIN_CAPACITY 20
#define DEFAULT_SCAN_MAX_CAPACITY 80
#define DEFAULT_SCAN_LIMIT 10

// When empty blocks are returned to the OS. Only the two pressure-driven
// policies allow nanov2_pressure_relief() to do any work.
typedef enum {
	NANO_MADVISE_IMMEDIATE = 0,		// Madvise as blocks empty.
	NANO_MADVISE_WARNING_PRESSURE,	// Madvise on warning-level pressure.
	NANO_MADVISE_CRITICAL_PRESSURE,	// Madvise on critical-level pressure.
} nanov2_madvise_policy_t;

// Runtime-tunable allocator policy. Populated from environment variables
// and boot-args in nanov2_init(); read by the resolved allocator variant.
typedef struct nanov2_policy_config_s {
	nanov2_block_scan_policy_t block_scan_policy;	// Scan strategy.
	int block_scan_min_capacity;	// Capacity band lower bound (0-100).
	int block_scan_max_capacity;	// Capacity band upper bound (0-100).
	int block_scan_limit;			// Max blocks examined per scan (>= 0).
	// Bitmask of size classes restricted to a single arena; bit k set
	// means size class k (see nanov2_set_single_arena_size_classes()).
	uint16_t single_arena_size_classes;
	nanov2_madvise_policy_t madvise_policy;			// See enum above.
} nanov2_policy_config_t;
#if OS_VARIANT_NOTRESOLVED
// The NOTRESOLVED variant owns the storage for the policy globals; the
// RESOLVED variant references them via extern below.
nanov2_madvise_policy_t nanov2_madvise_policy;
nanov2_policy_config_t nanov2_policy_config = {
	.block_scan_policy = NANO_SCAN_CAPACITY_BASED,
	.block_scan_min_capacity = DEFAULT_SCAN_MIN_CAPACITY,
	.block_scan_max_capacity = DEFAULT_SCAN_MAX_CAPACITY,
	.block_scan_limit = DEFAULT_SCAN_LIMIT,
	.single_arena_size_classes = 0,		// No size class restricted.
	.madvise_policy = NANO_MADVISE_IMMEDIATE,
};
#else // OS_VARIANT_NOTRESOLVED
extern nanov2_policy_config_t nanov2_policy_config;
extern nanov2_madvise_policy_t nanov2_madvise_policy;
#endif // OS_VARIANT_NOTRESOLVED

// Blocks are assigned to size classes in units of BLOCKS_PER_UNIT blocks.
#define BLOCKS_PER_UNIT_SHIFT 6
#define BLOCKS_PER_UNIT (1 << BLOCKS_PER_UNIT_SHIFT)

#if OS_VARIANT_NOTRESOLVED
// Number of block units assigned to each size class within an arena.
// Overridable at startup via nanov2_set_blocks_by_size_class(); entries
// must sum to TOTAL_BLOCK_UNITS (asserted in nanov2_configure_once()).
int block_units_by_size_class[] = {
	2, 10, 11, 10, 5, 3, 3, 4, 3, 2, 2, 2, 2, 2, 1, 2, };
MALLOC_STATIC_ASSERT(
	sizeof(block_units_by_size_class)/sizeof(block_units_by_size_class[0])
	== NANO_SIZE_CLASSES,
	"Size of block_units_by_size_class is incorrect");

#define TOTAL_BLOCK_UNITS (NANOV2_BLOCKS_PER_ARENA/BLOCKS_PER_UNIT)

// First/last logical block offset per size class and the unit-granular
// reverse map; all three are built in nanov2_configure_once().
int first_block_offset_by_size_class[NANO_SIZE_CLASSES];
int last_block_offset_by_size_class[NANO_SIZE_CLASSES];
int ptr_offset_to_size_class[TOTAL_BLOCK_UNITS];

// Number of allocation slots per block for each size class.
const int slots_by_size_class[] = {
	NANOV2_BLOCK_SIZE/(1 * NANO_REGIME_QUANTA_SIZE),
	NANOV2_BLOCK_SIZE/(2 * NANO_REGIME_QUANTA_SIZE),
	NANOV2_BLOCK_SIZE/(3 * NANO_REGIME_QUANTA_SIZE),
	NANOV2_BLOCK_SIZE/(4 * NANO_REGIME_QUANTA_SIZE),
	NANOV2_BLOCK_SIZE/(5 * NANO_REGIME_QUANTA_SIZE),
	NANOV2_BLOCK_SIZE/(6 * NANO_REGIME_QUANTA_SIZE),
	NANOV2_BLOCK_SIZE/(7 * NANO_REGIME_QUANTA_SIZE),
	NANOV2_BLOCK_SIZE/(8 * NANO_REGIME_QUANTA_SIZE),
	NANOV2_BLOCK_SIZE/(9 * NANO_REGIME_QUANTA_SIZE),
	NANOV2_BLOCK_SIZE/(10 * NANO_REGIME_QUANTA_SIZE),
	NANOV2_BLOCK_SIZE/(11 * NANO_REGIME_QUANTA_SIZE),
	NANOV2_BLOCK_SIZE/(12 * NANO_REGIME_QUANTA_SIZE),
	NANOV2_BLOCK_SIZE/(13 * NANO_REGIME_QUANTA_SIZE),
	NANOV2_BLOCK_SIZE/(14 * NANO_REGIME_QUANTA_SIZE),
	NANOV2_BLOCK_SIZE/(15 * NANO_REGIME_QUANTA_SIZE),
	NANOV2_BLOCK_SIZE/(16 * NANO_REGIME_QUANTA_SIZE), };
#else // OS_VARIANT_NOTRESOLVED
extern int block_units_by_size_class[];
extern int ptr_offset_to_size_class[];
extern int first_block_offset_by_size_class[];
extern int last_block_offset_by_size_class[];
extern const int slots_by_size_class[];
#endif // OS_VARIANT_NOTRESOLVED
#pragma mark -
#pragma mark Conversion and Mapping Inlines
// Maps a block index to the index of its metadata slot by swapping the two
// 6-bit halves of the 12-bit index. The mapping is its own inverse (see
// nanov2_meta_index_to_block_index()).
static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_meta_index_t
nanov2_block_index_to_meta_index(nanov2_block_index_t block_index)
{
	nanov2_block_index_t swapped = (block_index >> 6) | (block_index << 6);
	return swapped & 0xFFF;
}
// Inverse of nanov2_block_index_to_meta_index(): swaps the two 6-bit halves
// of the 12-bit metadata slot index back into a block index.
static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_block_index_t
nanov2_meta_index_to_block_index(nanov2_meta_index_t block_meta_index)
{
	nanov2_meta_index_t swapped =
			(block_meta_index >> 6) | (block_meta_index << 6);
	return swapped & 0xFFF;
}
// Returns the metadata slot index that corresponds to the arena metadata
// block itself. The metadata block's logical position is derived from the
// zone's ASLR cookie, so callers use this index to skip the metadata block
// when iterating arena_block_meta (see nanov2_pressure_relief()).
static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_meta_index_t
nanov2_metablock_meta_index(nanozonev2_t *nanozone)
{
	return nanov2_block_index_to_meta_index((nanov2_block_index_t)
			nanozone->aslr_cookie);
}
// Returns whether a block holds (or may hold) live allocations: active means
// its next_slot is not one of the non-allocatable sentinel values.
static MALLOC_ALWAYS_INLINE MALLOC_INLINE boolean_t
nanov2_is_block_active(nanov2_block_meta_t block_meta)
{
	boolean_t inactive = block_meta.next_slot == SLOT_NULL
			|| block_meta.next_slot == SLOT_MADVISING
			|| block_meta.next_slot == SLOT_MADVISED;
	return !inactive;
}
#if OS_VARIANT_RESOLVED
// Returns whether new allocations may be taken from a block: it must be
// marked in-use and not already full.
static MALLOC_ALWAYS_INLINE MALLOC_INLINE boolean_t
nanov2_can_allocate_from_block(nanov2_block_meta_t block_meta)
{
	if (!block_meta.in_use) {
		return FALSE;
	}
	return block_meta.next_slot != SLOT_FULL;
}
// Returns whether ptr lies in the Nano address range, identified by the
// fixed signature in the pointer's top bits.
static MALLOC_ALWAYS_INLINE MALLOC_INLINE boolean_t
nanov2_has_valid_signature(void *ptr)
{
	uintptr_t signature = ((uintptr_t)ptr) >> SHIFT_NANO_SIGNATURE;
	return signature == NANOZONE_SIGNATURE;
}
#endif // OS_VARIANT_RESOLVED
// Converts a logical (unscrambled) address to the real pointer by XORing
// with the zone's block-aligned ASLR cookie. XOR makes the mapping its own
// inverse, so the same function converts in both directions.
static MALLOC_ALWAYS_INLINE MALLOC_INLINE void *
nanov2_logical_address_to_ptr(nanozonev2_t *nanozone, void *laddr)
{
	return (void *)(((uintptr_t)laddr) ^ nanozone->aslr_cookie_aligned);
}
// Returns the allocation size, in bytes, for a size class. Class k serves
// allocations of (k + 1) quanta.
static MALLOC_ALWAYS_INLINE MALLOC_INLINE int
nanov2_size_from_size_class(nanov2_size_class_t size_class)
{
	int quanta = size_class + 1;
	return quanta * NANO_REGIME_QUANTA_SIZE;
}
#if OS_VARIANT_RESOLVED
// Returns the size class for an allocation size by rounding up to the next
// quantum. Inverse of nanov2_size_from_size_class() for quantum multiples.
// NOTE(review): size == 0 underflows to (nanov2_size_class_t)-1; callers in
// this file only pass non-zero sizes (nanov2_free() checks size first) -
// confirm before adding new callers.
static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_size_class_t
nanov2_size_class_from_size(size_t size)
{
	return (nanov2_size_class_t)howmany(size, NANO_REGIME_QUANTA_SIZE) - 1;
}
#endif // OS_VARIANT_RESOLVED
#if OS_VARIANT_RESOLVED
// Returns the base address of the block containing ptr.
static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_block_t *
nanov2_block_address_for_ptr(void *ptr)
{
	uintptr_t addr = (uintptr_t)ptr;
	return (void *)(addr & NANOV2_BLOCK_ADDRESS_MASK);
}
#endif // OS_VARIANT_RESOLVED
// Returns the base address of the arena containing ptr.
static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_arena_t *
nanov2_arena_address_for_ptr(void *ptr)
{
	uintptr_t addr = (uintptr_t)ptr;
	return (void *)(addr & NANOV2_ARENA_ADDRESS_MASK);
}
#if OS_VARIANT_RESOLVED
// Returns the base address of the region containing ptr.
static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_region_t *
nanov2_region_address_for_ptr(void *ptr)
{
	uintptr_t addr = (uintptr_t)ptr;
	return (nanov2_region_t *)(addr & NANOV2_REGION_ADDRESS_MASK);
}
#endif // OS_VARIANT_RESOLVED
// Returns the metadata block of the arena containing ptr. The metadata
// block's logical address is the arena base; applying the ASLR cookie
// yields its actual (scrambled) location within the arena.
static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_arena_metablock_t *
nanov2_metablock_address_for_ptr(nanozonev2_t *nanozone, void *ptr)
{
	return (nanov2_arena_metablock_t *)nanov2_logical_address_to_ptr(nanozone,
			nanov2_arena_address_for_ptr(ptr));
}
#if OS_VARIANT_RESOLVED
// Given a pointer to a block's metadata entry, returns the address of the
// block that entry describes.
static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_block_t *
nanov2_block_address_from_meta_ptr(nanozonev2_t *nanozone,
		nanov2_block_meta_t *block_metap)
{
	// block_metap lives inside the arena's metadata block, so the block
	// mask yields the metadata block base and the arena mask the arena base.
	nanov2_block_t *meta_block = nanov2_block_address_for_ptr(block_metap);
	nanov2_arena_t *arena = nanov2_arena_address_for_ptr(block_metap);
	// Entry index within the metadata block (pointer difference in
	// nanov2_block_meta_t units), mapped back to a block index.
	nanov2_meta_index_t meta_index =
			(nanov2_meta_index_t)(block_metap - (nanov2_block_meta_t *)meta_block);
	nanov2_block_index_t block_index = nanov2_meta_index_to_block_index(meta_index);
	return &arena->blocks[block_index];
}
#endif // OS_VARIANT_RESOLVED
// Returns the address of the block in the given arena whose metadata lives
// at meta_index. (nanozone is unused; kept for signature symmetry with the
// other mapping helpers.)
static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_block_t *
nanov2_block_address_from_meta_index(nanozonev2_t *nanozone,
		nanov2_arena_t *arena, nanov2_meta_index_t meta_index)
{
	return &arena->blocks[nanov2_meta_index_to_block_index(meta_index)];
}
// Extracts the block-index bit field from a pointer: the NANOV2_BLOCK_BITS
// bits immediately above the in-block offset bits.
static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_block_index_t
nanov2_block_index_for_ptr(void *ptr)
{
	return (nanov2_block_index_t)(((uintptr_t)ptr) >> NANOV2_OFFSET_BITS)
			& ((1 << NANOV2_BLOCK_BITS) - 1);
}
#if OS_VARIANT_RESOLVED
// Returns a pointer to the metadata entry describing the block that
// contains ptr: locate the arena's metadata block, then index it by the
// metadata slot corresponding to ptr's block index.
static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_block_meta_t *
nanov2_meta_ptr_for_ptr(nanozonev2_t *nanozone, void *ptr)
{
	nanov2_arena_metablock_t *meta_block = nanov2_metablock_address_for_ptr(
			nanozone, ptr);
	nanov2_block_index_t block_index = nanov2_block_index_for_ptr(ptr);
	nanov2_meta_index_t meta_index = nanov2_block_index_to_meta_index(block_index);
	return &meta_block->arena_block_meta[meta_index];
}
#endif // OS_VARIANT_RESOLVED
// Returns the first arena of a region. Arenas are laid out from the start
// of the region, so this is just the region base reinterpreted.
static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_arena_t *
nanov2_first_arena_for_region(nanov2_region_t *region)
{
	return (nanov2_arena_t *)region;
}
// Returns the first arena address past the arenas in use in a region.
static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_arena_t *
nanov2_limit_arena_for_region(nanozonev2_t *nanozone, nanov2_region_t *region)
{
	if (region != nanozone->current_region_base) {
		// Fully-populated region: its arenas run to the next region base.
		return nanov2_first_arena_for_region(region + 1);
	}
	// The current region may be only partially populated with arenas.
	return nanozone->current_region_next_arena;
}
// Returns the region linkage structure for a region. It is stored in the
// metadata slot of the region's first arena that would otherwise describe
// the metadata block itself (that slot needs no ordinary block metadata).
static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_region_linkage_t *
nanov2_region_linkage_for_region(nanozonev2_t *nanozone, nanov2_region_t *region)
{
	nanov2_arena_metablock_t *first_metadata_block =
			nanov2_metablock_address_for_ptr(nanozone, region);
	return (nanov2_region_linkage_t *)&first_metadata_block->arena_block_meta[
			nanov2_metablock_meta_index(nanozone)];
}
// Returns the next region in the zone's region chain, or NULL at the end.
// Regions link via a relative offset stored in their linkage structure;
// offset 0 terminates the chain.
static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_region_t *
nanov2_next_region_for_region(nanozonev2_t *nanozone, nanov2_region_t *region)
{
	nanov2_region_linkage_t *linkage =
			nanov2_region_linkage_for_region(nanozone, region);
	int offset = linkage->next_region_offset;
	if (!offset) {
		return NULL;
	}
	return region + offset;
}
// Returns the address of allocation slot slot_index in a block of the given
// size class.
static MALLOC_ALWAYS_INLINE MALLOC_INLINE void *
nanov2_slot_in_block_ptr(nanov2_block_t *block, nanov2_size_class_t size_class,
		int slot_index)
{
	uintptr_t base = (uintptr_t)block;
	int slot_offset = nanov2_size_from_size_class(size_class) * slot_index;
	return (void *)(base + slot_offset);
}
#if OS_VARIANT_RESOLVED
// Returns the index of the slot containing ptr within a block of the given
// size class. ptr must lie within the block.
static MALLOC_ALWAYS_INLINE MALLOC_INLINE int
nanov2_slot_index_in_block(nanov2_block_t *block, nanov2_size_class_t size_class,
		void *ptr)
{
	int byte_offset = (int)((uintptr_t)ptr - (uintptr_t)block);
	return byte_offset / nanov2_size_from_size_class(size_class);
}
#endif // OS_VARIANT_RESOLVED
// Returns the size class of the block containing ptr: unscramble the
// pointer's block index with the ASLR cookie, then look it up in the
// unit-granular offset-to-size-class table.
static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_size_class_t
nanov2_size_class_for_ptr(nanozonev2_t *nanozone, void *ptr)
{
	nanov2_block_index_t block =
			(int)(nanov2_block_index_for_ptr(ptr) ^ nanozone->aslr_cookie);
	return ptr_offset_to_size_class[block >> BLOCKS_PER_UNIT_SHIFT];
}
#if OS_VARIANT_NOTRESOLVED
// Returns the size class of the block whose metadata lives at meta_index:
// recover the block index, unscramble it, and consult the size class table.
static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_size_class_t
nanov2_size_class_for_meta_index(nanozonev2_t *nanozone, nanov2_meta_index_t meta_index)
{
	nanov2_block_index_t block_index = nanov2_meta_index_to_block_index(meta_index);
	int logical_block_index = (int)(block_index ^ nanozone->aslr_cookie);
	return ptr_offset_to_size_class[logical_block_index >> BLOCKS_PER_UNIT_SHIFT];
}
#endif // OS_VARIANT_NOTRESOLVED
#if OS_VARIANT_RESOLVED
// Returns the metadata entry for the first block (in logical order, before
// ASLR scrambling) assigned to size_class in the given arena.
static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_block_meta_t *
nanov2_first_block_for_size_class_in_arena(nanozonev2_t *nanozone,
		nanov2_size_class_t size_class, nanov2_arena_t *arena)
{
	// Logical offset of the size class's first block within any arena.
	int block_offset = first_block_offset_by_size_class[size_class];
	nanov2_arena_metablock_t *meta_blockp =
			nanov2_metablock_address_for_ptr(nanozone, arena);
	// Apply the ASLR cookie to get the physical block index, then map it
	// to the corresponding metadata slot.
	nanov2_block_index_t block_index =
			(nanov2_block_index_t)(block_offset ^ nanozone->aslr_cookie);
	nanov2_meta_index_t meta_index = nanov2_block_index_to_meta_index(block_index);
	return &meta_blockp->arena_block_meta[meta_index];
}
// Returns the metadata entry for the next block of the same size class,
// wrapping from the class's last block back to its first. If wrapped is
// non-NULL, *wrapped is set to whether the wrap occurred.
static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_block_meta_t *
nanov2_next_block_for_size_class(nanozonev2_t *nanozone,
		nanov2_size_class_t size_class, nanov2_block_meta_t *meta_blockp,
		boolean_t *wrapped)
{
	// Base of the metadata block containing meta_blockp and the entry's
	// index within it.
	nanov2_block_meta_t *base_metap = (nanov2_block_meta_t *)
			(((uintptr_t)meta_blockp) & (NANOV2_BLOCK_ADDRESS_MASK));
	nanov2_meta_index_t meta_index = (int)(meta_blockp - base_metap);

	// Work in logical (cookie-free) index space so the size class limits
	// can be compared directly.
	nanov2_block_index_t logical_index = nanov2_meta_index_to_block_index(meta_index);
	logical_index ^= nanozone->aslr_cookie;
	int last_offset = last_block_offset_by_size_class[size_class];
	boolean_t at_last = logical_index == last_offset;
	if (wrapped) {
		*wrapped = at_last;
	}
	if (at_last) {
		logical_index = first_block_offset_by_size_class[size_class];
	} else {
		logical_index++;
	}

	// Re-apply the cookie and map back to a metadata slot.
	nanov2_block_index_t physical_index =
			(nanov2_block_index_t)(logical_index ^ nanozone->aslr_cookie);
	return &base_metap[nanov2_block_index_to_meta_index(physical_index)];
}
// Returns the metadata entry for the previous block of the same size class,
// wrapping from the class's first block back to its last. If wrapped is
// non-NULL, *wrapped is set to whether the wrap occurred.
static MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_block_meta_t *
nanov2_previous_block_for_size_class(nanozonev2_t *nanozone,
		nanov2_size_class_t size_class, nanov2_block_meta_t *meta_blockp,
		boolean_t *wrapped)
{
	// Base of the metadata block containing meta_blockp and the entry's
	// index within it.
	nanov2_block_meta_t *base_metap = (nanov2_block_meta_t *)
			(((uintptr_t)meta_blockp) & (NANOV2_BLOCK_ADDRESS_MASK));
	nanov2_meta_index_t meta_index = (int)(meta_blockp - base_metap);

	// Work in logical (cookie-free) index space so the size class limits
	// can be compared directly.
	nanov2_block_index_t logical_index = nanov2_meta_index_to_block_index(meta_index);
	logical_index ^= nanozone->aslr_cookie;
	int first_offset = first_block_offset_by_size_class[size_class];
	boolean_t at_first = logical_index == first_offset;
	if (wrapped) {
		*wrapped = at_first;
	}
	if (at_first) {
		logical_index = last_block_offset_by_size_class[size_class];
	} else {
		logical_index--;
	}

	// Re-apply the cookie and map back to a metadata slot.
	nanov2_block_index_t physical_index =
			(nanov2_block_index_t)(logical_index ^ nanozone->aslr_cookie);
	return &base_metap[nanov2_block_index_to_meta_index(physical_index)];
}
// Atomically clears the in_use bit of *block_metap while leaving every
// other field unchanged.
static MALLOC_ALWAYS_INLINE MALLOC_INLINE void
nanov2_turn_off_in_use(nanov2_block_meta_t *block_metap)
{
	// AND mask with all bits set except in_use.
	static nanov2_block_meta_t mask = {
		.in_use = 0,
		.next_slot = ~0,
		.free_count = ~0,
		.gen_count = ~0,
	};
	// NOTE(review): the casts type-pun nanov2_block_meta_t to uint32_t, so
	// this relies on the struct being exactly 32 bits - confirm a static
	// assert exists in the header.
	os_atomic_and((uint32_t *)block_metap, *(uint32_t *)&mask, relaxed);
}
#pragma mark -
#pragma mark Policy Functions
// Returns the index of the allocation band for the calling thread, derived
// from the current CPU number (optionally collapsed by the hyper-thread
// shift) and bounded by the configured number of magazines.
static MALLOC_ALWAYS_INLINE MALLOC_INLINE int
nanov2_get_allocation_block_index(void)
{
#if CONFIG_NANO_USES_HYPER_SHIFT
	if (os_likely(nano_common_max_magazines_is_ncpu)) {
		// Fast path: one band per (physical) CPU - no modulo needed.
		return _os_cpu_number() >> hyper_shift;
	}
#else // CONFIG_NANO_USES_HYPER_SHIFT
	if (os_likely(nano_common_max_magazines_is_ncpu)) {
		// Fast path: one band per CPU - no modulo needed.
		return _os_cpu_number();
	}
#endif // CONFIG_NANO_USES_HYPER_SHIFT
	unsigned int shift = 0;
#if CONFIG_NANO_USES_HYPER_SHIFT
	shift = hyper_shift;
#endif // CONFIG_NANO_USES_HYPER_SHIFT
	// _os_cpu_number_override is a test hook; -1 means "not overridden".
	if (os_likely(_os_cpu_number_override == -1)) {
		return (_os_cpu_number() >> shift) % nano_common_max_magazines;
	}
	return (_os_cpu_number_override >> shift) % nano_common_max_magazines;
}
#endif // OS_VARIANT_RESOLVED
#pragma mark -
#pragma mark Allocator Initialization
#if OS_VARIANT_NOTRESOLVED
// Environment variable and boot-arg names for the tunables parsed in
// nanov2_init(), plus the accepted madvise policy value strings.
static const char madvise_policy_env[] = "MallocNanoMadvisePolicy";
static const char madvise_policy_bootarg[] = "nanov2_madvise_policy";
static const char madvise_immediate[] = "immediate";
static const char madvise_warning[] = "warning";
static const char madvise_critical[] = "critical";
static const char single_arena_env[] = "MallocNanoSingleArena";
static const char single_arena_bootarg[] = "nanov2_single_arena";
static const char scan_policy_env[] = "MallocNanoScanPolicy";
static const char scan_policy_bootarg[] = "nanov2_scan_policy";
static const char size_class_blocks_env[] = "MallocNanoSizeClassBlocks";
static const char size_class_blocks_bootarg[] = "nanov2_size_class_blocks";
// Sets nanov2_madvise_policy from the given setting value ("immediate",
// "warning" or "critical"); NULL or unrecognized values leave the default
// (NANO_MADVISE_IMMEDIATE). "name" identifies the setting in error reports.
static void
nanov2_set_madvise_policy(const char *name, const char *ptr)
{
	nanov2_madvise_policy_t madvise_policy = NANO_MADVISE_IMMEDIATE;
	if (ptr) {
		// Prefix comparison: ptr may point into the raw boot-args string,
		// which is not NUL-terminated at the value's end. NOTE(review):
		// this also accepts trailing characters (e.g. "immediately") -
		// presumed intentional, confirm before tightening.
		if (!strncmp(ptr, madvise_immediate, sizeof(madvise_immediate) - 1)) {
			madvise_policy = NANO_MADVISE_IMMEDIATE;
		} else if (!strncmp(ptr, madvise_warning, sizeof(madvise_warning) - 1)) {
			madvise_policy = NANO_MADVISE_WARNING_PRESSURE;
		} else if (!strncmp(ptr, madvise_critical, sizeof(madvise_critical) - 1)) {
			madvise_policy = NANO_MADVISE_CRITICAL_PRESSURE;
		} else {
			malloc_report(ASL_LEVEL_ERR,
					"%s value (%s) invalid - ignored.\n", name, ptr);
		}
	}
	nanov2_madvise_policy = madvise_policy;
}
// Sets nanov2_policy_config.single_arena_size_classes from a ':'-separated
// list of allocation sizes. Each size must be a multiple of
// NANO_REGIME_QUANTA_SIZE in [NANO_REGIME_QUANTA_SIZE, NANO_MAX_SIZE]; each
// contributes one bit (size class) to the mask. Invalid input is reported
// and the whole setting ignored. "name" identifies the setting in reports.
static void
nanov2_set_single_arena_size_classes(const char *name, const char *ptr)
{
	uint16_t single_arena_size_classes = 0;
	if (ptr) {
		const char *value = ptr;	// Original text, for error reporting.
		const char *endp;
		boolean_t failed = FALSE;
		while (*ptr) {
			long size = malloc_common_convert_to_long(ptr, &endp);
			if (endp != ptr) {
				// A number must be followed by ':' or the end of input.
				if (*endp && *endp != ':') {
					failed = TRUE;
					break;
				}
				if (size > NANO_MAX_SIZE || size < NANO_REGIME_QUANTA_SIZE ||
						(size % NANO_REGIME_QUANTA_SIZE) != 0) {
					failed = TRUE;
					break;
				}
				single_arena_size_classes |=
						1 << ((size/NANO_REGIME_QUANTA_SIZE) - 1);
			} else {
				// No number at this position. (Was lowercase 'true'; use
				// boolean_t TRUE for consistency with the rest of the file.)
				failed = TRUE;
				break;
			}
			if (!*endp) {
				break;
			}
			ptr = endp + 1;		// Skip the ':' separator.
		}
		if (failed) {
			malloc_report(ASL_LEVEL_ERR,
					"%s value (%s) invalid - ignored.\n", name, value);
			single_arena_size_classes = 0;
		}
	}
	nanov2_policy_config.single_arena_size_classes = single_arena_size_classes;
}
// Sets the block scan policy from a setting value. Accepted forms:
//   "firstfit"                  - first-fit scanning
//   "min<n>[:max<n>][:lim<n>]"  - capacity-based scanning, where min/max are
//                                 occupancy percentages (0-100, min <= max)
//                                 and lim is a non-negative scan limit; keys
//                                 may appear in any order, each at most once.
// Invalid input is reported and ignored, leaving the defaults in place.
// "name" identifies the setting in error reports.
static void
nanov2_set_block_scan_policy(const char *name, const char *ptr)
{
	static char first_fit_key[] = "firstfit";
	static char min_key[] = "min";
	static char max_key[] = "max";
	static char lim_key[] = "lim";
	nanov2_block_scan_policy_t block_scan_policy = NANO_SCAN_CAPACITY_BASED;
	int scan_min_capacity = DEFAULT_SCAN_MIN_CAPACITY;
	int scan_max_capacity = DEFAULT_SCAN_MAX_CAPACITY;
	int scan_limit = DEFAULT_SCAN_LIMIT;
	const char *endp;
	boolean_t failed = FALSE;
	boolean_t min_found = FALSE;
	boolean_t max_found = FALSE;
	boolean_t lim_found = FALSE;
	const char *value = ptr;	// Original text, for error reporting.
	if (ptr) {
		if (!strcmp(ptr, first_fit_key)) {
			block_scan_policy = NANO_SCAN_FIRST_FIT;
		} else {
			// Parse key<number> items separated by ':'. The parsed number
			// is named "parsed" rather than "value" to avoid shadowing the
			// outer error-reporting pointer (-Wshadow).
			while (!failed && ptr && *ptr) {
				if (!strncmp(ptr, min_key, sizeof(min_key) - 1) && !min_found) {
					min_found = TRUE;
					ptr += sizeof(min_key) - 1;
					long parsed = malloc_common_convert_to_long(ptr, &endp);
					if (ptr != endp && parsed >= 0 && parsed <= 100) {
						scan_min_capacity = (int)parsed;
						ptr = endp;
					} else {
						failed = TRUE;
					}
				} else if (!strncmp(ptr, max_key, sizeof(max_key) - 1)
						&& !max_found) {
					max_found = TRUE;
					ptr += sizeof(max_key) - 1;
					long parsed = malloc_common_convert_to_long(ptr, &endp);
					if (ptr != endp && parsed >= 0 && parsed <= 100) {
						scan_max_capacity = (int)parsed;
						ptr = endp;
					} else {
						failed = TRUE;
					}
				} else if (!strncmp(ptr, lim_key, sizeof(lim_key) - 1)
						&& !lim_found) {
					lim_found = TRUE;
					ptr += sizeof(lim_key) - 1;
					long parsed = malloc_common_convert_to_long(ptr, &endp);
					if (ptr != endp && parsed >= 0) {
						scan_limit = (int)parsed;
						ptr = endp;
					} else {
						failed = TRUE;
					}
				} else {
					failed = TRUE;
				}
				// Items must be separated by ':'.
				if (*ptr) {
					if (*ptr == ':') {
						ptr++;
					} else {
						failed = TRUE;
					}
				}
			}
			if (!failed && scan_min_capacity > scan_max_capacity) {
				failed = TRUE;
			}
		}
	}
	if (!failed) {
		nanov2_policy_config.block_scan_policy = block_scan_policy;
		nanov2_policy_config.block_scan_min_capacity = scan_min_capacity;
		nanov2_policy_config.block_scan_max_capacity = scan_max_capacity;
		nanov2_policy_config.block_scan_limit = scan_limit;
	} else {
		malloc_report(ASL_LEVEL_ERR, "%s value (%s) invalid - ignored.\n",
				name, value);
	}
}
// Overrides block_units_by_size_class from a comma-separated list of
// NANO_SIZE_CLASSES unit counts that must sum to TOTAL_BLOCK_UNITS.
// Invalid input is reported and the whole setting ignored. "name"
// identifies the setting in error reports.
static void
nanov2_set_blocks_by_size_class(const char *name, const char *ptr)
{
	int new_total_block_units = 0;
	int new_blocks_by_size_class[NANO_SIZE_CLASSES];
	MALLOC_STATIC_ASSERT(
			sizeof(new_blocks_by_size_class) == sizeof(block_units_by_size_class),
			"Size mismatch in nanov2_set_blocks_by_size_class()");
	const char *endp;
	const char *sptr = ptr;	// Original text, for error reporting.
	for (int i = 0; i < NANO_SIZE_CLASSES; i++) {
		int count = (int)malloc_common_convert_to_long(ptr, &endp);
		// The final entry must be followed by NUL, the others by ','.
		char separator = i == NANO_SIZE_CLASSES - 1 ? '\0' : ',';
		// Reject malformed numbers, bad separators and out-of-range counts.
		// The count < 0 check is new: a negative count previously passed
		// validation and could only be caught (or missed, if counts still
		// summed correctly) by the total check below, corrupting the block
		// offset tables built in nanov2_configure_once().
		if (endp == ptr || *endp != separator || count < 0 ||
				count > TOTAL_BLOCK_UNITS) {
			malloc_report(ASL_LEVEL_ERR,
					"%s value invalid: [%s] - ignored.\n", name, sptr);
			return;
		}
		new_blocks_by_size_class[i] = count;
		new_total_block_units += count;
		ptr = endp + 1;
	}
	if (new_total_block_units != TOTAL_BLOCK_UNITS) {
		malloc_report(ASL_LEVEL_ERR,
				"%s value invalid - values must sum to %d, not %d - ignored.\n",
				name, TOTAL_BLOCK_UNITS, new_total_block_units);
	} else {
		memcpy(block_units_by_size_class, new_blocks_by_size_class,
				sizeof(block_units_by_size_class));
	}
}
// One-time initialization of the Nano V2 policy settings. For each tunable,
// the environment variable takes precedence over the boot-arg.
void
nanov2_init(const char *envp[], const char *apple[], const char *bootargs)
{
	char value_buf[256];
	// Madvise policy. NOTE(review): this lookup uses the non-copying
	// malloc_common_value_for_key (the value is only prefix-compared in
	// nanov2_set_madvise_policy), while the others copy into value_buf -
	// confirm intentional before changing either.
	const char *value = _simple_getenv(envp, madvise_policy_env);
	const char *name = madvise_policy_env;
	if (!value) {
		value = malloc_common_value_for_key(bootargs, madvise_policy_bootarg);
		if (value) {
			name = madvise_policy_bootarg;
		}
	}
	nanov2_set_madvise_policy(name, value);

	// Single-arena size classes.
	name = single_arena_env;
	value = _simple_getenv(envp, single_arena_env);
	if (!value) {
		value = malloc_common_value_for_key_copy(bootargs, single_arena_bootarg,
				value_buf, sizeof(value_buf));
		if (value) {
			name = single_arena_bootarg;
		}
	}
	nanov2_set_single_arena_size_classes(name, value);

	// Block scan policy.
	name = scan_policy_env;
	value = _simple_getenv(envp, scan_policy_env);
	if (!value) {
		value = malloc_common_value_for_key_copy(bootargs, scan_policy_bootarg,
				value_buf, sizeof(value_buf));
		if (value) {
			name = scan_policy_bootarg;
		}
	}
	nanov2_set_block_scan_policy(name, value);

	// Per-size-class block unit counts (only applied when present).
	name = size_class_blocks_env;
	value = _simple_getenv(envp, size_class_blocks_env);
	if (!value) {
		value = malloc_common_value_for_key_copy(bootargs, size_class_blocks_bootarg,
				value_buf, sizeof(value_buf));
		if (value) {
			name = size_class_blocks_bootarg;
		}
	}
	if (value) {
		nanov2_set_blocks_by_size_class(name, value);
	}
}
// Builds the derived per-size-class tables from block_units_by_size_class:
// first/last logical block offsets for each size class, and the
// unit-granular reverse map from block offset to size class.
static void
nanov2_configure_once(void *context MALLOC_UNUSED)
{
	// The configured unit counts must exactly cover an arena.
	int total_blocks = 0;
	for (int i = 0; i < NANO_SIZE_CLASSES; i++) {
		total_blocks += block_units_by_size_class[i] * BLOCKS_PER_UNIT;
	}
	MALLOC_ASSERT(total_blocks == NANOV2_BLOCKS_PER_ARENA);

	// Size class 0 starts at logical block 1: logical block 0 maps (via
	// the ASLR cookie) to the arena metadata block, so it is never used
	// for allocations. Its unit allotment still ends at units * 64 - 1.
	int next_offset = 1;
	first_block_offset_by_size_class[0] = next_offset;
	next_offset = block_units_by_size_class[0] * BLOCKS_PER_UNIT;
	last_block_offset_by_size_class[0] = next_offset - 1;
	// Remaining size classes are packed back to back.
	for (int i = 1; i < NANO_SIZE_CLASSES; i++) {
		first_block_offset_by_size_class[i] = next_offset;
		next_offset += block_units_by_size_class[i] * BLOCKS_PER_UNIT;
		last_block_offset_by_size_class[i] = next_offset - 1;
	}
	MALLOC_ASSERT(next_offset == NANOV2_BLOCKS_PER_ARENA);

	// Reverse map: one entry per unit of BLOCKS_PER_UNIT blocks.
	int next_index = 0;
	for (int i = 0; i < NANO_SIZE_CLASSES; i++) {
		int block_units = block_units_by_size_class[i];
		for (int j = 0; j < block_units; j++) {
			ptr_offset_to_size_class[next_index++] = i;
		}
	}
	MALLOC_ASSERT(next_index == NANOV2_BLOCKS_PER_ARENA/BLOCKS_PER_UNIT);
}
// Ensures the derived tables exist before they are used. Safe to call from
// multiple threads; the work runs at most once per process.
static os_once_t nanov2_config_predicate;
void
nanov2_configure(void)
{
	os_once(&nanov2_config_predicate, NULL, nanov2_configure_once);
}
#endif // OS_VARIANT_NOTRESOLVED
#pragma mark -
#pragma mark Zone Functions
#if OS_VARIANT_RESOLVED
// Zone size entry point: returns the allocation size of ptr, consulting
// Nano first and falling back to the helper zone for pointers Nano does
// not own (nanov2_pointer_size() returns 0 for those).
size_t
nanov2_size(nanozonev2_t *nanozone, const void *ptr)
{
	size_t nano_size = nanov2_pointer_size(nanozone, (void *)ptr, FALSE);
	if (nano_size) {
		return nano_size;
	}
	return nanozone->helper_zone->size(nanozone->helper_zone, ptr);
}
// Zone malloc entry point. Sizes within the Nano range are served from the
// Nano allocator; anything else (or Nano allocation failure) goes to the
// helper zone.
void *
nanov2_malloc(nanozonev2_t *nanozone, size_t size)
{
	size_t rounded_size = _nano_common_good_size(size);
	if (rounded_size > NANO_MAX_SIZE) {
		return nanozone->helper_zone->malloc(nanozone->helper_zone, size);
	}
	void *ptr = nanov2_allocate(nanozone, rounded_size, FALSE);
	if (!ptr) {
		// Nano could not satisfy the request (e.g. address space exhausted).
		return nanozone->helper_zone->malloc(nanozone->helper_zone, size);
	}
	// Scribble only the requested bytes, and only for non-zero sizes.
	if (os_unlikely(size && (nanozone->debug_flags & MALLOC_DO_SCRIBBLE))) {
		memset(ptr, SCRIBBLE_BYTE, size);
	}
	return ptr;
}
// Zone free_definite_size entry point: the caller vouches that ptr was
// allocated with the given size. Pointers outside the Nano signature range
// (including NULL) are forwarded to the helper zone.
void
nanov2_free_definite_size(nanozonev2_t *nanozone, void *ptr, size_t size)
{
	if (!ptr || !nanov2_has_valid_signature(ptr)) {
		return nanozone->helper_zone->free_definite_size(nanozone->helper_zone,
				ptr, size);
	}
	if (os_unlikely(nanozone->debug_flags & MALLOC_DO_SCRIBBLE)) {
		memset(ptr, SCRABBLE_BYTE, size);
	}
	nanov2_free_to_block(nanozone, ptr, nanov2_size_class_from_size(size));
}
// Zone free entry point. A pointer is freed through Nano only when it has
// the Nano signature AND Nano actually owns it (non-zero pointer size);
// everything else is forwarded to the helper zone.
void
nanov2_free(nanozonev2_t *nanozone, void *ptr)
{
	do {
		if (!ptr || !nanov2_has_valid_signature(ptr)) {
			break;
		}
		size_t size = nanov2_pointer_size(nanozone, ptr, FALSE);
		if (!size) {
			break;
		}
		if (os_unlikely(nanozone->debug_flags & MALLOC_DO_SCRIBBLE)) {
			memset(ptr, SCRABBLE_BYTE, size);
		}
		nanov2_free_to_block(nanozone, ptr, nanov2_size_class_from_size(size));
		return;
	} while (0);
	nanozone->helper_zone->free(nanozone->helper_zone, ptr);
}
// Zone calloc entry point. calloc_get_size() performs the overflow-checked
// num_items * size computation; small requests are served zeroed from Nano,
// with fallback to the helper zone.
void *
nanov2_calloc(nanozonev2_t *nanozone, size_t num_items, size_t size)
{
	size_t total_bytes;
	if (calloc_get_size(num_items, size, 0, &total_bytes)) {
		// Multiplication overflowed.
		return NULL;
	}
	if (total_bytes <= NANO_MAX_SIZE) {
		void *ptr = nanov2_allocate(nanozone,
				_nano_common_good_size(total_bytes), TRUE);
		if (ptr) {
			return ptr;
		}
	}
	return nanozone->helper_zone->calloc(nanozone->helper_zone, 1, total_bytes);
}
#endif // OS_VARIANT_RESOLVED
#if OS_VARIANT_NOTRESOLVED
// Zone valloc entry point. Page-aligned allocations are always larger than
// Nano's maximum size, so delegate directly to the helper zone.
static void *
nanov2_valloc(nanozonev2_t *nanozone, size_t size)
{
	malloc_zone_t *helper = nanozone->helper_zone;
	return helper->valloc(helper, size);
}
#endif // OS_VARIANT_NOTRESOLVED
#if OS_VARIANT_RESOLVED
// Zone realloc entry point for pointers owned by Nano. Handles the usual
// realloc contract: NULL acts as malloc; size 0 frees and returns a minimal
// allocation; shrinks that stay within the same good-size band are done in
// place; everything else allocates, copies and frees.
void *
nanov2_realloc(nanozonev2_t *nanozone, void *ptr, size_t new_size)
{
	if (ptr == NULL) {
		// realloc(NULL, n) == malloc(n).
		return nanov2_malloc(nanozone, new_size);
	}
	size_t old_size = nanov2_pointer_size(nanozone, ptr, FALSE);
	if (!old_size) {
		// Not a Nano pointer - delegate entirely to the helper zone.
		return nanozone->helper_zone->realloc(nanozone->helper_zone, ptr, new_size);
	}
	void *new_ptr;
	if (new_size > NANO_MAX_SIZE) {
		// Growing out of the Nano range: the new block must come from the
		// helper zone. Original ptr is left intact on failure.
		new_ptr = nanozone->helper_zone->malloc(nanozone->helper_zone, new_size);
		if (!new_ptr) {
			return NULL;
		}
	} else if (!new_size) {
		// realloc(p, 0): free p and return a minimal allocation.
		nanov2_free(nanozone, ptr);
		return nanov2_malloc(nanozone, 0);
	} else {
		size_t new_good_size = _nano_common_good_size(new_size);
		// Reallocate when growing, or when shrinking below half the old
		// size (to avoid wasting most of the slot).
		if (new_good_size > old_size || new_good_size <= old_size/2) {
			new_ptr = nanov2_malloc(nanozone, new_good_size);
			if (!new_ptr) {
				return NULL;
			}
		} else {
			// In-place shrink (or same size): keep the existing slot.
			if (new_size != old_size) {
				MALLOC_ASSERT(new_size < old_size);
				if (os_unlikely(nanozone->debug_flags & MALLOC_DO_SCRIBBLE)) {
					// Scribble the tail that is no longer in use.
					memset(ptr + new_size, SCRABBLE_BYTE, old_size - new_size);
				}
			}
			return ptr;
		}
	}
	// Copy into the replacement allocation and release the original.
	MALLOC_ASSERT(new_ptr);
	memcpy(new_ptr, ptr, MIN(old_size, new_size));
	nanov2_free(nanozone, ptr);
	return new_ptr;
}
#endif // OS_VARIANT_RESOLVED
#if OS_VARIANT_NOTRESOLVED
// Zone destroy entry point: tears down the helper zone, then releases the
// pages backing the nanozone structure itself.
static void
nanov2_destroy(nanozonev2_t *nanozone)
{
	// Capture debug_flags before the nanozone pages are deallocated.
	unsigned debug_flags = nanozone->debug_flags;
	nanozone->helper_zone->destroy(nanozone->helper_zone);
	nano_common_deallocate_pages((void *)nanozone, NANOZONEV2_ZONE_PAGED_SIZE,
			debug_flags);
}
#endif // OS_VARIANT_NOTRESOLVED
#if OS_VARIANT_RESOLVED
// Zone claimed_address entry point: reports whether ptr lies within memory
// owned by this zone. allow_inner is TRUE, so interior pointers count.
boolean_t
nanov2_claimed_address(nanozonev2_t *nanozone, void *ptr)
{
	size_t size = nanov2_pointer_size(nanozone, ptr, TRUE);
	return size != 0;
}
// Zone batch_malloc entry point: allocates up to "count" blocks of "size"
// bytes into results[], drawing from Nano while possible and asking the
// helper zone to fill any remainder. Returns the number allocated.
unsigned
nanov2_batch_malloc(nanozonev2_t *nanozone, size_t size, void **results,
		unsigned count)
{
	unsigned allocated = 0;
	size_t rounded_size = _nano_common_good_size(size);
	if (rounded_size <= NANO_MAX_SIZE) {
		for (; allocated < count; allocated++) {
			void *ptr = nanov2_allocate(nanozone, rounded_size, FALSE);
			if (!ptr) {
				break;
			}
			results[allocated] = ptr;
		}
		if (allocated == count) {
			return count;
		}
	}
	// Nano came up short (or the size is out of range): let the helper
	// zone try to provide the rest.
	return allocated + nanozone->helper_zone->batch_malloc(
			nanozone->helper_zone, size, results + allocated, count - allocated);
}
// Zone batch_free entry point: frees every non-NULL entry of to_be_freed[],
// walking the array from the last entry to the first (as the original did).
void
nanov2_batch_free(nanozonev2_t *nanozone, void **to_be_freed, unsigned count)
{
	for (unsigned i = count; i > 0;) {
		void *ptr = to_be_freed[--i];
		if (ptr) {
			nanov2_free(nanozone, ptr);
		}
	}
}
#endif // OS_VARIANT_RESOLVED
#if OS_VARIANT_NOTRESOLVED
// Zone memalign entry point. Nano does not serve aligned allocations;
// delegate directly to the helper zone.
static void *
nanov2_memalign(nanozonev2_t *nanozone, size_t alignment, size_t size)
{
	malloc_zone_t *helper = nanozone->helper_zone;
	return helper->memalign(helper, alignment, size);
}
#endif // OS_VARIANT_NOTRESOLVED
#if OS_VARIANT_RESOLVED
// Memory-pressure handler: madvises blocks marked SLOT_CAN_MADVISE back to
// the OS until "goal" bytes have been released (goal 0 means "everything").
// Only active under the pressure-driven madvise policies; with
// NANO_MADVISE_IMMEDIATE blocks are already released as they empty.
// Returns the number of bytes madvised.
size_t
nanov2_pressure_relief(nanozonev2_t *nanozone, size_t goal)
{
	if (nanov2_madvise_policy != NANO_MADVISE_WARNING_PRESSURE
			&& nanov2_madvise_policy != NANO_MADVISE_CRITICAL_PRESSURE) {
		return 0;
	}
	const char *name = nanozone->basic_zone.zone_name;
	MAGMALLOC_PRESSURERELIEFBEGIN((void *)nanozone, name, (int)goal);
	MALLOC_TRACE(TRACE_nano_memory_pressure | DBG_FUNC_START,
			(uint64_t)nanozone, goal, 0, 0);
	size_t total = 0;
	// Walk every arena of every region.
	nanov2_region_t *region = nanozone->first_region_base;
	nanov2_meta_index_t metablock_meta_index = nanov2_metablock_meta_index(nanozone);
	while (region) {
		nanov2_arena_t *arena = nanov2_first_arena_for_region(region);
		nanov2_arena_t *arena_after_region = nanov2_limit_arena_for_region(nanozone, region);
		while (arena < arena_after_region) {
			nanov2_arena_metablock_t *meta_blockp =
					nanov2_metablock_address_for_ptr(nanozone, arena);
			nanov2_block_meta_t *block_metap = &meta_blockp->arena_block_meta[0];
			// The madvise lock is held per arena, serializing against
			// other madvise activity; it is dropped between arenas.
			_malloc_lock_lock(&nanozone->madvise_lock);
			for (nanov2_meta_index_t i = 0; i < NANOV2_BLOCKS_PER_ARENA;
					i++, block_metap++) {
				// Skip the slot describing the metadata block itself.
				if (i != metablock_meta_index) {
					nanov2_block_meta_t meta = os_atomic_load(block_metap, relaxed);
					if (meta.next_slot == SLOT_CAN_MADVISE) {
						nanov2_block_t *blockp = nanov2_block_address_from_meta_index(
								nanozone, arena, i);
						if (nanov2_madvise_block(nanozone, block_metap,
								blockp, nanov2_size_class_for_ptr(nanozone, blockp))) {
							total += NANOV2_BLOCK_SIZE;
						}
					}
				}
			}
			_malloc_lock_unlock(&nanozone->madvise_lock);
			// Goal reached - stop early (checked with the lock released).
			if (goal && total >= goal) {
				goto done;
			}
			arena++;
		}
		region = nanov2_next_region_for_region(nanozone, region);
	}
done:
	MAGMALLOC_PRESSURERELIEFEND((void *)nanozone, name, (int)goal, (int)total);
	MALLOC_TRACE(TRACE_nano_memory_pressure | DBG_FUNC_END,
			(uint64_t)nanozone, goal, total, 0);
	return total;
}
#endif // OS_VARIANT_RESOLVED
#pragma mark -
#pragma mark Zone Introspection
#if OS_VARIANT_NOTRESOLVED
#define NANOV2_ZONE_PTR_TO_MAPPED_PTR(type, zone_ptr, offset) \
(type)((mach_vm_address_t)zone_ptr - (mach_vm_offset_t)offset)
#define NANOV2_MAPPED_PTR_TO_ZONE_PTR(type, mapped_ptr, offset) \
(type)((mach_vm_address_t)mapped_ptr + (mach_vm_offset_t)offset)
// Zone introspection: enumerates in-use pointer ranges and/or block ranges
// of a (possibly remote) task's Nano zone. "reader" maps target-task memory
// into this task; addresses read through it are local copies, so local
// pointers must be produced with NANOV2_ZONE_PTR_TO_MAPPED_PTR before being
// dereferenced, while reported ranges stay in target-task terms.
static kern_return_t
nanov2_ptr_in_use_enumerator(task_t task, void *context, unsigned type_mask,
		vm_address_t zone_address, memory_reader_t reader,
		vm_range_recorder_t recorder)
{
	// Make sure the derived size-class tables exist in this process.
	nanov2_configure();
	if (!(type_mask & (MALLOC_PTR_IN_USE_RANGE_TYPE|MALLOC_PTR_REGION_RANGE_TYPE))) {
		return 0;
	}
	nanozonev2_t *nanozone;
	nanozonev2_t zone_copy;
	kern_return_t kr;
	bitarray_t slots;
	if (!reader) {
		reader = nano_common_default_reader;
	}
	// Snapshot the zone structure from the target task.
	kr = reader(task, zone_address, sizeof(nanozonev2_t), (void **)&nanozone);
	if (kr) {
		return kr;
	}
	// Enumerating our own zone? Affects how scratch memory is obtained below.
	boolean_t self_zone = (nanozonev2_t *)zone_address == nanozone;
	memcpy(&zone_copy, nanozone, sizeof(zone_copy));
	nanozone = &zone_copy;
	nanov2_meta_index_t metablock_meta_index = nanov2_metablock_meta_index(nanozone);
	nanov2_region_t *region = nanozone->first_region_base;
	while (region) {
		// Map this whole region into our address space.
		mach_vm_address_t vm_addr = (mach_vm_address_t)NULL;
		kern_return_t kr = reader(task, (vm_address_t)region, NANOV2_REGION_SIZE, (void **)&vm_addr);
		if (kr) {
			return kr;
		}
		// Offset converting target-task addresses to mapped-local ones.
		mach_vm_offset_t ptr_offset = (mach_vm_address_t)region - vm_addr;
		nanov2_arena_t *arena = nanov2_first_arena_for_region(region);
		nanov2_arena_t *limit_arena = nanov2_limit_arena_for_region(nanozone, region);
		vm_range_t ptr_range;
		while (arena < limit_arena) {
			nanov2_arena_metablock_t *arena_meta_blockp =
					NANOV2_ZONE_PTR_TO_MAPPED_PTR(nanov2_arena_metablock_t *,
					nanov2_metablock_address_for_ptr(nanozone, arena),
					ptr_offset);
			nanov2_block_meta_t *block_metap = &arena_meta_blockp->arena_block_meta[0];
			for (nanov2_meta_index_t i = 0; i < NANOV2_BLOCKS_PER_ARENA; i++, block_metap++) {
				// Skip the slot describing the metadata block itself.
				if (i == metablock_meta_index) {
					continue;
				}
				nanov2_block_meta_t meta = os_atomic_load(block_metap, relaxed);
				if (!nanov2_is_block_active(meta)) {
					continue;
				}
				// blockp is a target-task address; report it as-is.
				nanov2_block_t *blockp = nanov2_block_address_from_meta_index(
						nanozone, arena, i);
				if (type_mask & MALLOC_PTR_REGION_RANGE_TYPE) {
					ptr_range.address = (vm_address_t)blockp;
					ptr_range.size = NANOV2_BLOCK_SIZE;
					recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1);
				}
				if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) {
					nanov2_size_class_t size_class = nanov2_size_class_for_ptr(
							nanozone, blockp);
					int slot_size = nanov2_size_from_size_class(size_class);
					int slot_count = slots_by_size_class[size_class];
					vm_range_t ranges[NANOV2_MAX_SLOTS_PER_BLOCK];
					int range_count = 0;
					if (meta.next_slot == SLOT_BUMP || meta.next_slot == SLOT_FULL) {
						// No free list: every slot up to the bump point (or
						// all slots, if the block is full) is in use.
						range_count = meta.next_slot == SLOT_BUMP ?
								slot_count - meta.free_count - 1 : slot_count;
						for (int i = 0; i < range_count; i++) {
							ranges[i].address = (vm_address_t)nanov2_slot_in_block_ptr(blockp, size_class, i);
							ranges[i].size = slot_size;
						}
					} else {
						// The block has a free list. Mark every slot in a
						// bitarray, clear the ones found on the free list,
						// and report what remains.
						int log_size = 64 - __builtin_clzl(slot_count);
						if (self_zone) {
							// Allocate scratch from the helper zone so we
							// don't mutate the nano zone being enumerated.
							slots = nanozone->helper_zone->calloc(nanozone->helper_zone,
									1, bitarray_size(log_size));
						} else {
							slots = bitarray_create(log_size);
						}
						for (int i = 0; i < slot_count; i++) {
							bitarray_set(slots, log_size, i);
						}
						int next_slot = meta.next_slot;
						int free_list_count = 0;
						while (next_slot != SLOT_BUMP) {
							// next_slot appears to be stored 1-based
							// (0 is reserved for SLOT_NULL) - hence the
							// decrement before use. TODO confirm against
							// the free-list writer.
							next_slot--;
							if (next_slot < 0 || next_slot >= slot_count ||
									!bitarray_get(slots, log_size, next_slot)) {
								// Out-of-range or repeated entry: treat the
								// free list as corrupt and stop walking.
								break;
							}
							bitarray_zap(slots, log_size, next_slot);
							void *ptr = nanov2_slot_in_block_ptr(blockp, size_class, next_slot);
							// Follow the link via the mapped copy of the slot.
							nanov2_free_slot_t *slotp = NANOV2_ZONE_PTR_TO_MAPPED_PTR(nanov2_free_slot_t *, ptr, ptr_offset);
							next_slot = slotp->next_slot;
							free_list_count++;
						}
						// Slots at or beyond the high-water mark were never
						// allocated; only report set bits below it.
						int block_free_count = meta.free_count + 1;
						int in_use_count = slot_count - block_free_count;
						int slots_used_count = in_use_count + free_list_count;
						index_t index;
						while (bitarray_zap_first_set(slots, log_size, &index)) {
							if (index >= slots_used_count) {
								break;
							}
							ranges[range_count].address = (vm_address_t)nanov2_slot_in_block_ptr(blockp, size_class, index);
							ranges[range_count].size = slot_size;
							range_count++;
						}
						free(slots);
					}
					if (range_count) {
						recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, ranges, range_count);
					}
				}
			}
			arena++;
		}
		// Follow the region chain using the mapped copy of the linkage.
		nanov2_region_linkage_t *region_linkagep =
				nanov2_region_linkage_for_region(nanozone, region);
		nanov2_region_linkage_t *mapped_region_linkagep =
				NANOV2_ZONE_PTR_TO_MAPPED_PTR(nanov2_region_linkage_t *,
				region_linkagep, ptr_offset);
		int offset = mapped_region_linkagep->next_region_offset;
		region = offset ? region + offset : NULL;
	}
	return 0;
}
// Rounds "size" up to the nearest allocatable size. Requests within Nano's
// range use the common Nano rounding; larger requests are delegated to the
// helper zone's own good_size implementation.
static size_t
nanov2_good_size(nanozonev2_t *nanozone, size_t size)
{
	if (size > NANO_MAX_SIZE) {
		// Too large for Nano -- ask the helper zone instead.
		return nanozone->helper_zone->introspect->good_size(
				nanozone->helper_zone, size);
	}
	return _nano_common_good_size(size);
}
// Introspection "check" hook. Nano V2 performs no consistency checking
// here, so the zone is always reported as valid.
static boolean_t
nanov2_check(nanozonev2_t *nanozone)
{
	return TRUE;
}
// Introspection "print" hook: writes a human-readable description of the
// zone through malloc_report(). Emits a zone-wide summary, the current
// allocation block table, then walks every region/arena/block printing
// per-block state. When "verbose" is set, inactive blocks are included in
// the per-block detail as well.
static void
nanov2_print(nanozonev2_t *nanozone, boolean_t verbose)
{
malloc_statistics_t stats;
nanov2_statistics_t *nano_stats = &nanozone->statistics;
nanov2_statistics(nanozone, &stats);
// Zone-wide summary line.
malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX,
"Nanozonev2 %p: blocks in use: %llu, size in use: %llu allocated size: %llu, "
"allocated regions: %d, region holes: %d\n",
nanozone, (uint64_t)stats.blocks_in_use, (uint64_t)stats.size_in_use,
(uint64_t)stats.size_allocated, nano_stats->allocated_regions,
nano_stats->region_address_clashes);
#if DEBUG_MALLOC
// Per-size-class counters are only maintained in DEBUG_MALLOC builds.
malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX,
"\nPer size-class statistics:\n");
for (int i = 0; i < NANO_SIZE_CLASSES; i++) {
nanov2_size_class_statistics *cs = &nano_stats->size_class_statistics[i];
malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX,
" Class %d: ", i);
malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX,
"total alloc: %llu, total frees: %llu, madvised blocks: %llu, madvise races: %llu",
cs->total_allocations, cs->total_frees, cs->madvised_blocks,
cs->madvise_races);
malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX, "\n");
}
#endif // DEBUG_MALLOC
// Dump the non-NULL current-allocation-block pointers, indexed by size
// class and allocation (per-CPU/context) slot.
malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX,
"Current Allocation Blocks By Size Class/Context [CPU]\n");
for (int i = 0; i < NANO_SIZE_CLASSES; i++) {
malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX,
" Class %d: ", i);
for (int j = 0; j < MAX_CURRENT_BLOCKS; j++) {
if (nanozone->current_block[i][j]) {
malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX,
"%d: %p; ", j, nanozone->current_block[i][j]);
}
}
malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX, "\n");
}
// Walk the region chain; each region contains one or more arenas.
nanov2_meta_index_t metablock_meta_index = nanov2_metablock_meta_index(nanozone);
nanov2_region_t *region = nanozone->first_region_base;
int region_index = 0;
while (region) {
malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX,
"\nRegion %d: base address %p\n", region_index, region);
nanov2_arena_t *arena = nanov2_first_arena_for_region(region);
nanov2_arena_t *limit_arena = nanov2_limit_arena_for_region(nanozone, region);
int arena_index = 0;
while (arena < limit_arena) {
nanov2_arena_metablock_t *arena_meta_blockp =
nanov2_metablock_address_for_ptr(nanozone, arena);
nanov2_block_meta_t *block_metap = &arena_meta_blockp->arena_block_meta[0];
// First pass: classify every block's state for the arena summary.
int active_blocks = 0;
int madvisable_blocks = 0;
int unused_blocks = 0;
int madvised_blocks = 0;
int madvising_blocks = 0;
for (nanov2_meta_index_t i = 0; i < NANOV2_BLOCKS_PER_ARENA; i++) {
if (i == metablock_meta_index) {
// Skip the block that holds the arena's own metadata.
continue;
}
nanov2_block_meta_t meta = block_metap[i];
switch (meta.next_slot) {
case SLOT_NULL:
unused_blocks++;
break;
case SLOT_MADVISED:
madvised_blocks++;
break;
case SLOT_MADVISING:
madvising_blocks++;
break;
case SLOT_CAN_MADVISE:
madvisable_blocks++;
break;
default:
active_blocks++;
break;
}
}
malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX,
"Arena #%d: base address %p. Blocks - active: %d, "
"madvisable: %d, madvising: %d, madvised: %d, unused: %d\n",
arena_index, arena, active_blocks, madvisable_blocks,
madvising_blocks, madvised_blocks, unused_blocks);
// Second pass: count blocks that hold allocations, per size class.
int non_empty_size_classes[NANO_SIZE_CLASSES];
for (int i = 0; i < NANO_SIZE_CLASSES; i++) {
non_empty_size_classes[i] = 0;
}
for (nanov2_meta_index_t i = 0; i < NANOV2_BLOCKS_PER_ARENA; i++) {
if (i == metablock_meta_index) {
continue;
}
nanov2_block_meta_t meta = block_metap[i];
nanov2_size_class_t size_class =
nanov2_size_class_for_meta_index(nanozone, i);
switch (meta.next_slot) {
case SLOT_FULL:
case SLOT_BUMP:
default:
// FULL, BUMP, or an in-use free-list head all count as non-empty.
non_empty_size_classes[size_class]++;
break;
case SLOT_NULL:
case SLOT_CAN_MADVISE:
case SLOT_MADVISING:
case SLOT_MADVISED:
break;
}
}
malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX,
"Size classes with allocated blocks: ");
for (int i = 0; i < NANO_SIZE_CLASSES; i++) {
if (non_empty_size_classes[i]) {
malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX,
"%d ", i);
}
}
malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX, "\n");
// Third pass: per-block detail. Inactive blocks are skipped unless
// "verbose" was requested.
for (nanov2_meta_index_t i = 0; i < NANOV2_BLOCKS_PER_ARENA; i++) {
if (i == metablock_meta_index) {
continue;
}
nanov2_block_meta_t meta = block_metap[i];
if (!nanov2_is_block_active(meta) && !verbose) {
continue;
}
nanov2_size_class_t size_class =
nanov2_size_class_for_meta_index(nanozone, i);
// A non-NULL slot_text short-circuits the occupancy report below.
char *slot_text;
switch (meta.next_slot) {
case SLOT_NULL:
slot_text = "NOT USED";
break;
case SLOT_FULL:
slot_text = "FULL";
break;
case SLOT_CAN_MADVISE:
slot_text = "CAN MADVISE";
break;
case SLOT_MADVISING:
slot_text = "MADVISING";
break;
case SLOT_MADVISED:
slot_text = "MADVISED";
break;
default:
slot_text = NULL;
break;
}
malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX,
" Block %d: base %p; metadata: %p, size %d (class %d) in-use: %d ",
i, nanov2_block_address_from_meta_index(nanozone, arena, i),
&block_metap[i], nanov2_size_from_size_class(size_class),
size_class, meta.in_use);
if (slot_text) {
malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX,
"%s\n", slot_text);
} else {
// free_count is biased by one, hence the extra -1/+1 below.
int allocated = slots_by_size_class[size_class] - meta.free_count - 1;
if (meta.next_slot == SLOT_BUMP) {
malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX,
"BUMP (free list empty)");
} else {
malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX,
"next_slot (1-based) = %d", meta.next_slot);
}
malloc_report(MALLOC_REPORT_NOLOG | MALLOC_REPORT_NOPREFIX,
", allocated slots: %d, free slots = %d, occupancy: %d%%\n",
allocated, meta.free_count + 1,
(100 * allocated)/slots_by_size_class[size_class]);
}
}
arena++;
arena_index++;
}
region = nanov2_next_region_for_region(nanozone, region);
region_index++;
}
}
// Introspection "log" hook. Logging is not supported for Nano V2; this is
// an intentional no-op.
static void
nanov2_log(malloc_zone_t *zone, void *log_address)
{
}
// Pre-fork hook: intentionally a no-op. NOTE(review): the zone's internal
// locks (madvise, regions, current-block) are not acquired here; the child
// instead abandons Nano paths entirely (see nanov2_forked_zone) -- confirm
// that no lock needs to be held across fork().
static void
nanov2_force_lock(nanozonev2_t *nanozone)
{
}
// Post-fork unlock hook: intentional no-op, matching the no-op
// nanov2_force_lock above.
static void
nanov2_force_unlock(nanozonev2_t *nanozone)
{
}
// Post-fork lock reinitialization hook: intentional no-op for Nano V2.
static void
nanov2_reinit_lock(nanozonev2_t *nanozone)
{
}
// Introspection "zone_locked" hook: Nano V2 never reports itself as locked.
static boolean_t
nanov2_locked(nanozonev2_t *nanozone)
{
return FALSE;
}
// Introspection "statistics" hook: computes aggregate allocation counters
// for the whole zone by visiting the metadata of every block in every
// arena of every region. The walk takes no locks; the result is a
// point-in-time snapshot.
static void
nanov2_statistics(nanozonev2_t *nanozone, malloc_statistics_t *stats)
{
	memset(stats, '\0', sizeof(*stats));
	nanov2_meta_index_t meta_index = nanov2_metablock_meta_index(nanozone);
	for (nanov2_region_t *region = nanozone->first_region_base; region;
			region = nanov2_next_region_for_region(nanozone, region)) {
		nanov2_arena_t *limit = nanov2_limit_arena_for_region(nanozone, region);
		for (nanov2_arena_t *arena = nanov2_first_arena_for_region(region);
				arena < limit; arena++) {
			nanov2_arena_metablock_t *metablockp =
					nanov2_metablock_address_for_ptr(nanozone, arena);
			for (nanov2_meta_index_t block = 0;
					block < NANOV2_BLOCKS_PER_ARENA; block++) {
				if (block == meta_index) {
					// Skip the block that holds the arena's metadata.
					continue;
				}
				nanov2_block_meta_t meta = os_atomic_load(
						&metablockp->arena_block_meta[block], relaxed);
				nanov2_size_class_t size_class =
						nanov2_size_class_for_meta_index(nanozone, block);
				int slot_size = nanov2_size_from_size_class(size_class);
				int in_use;
				switch (meta.next_slot) {
				case SLOT_NULL:
				case SLOT_CAN_MADVISE:
				case SLOT_MADVISING:
				case SLOT_MADVISED:
					// No live allocations in this block.
					in_use = 0;
					break;
				case SLOT_FULL:
					in_use = slots_by_size_class[size_class];
					break;
				case SLOT_BUMP:
				default:
					// free_count is biased by one.
					in_use = slots_by_size_class[size_class] - meta.free_count - 1;
					break;
				}
				if (in_use) {
					stats->blocks_in_use += in_use;
					stats->size_in_use += in_use * slot_size;
					stats->size_allocated += NANOV2_BLOCK_SIZE;
				}
			}
		}
	}
}
// Introspection vector for the Nano V2 zone. The (void *) casts adapt the
// nanozonev2_t-typed implementations to the generic
// malloc_introspection_t function-pointer signatures.
static const struct malloc_introspection_t nanov2_introspect = {
.enumerator = (void *)nanov2_ptr_in_use_enumerator,
.good_size = (void *)nanov2_good_size,
.check = (void *)nanov2_check,
.print = (void *)nanov2_print,
.log = (void *)nanov2_log,
.force_lock = (void *)nanov2_force_lock,
.force_unlock = (void *)nanov2_force_unlock,
.statistics = (void *)nanov2_statistics,
.zone_locked = (void *)nanov2_locked,
.enable_discharge_checking = NULL,
.disable_discharge_checking = NULL,
#ifdef __BLOCKS__
.enumerate_discharged_pointers = NULL,
#else // __BLOCKS__
.enumerate_unavailable_without_blocks = NULL,
#endif // __BLOCKS__
.reinit_lock = (void *)nanov2_reinit_lock,
};
#endif // OS_VARIANT_NOTRESOLVED
#pragma mark -
#pragma mark Utility Functions
#if OS_VARIANT_RESOLVED
// Returns the allocation size for a pointer belonging to this zone, or 0
// if the pointer is not a live Nano V2 allocation. With allow_inner set,
// interior (non-slot-aligned) pointers are accepted.
size_t
nanov2_pointer_size(nanozonev2_t *nanozone, void *ptr, boolean_t allow_inner)
{
// Reject pointers that do not carry the Nano address signature.
if (!nanov2_has_valid_signature((void *)ptr)) {
return 0;
}
// Unless inner pointers are allowed, require quantum alignment.
if (!allow_inner && ((uintptr_t)ptr) & NANO_QUANTA_MASK) {
return 0;
}
// Must lie within the address range the zone has ever handed out.
if (ptr < (void *)nanozone->first_region_base ||
ptr > (void *)nanozone->current_region_next_arena) {
return 0;
}
#if NANOV2_MULTIPLE_REGIONS
// If region address clashes occurred, the zone's regions are not
// contiguous, so verify the pointer's region is actually in our chain.
if (nanozone->statistics.region_address_clashes) {
nanov2_region_t *ptr_region = nanov2_region_address_for_ptr(ptr);
nanov2_region_t *region = nanozone->first_region_base;
while (region) {
if (ptr_region == region) {
break;
}
region = nanov2_next_region_for_region(nanozone, region);
}
if (!region) {
return 0;
}
}
#endif // NANOV2_MULTIPLE_REGIONS
nanov2_size_class_t size_class = nanov2_size_class_for_ptr(nanozone, ptr);
nanov2_block_meta_t *block_metap = nanov2_meta_ptr_for_ptr(nanozone, ptr);
nanov2_block_meta_t meta = os_atomic_load(block_metap, relaxed);
// The block must be active and must hold at least one allocation.
// free_count is biased by one: a non-FULL block whose free_count equals
// slot count - 1 is completely empty.
if (!nanov2_is_block_active(meta) || (meta.next_slot != SLOT_FULL &&
meta.free_count == slots_by_size_class[size_class] - 1)) {
return 0;
}
size_t size = nanov2_size_from_size_class(size_class);
nanov2_addr_t addr = { .addr = ptr };
// Exact pointers must sit on a slot boundary within the block.
if (!allow_inner && (addr.fields.nano_offset % size)) {
return 0;
}
// A slot whose double-free guard XORs with the freelist cookie back to
// the pointer value is sitting on the free list, i.e. not allocated.
nanov2_free_slot_t *slotp = (nanov2_free_slot_t *)ptr;
uintptr_t guard = os_atomic_load(&slotp->double_free_guard, relaxed);
if ((guard ^ nanozone->slot_freelist_cookie) == (uintptr_t)ptr) {
return 0;
}
return size;
}
#pragma mark -
#pragma mark Madvise Management
// Attempts to madvise an empty block back to the OS. Caller must hold the
// madvise_lock. The block transitions SLOT_CAN_MADVISE -> SLOT_MADVISING
// -> SLOT_MADVISED via compare-exchange so that a racing allocator can
// detect and recover from the transition. Returns TRUE if the block's
// memory was actually madvised.
boolean_t
nanov2_madvise_block(nanozonev2_t *nanozone, nanov2_block_meta_t *block_metap,
nanov2_block_t *blockp, nanov2_size_class_t size_class)
{
_malloc_lock_assert_owner(&nanozone->madvise_lock);
boolean_t madvised = FALSE;
nanov2_block_meta_t old_meta = os_atomic_load(block_metap, relaxed);
if (old_meta.next_slot == SLOT_CAN_MADVISE) {
nanov2_block_meta_t new_meta = {
.next_slot = SLOT_MADVISING,
.gen_count = old_meta.gen_count + 1,
};
if (!os_atomic_cmpxchgv(block_metap, old_meta, new_meta, &old_meta,
relaxed)) {
// Another thread changed the block state first (e.g. started
// allocating from it); count the race and give up.
nanozone->statistics.size_class_statistics[size_class].madvise_races++;
return false;
}
if (mvm_madvise_free(nanozone, nanov2_region_address_for_ptr(blockp),
(uintptr_t)blockp, (uintptr_t)(blockp + 1), NULL, FALSE)) {
malloc_zone_error(0, false, "Failed to madvise block at blockp: %p, error: %d\n", blockp, errno);
} else {
nanozone->statistics.size_class_statistics[size_class].madvised_blocks++;
madvised = TRUE;
}
// Publish the final MADVISED state. While we are in MADVISING no one
// else should modify the metadata, so this CAS is expected to succeed.
nanov2_block_meta_t final_meta = {
.next_slot = SLOT_MADVISED,
.gen_count = new_meta.gen_count + 1,
};
if (!os_atomic_cmpxchgv(block_metap, new_meta, final_meta, &old_meta,
relaxed)) {
malloc_zone_error(nanozone->debug_flags, false,
"Failed when changing state from MADVISING to MADVISED, "
"block_metap = %p, blockp = %p\n", block_metap, blockp);
}
}
return madvised;
}
#endif // OS_VARIANT_RESOLVED
#pragma mark -
#pragma mark Region Management
#if OS_VARIANT_NOTRESOLVED
#if NANOV2_MULTIPLE_REGIONS
// Highest base address at which a region may be placed; bounds the probe
// loop in nanov2_allocate_new_region().
static nanov2_addr_t nanov2_max_region_base = {
.fields.nano_signature = NANOZONE_SIGNATURE,
.fields.nano_region = NANOV2_MAX_REGION_NUMBER
};
#endif // NANOV2_MULTIPLE_REGIONS
// Attempts to reserve NANOV2_REGION_SIZE bytes of VM at exactly the given
// base address, emitting trace events around the attempt. Returns TRUE on
// success.
static boolean_t
nanov2_allocate_region(nanov2_region_t *region)
{
	MALLOC_TRACE(TRACE_nanov2_region_allocation | DBG_FUNC_START,
			(uint64_t)region, 0, 0, 0);
	boolean_t allocated = nano_common_allocate_vm_space(
			(mach_vm_address_t)region, NANOV2_REGION_SIZE);
	MALLOC_TRACE(TRACE_nanov2_region_allocation | DBG_FUNC_END,
			(uint64_t)region, allocated, 0, 0);
	return allocated;
}
// Allocates a new region and links it into the region chain. Must be
// called with the regions_lock held. Candidate base addresses are probed
// in ascending region-sized steps from the current region limit until a
// VM reservation succeeds or the maximum region base is passed. Returns
// TRUE on success; always FALSE when multiple regions are not configured.
boolean_t
nanov2_allocate_new_region(nanozonev2_t *nanozone)
{
#if NANOV2_MULTIPLE_REGIONS
boolean_t result = FALSE;
_malloc_lock_assert_owner(&nanozone->regions_lock);
nanov2_region_t *current_region = nanozone->current_region_base;
nanov2_region_t *next_region = (nanov2_region_t *)nanozone->current_region_limit;
// Each failed probe means some other mapping occupies that slot; this is
// recorded as a "region address clash" (a hole in the address range).
while ((void *)next_region <= nanov2_max_region_base.addr) {
if (nanov2_allocate_region(next_region)) {
nanozone->current_region_base = next_region;
nanozone->current_region_next_arena = (nanov2_arena_t *)next_region;
nanozone->current_region_limit = next_region + 1;
nanozone->statistics.allocated_regions++;
result = TRUE;
break;
}
next_region++;
nanozone->statistics.region_address_clashes++;
}
if (result) {
// Chain the new region to its predecessor via a relative offset,
// measured in region-sized units (0 terminates the chain).
nanov2_region_linkage_t *current_region_linkage =
nanov2_region_linkage_for_region(nanozone, current_region);
nanov2_region_linkage_t *next_region_linkage =
nanov2_region_linkage_for_region(nanozone, next_region);
uint16_t offset = next_region - current_region;
current_region_linkage->next_region_offset = offset;
next_region_linkage->next_region_offset = 0;
}
return result;
#else // NANOV2_MULTIPLE_REGIONS
return FALSE;
#endif // NANOV2_MULTIPLE_REGIONS
}
#endif // OS_VARIANT_NOTRESOLVED
#pragma mark -
#pragma mark Allocation
#if OS_VARIANT_RESOLVED
// Allocates one slot from the block described by block_metap, or returns
// NULL if the block's state does not permit allocation. Lock-free: a new
// metadata value is constructed and published with compare-exchange,
// restarting from the freshly observed value on contention. Slots come
// either from the block's free list (next_slot holds a 1-based slot
// index) or by bumping past the high-water mark (SLOT_BUMP).
void *
nanov2_allocate_from_block(nanozonev2_t *nanozone,
nanov2_block_meta_t *block_metap, nanov2_size_class_t size_class)
{
nanov2_block_meta_view_t old_meta_view;
old_meta_view.meta = os_atomic_load(block_metap, relaxed);
nanov2_block_t *blockp = NULL;
again:
if (!nanov2_can_allocate_from_block(old_meta_view.meta)) {
return NULL;
}
int slot;
void *ptr = NULL;
boolean_t from_free_list = FALSE;
// free_count is biased by one, so decrementing on allocation is correct
// even when the current value is 0 (which means one slot remains).
nanov2_block_meta_t new_meta = {
.in_use = 1,
.free_count = old_meta_view.meta.free_count - 1,
.gen_count = old_meta_view.meta.gen_count + 1
};
boolean_t slot_full = old_meta_view.meta.free_count == 0;
if (old_meta_view.meta.next_slot == SLOT_BUMP
|| old_meta_view.meta.next_slot == SLOT_CAN_MADVISE) {
// Bump allocation: take the next never-used slot.
new_meta.next_slot = slot_full ? SLOT_FULL : SLOT_BUMP;
slot = slots_by_size_class[size_class] - old_meta_view.meta.free_count - 1;
} else {
// Free-list allocation: pop the head (next_slot is 1-based).
from_free_list = TRUE;
if (!blockp) {
blockp = nanov2_block_address_from_meta_ptr(nanozone, block_metap);
}
slot = old_meta_view.meta.next_slot - 1; ptr = nanov2_slot_in_block_ptr(blockp, size_class, slot);
nanov2_free_slot_t *slotp = (nanov2_free_slot_t *)ptr;
new_meta.next_slot = slot_full ? SLOT_FULL : slotp->next_slot;
}
if (!os_atomic_cmpxchgv(block_metap, old_meta_view.meta, new_meta,
&old_meta_view.meta, dependency)) {
// Lost the race. If the block moved into a madvise state, take the
// madvise lock and, if the madvise already completed, re-madvise the
// block so its contents are consistently discarded before retrying.
if (old_meta_view.meta.next_slot == SLOT_CAN_MADVISE ||
old_meta_view.meta.next_slot == SLOT_MADVISING ||
old_meta_view.meta.next_slot == SLOT_MADVISED) {
_malloc_lock_lock(&nanozone->madvise_lock);
if (old_meta_view.meta.next_slot == SLOT_MADVISED) {
if (!blockp) {
blockp = nanov2_block_address_from_meta_ptr(nanozone, block_metap);
}
if (mvm_madvise_free(nanozone, nanov2_region_address_for_ptr(blockp),
(uintptr_t)blockp, (uintptr_t)(blockp + 1), NULL, FALSE)) {
malloc_zone_error(0, false,
"Failed to remadvise block at blockp: %p, error: %d\n", blockp, errno);
}
}
_malloc_lock_unlock(&nanozone->madvise_lock);
}
goto again;
}
if (!ptr) {
if (!blockp) {
blockp = nanov2_block_address_from_meta_ptr(nanozone, block_metap);
}
ptr = nanov2_slot_in_block_ptr(blockp, size_class, slot);
}
// Force an address dependency on the metadata value that the CAS
// observed, ordering the guard load below after the successful CAS.
nanov2_free_slot_t *slotp =
(nanov2_free_slot_t *)os_atomic_force_dependency_on(ptr,
(unsigned long)old_meta_view.bits);
if (from_free_list) {
// Validate the double-free guard before handing the slot out.
uintptr_t guard = os_atomic_load(&slotp->double_free_guard, relaxed);
if ((guard ^ nanozone->slot_freelist_cookie) != (uintptr_t)ptr) {
malloc_zone_error(MALLOC_ABORT_ON_CORRUPTION, false,
"Heap corruption detected, free list is damaged at %p\n"
"*** Incorrect guard value: %lu\n", ptr, guard);
__builtin_unreachable();
}
}
#if DEBUG_MALLOC
nanozone->statistics.size_class_statistics[size_class].total_allocations++;
#endif // DEBUG_MALLOC
return ptr;
}
// Searches an arena for a block to allocate from for the given size class
// and claims it (sets in_use via CAS) before returning its metadata
// pointer, or NULL if the arena has no usable block. Scans backwards from
// start_block to the size class's first block, then forwards from
// start_block, honoring the configured scan policy (first-fit or
// capacity-based) and scan limit. Candidates are remembered in preference
// order: policy match > fallback (partial) > free/empty > madvisable.
MALLOC_ALWAYS_INLINE MALLOC_INLINE nanov2_block_meta_t *
nanov2_find_block_in_arena(nanozonev2_t *nanozone,
nanov2_arena_t *arena, nanov2_size_class_t size_class,
nanov2_block_meta_t *start_block)
{
// Without a starting block there is no capacity information, so fall
// back to first-fit for this call.
boolean_t use_first_fit = !start_block ||
nanov2_policy_config.block_scan_policy == NANO_SCAN_FIRST_FIT;
nanov2_block_meta_t *first_block = nanov2_first_block_for_size_class_in_arena(
nanozone, size_class, arena);
boolean_t scanning_backwards;
if (!start_block) {
start_block = first_block;
}
int slots_in_block = slots_by_size_class[size_class];
nanov2_block_meta_t old_meta;
nanov2_block_meta_t *this_block;
nanov2_block_meta_t *found_block;
nanov2_block_meta_t *madvisable_block;
nanov2_block_meta_t *free_block;
nanov2_block_meta_t *fallback_block;
boolean_t fallback_below_max;
int scan_limit;
retry:
this_block = start_block;
found_block = NULL;
madvisable_block = NULL;
free_block = NULL;
fallback_block = NULL;
fallback_below_max = FALSE;
scan_limit = nanov2_policy_config.block_scan_limit;
scanning_backwards = TRUE;
do {
old_meta = os_atomic_load(this_block, relaxed);
// Skip blocks that are claimed by another thread, full, or mid-madvise.
if (!old_meta.in_use && old_meta.next_slot != SLOT_FULL
&& old_meta.next_slot != SLOT_MADVISING) {
if (old_meta.next_slot == SLOT_CAN_MADVISE) {
if (!madvisable_block) {
madvisable_block = this_block;
}
} else if (old_meta.next_slot == SLOT_NULL
|| old_meta.next_slot == SLOT_MADVISED) {
if (!free_block) {
free_block = this_block;
}
} else if (use_first_fit) {
found_block = this_block;
} else {
MALLOC_ASSERT(nanov2_policy_config.block_scan_policy == NANO_SCAN_CAPACITY_BASED);
// NOTE(review): computed from free_count, so despite the name this
// looks like the percentage of *free* slots; the min/max capacity
// thresholds are compared against this value -- confirm intent.
int percent_used = (100 * old_meta.free_count)/slots_in_block;
if (percent_used >= nanov2_policy_config.block_scan_min_capacity
&& percent_used <= nanov2_policy_config.block_scan_max_capacity) {
// Within the preferred capacity window: take this block.
found_block = this_block;
} else if (percent_used >= nanov2_policy_config.block_scan_min_capacity) {
// Above the window: usable fallback, preferred over a
// below-window fallback.
if (!fallback_block || fallback_below_max) {
fallback_block = this_block;
}
} else if (!fallback_block
&& percent_used < nanov2_policy_config.block_scan_min_capacity) {
fallback_block = this_block;
fallback_below_max = TRUE;
} else if (!free_block) {
free_block = this_block;
}
}
// First-fit stops at the first usable candidate of any kind.
if (use_first_fit && (found_block || fallback_block || free_block)) {
break;
}
}
// Capacity-based scanning examines at most scan_limit blocks after a
// usable candidate has been seen (a limit of 0 means unlimited).
if (scan_limit > 0) {
if ((fallback_block || free_block) && --scan_limit == 0) {
break;
}
}
if (scanning_backwards) {
boolean_t wrapped;
nanov2_block_meta_t *prev_block = nanov2_previous_block_for_size_class(
nanozone, size_class, this_block, &wrapped);
if (wrapped) {
// Exhausted the backward direction; restart forward from
// start_block with a fresh scan budget.
scan_limit = nanov2_policy_config.block_scan_limit;
scanning_backwards = FALSE;
this_block = start_block;
} else {
this_block = prev_block;
}
} else {
this_block = nanov2_next_block_for_size_class(nanozone, size_class,
this_block, NULL);
if (this_block == start_block) {
break;
}
}
} while (!found_block);
// No policy match: fall back in preference order.
if (!found_block) {
if (fallback_block) {
found_block = fallback_block;
} else if (free_block) {
found_block = free_block;
} else if (madvisable_block) {
found_block = madvisable_block;
}
}
if (found_block) {
// Claim the chosen block. If it is being madvised or the CAS loses a
// race, rescan the arena from scratch.
old_meta = os_atomic_load(found_block, relaxed);
if (old_meta.next_slot == SLOT_MADVISING) {
goto retry;
}
// Blocks that were empty/madvised/unused restart with a clean slate.
boolean_t reset_slot = old_meta.next_slot == SLOT_NULL
|| old_meta.next_slot == SLOT_CAN_MADVISE
|| old_meta.next_slot == SLOT_MADVISED;
nanov2_block_meta_t new_meta = {
.in_use = 1,
.free_count = reset_slot ? slots_in_block - 1 : old_meta.free_count,
.next_slot = reset_slot ? SLOT_BUMP : old_meta.next_slot,
.gen_count = reset_slot ? 0 : old_meta.gen_count + 1,
};
if (!os_atomic_cmpxchgv(found_block, old_meta, new_meta, &old_meta,
relaxed)) {
goto retry;
}
}
return found_block;
}
// Slow allocation path: finds a block for the given size class, starting
// from the caller's current block (*block_metapp) and walking every arena
// in every region if necessary; if nothing is found, grows the zone by an
// arena (possibly allocating a new region) and retries. On success stores
// the chosen block into *block_metapp, releases the in_use claim on the
// previous block, and returns the allocation. Returns NULL on failure.
MALLOC_NOINLINE void *
nanov2_find_block_and_allocate(nanozonev2_t *nanozone,
nanov2_size_class_t size_class, nanov2_block_meta_t **block_metapp)
{
nanov2_arena_t *arena;
nanov2_block_meta_t *start_block = os_atomic_load(block_metapp, relaxed);
nanov2_block_meta_t *orig_block = start_block;
if (start_block) {
arena = nanov2_arena_address_for_ptr(start_block);
} else {
arena = nanov2_arena_address_for_ptr(nanozone->first_region_base);
}
nanov2_region_t *start_region;
retry:
start_region = nanov2_region_address_for_ptr(arena);
nanov2_arena_t *start_arena = arena;
nanov2_region_t *region = start_region;
nanov2_arena_t *limit_arena = nanov2_limit_arena_for_region(nanozone, start_region);
// Sampled before the arena walk; used later to detect whether another
// thread grew the zone while we were scanning.
nanov2_arena_t *initial_region_next_arena = nanozone->current_region_next_arena;
do {
nanov2_block_meta_t *block_metap = nanov2_find_block_in_arena(nanozone,
arena, size_class, start_block);
if (block_metap) {
void *ptr = nanov2_allocate_from_block(nanozone, block_metap, size_class);
if (ptr) {
// Publish the new current block and release the old one.
os_atomic_store(block_metapp, block_metap, relaxed);
if (orig_block) {
nanov2_turn_off_in_use(orig_block);
}
return ptr;
}
// The claimed block filled up under us; release it and rescan,
// using it as the new scan starting point.
nanov2_turn_off_in_use(block_metap);
start_block = block_metap;
goto retry;
}
// Advance to the next arena, wrapping through the region chain.
start_block = NULL;
arena++;
if (arena >= limit_arena) {
region = nanov2_next_region_for_region(nanozone, region);
if (!region) {
region = nanozone->first_region_base;
}
arena = nanov2_first_arena_for_region(region);
limit_arena = nanov2_limit_arena_for_region(nanozone, region);
}
} while (arena != start_arena);
// Size classes restricted to a single arena never grow the zone.
if (nanov2_policy_config.single_arena_size_classes & (1 << size_class)) {
return NULL;
}
// Grow the zone by one arena. If another thread already advanced
// current_region_next_arena, just retry with the existing space.
boolean_t failed = FALSE;
arena = initial_region_next_arena;
_malloc_lock_lock(&nanozone->regions_lock);
if (nanozone->current_region_next_arena == arena) {
if ((void *)arena >= nanozone->current_region_limit) {
// Current region is exhausted; try to allocate a new region.
if (nanov2_allocate_new_region(nanozone)) {
arena = nanozone->current_region_next_arena++;
} else {
failed = TRUE;
}
} else {
nanozone->current_region_next_arena = arena + 1;
}
}
_malloc_lock_unlock(&nanozone->regions_lock);
if (!failed) {
start_block = NULL;
goto retry;
}
return NULL;
}
// Main allocation entry point for a size already rounded to a Nano size
// class. Fast path: allocate from the cached current block for this size
// class and allocation index. Slow path: under the per-(class, index)
// lock, retry the current block then search/grow via
// nanov2_find_block_and_allocate. If the class is marked for delegation,
// or Nano space is exhausted, the helper zone takes over. Returns NULL
// only when both Nano and any applicable fallback produced nothing.
void *
nanov2_allocate(nanozonev2_t *nanozone, size_t rounded_size, boolean_t clear)
{
void *ptr = NULL;
nanov2_size_class_t size_class = nanov2_size_class_from_size(rounded_size);
MALLOC_ASSERT(size_class < NANO_SIZE_CLASSES);
MALLOC_ASSERT(rounded_size != 0);
nanov2_block_meta_t *block_metap;
nanov2_block_meta_t **block_metapp;
// Spread contention across MAX_CURRENT_BLOCKS per-class slots.
int allocation_index = nanov2_get_allocation_block_index() & MAX_CURRENT_BLOCKS_MASK;
block_metapp = &nanozone->current_block[size_class][allocation_index];
block_metap = os_atomic_load(block_metapp, relaxed);
if (block_metap) {
// Lock-free fast path from the cached current block.
ptr = nanov2_allocate_from_block(nanozone, block_metap, size_class);
if (ptr) {
goto done;
}
}
// Size classes marked as delegated go straight to the helper zone.
if (nanozone->delegate_allocations & (1 << size_class)) {
ptr = nanozone->helper_zone->malloc(nanozone->helper_zone, rounded_size);
goto done;
}
_malloc_lock_s *lock = &nanozone->current_block_lock[size_class][allocation_index];
_malloc_lock_lock(lock);
// Re-check the current block: another thread may have installed a fresh
// one while we waited for the lock.
block_metap = os_atomic_load(block_metapp, relaxed);
if (block_metap) {
ptr = nanov2_allocate_from_block(nanozone, block_metap, size_class);
if (ptr) {
goto unlock;
}
}
ptr = nanov2_find_block_and_allocate(nanozone, size_class, block_metapp);
unlock:
_malloc_lock_unlock(lock);
if (!ptr) {
// Nano space exhausted for this class: delegate future allocations
// to the helper zone until enough slots are freed.
_malloc_lock_lock(&nanozone->delegate_allocations_lock);
nanozone->delegate_allocations |= 1 << size_class;
_malloc_lock_unlock(&nanozone->delegate_allocations_lock);
}
done:
if (ptr) {
if (clear) {
memset(ptr, '\0', rounded_size);
} else {
// Invalidate any stale double-free guard left in the slot.
nanov2_free_slot_t *slotp = (nanov2_free_slot_t *)ptr;
os_atomic_store(&slotp->double_free_guard, 0, relaxed);
}
}
return ptr;
}
#pragma mark -
#pragma mark Freeing
// Returns a slot to its block. Lock-free: builds the new metadata and
// publishes it with compare-exchange, retrying on contention. If the free
// empties an unclaimed block, the block becomes madvisable (and is
// madvised immediately under that policy); otherwise the slot is pushed
// onto the block's free list with a double-free guard written into it.
void
nanov2_free_to_block(nanozonev2_t *nanozone, void *ptr,
nanov2_size_class_t size_class)
{
nanov2_block_t *blockp = nanov2_block_address_for_ptr(ptr);
nanov2_block_meta_t *block_metap = nanov2_meta_ptr_for_ptr(nanozone, ptr);
nanov2_block_meta_t old_meta = os_atomic_load(block_metap, relaxed);
int slot_count = slots_by_size_class[size_class];
nanov2_block_meta_t new_meta;
boolean_t was_full;
again:
was_full = old_meta.next_slot == SLOT_FULL;
new_meta.free_count = old_meta.free_count + 1;
new_meta.in_use = old_meta.in_use;
new_meta.gen_count = old_meta.gen_count + 1;
// free_count is biased by one: reaching slot_count - 1 means the block
// is now completely empty.
boolean_t freeing_last_active_slot = !was_full &&
new_meta.free_count == slots_by_size_class[size_class] - 1;
if (freeing_last_active_slot) {
// Empty block: reset to BUMP if still claimed by an allocator,
// otherwise offer it for madvising.
new_meta.next_slot = new_meta.in_use ? SLOT_BUMP : SLOT_CAN_MADVISE;
if (!os_atomic_cmpxchgv(block_metap, old_meta, new_meta, &old_meta, relaxed)) {
goto again;
}
if (new_meta.next_slot == SLOT_CAN_MADVISE &&
nanov2_madvise_policy == NANO_MADVISE_IMMEDIATE) {
_malloc_lock_lock(&nanozone->madvise_lock);
nanov2_madvise_block(nanozone, block_metap, blockp, size_class);
_malloc_lock_unlock(&nanozone->madvise_lock);
}
} else {
// Push this slot onto the free list (next_slot is 1-based).
int slot_index = nanov2_slot_index_in_block(blockp, size_class, ptr);
new_meta.next_slot = slot_index + 1; nanov2_free_slot_t *slotp = (nanov2_free_slot_t *)ptr;
slotp->next_slot = was_full ? SLOT_BUMP : old_meta.next_slot;
// Stamp the double-free guard; the release CAS below orders these
// slot writes before the metadata becomes visible.
os_atomic_store(&slotp->double_free_guard,
nanozone->slot_freelist_cookie ^ (uintptr_t)ptr, relaxed);
if (!os_atomic_cmpxchgv(block_metap, old_meta, new_meta, &old_meta, release)) {
goto again;
}
}
// If this class was delegated to the helper zone and the block now has
// plenty of room again, re-enable Nano allocation for the class.
uint16_t class_mask = 1 << size_class;
if (!new_meta.in_use && (nanozone->delegate_allocations & class_mask) &&
(new_meta.free_count >= 0.75 * slot_count)) {
_malloc_lock_lock(&nanozone->delegate_allocations_lock);
nanozone->delegate_allocations &= ~class_mask;
_malloc_lock_unlock(&nanozone->delegate_allocations_lock);
}
#if DEBUG_MALLOC
nanozone->statistics.size_class_statistics[size_class].total_frees++;
#endif // DEBUG_MALLOC
}
#endif // OS_VARIANT_RESOLVED
#if OS_VARIANT_NOTRESOLVED
#pragma mark -
#pragma mark Zone Operations
// Creates and initializes the Nano V2 malloc zone. Allocates the zone
// structure, installs the (resolver-selected) entry points, derives the
// free-list and ASLR cookies from the malloc entropy, and preallocates
// the first region. Returns NULL (and disables Nano) if the zone pages or
// the initial region cannot be allocated. "helper_zone" handles requests
// Nano cannot satisfy; "debug_flags" are stored after stripping options
// Nano does not support.
malloc_zone_t *
nanov2_create_zone(malloc_zone_t *helper_zone, unsigned debug_flags)
{
MALLOC_ASSERT(_malloc_engaged_nano == NANO_V2);
nanozonev2_t *nanozone = nano_common_allocate_based_pages(
NANOZONEV2_ZONE_PAGED_SIZE, 0, 0, VM_MEMORY_MALLOC, 0);
if (!nanozone) {
_malloc_engaged_nano = NANO_NONE;
return NULL;
}
// Wire up the malloc_zone_t function vector. OS_RESOLVED_VARIANT_ADDR
// selects the resolved (per-CPU-variant) implementation.
nanozone->basic_zone.version = 10;
nanozone->basic_zone.size = OS_RESOLVED_VARIANT_ADDR(nanov2_size);
nanozone->basic_zone.malloc = OS_RESOLVED_VARIANT_ADDR(nanov2_malloc);
nanozone->basic_zone.calloc = OS_RESOLVED_VARIANT_ADDR(nanov2_calloc);
nanozone->basic_zone.valloc = (void *)nanov2_valloc;
nanozone->basic_zone.free = OS_RESOLVED_VARIANT_ADDR(nanov2_free);
nanozone->basic_zone.realloc = OS_RESOLVED_VARIANT_ADDR(nanov2_realloc);
nanozone->basic_zone.destroy = (void *)nanov2_destroy;
nanozone->basic_zone.batch_malloc = OS_RESOLVED_VARIANT_ADDR(nanov2_batch_malloc);
nanozone->basic_zone.batch_free = OS_RESOLVED_VARIANT_ADDR(nanov2_batch_free);
nanozone->basic_zone.introspect =
(struct malloc_introspection_t *)&nanov2_introspect;
nanozone->basic_zone.memalign = (void *)nanov2_memalign;
nanozone->basic_zone.free_definite_size = OS_RESOLVED_VARIANT_ADDR(nanov2_free_definite_size);
nanozone->basic_zone.pressure_relief = OS_RESOLVED_VARIANT_ADDR(nanov2_pressure_relief);
nanozone->basic_zone.claimed_address = OS_RESOLVED_VARIANT_ADDR(nanov2_claimed_address);
nanozone->basic_zone.reserved1 = 0;
nanozone->basic_zone.reserved2 = 0;
// Protect the function vector against accidental (or malicious) writes.
mprotect(nanozone, sizeof(nanozone->basic_zone), PROT_READ);
if (debug_flags & MALLOC_ADD_GUARD_PAGES) {
malloc_report(ASL_LEVEL_INFO, "nano does not support guard pages\n");
debug_flags &= ~MALLOC_ADD_GUARD_PAGES;
}
nanozone->debug_flags = debug_flags;
nanozone->helper_zone = helper_zone;
// Derive the free-list cookie from the boot entropy, with a fixed
// fallback pattern if the masked entropy happens to be zero.
#define COOKIE_ENTROPY_MASK 0x0000ffffffff0000ULL
#define DEFAULT_ENTROPY_BITS 0x0000DEADDEAD0000ULL
uintptr_t cookie = (uintptr_t)malloc_entropy[0] & COOKIE_ENTROPY_MASK;
if (!cookie) {
cookie = malloc_entropy[1] & COOKIE_ENTROPY_MASK;
if (!cookie) {
cookie = DEFAULT_ENTROPY_BITS;
}
}
nanozone->slot_freelist_cookie = cookie;
nanozone->aslr_cookie = malloc_entropy[1] >> (64 - NANOV2_BLOCK_BITS);
nanozone->aslr_cookie_aligned = nanozone->aslr_cookie << NANOV2_OFFSET_BITS;
_malloc_lock_init(&nanozone->blocks_lock);
_malloc_lock_init(&nanozone->regions_lock);
_malloc_lock_init(&nanozone->madvise_lock);
// Preallocate the first region at the canonical Nano base address; if
// that fails, abandon Nano entirely.
nanov2_addr_t p = {.fields.nano_signature = NANOZONE_SIGNATURE};
nanov2_region_t *region = (nanov2_region_t *)p.addr;
boolean_t result = nanov2_allocate_region(region);
if (!result) {
nano_common_deallocate_pages(nanozone, NANOZONEV2_ZONE_PAGED_SIZE, 0);
_malloc_engaged_nano = NANO_NONE;
malloc_report(ASL_LEVEL_NOTICE, "nano zone abandoned due to inability "
"to preallocate reserved vm space.\n");
return NULL;
}
nanov2_region_linkage_t *region_linkage =
nanov2_region_linkage_for_region(nanozone, region);
region_linkage->next_region_offset = 0;
nanozone->first_region_base = region;
nanozone->current_region_base = region;
nanozone->current_region_next_arena = ((nanov2_arena_t *)region) + 1;
nanozone->current_region_limit = region + 1;
nanozone->statistics.allocated_regions = 1;
return (malloc_zone_t *)nanozone;
}
#endif // OS_VARIANT_NOTRESOLVED
#pragma mark -
#pragma mark Zone Fork Handling
#if OS_VARIANT_RESOLVED
// Post-fork malloc: Nano allocation is disabled in the child, so every
// request is satisfied by the helper zone.
void *
nanov2_forked_malloc(nanozonev2_t *nanozone, size_t size)
{
	malloc_zone_t *helper = nanozone->helper_zone;
	return helper->malloc(helper, size);
}
#endif // OS_VARIANT_RESOLVED
#if OS_VARIANT_NOTRESOLVED
// Post-fork calloc: delegates directly to the helper zone.
static void *
nanov2_forked_calloc(nanozonev2_t *nanozone, size_t num_items, size_t size)
{
	malloc_zone_t *helper = nanozone->helper_zone;
	return helper->calloc(helper, num_items, size);
}
#endif // OS_VARIANT_NOTRESOLVED
#if OS_VARIANT_RESOLVED
// Post-fork free. The zone's metadata may have been mid-update in another
// thread when fork() was called, so it cannot be trusted. Pointers that
// carry the Nano address signature are deliberately leaked (their block
// metadata cannot be safely modified in the child); anything else belongs
// to the helper zone and is freed there.
void
nanov2_forked_free(nanozonev2_t *nanozone, void *ptr)
{
	if (!ptr) {
		// Protect against malloc_zone_free() passing NULL.
		return;
	}
	// The previous implementation also called nanov2_pointer_size() and
	// tested "sz || signature", but a non-zero size already implies a valid
	// Nano signature, so the signature test alone fully decides the
	// outcome. Dropping the call avoids walking block metadata that fork()
	// may have left inconsistent.
	if (nanov2_has_valid_signature(ptr)) {
		// Nano pointer: drop it on the floor.
		return;
	}
	nanozone->helper_zone->free(nanozone->helper_zone, ptr);
}
// Post-fork free_definite_size: the size hint cannot be trusted against
// fork-fouled metadata, so simply route through the fork-safe free path.
void
nanov2_forked_free_definite_size(nanozonev2_t *nanozone, void *ptr, size_t size)
{
nanov2_forked_free(nanozone, ptr);
}
// Post-fork realloc. Nano-owned pointers cannot be resized or freed in
// the child, so their contents are copied into a fresh helper-zone
// allocation and the old slot is intentionally leaked. Non-Nano pointers
// are handed to the helper zone's realloc.
void *
nanov2_forked_realloc(nanozonev2_t *nanozone, void *ptr, size_t new_size)
{
	if (!ptr) {
		// realloc(NULL, n) behaves like malloc(n).
		return nanov2_forked_malloc(nanozone, new_size);
	}
	size_t old_size = nanov2_pointer_size(nanozone, ptr, FALSE);
	if (!old_size) {
		// Not a live Nano allocation -- the helper zone owns it.
		malloc_zone_t *zone = (malloc_zone_t *)(nanozone->helper_zone);
		return zone->realloc(zone, ptr, new_size);
	}
	if (!new_size) {
		// Match realloc(ptr, 0) semantics with a minimal allocation; the
		// old Nano slot is leaked deliberately post-fork.
		return nanov2_forked_malloc(nanozone, 1);
	}
	void *new_ptr = nanozone->helper_zone->malloc(nanozone->helper_zone,
			new_size);
	if (!new_ptr) {
		return NULL;
	}
	// Copy as much of the old data as fits into the new allocation.
	memcpy(new_ptr, ptr, MIN(old_size, new_size));
	return new_ptr;
}
#endif // OS_VARIANT_RESOLVED
#if OS_VARIANT_NOTRESOLVED
// Post-fork batch_malloc: delegates directly to the helper zone and
// returns the number of pointers it produced.
static unsigned
nanov2_forked_batch_malloc(nanozonev2_t *nanozone, size_t size, void **results,
		unsigned count)
{
	malloc_zone_t *helper = nanozone->helper_zone;
	return helper->batch_malloc(helper, size, results, count);
}
#endif // OS_VARIANT_NOTRESOLVED
#if OS_VARIANT_RESOLVED
// Post-fork batch_free: routes each non-NULL pointer through the
// fork-safe free path, walking the vector from the last entry to the
// first.
void
nanov2_forked_batch_free(nanozonev2_t *nanozone, void **to_be_freed,
		unsigned count)
{
	for (unsigned i = count; i > 0; i--) {
		void *ptr = to_be_freed[i - 1];
		if (ptr) {
			nanov2_forked_free(nanozone, ptr);
		}
	}
}
#endif // OS_VARIANT_RESOLVED
#if OS_VARIANT_NOTRESOLVED
// Post-fork claimed_address: conservatively claims every pointer, since
// the zone's metadata cannot be consulted reliably in the child.
static boolean_t
nanov2_forked_claimed_address(struct _malloc_zone_t *zone, void *ptr)
{
return true;
}
// Repairs the zone's function vector in the forked child: Nano's own
// metadata may be inconsistent after fork(), so the malloc/free entry
// points are replaced with fork-safe variants that delegate to the helper
// zone.
void
nanov2_forked_zone(nanozonev2_t *nanozone)
{
	// The basic zone structure is normally read-only; open it for writing
	// just long enough to install the forked entry points.
	mprotect(nanozone, sizeof(nanozone->basic_zone), PROT_READ | PROT_WRITE);
	nanozone->basic_zone.size = OS_RESOLVED_VARIANT_ADDR(nanov2_size);
	nanozone->basic_zone.malloc = OS_RESOLVED_VARIANT_ADDR(nanov2_forked_malloc);
	nanozone->basic_zone.calloc = (void *)nanov2_forked_calloc;
	nanozone->basic_zone.valloc = (void *)nanov2_valloc;
	nanozone->basic_zone.free = OS_RESOLVED_VARIANT_ADDR(nanov2_forked_free);
	nanozone->basic_zone.realloc = OS_RESOLVED_VARIANT_ADDR(nanov2_forked_realloc);
	nanozone->basic_zone.destroy = (void *)nanov2_destroy;
	nanozone->basic_zone.batch_malloc = (void *)nanov2_forked_batch_malloc;
	nanozone->basic_zone.batch_free = OS_RESOLVED_VARIANT_ADDR(nanov2_forked_batch_free);
	nanozone->basic_zone.introspect =
			(struct malloc_introspection_t *)&nanov2_introspect;
	nanozone->basic_zone.memalign = (void *)nanov2_memalign;
	nanozone->basic_zone.free_definite_size =
			OS_RESOLVED_VARIANT_ADDR(nanov2_forked_free_definite_size);
	nanozone->basic_zone.claimed_address = nanov2_forked_claimed_address;
	mprotect(nanozone, sizeof(nanozone->basic_zone), PROT_READ);
}
#endif // OS_VARIANT_NOTRESOLVED
#endif // CONFIG_NANOZONE