/* purgeable_malloc.c — purgeable memory zone implementation */
#include "internal.h"
/*
 * Report the allocation size of ptr if it is a large block owned by this
 * zone; yields 0 when the pointer is not found here (it may belong to the
 * helper zone instead).
 */
static size_t
purgeable_size(szone_t *szone, const void *ptr)
{
	size_t found = szone_size_try_large(szone, ptr);
	return found;
}
/*
 * Allocate size bytes.  Requests at or below the large threshold are routed
 * to the (non-purgeable) helper zone; only larger requests are served from
 * this zone's large allocator.
 */
static void *
purgeable_malloc(szone_t *szone, size_t size)
{
	szone_t *target = (size <= szone->large_threshold) ? szone->helper_zone : szone;
	return szone_malloc(target, size);
}
/*
 * calloc for the purgeable zone.  Computes num_items * size with an explicit
 * overflow check, then routes the zeroed allocation: totals at or below the
 * large threshold go to the helper zone, larger ones are served by this
 * zone.  Returns NULL on multiplication overflow or allocation failure.
 */
static void *
purgeable_calloc(szone_t *szone, size_t num_items, size_t size)
{
	/* total_bytes may wrap here; the checks below reject exactly the cases
	 * where the mathematical product does not fit in a size_t. */
	size_t total_bytes = num_items * size;
	/* Overflow is impossible when num_items is 0 or 1. */
	if (num_items > 1) {
#if __LP64__
		/* If both operands fit in 32 bits the 64-bit product cannot
		 * overflow; otherwise re-multiply at 128 bits and reject if any
		 * high-order bits are set. */
		if ((num_items | size) & 0xffffffff00000000ul) {
			__uint128_t product = ((__uint128_t)num_items) * ((__uint128_t)size);
			if ((uint64_t)(product >> 64)) {
				return NULL;
			}
		}
#else
		/* 32-bit analogue: operands both below 2^16 cannot overflow;
		 * otherwise inspect the high word of the 64-bit product. */
		if ((num_items | size) & 0xffff0000ul) {
			uint64_t product = ((uint64_t)num_items) * ((uint64_t)size);
			if ((uint32_t)(product >> 32)) {
				return NULL;
			}
		}
#endif
	}
	if (total_bytes <= szone->large_threshold) {
		return szone_calloc(szone->helper_zone, 1, total_bytes);
	} else {
		return szone_calloc(szone, 1, total_bytes);
	}
}
/*
 * Page-aligned allocation.  Small requests (at or below the large threshold)
 * are delegated to the helper zone; larger ones are served by this zone.
 */
static void *
purgeable_valloc(szone_t *szone, size_t size)
{
	if (size > szone->large_threshold) {
		return szone_valloc(szone, size);
	}
	return szone_valloc(szone->helper_zone, size);
}
/*
 * Free a pointer that may live either in this zone (as a large entry) or in
 * the helper zone.  The large-entry table is consulted under the zone lock
 * to decide which deallocator owns the pointer.
 *
 * Fix: the original used "return <void expression>;" inside a void
 * function, which violates C99 6.8.6.4p1 (accepted only as a compiler
 * extension); the calls are now plain statements.
 */
static void
purgeable_free(szone_t *szone, void *ptr)
{
	large_entry_t *entry;

	SZONE_LOCK(szone);
	entry = large_entry_for_pointer_no_lock(szone, ptr);
	SZONE_UNLOCK(szone);
	if (entry) {
		free_large(szone, ptr);
	} else {
		szone_free(szone->helper_zone, ptr);
	}
}
/*
 * Free with a known size: the size alone determines which zone owns the
 * pointer (small blocks were always placed in the helper zone).
 *
 * Fix: the original used "return <void expression>;" inside a void
 * function, a C99 6.8.6.4p1 constraint violation; the calls are now plain
 * statements.
 */
static void
purgeable_free_definite_size(szone_t *szone, void *ptr, size_t size)
{
	if (size <= szone->large_threshold) {
		szone_free_definite_size(szone->helper_zone, ptr, size);
	} else {
		szone_free_definite_size(szone, ptr, size);
	}
}
/*
 * realloc for the purgeable zone.  The old block may live in either the
 * helper zone (small) or this zone (large), and the new size may move it
 * across that boundary, so four cases are handled:
 *   small -> small : plain helper-zone realloc
 *   small -> large : allocate large, copy old_size, free small
 *   large -> small : allocate small, copy new_size, free large
 *   large -> large : allocate, copy MIN(old, new), free old
 * Returns NULL (with the original block untouched) if the new allocation
 * fails, or if the pointer is unknown to both zones.
 */
static void *
purgeable_realloc(szone_t *szone, void *ptr, size_t new_size)
{
	size_t old_size;

	if (NULL == ptr) {
		/* realloc(NULL, size) behaves as malloc(size). */
		return purgeable_malloc(szone, new_size);
	} else if (0 == new_size) {
		/* realloc(ptr, 0): free and hand back a minimal live allocation. */
		purgeable_free(szone, ptr);
		return purgeable_malloc(szone, 1);
	}
	/* Determine the current size: first this zone's large table, then the
	 * helper zone. */
	old_size = purgeable_size(szone, ptr);
	if (!old_size) {
		old_size = szone_size(szone->helper_zone, ptr);
	}
	if (!old_size) {
		/* Pointer unknown to both zones: report and bail. */
		szone_error(szone, 1, "pointer being reallocated was not allocated", ptr, NULL);
		return NULL;
	}
	if (old_size <= szone->large_threshold) {
		if (new_size <= szone->large_threshold) {
			/* small -> small: stays entirely in the helper zone. */
			return szone_realloc(szone->helper_zone, ptr, new_size);
		} else {
			/* small -> large: move the block into the purgeable zone. */
			void *new_ptr = purgeable_malloc(szone, new_size);
			if (new_ptr) {
				/* old_size < new_size here, so the whole old block fits. */
				memcpy(new_ptr, ptr, old_size);
				szone_free_definite_size(szone->helper_zone, ptr, old_size);
			}
			return new_ptr;
		}
	} else {
		if (new_size <= szone->large_threshold) {
			/* large -> small: move the block back into the helper zone. */
			void *new_ptr = szone_malloc(szone->helper_zone, new_size);
			if (new_ptr) {
				/* new_size < old_size here, so only new_size bytes are kept. */
				memcpy(new_ptr, ptr, new_size);
				purgeable_free_definite_size(szone, ptr, old_size);
			}
			return new_ptr;
		} else {
			/* large -> large: fresh large allocation, keep the overlap. */
			void *new_ptr = purgeable_malloc(szone, new_size);
			if (new_ptr) {
				memcpy(new_ptr, ptr, MIN(old_size, new_size));
				purgeable_free_definite_size(szone, ptr, old_size);
			}
			return new_ptr;
		}
	}
}
/*
 * Tear down the zone: unmap every live large allocation, release the
 * large-entry bookkeeping table, then unmap the zone structure itself.
 * NOTE(review): no lock is taken here — presumably the caller guarantees
 * exclusive access during destroy; confirm against the zone API contract.
 */
static void
purgeable_destroy(szone_t *szone)
{
	size_t index = szone->num_large_entries;
	large_entry_t *large;
	vm_range_t range_to_deallocate;

	/* Walk the large-entry table backwards, unmapping each live block. */
	while (index--) {
		large = szone->large_entries + index;
		if (large->address) {
			deallocate_pages(szone, (void *)(large->address), large->size, szone->debug_flags);
		}
	}
	/* Free the table itself; it may hand back a range to unmap. */
	large_entries_free_no_lock(szone, szone->large_entries, szone->num_large_entries, &range_to_deallocate);
	if (range_to_deallocate.size) {
		deallocate_pages(szone, (void *)range_to_deallocate.address, (size_t)range_to_deallocate.size, 0);
	}
	/* Finally release the pages holding the szone structure itself. */
	deallocate_pages(szone, (void *)szone, SZONE_PAGED_SIZE, 0);
}
/*
 * Batch allocation: batched blocks are always small, so the whole request
 * is delegated to the helper zone.  Returns the number of blocks obtained.
 */
static unsigned
purgeable_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count)
{
	unsigned obtained = szone_batch_malloc(szone->helper_zone, size, results, count);
	return obtained;
}
/*
 * Batch free: the batch allocator only ever hands out helper-zone blocks,
 * so the whole list is returned to the helper zone.
 *
 * Fix: the original used "return <void expression>;" inside a void
 * function, a C99 6.8.6.4p1 constraint violation; the call is now a plain
 * statement.
 */
static void
purgeable_batch_free(szone_t *szone, void **to_be_freed, unsigned count)
{
	szone_batch_free(szone->helper_zone, to_be_freed, count);
}
/*
 * Aligned allocation: small requests go to the helper zone, large ones are
 * served by this zone, mirroring purgeable_malloc's routing.
 */
static void *
purgeable_memalign(szone_t *szone, size_t alignment, size_t size)
{
	szone_t *target = (size <= szone->large_threshold) ? szone->helper_zone : szone;
	return szone_memalign(target, alignment, size);
}
/*
 * Introspection: enumerate in-use pointers of a (possibly remote) purgeable
 * zone.  Reads the zone structure out of the target task with the supplied
 * reader, then walks only the large-entry table — small blocks live in the
 * helper zone and are enumerated there.
 */
static kern_return_t
purgeable_ptr_in_use_enumerator(task_t task,
    void *context,
    unsigned type_mask,
    vm_address_t zone_address,
    memory_reader_t reader,
    vm_range_recorder_t recorder)
{
	szone_t *szone;
	kern_return_t err;
	memory_reader_t read_fn = reader;

	/* Fall back to the in-process reader when none was supplied. */
	if (!read_fn) {
		read_fn = _szone_default_reader;
	}
	err = read_fn(task, zone_address, sizeof(szone_t), (void **)&szone);
	if (err) {
		return err;
	}
	return large_in_use_enumerator(task, context, type_mask,
	    (vm_address_t)szone->large_entries, szone->num_large_entries, read_fn, recorder);
}
/*
 * Report the rounded-up allocation size the zone would actually use for a
 * request of the given size, delegating by the same threshold rule as
 * purgeable_malloc.
 */
static size_t
purgeable_good_size(szone_t *szone, size_t size)
{
	szone_t *target = (size <= szone->large_threshold) ? szone->helper_zone : szone;
	return szone_good_size(target, size);
}
/*
 * Zone consistency check: this zone performs no validation of its own and
 * always reports success.
 */
static boolean_t
purgeable_check(szone_t *szone)
{
	return 1;
}
/*
 * Print a one-line summary of the zone's large-object usage.
 * NOTE(review): "%y" looks like a private _malloc_printf conversion (byte
 * count with units) — confirm against _malloc_printf's implementation.
 * The verbose flag is not consulted here.
 */
static void
purgeable_print(szone_t *szone, boolean_t verbose)
{
	_malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "Scalable zone %p: inUse=%u(%y) flags=%d\n", szone,
	    szone->num_large_objects_in_use, szone->num_bytes_in_large_objects, szone->debug_flags);
}
/*
 * Record the address whose malloc activity should be logged for this zone.
 * Takes the generic malloc_zone_t* form required by the zone vtable.
 */
static void
purgeable_log(malloc_zone_t *zone, void *log_address)
{
	((szone_t *)zone)->log_address = log_address;
}
/*
 * Acquire the zone lock unconditionally (used around fork() so the zone is
 * not mid-operation in the child).
 */
static void
purgeable_force_lock(szone_t *szone)
{
	SZONE_LOCK(szone);
}
/* Release the zone lock taken by purgeable_force_lock. */
static void
purgeable_force_unlock(szone_t *szone)
{
	SZONE_UNLOCK(szone);
}
/* Re-initialize the zone lock (e.g. in a child after fork). */
static void
purgeable_reinit_lock(szone_t *szone)
{
	SZONE_REINIT_LOCK(szone);
}
/*
 * Fill in zone statistics.  Only large objects are tracked by this zone, so
 * every byte counter reports the large-object total.
 */
static void
purgeable_statistics(szone_t *szone, malloc_statistics_t *stats)
{
	stats->blocks_in_use = szone->num_large_objects_in_use;
	stats->size_allocated = szone->num_bytes_in_large_objects;
	stats->max_size_in_use = stats->size_allocated;
	stats->size_in_use = stats->max_size_in_use;
}
/*
 * Probe whether the zone lock is currently held: try to take it, and if
 * that succeeds release it immediately and report "unlocked".
 */
static boolean_t
purgeable_locked(szone_t *szone)
{
	if (SZONE_TRY_LOCK(szone) == 0) {
		/* Could not acquire: someone else holds the lock. */
		return 1;
	}
	/* We got it, so it was free; put it back and report unlocked. */
	SZONE_UNLOCK(szone);
	return 0;
}
/*
 * Respond to memory pressure by asking both this zone and its helper zone
 * to release up to `goal` bytes; returns the combined amount reclaimed.
 */
static size_t
purgeable_pressure_relief(szone_t *szone, size_t goal)
{
	size_t reclaimed = szone_pressure_relief(szone, goal);
	reclaimed += szone_pressure_relief(szone->helper_zone, goal);
	return reclaimed;
}
/*
 * Introspection vtable for the purgeable zone.  Entries are positional per
 * struct malloc_introspection_t; the NULL slots are unimplemented hooks —
 * do not reorder.
 */
static const struct malloc_introspection_t purgeable_introspect = {
	(void *)purgeable_ptr_in_use_enumerator, (void *)purgeable_good_size, (void *)purgeable_check, (void *)purgeable_print,
	purgeable_log, (void *)purgeable_force_lock, (void *)purgeable_force_unlock, (void *)purgeable_statistics,
	(void *)purgeable_locked, NULL, NULL, NULL, NULL,
	(void *)purgeable_reinit_lock, };
/*
 * Create a purgeable malloc zone.  Allocations above the large threshold
 * are tracked by this zone's large allocator; smaller requests are forwarded
 * to malloc_default_zone (the "helper" zone).  Returns the new zone, or
 * NULL if the backing pages for the zone structure cannot be allocated.
 * NOTE(review): initial_size is not used in this function — confirm whether
 * that is intentional.
 */
malloc_zone_t *
create_purgeable_zone(size_t initial_size, malloc_zone_t *malloc_default_zone, unsigned debug_flags)
{
	szone_t *szone;
	uint64_t hw_memsize = 0;

	szone = allocate_pages(NULL, SZONE_PAGED_SIZE, 0, 0, VM_MEMORY_MALLOC);
	if (!szone) {
		return NULL;
	}
#if 0
#warning LOG enabled
	szone->log_address = ~0;
#endif
	/* Obtain physical memory size: read the comm page directly where it is
	 * exposed, otherwise fall back to sysctl. */
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__arm64__)
	hw_memsize = *(uint64_t *)(uintptr_t)_COMM_PAGE_MEMORY_SIZE;
#else
	size_t uint64_t_size = sizeof(hw_memsize);
	sysctlbyname("hw.memsize", &hw_memsize, &uint64_t_size, 0, 0);
#endif
	/* Link the tiny region-hash generations into a two-element cycle and
	 * point the zone at the first generation with its initial table. */
	szone->trg[0].nextgen = &(szone->trg[1]);
	szone->trg[1].nextgen = &(szone->trg[0]);
	szone->tiny_region_generation = &(szone->trg[0]);
	szone->tiny_region_generation->hashed_regions = szone->initial_tiny_regions;
	szone->tiny_region_generation->num_regions_allocated = INITIAL_NUM_REGIONS;
	szone->tiny_region_generation->num_regions_allocated_shift = INITIAL_NUM_REGIONS_SHIFT;
	/* Same two-generation setup for the small region hash. */
	szone->srg[0].nextgen = &(szone->srg[1]);
	szone->srg[1].nextgen = &(szone->srg[0]);
	szone->small_region_generation = &(szone->srg[0]);
	szone->small_region_generation->hashed_regions = szone->initial_small_regions;
	szone->small_region_generation->num_regions_allocated = INITIAL_NUM_REGIONS;
	szone->small_region_generation->num_regions_allocated_shift = INITIAL_NUM_REGIONS_SHIFT;
	szone->is_largemem = 0;
	szone->large_threshold = LARGE_THRESHOLD;
	szone->vm_copy_threshold = VM_COPY_THRESHOLD;
#if CONFIG_LARGE_CACHE
	/* Cap the large-entry cache at hw_memsize / 1024. */
	szone->large_entry_cache_reserve_limit =
	    hw_memsize >> 10;
	/* NOTE(review): binaries linked against libSystem older than major
	 * version 112 get legacy mprotect-reset behavior — confirm the exact
	 * version semantics against NSVersionOfLinkTimeLibrary. */
	int32_t libSystemVersion = NSVersionOfLinkTimeLibrary("System");
	if ((-1 != libSystemVersion) && ((libSystemVersion >> 16) < 112) ) {
		szone->large_legacy_reset_mprotect = TRUE;
	} else {
		szone->large_legacy_reset_mprotect = FALSE;
	}
#endif
	/* Populate the public malloc_zone_t vtable with the purgeable entry
	 * points defined above. */
	szone->basic_zone.version = 9;
	szone->basic_zone.size = (void *)purgeable_size;
	szone->basic_zone.malloc = (void *)purgeable_malloc;
	szone->basic_zone.calloc = (void *)purgeable_calloc;
	szone->basic_zone.valloc = (void *)purgeable_valloc;
	szone->basic_zone.free = (void *)purgeable_free;
	szone->basic_zone.realloc = (void *)purgeable_realloc;
	szone->basic_zone.destroy = (void *)purgeable_destroy;
	szone->basic_zone.batch_malloc = (void *)purgeable_batch_malloc;
	szone->basic_zone.batch_free = (void *)purgeable_batch_free;
	szone->basic_zone.introspect = (struct malloc_introspection_t *)&purgeable_introspect;
	szone->basic_zone.memalign = (void *)purgeable_memalign;
	szone->basic_zone.free_definite_size = (void *)purgeable_free_definite_size;
	szone->basic_zone.pressure_relief = (void *)purgeable_pressure_relief;
	szone->basic_zone.reserved1 = 0;
	szone->basic_zone.reserved2 = 0;
	/* Make the vtable page read-only to harden against overwrites. */
	mprotect(szone, sizeof(szone->basic_zone), PROT_READ);
	szone->debug_flags = debug_flags | MALLOC_PURGEABLE;
	/* Guard pages are not supported for this zone; strip the flag. */
	if (szone->debug_flags & MALLOC_ADD_GUARD_PAGES) {
		_malloc_printf(ASL_LEVEL_INFO, "purgeable zone does not support guard pages\n");
		szone->debug_flags &= ~MALLOC_ADD_GUARD_PAGES;
	}
	_malloc_lock_init(&szone->large_szone_lock);
	szone->helper_zone = (struct szone_s *)malloc_default_zone;
	CHECK(szone, __PRETTY_FUNCTION__);
	return (malloc_zone_t *)szone;
}