#ifndef __MAGAZINE_INLINE_H
#define __MAGAZINE_INLINE_H
extern unsigned int _os_cpu_number_override;
#define MALLOC_ABSOLUTE_MAX_SIZE (SIZE_T_MAX - (2 * PAGE_SIZE))
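/*
 * Compute the total size of a calloc-style allocation (num_items * size,
 * plus extra_size bytes of bookkeeping) into *total_size. Returns 0 on
 * success; on multiplication/addition overflow, or if the result would
 * exceed MALLOC_ABSOLUTE_MAX_SIZE, returns -1 with errno set to ENOMEM.
 */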
static MALLOC_INLINE int MALLOC_ALWAYS_INLINE
calloc_get_size(size_t num_items, size_t size, size_t extra_size, size_t *total_size)
{
size_t alloc_size = size;
if (num_items != 1 && (os_mul_overflow(num_items, size, &alloc_size)
|| alloc_size > MALLOC_ABSOLUTE_MAX_SIZE)) {
errno = ENOMEM;
return -1;
}
if (extra_size && (os_add_overflow(alloc_size, extra_size, &alloc_size)
|| alloc_size > MALLOC_ABSOLUTE_MAX_SIZE)) {
errno = ENOMEM;
return -1;
}
*total_size = alloc_size;
return 0;
}
#pragma mark forward decls
static MALLOC_INLINE uintptr_t free_list_gen_checksum(uintptr_t ptr) MALLOC_ALWAYS_INLINE;
static MALLOC_INLINE uintptr_t free_list_checksum_ptr(rack_t *rack, void *p) MALLOC_ALWAYS_INLINE;
static MALLOC_INLINE void *free_list_unchecksum_ptr(rack_t *rack, inplace_union *ptr) MALLOC_ALWAYS_INLINE;
static MALLOC_INLINE unsigned free_list_count(task_t task,
memory_reader_t reader, print_task_printer_t printer,
rack_t *mapped_rack, free_list_t ptr);
static MALLOC_INLINE void recirc_list_extract(rack_t *rack, magazine_t *mag_ptr, region_trailer_t *node) MALLOC_ALWAYS_INLINE;
static MALLOC_INLINE void recirc_list_splice_last(rack_t *rack, magazine_t *mag_ptr, region_trailer_t *node) MALLOC_ALWAYS_INLINE;
static MALLOC_INLINE void recirc_list_splice_first(rack_t *rack, magazine_t *mag_ptr, region_trailer_t *node) MALLOC_ALWAYS_INLINE;
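// Yield the processor for one tick with depressed priority, giving the
// holder of a contended lock a chance to make progress.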
static MALLOC_INLINE void
yield(void)
{
thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DEPRESS, 1);
}
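// Default memory_reader_t used when introspecting our own task: addresses
// are directly dereferenceable, so no copy is needed.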
static MALLOC_INLINE kern_return_t
_malloc_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr)
{
*ptr = (void *)address;
return KERN_SUCCESS;
}
#pragma mark helpers
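// Physical memory size in bytes: read from the commpage when available,
// otherwise queried via the hw.memsize sysctl.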
static MALLOC_INLINE MALLOC_ALWAYS_INLINE
uint64_t
platform_hw_memsize(void)
{
#if CONFIG_HAS_COMMPAGE_MEMSIZE
return *(uint64_t *)(uintptr_t)_COMM_PAGE_MEMORY_SIZE;
#else
uint64_t hw_memsize = 0;
size_t uint64_t_size = sizeof(hw_memsize);
(void)sysctlbyname("hw.memsize", &hw_memsize, &uint64_t_size, NULL, 0);
return hw_memsize;
#endif
}
static MALLOC_INLINE MALLOC_ALWAYS_INLINE
uint32_t
platform_cpu_count(void)
{
#if CONFIG_HAS_COMMPAGE_NCPUS
return *(uint8_t *)(uintptr_t)_COMM_PAGE_NCPUS;
#else
return (uint32_t)sysconf(_SC_NPROCESSORS_CONF);
#endif
}
#pragma mark szone locking
static MALLOC_INLINE MALLOC_ALWAYS_INLINE void
SZONE_LOCK(szone_t *szone)
{
_malloc_lock_lock(&szone->large_szone_lock);
}
static MALLOC_INLINE MALLOC_ALWAYS_INLINE void
SZONE_UNLOCK(szone_t *szone)
{
_malloc_lock_unlock(&szone->large_szone_lock);
}
static MALLOC_INLINE MALLOC_ALWAYS_INLINE bool
SZONE_TRY_LOCK(szone_t *szone)
{
return _malloc_lock_trylock(&szone->large_szone_lock);
}
static MALLOC_INLINE MALLOC_ALWAYS_INLINE void
SZONE_REINIT_LOCK(szone_t *szone)
{
_malloc_lock_init(&szone->large_szone_lock);
}
static MALLOC_INLINE MALLOC_ALWAYS_INLINE void
SZONE_MAGAZINE_PTR_LOCK(magazine_t *mag_ptr)
{
_malloc_lock_lock(&mag_ptr->magazine_lock);
}
static MALLOC_INLINE MALLOC_ALWAYS_INLINE void
SZONE_MAGAZINE_PTR_UNLOCK(magazine_t *mag_ptr)
{
_malloc_lock_unlock(&mag_ptr->magazine_lock);
}
static MALLOC_INLINE MALLOC_ALWAYS_INLINE bool
SZONE_MAGAZINE_PTR_TRY_LOCK(magazine_t *mag_ptr)
{
return _malloc_lock_trylock(&mag_ptr->magazine_lock);
}
static MALLOC_INLINE MALLOC_ALWAYS_INLINE void
SZONE_MAGAZINE_PTR_REINIT_LOCK(magazine_t *mag_ptr)
{
_malloc_lock_init(&mag_ptr->magazine_lock);
}
#pragma mark free list
static MALLOC_NOINLINE void
free_list_checksum_botch(rack_t *rack, void *ptr, void *value)
{
malloc_zone_error(rack->debug_flags, true,
"Incorrect checksum for freed object %p: "
"probably modified after being freed.\n"
"Corrupt value: %p\n", ptr, value);
}
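// Fold a pointer into a small checksum by summing its bytes; only the low
// nybble of the result is stored in free-list pointers.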
static MALLOC_INLINE uintptr_t
free_list_gen_checksum(uintptr_t ptr)
{
uint8_t chk;
chk = (unsigned char)(ptr >> 0);
chk += (unsigned char)(ptr >> 8);
chk += (unsigned char)(ptr >> 16);
chk += (unsigned char)(ptr >> 24);
#if __LP64__
chk += (unsigned char)(ptr >> 32);
chk += (unsigned char)(ptr >> 40);
chk += (unsigned char)(ptr >> 48);
chk += (unsigned char)(ptr >> 56);
#endif
return chk;
}
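// Count the entries on a free list, possibly in another task. Each node is
// mapped into our address space with the supplied reader before its next
// pointer is unchecksummed and followed.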
static unsigned
free_list_count(task_t task, memory_reader_t reader,
print_task_printer_t printer, rack_t *mapped_rack, free_list_t ptr)
{
unsigned int count = 0;
inplace_free_entry_t mapped_inplace_free_entry;
while (ptr.p) {
count++;
if (reader(task, (vm_address_t)ptr.inplace, sizeof(*ptr.inplace),
(void **)&mapped_inplace_free_entry)) {
printer("** invalid pointer in free list: %p\n", ptr.inplace);
break;
}
ptr.p = free_list_unchecksum_ptr(mapped_rack, &mapped_inplace_free_entry->next);
}
return count;
}
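/*
 * Free-list pointers are stored rotated right by one nybble, with a 4-bit
 * checksum of the pointer (mixed with the per-rack cookie) in the vacated
 * top nybble. A checksum mismatch at unchecksum time indicates the block
 * was modified after being freed.
 */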
#define NYBBLE 4
#if __LP64__
#define ANTI_NYBBLE (64 - NYBBLE)
#else
#define ANTI_NYBBLE (32 - NYBBLE)
#endif
static MALLOC_INLINE uintptr_t
free_list_checksum_ptr(rack_t *rack, void *ptr)
{
uintptr_t p = (uintptr_t)ptr;
return (p >> NYBBLE) | ((free_list_gen_checksum(p ^ rack->cookie) & (uintptr_t)0xF) << ANTI_NYBBLE);
}
static MALLOC_INLINE void *
free_list_unchecksum_ptr(rack_t *rack, inplace_union *ptr)
{
inplace_union p;
uintptr_t t = ptr->u;
t = (t << NYBBLE) | (t >> ANTI_NYBBLE); // compiler generates a rotate instruction
p.u = t & ~(uintptr_t)0xF;
if ((t ^ free_list_gen_checksum(p.u ^ rack->cookie)) & (uintptr_t)0xF) {
free_list_checksum_botch(rack, ptr, (void *)ptr->u);
__builtin_trap();
}
return p.p;
}
#undef ANTI_NYBBLE
#undef NYBBLE
#pragma mark recirc helpers
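// Doubly-linked list maintenance for a magazine's recirculation list of
// region trailers; callers hold the relevant magazine lock.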
static MALLOC_INLINE void
recirc_list_extract(rack_t *rack, magazine_t *mag_ptr, region_trailer_t *node)
{
if (NULL == node->prev) {
mag_ptr->firstNode = node->next;
} else {
node->prev->next = node->next;
}
if (NULL == node->next) {
mag_ptr->lastNode = node->prev;
} else {
node->next->prev = node->prev;
}
node->next = node->prev = NULL;
mag_ptr->recirculation_entries--;
}
static MALLOC_INLINE void
recirc_list_splice_last(rack_t *rack, magazine_t *mag_ptr, region_trailer_t *node)
{
if (NULL == mag_ptr->lastNode) {
mag_ptr->firstNode = node;
node->prev = NULL;
} else {
node->prev = mag_ptr->lastNode;
mag_ptr->lastNode->next = node;
}
mag_ptr->lastNode = node;
node->next = NULL;
node->recirc_suitable = FALSE;
mag_ptr->recirculation_entries++;
}
static MALLOC_INLINE void
recirc_list_splice_first(rack_t *rack, magazine_t *mag_ptr, region_trailer_t *node)
{
if (NULL == mag_ptr->firstNode) {
mag_ptr->lastNode = node;
node->next = NULL;
} else {
node->next = mag_ptr->firstNode;
mag_ptr->firstNode->prev = node;
}
mag_ptr->firstNode = node;
node->prev = NULL;
node->recirc_suitable = FALSE;
mag_ptr->recirculation_entries++;
}
#pragma mark region hash
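/*
 * Open-addressing hash ring over region base addresses, using linear
 * probing. The multipliers are 2^64/phi and 2^32/phi (Fibonacci hashing);
 * `shift` selects how many of the product's top bits form the index.
 */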
static MALLOC_INLINE rgnhdl_t
hash_lookup_region_no_lock(region_t *regions, size_t num_entries, size_t shift, region_t r)
{
size_t index, hash_index;
rgnhdl_t entry;
if (!num_entries) {
return 0;
}
#if __LP64__
index = hash_index = (((uintptr_t)r >> HASH_BLOCKS_ALIGN) * 11400714819323198549ULL) >> (64 - shift);
#else
index = hash_index = (((uintptr_t)r >> HASH_BLOCKS_ALIGN) * 2654435761UL) >> (32 - shift);
#endif
do {
entry = regions + index;
if (*entry == HASHRING_OPEN_ENTRY) {
return 0;
}
if (*entry == r) {
return entry;
}
if (++index == num_entries) {
index = 0;
}
} while (index != hash_index);
return 0;
}
static void
hash_region_insert_no_lock(region_t *regions, size_t num_entries, size_t shift, region_t r)
{
size_t index, hash_index;
rgnhdl_t entry;
#if __LP64__
index = hash_index = (((uintptr_t)r >> HASH_BLOCKS_ALIGN) * 11400714819323198549ULL) >> (64 - shift);
#else
index = hash_index = (((uintptr_t)r >> HASH_BLOCKS_ALIGN) * 2654435761UL) >> (32 - shift);
#endif
do {
entry = regions + index;
if (*entry == HASHRING_OPEN_ENTRY || *entry == HASHRING_REGION_DEALLOCATED) {
*entry = r;
return;
}
if (++index == num_entries) {
index = 0;
}
} while (index != hash_index);
}
static region_t *
hash_regions_alloc_no_lock(size_t num_entries)
{
size_t size = num_entries * sizeof(region_t);
return mvm_allocate_pages(round_page_quanta(size), 0, 0, VM_MEMORY_MALLOC);
}
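// Double the hash ring and rehash all live entries into the new table.
// The old table is not freed here; that is left to the caller.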
static MALLOC_INLINE region_t *
hash_regions_grow_no_lock(region_t *regions, size_t old_size, size_t *mutable_shift, size_t *new_size)
{
*new_size = old_size + old_size;
*mutable_shift = *mutable_shift + 1;
region_t *new_regions = hash_regions_alloc_no_lock(*new_size);
size_t index;
for (index = 0; index < old_size; ++index) {
region_t r = regions[index];
if (r != HASHRING_OPEN_ENTRY && r != HASHRING_REGION_DEALLOCATED) {
hash_region_insert_no_lock(new_regions, *new_size, *mutable_shift, r);
}
}
return new_regions;
}
#pragma mark mag index
extern unsigned int hyper_shift;
extern unsigned int phys_ncpus;
extern unsigned int logical_ncpus;
static MALLOC_INLINE MALLOC_ALWAYS_INLINE
unsigned int
mag_max_magazines(void)
{
return max_magazines;
}
static MALLOC_INLINE MALLOC_ALWAYS_INLINE
unsigned int
mag_max_medium_magazines(void)
{
return max_medium_magazines;
}
#pragma mark mag lock
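/*
 * Lock the magazine that currently owns a region. The trailer's mag_index
 * can change while we wait for the lock (regions migrate between
 * magazines), so re-read it after acquiring and chase the new owner until
 * the index is stable under the lock we hold.
 */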
static MALLOC_INLINE magazine_t *
mag_lock_zine_for_region_trailer(magazine_t *magazines, region_trailer_t *trailer, mag_index_t mag_index)
{
mag_index_t refreshed_index;
magazine_t *mag_ptr = &(magazines[mag_index]);
SZONE_MAGAZINE_PTR_LOCK(mag_ptr);
while (mag_index != (refreshed_index = trailer->mag_index)) {
SZONE_MAGAZINE_PTR_UNLOCK(mag_ptr);
mag_index = refreshed_index;
mag_ptr = &(magazines[mag_index]);
SZONE_MAGAZINE_PTR_LOCK(mag_ptr);
}
return mag_ptr;
}
#pragma mark Region Cookie
extern uint64_t malloc_entropy[2];
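// 16-bit cookie derived from malloc's startup entropy, stamped into every
// region trailer so that trailer corruption can be detected cheaply.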
static uint32_t
region_cookie(void)
{
return (uint32_t)(malloc_entropy[0] >> 8) & 0xffff;
}
static MALLOC_INLINE void
region_check_cookie(region_t region, region_trailer_t *trailer)
{
if (trailer->region_cookie != region_cookie()) {
malloc_zone_error(MALLOC_ABORT_ON_ERROR, true,
"Region cookie corrupted for region %p (value is %x)\n",
region, trailer->region_cookie);
__builtin_unreachable();
}
}
static MALLOC_INLINE void
region_set_cookie(region_trailer_t *trailer)
{
trailer->region_cookie = region_cookie();
}
#pragma mark tiny allocator
static MALLOC_INLINE region_t
tiny_region_for_ptr_no_lock(rack_t *rack, const void *ptr)
{
rgnhdl_t r = hash_lookup_region_no_lock(rack->region_generation->hashed_regions,
rack->region_generation->num_regions_allocated,
rack->region_generation->num_regions_allocated_shift,
TINY_REGION_FOR_PTR(ptr));
return r ? *r : NULL;
}
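// Size, in quanta, of a free tiny block. If the following block starts a new
// allocation (its header bit is set) or lies past the region end, no size was
// stored and this block is a single quantum.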
static MALLOC_INLINE msize_t
get_tiny_free_size_offset(const void *ptr, off_t mapped_offset)
{
void *next_block = (void *)((uintptr_t)ptr + TINY_QUANTUM);
void *region_end = TINY_REGION_END(TINY_REGION_FOR_PTR(ptr));
if (next_block < region_end) {
uint32_t *next_header = (uint32_t *)
((char *)TINY_BLOCK_HEADER_FOR_PTR(next_block) + mapped_offset);
msize_t next_index = TINY_INDEX_FOR_PTR(next_block);
if (!BITARRAY_BIT(next_header, next_index)) {
return TINY_FREE_SIZE((uintptr_t)ptr + mapped_offset);
}
}
return 1;
}
static MALLOC_INLINE msize_t
get_tiny_free_size(const void *ptr)
{
return get_tiny_free_size_offset(ptr, 0);
}
static MALLOC_INLINE msize_t
get_tiny_meta_header_offset(const void *ptr, off_t mapped_offset,
boolean_t *is_free)
{
uint32_t *block_header;
msize_t index;
block_header = (uint32_t *)((char *)TINY_BLOCK_HEADER_FOR_PTR(ptr) + mapped_offset);
index = TINY_INDEX_FOR_PTR(ptr);
msize_t midx = (index >> 5) << 1;
uint32_t mask = 1 << (index & 31);
*is_free = 0;
// Check whether this block begins an allocation at all.
if (0 == (block_header[midx] & mask)) {
return 0;
}
// The block exists; if its in-use bit (the odd word of the interleaved
// bitmap) is clear, the block is free and carries a stored size.
if (0 == (block_header[midx + 1] & mask)) {
*is_free = 1;
return get_tiny_free_size_offset(ptr, mapped_offset);
}
#if defined(__LP64__)
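// The block is in use, so msize is the distance to the next set header bit,
// guaranteed to lie within the next 64 bits. Those bits may span three
// uint32_t words of the interleaved bitmap; gather them into one uint64_t
// and measure the distance with ffs.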
uint32_t *addr = ((uint32_t *)block_header) + ((index >> 5) << 1);
uint32_t bitidx = index & 31;
uint64_t word_lo = addr[0];
uint64_t word_mid = addr[2];
uint64_t word_hi = addr[4];
uint64_t word_lomid = (word_lo >> bitidx) | (word_mid << (32 - bitidx));
uint64_t word = bitidx ? word_lomid | (word_hi << (64 - bitidx)) : word_lomid;
uint32_t result = __builtin_ffsl(word >> 1);
#else
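// 32-bit variant: the next header bit lies within the next 32 bits, which
// may span two words of the interleaved bitmap.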
uint32_t *addr = ((uint32_t *)block_header) + ((index >> 5) << 1);
uint32_t bitidx = index & 31;
uint32_t word = bitidx ? (addr[0] >> bitidx) | (addr[2] << (32 - bitidx)) : addr[0];
uint32_t result = __builtin_ffs(word >> 1);
#endif
return result;
}
static MALLOC_INLINE msize_t
get_tiny_meta_header(const void *ptr, boolean_t *is_free)
{
return get_tiny_meta_header_offset(ptr, 0, is_free);
}
#if CONFIG_RECIRC_DEPOT
static MALLOC_INLINE boolean_t
tiny_region_below_recirc_threshold(region_t region)
{
region_trailer_t *trailer = REGION_TRAILER_FOR_TINY_REGION(region);
return trailer->bytes_used < DENSITY_THRESHOLD(TINY_REGION_PAYLOAD_BYTES);
}
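// Emptiness discriminant: the magazine qualifies for recirculation when it
// holds more than 1.5 regions' worth of free space and its utilization is
// below the density threshold.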
static MALLOC_INLINE boolean_t
tiny_magazine_below_recirc_threshold(magazine_t *mag_ptr)
{
size_t a = mag_ptr->num_bytes_in_magazine; // Total bytes allocated to this magazine
size_t u = mag_ptr->mag_num_bytes_in_objects; // In use (malloc'd) from this magazine
return a - u > ((3 * TINY_REGION_PAYLOAD_BYTES) / 2)
&& u < DENSITY_THRESHOLD(a);
}
#endif // CONFIG_RECIRC_DEPOT
#pragma mark small allocator
static MALLOC_INLINE region_t
small_region_for_ptr_no_lock(rack_t *rack, const void *ptr)
{
rgnhdl_t r = hash_lookup_region_no_lock(rack->region_generation->hashed_regions,
rack->region_generation->num_regions_allocated, rack->region_generation->num_regions_allocated_shift,
SMALL_REGION_FOR_PTR(ptr));
return r ? *r : NULL;
}
#if CONFIG_RECIRC_DEPOT
static MALLOC_INLINE boolean_t
small_region_below_recirc_threshold(region_t region)
{
region_trailer_t *trailer = REGION_TRAILER_FOR_SMALL_REGION(region);
return trailer->bytes_used < DENSITY_THRESHOLD(SMALL_REGION_PAYLOAD_BYTES);
}
static MALLOC_INLINE boolean_t
small_magazine_below_recirc_threshold(magazine_t *mag_ptr)
{
size_t a = mag_ptr->num_bytes_in_magazine; // Total bytes allocated to this magazine
size_t u = mag_ptr->mag_num_bytes_in_objects; // In use (malloc'd) from this magazine
return a - u > ((3 * SMALL_REGION_PAYLOAD_BYTES) / 2)
&& u < DENSITY_THRESHOLD(a);
}
#endif // CONFIG_RECIRC_DEPOT
#pragma mark medium allocator
#if CONFIG_RECIRC_DEPOT
static MALLOC_INLINE boolean_t
medium_region_below_recirc_threshold(region_t region)
{
region_trailer_t *trailer = REGION_TRAILER_FOR_MEDIUM_REGION(region);
return trailer->bytes_used < DENSITY_THRESHOLD(MEDIUM_REGION_PAYLOAD_BYTES);
}
#endif // CONFIG_RECIRC_DEPOT
static MALLOC_INLINE region_t
medium_region_for_ptr_no_lock(rack_t *rack, const void *ptr)
{
rgnhdl_t r = hash_lookup_region_no_lock(rack->region_generation->hashed_regions,
rack->region_generation->num_regions_allocated, rack->region_generation->num_regions_allocated_shift,
MEDIUM_REGION_FOR_PTR(ptr));
return r ? *r : NULL;
}
#endif // __MAGAZINE_INLINE_H