#include <TargetConditionals.h>
#include "scalable_malloc.h"
#include "malloc_printf.h"
#include "_simple.h"
#include "magmallocProvider.h"
#include <pthread_internals.h>
#include <pthread.h>
#include <stdint.h>
#include <unistd.h>
#include <mach/vm_statistics.h>
#include <mach/mach_init.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/param.h>
#if defined(__i386__) || defined(__x86_64__)
#define __APPLE_API_PRIVATE
#include <machine/cpu_capabilities.h>
#define _COMM_PAGE_VERSION_REQD 9
#undef __APPLE_API_PRIVATE
#else
#include <sys/sysctl.h>
#endif
#include <libkern/OSAtomic.h>
#include <mach-o/dyld.h>
#include <mach-o/dyld_priv.h>
#include <crt_externs.h>
#include <mach/vm_param.h>
#include <sys/vmparam.h>
#include <CrashReporterClient.h>
#define DEBUG_MALLOC 0 // set to one to debug malloc itself
#define DEBUG_CLIENT 0 // set to one to debug malloc client
#define DEBUG_MADVISE 0
#define RELAXED_INVARIANT_CHECKS 1
#if DEBUG_MALLOC
#warning DEBUG_MALLOC ENABLED
# define INLINE
# define ALWAYSINLINE
# define CHECK_MAGAZINE_PTR_LOCKED(szone, mag_ptr, fun) \
do { \
if (__is_threaded && TRY_LOCK(mag_ptr->magazine_lock)) { \
malloc_printf("*** magazine_lock was not set %p in %s\n", \
mag_ptr->magazine_lock, fun); \
} \
} while (0)
#else
# define INLINE __inline__
# define ALWAYSINLINE __attribute__((always_inline))
# define CHECK_MAGAZINE_PTR_LOCKED(szone, mag_ptr, fun) {}
#endif
# define NOINLINE __attribute__((noinline))
#if defined(__i386__) || defined(__x86_64__)
#define CACHE_ALIGN __attribute__ ((aligned (128) ))
#elif defined(__ppc__) || defined(__ppc64__)
#define CACHE_ALIGN __attribute__ ((aligned (128) ))
#else
#define CACHE_ALIGN
#endif
#if !__LP64__
#define ASLR_INTERNAL 1
#endif
#define _vm_page_size vm_page_size
#define _vm_page_shift vm_page_shift
#define vm_page_size 4096
#define vm_page_shift 12
/* A block size expressed in quantum units (tiny: TINY_QUANTUM = 16 bytes,
 * small: SMALL_QUANTUM = 512 bytes), not in raw bytes. */
typedef unsigned short msize_t;
/* Overlay of a pointer and its integer form; free-list links are stored
 * in this shape so they can be checksummed (see free_list_checksum_ptr /
 * free_list_unchecksum_ptr below). */
typedef union {
void *p;
uintptr_t u;
} ptr_union;
/* Doubly-linked free-list node embedded at the start of each free block. */
typedef struct {
ptr_union previous;
ptr_union next;
} free_list_t;
typedef unsigned int grain_t; // free-list slot index (renamed "slot" elsewhere in comments)
typedef int mag_index_t; // index into a zone's magazine array; DEPOT_MAGAZINE_INDEX (-1) selects the depot
#define CHECK_REGIONS (1 << 31)
#define DISABLE_ASLR (1 << 30)
#define MAX_RECORDER_BUFFER 256
#define SHIFT_TINY_QUANTUM 4 // Required for AltiVec
#define TINY_QUANTUM (1 << SHIFT_TINY_QUANTUM)
#define FOLLOWING_TINY_PTR(ptr,msize) (((unsigned char *)(ptr)) + ((msize) << SHIFT_TINY_QUANTUM))
#ifdef __LP64__
#define NUM_TINY_SLOTS 64 // number of slots for free-lists
#else
#define NUM_TINY_SLOTS 32 // number of slots for free-lists
#endif
#define NUM_TINY_BLOCKS 64520
#define SHIFT_TINY_CEIL_BLOCKS 16 // ceil(log2(NUM_TINY_BLOCKS))
#define NUM_TINY_CEIL_BLOCKS (1 << SHIFT_TINY_CEIL_BLOCKS)
#define TINY_BLOCKS_ALIGN (SHIFT_TINY_CEIL_BLOCKS + SHIFT_TINY_QUANTUM) // 20
#define TINY_ENTROPY_BITS 15
#define TINY_ENTROPY_MASK ((1 << TINY_ENTROPY_BITS) - 1)
#if TINY_ENTROPY_MASK + NUM_TINY_SLOTS > NUM_TINY_BLOCKS
#error Too many entropy bits for tiny region requested
#endif
#define CEIL_NUM_TINY_BLOCKS_WORDS (((NUM_TINY_BLOCKS + 31) & ~31) >> 5)
#define TINY_METADATA_SIZE (sizeof(region_trailer_t) + sizeof(tiny_header_inuse_pair_t) * CEIL_NUM_TINY_BLOCKS_WORDS)
#define TINY_REGION_SIZE \
((NUM_TINY_BLOCKS * TINY_QUANTUM + TINY_METADATA_SIZE + vm_page_size - 1) & ~ (vm_page_size - 1))
#define TINY_METADATA_START (NUM_TINY_BLOCKS * TINY_QUANTUM)
#define TINY_REGION_ADDRESS(region) ((void *)(region))
#define TINY_REGION_END(region) ((void *)(((uintptr_t)(region)) + (NUM_TINY_BLOCKS * TINY_QUANTUM)))
#define TINY_REGION_FOR_PTR(_p) ((void *)((uintptr_t)(_p) & ~((1 << TINY_BLOCKS_ALIGN) - 1)))
#define TINY_BYTES_FOR_MSIZE(_m) ((_m) << SHIFT_TINY_QUANTUM)
#define TINY_MSIZE_FOR_BYTES(_b) ((_b) >> SHIFT_TINY_QUANTUM)
#ifdef __LP64__
# define TINY_FREE_SIZE(ptr) (((msize_t *)(ptr))[8])
#else
# define TINY_FREE_SIZE(ptr) (((msize_t *)(ptr))[4])
#endif
#define TINY_PREVIOUS_MSIZE(ptr) ((msize_t *)(ptr))[-1]
typedef uint32_t tiny_block_t[4]; // exactly one TINY_QUANTUM (16 bytes)
/* One 32-bit word of block-start ("header") bits paired with the matching
 * word of in-use bits; tiny-region metadata is an array of these pairs. */
typedef struct tiny_header_inuse_pair
{
uint32_t header;
uint32_t inuse;
} tiny_header_inuse_pair_t;
/* Bookkeeping carried by every tiny/small region: linkage on a magazine's
 * recirculation list plus usage accounting and the owning magazine. */
typedef struct region_trailer
{
struct region_trailer *prev; // previous region on the magazine's recirculation list
struct region_trailer *next; // next region on that list
boolean_t recirc_suitable; // region may be recirculated to the depot
boolean_t failedREUSE; // NOTE(review): presumably records a failed madvise-reuse attempt — confirm
volatile int pinned_to_depot; // NOTE(review): non-zero appears to pin the region in the depot — confirm
unsigned bytes_used; // bytes currently allocated out of this region (see BYTES_USED_FOR_*_REGION)
mag_index_t mag_index; // magazine that owns this region
} region_trailer_t;
/* Layout of a tiny region: NUM_TINY_BLOCKS payload quanta, then the
 * trailer, then the header/in-use bitmap pairs, padded out to
 * TINY_REGION_SIZE (a page multiple). */
typedef struct tiny_region
{
tiny_block_t blocks[NUM_TINY_BLOCKS];
region_trailer_t trailer;
tiny_header_inuse_pair_t pairs[CEIL_NUM_TINY_BLOCKS_WORDS];
uint8_t pad[TINY_REGION_SIZE - (NUM_TINY_BLOCKS * sizeof(tiny_block_t)) - TINY_METADATA_SIZE];
} *tiny_region_t;
#define REGION_TRAILER_FOR_TINY_REGION(r) (&(((tiny_region_t)(r))->trailer))
#define MAGAZINE_INDEX_FOR_TINY_REGION(r) (REGION_TRAILER_FOR_TINY_REGION(r)->mag_index)
#define BYTES_USED_FOR_TINY_REGION(r) (REGION_TRAILER_FOR_TINY_REGION(r)->bytes_used)
#define TINY_BLOCK_HEADER_FOR_PTR(_p) ((void *)&(((tiny_region_t)TINY_REGION_FOR_PTR(_p))->pairs))
#define TINY_INUSE_FOR_HEADER(_h) ((void *)&(((tiny_header_inuse_pair_t *)(_h))->inuse))
#define TINY_INDEX_FOR_PTR(_p) (((uintptr_t)(_p) >> SHIFT_TINY_QUANTUM) & (NUM_TINY_CEIL_BLOCKS - 1))
#define TINY_CACHE 1 // This governs a last-free cache of 1 that bypasses the free-list
#if ! TINY_CACHE
#warning TINY_CACHE turned off
#endif
#define TINY_REGION_PAYLOAD_BYTES (NUM_TINY_BLOCKS * TINY_QUANTUM)
#define SMALL_IS_FREE (1 << 15)
#define SHIFT_SMALL_QUANTUM (SHIFT_TINY_QUANTUM + 5) // 9
#define SMALL_QUANTUM (1 << SHIFT_SMALL_QUANTUM) // 512 bytes
#define FOLLOWING_SMALL_PTR(ptr,msize) (((unsigned char *)(ptr)) + ((msize) << SHIFT_SMALL_QUANTUM))
#define NUM_SMALL_SLOTS 32
#define NUM_SMALL_SLOTS_LARGEMEM 256
#define SMALL_BITMAP_WORDS 8
#define NUM_SMALL_BLOCKS 16320
#define SHIFT_SMALL_CEIL_BLOCKS 14 // ceil(log2(NUM_SMALL_BLOCKS))
#define NUM_SMALL_CEIL_BLOCKS (1 << SHIFT_SMALL_CEIL_BLOCKS)
#define SMALL_BLOCKS_ALIGN (SHIFT_SMALL_CEIL_BLOCKS + SHIFT_SMALL_QUANTUM) // 23
#define SMALL_ENTROPY_BITS 13
#define SMALL_ENTROPY_MASK ((1 << SMALL_ENTROPY_BITS) - 1)
#if SMALL_ENTROPY_MASK + NUM_SMALL_SLOTS > NUM_SMALL_BLOCKS
#error Too many entropy bits for small region requested
#endif
#define SMALL_METADATA_SIZE (sizeof(region_trailer_t) + NUM_SMALL_BLOCKS * sizeof(msize_t))
#define SMALL_REGION_SIZE \
((NUM_SMALL_BLOCKS * SMALL_QUANTUM + SMALL_METADATA_SIZE + vm_page_size - 1) & ~ (vm_page_size - 1))
#define SMALL_METADATA_START (NUM_SMALL_BLOCKS * SMALL_QUANTUM)
#define SMALL_REGION_ADDRESS(region) ((unsigned char *)region)
#define SMALL_REGION_END(region) (SMALL_REGION_ADDRESS(region) + (NUM_SMALL_BLOCKS * SMALL_QUANTUM))
#define SMALL_REGION_FOR_PTR(_p) ((void *)((uintptr_t)(_p) & ~((1 << SMALL_BLOCKS_ALIGN) - 1)))
#define SMALL_BYTES_FOR_MSIZE(_m) ((_m) << SHIFT_SMALL_QUANTUM)
#define SMALL_MSIZE_FOR_BYTES(_b) ((_b) >> SHIFT_SMALL_QUANTUM)
#define SMALL_PREVIOUS_MSIZE(ptr) ((msize_t *)(ptr))[-1]
typedef uint32_t small_block_t[SMALL_QUANTUM/sizeof(uint32_t)]; // exactly one SMALL_QUANTUM (512 bytes)
/* Layout of a small region: NUM_SMALL_BLOCKS payload quanta, the shared
 * region trailer, one msize_t of metadata per quantum, padded out to
 * SMALL_REGION_SIZE (a page multiple). */
typedef struct small_region
{
small_block_t blocks[NUM_SMALL_BLOCKS];
region_trailer_t trailer;
msize_t small_meta_words[NUM_SMALL_BLOCKS]; // per-quantum size in quanta; SMALL_IS_FREE or'd in when free
uint8_t pad[SMALL_REGION_SIZE - (NUM_SMALL_BLOCKS * sizeof(small_block_t)) - SMALL_METADATA_SIZE];
} *small_region_t;
#define REGION_TRAILER_FOR_SMALL_REGION(r) (&(((small_region_t)(r))->trailer))
#define MAGAZINE_INDEX_FOR_SMALL_REGION(r) (REGION_TRAILER_FOR_SMALL_REGION(r)->mag_index)
#define BYTES_USED_FOR_SMALL_REGION(r) (REGION_TRAILER_FOR_SMALL_REGION(r)->bytes_used)
#define SMALL_META_HEADER_FOR_PTR(_p) (((small_region_t)SMALL_REGION_FOR_PTR(_p))->small_meta_words)
#define SMALL_META_INDEX_FOR_PTR(_p) (((uintptr_t)(_p) >> SHIFT_SMALL_QUANTUM) & (NUM_SMALL_CEIL_BLOCKS - 1))
#define SMALL_METADATA_FOR_PTR(_p) (SMALL_META_HEADER_FOR_PTR(_p) + SMALL_META_INDEX_FOR_PTR(_p))
#define SMALL_PTR_IS_FREE(_p) (*SMALL_METADATA_FOR_PTR(_p) & SMALL_IS_FREE)
#define SMALL_PTR_SIZE(_p) (*SMALL_METADATA_FOR_PTR(_p) & ~SMALL_IS_FREE)
#define SMALL_CACHE 1
#if !SMALL_CACHE
#warning SMALL_CACHE turned off
#endif
#define SMALL_REGION_PAYLOAD_BYTES (NUM_SMALL_BLOCKS * SMALL_QUANTUM)
#define LARGE_THRESHOLD (15 * 1024) // strictly above this use "large"
#define LARGE_THRESHOLD_LARGEMEM (127 * 1024)
#if (LARGE_THRESHOLD > NUM_SMALL_SLOTS * SMALL_QUANTUM)
#error LARGE_THRESHOLD should always be less than NUM_SMALL_SLOTS * SMALL_QUANTUM
#endif
#if (LARGE_THRESHOLD_LARGEMEM > NUM_SMALL_SLOTS_LARGEMEM * SMALL_QUANTUM)
#error LARGE_THRESHOLD_LARGEMEM should always be less than NUM_SMALL_SLOTS_LARGEMEM * SMALL_QUANTUM
#endif
#define VM_COPY_THRESHOLD (40 * 1024)
#define VM_COPY_THRESHOLD_LARGEMEM (128 * 1024)
/* Hash-table entry describing one "large" allocation (strictly above the
 * zone's large_threshold).  did_madvise_reusable: NOTE(review) — presumably
 * whether the range was marked MADV_FREE_REUSABLE while sitting in the
 * large-entry death-row cache; confirm against the large-cache code. */
typedef struct {
vm_address_t address;
vm_size_t size;
boolean_t did_madvise_reusable;
} large_entry_t;
#if !TARGET_OS_EMBEDDED
#define LARGE_CACHE 1
#else
#define LARGE_CACHE 0
#endif
#if !LARGE_CACHE
#warning LARGE_CACHE turned off
#endif
#if defined(__LP64__)
#define LARGE_ENTRY_CACHE_SIZE 16
#define LARGE_CACHE_SIZE_LIMIT ((vm_size_t)0x80000000)
#else
#define LARGE_ENTRY_CACHE_SIZE 8
#define LARGE_CACHE_SIZE_LIMIT ((vm_size_t)0x02000000)
#endif
#define LARGE_CACHE_SIZE_ENTRY_LIMIT (LARGE_CACHE_SIZE_LIMIT/LARGE_ENTRY_CACHE_SIZE)
#define SZONE_FLOTSAM_THRESHOLD_LOW (1024 * 512)
#define SZONE_FLOTSAM_THRESHOLD_HIGH (1024 * 1024)
typedef void * region_t;
typedef region_t * rgnhdl_t;
#define INITIAL_NUM_REGIONS_SHIFT 6 // log2(INITIAL_NUM_REGIONS)
#define INITIAL_NUM_REGIONS (1 << INITIAL_NUM_REGIONS_SHIFT) // Must be a power of 2!
#define HASHRING_OPEN_ENTRY ((region_t) 0) // Initial value and sentinel marking end of collision chain
#define HASHRING_REGION_DEALLOCATED ((region_t)-1) // Region at this slot reclaimed by OS
#define HASH_BLOCKS_ALIGN TINY_BLOCKS_ALIGN // MIN( TINY_BLOCKS_ALIGN, SMALL_BLOCKS_ALIGN, ... )
/* One generation of the open-addressed region hash ring.  When the table
 * grows, a new generation is allocated and linked through nextgen; the
 * zone's *_region_generation pointer selects the current one. */
typedef struct region_hash_generation {
size_t num_regions_allocated;
size_t num_regions_allocated_shift; region_t *hashed_regions; struct region_hash_generation *nextgen;
} region_hash_generation_t;
/*
 * Per-magazine allocation state for the tiny and small subzones.  Each
 * magazine is owned by one lock; all fields below it are protected by
 * magazine_lock.
 *
 * FIX: the original text had the `#else` and `#endif` of the pad-sizing
 * conditional fused onto the ends of the field declarations, which is not
 * valid C (preprocessor directives must start their own line).  They are
 * restored to separate lines; no field or semantics changed.
 */
typedef struct {
	pthread_lock_t magazine_lock CACHE_ALIGN; // protects everything below
	volatile boolean_t alloc_underway; // NOTE(review): presumably set while a fresh region is being obtained — confirm
	void *mag_last_free; // last-free cache of one block that bypasses the free lists (see TINY_CACHE/SMALL_CACHE)
	region_t mag_last_free_rgn; // region containing mag_last_free
	free_list_t *mag_free_list[256]; // free-list heads, one per size slot (up to NUM_SMALL_SLOTS_LARGEMEM)
	unsigned mag_bitmap[8]; // 256 bits: which mag_free_list slots are non-empty
	size_t mag_bytes_free_at_end; // free bytes remaining at the end of mag_last_region
	size_t mag_bytes_free_at_start; // free bytes remaining at the start of mag_last_region
	region_t mag_last_region; // most recently acquired region for this magazine
	unsigned mag_num_objects; // live allocations served by this magazine
	size_t mag_num_bytes_in_objects; // bytes in those live allocations
	size_t num_bytes_in_magazine; // total payload capacity attached to this magazine
	unsigned recirculation_entries; // length of the trailer list below
	region_trailer_t *firstNode; // head of the recirculation list (see recirc_list_*)
	region_trailer_t *lastNode; // tail of the recirculation list
#if __LP64__
	uint64_t pad[48];
#else
	uint32_t pad[12];
#endif
} magazine_t;
#define TINY_MAX_MAGAZINES 32
#define TINY_MAGAZINE_PAGED_SIZE \
(((sizeof(magazine_t) * (TINY_MAX_MAGAZINES + 1)) + vm_page_size - 1) &\
~ (vm_page_size - 1))
#define SMALL_MAX_MAGAZINES 32
#define SMALL_MAGAZINE_PAGED_SIZE \
(((sizeof(magazine_t) * (SMALL_MAX_MAGAZINES + 1)) + vm_page_size - 1) &\
~ (vm_page_size - 1))
#define DEPOT_MAGAZINE_INDEX -1
/*
 * The scalable zone proper: the exported malloc_zone_t vtable (padded to a
 * page), followed by per-subzone (tiny / small / large) state, each group
 * guarded by its own cache-aligned lock.
 *
 * FIX: the original text had the `#endif` closing the LARGE_CACHE group
 * fused onto the end of the large_entry_cache_bytes declaration, which is
 * not valid C.  It is restored to its own line; no field changed.
 */
typedef struct szone_s {
	/* Must be first: clients hold a malloc_zone_t* that is cast back to szone_t*. */
	malloc_zone_t basic_zone;
	/* Pad basic_zone out to a whole page (NOTE(review): presumably so the
	 * function-pointer table can be page-protected — confirm). */
	uint8_t pad[vm_page_size - sizeof(malloc_zone_t)];
	pthread_key_t cpu_id_key;
	unsigned debug_flags; // SCALABLE_MALLOC_* plus CHECK_REGIONS / DISABLE_ASLR
	void *log_address; // see the LOG() macro

	/* Regions for tiny objects */
	pthread_lock_t tiny_regions_lock CACHE_ALIGN;
	size_t num_tiny_regions;
	size_t num_tiny_regions_dealloc;
	region_hash_generation_t *tiny_region_generation; // current hash generation
	region_hash_generation_t trg[2]; // storage alternated between generations
	int num_tiny_magazines;
	unsigned num_tiny_magazines_mask;
	int num_tiny_magazines_mask_shift;
	magazine_t *tiny_magazines; // magazine array; DEPOT_MAGAZINE_INDEX (-1) is the depot
#if TARGET_OS_EMBEDDED
	uintptr_t last_tiny_advise;
#endif

	/* Regions for small objects */
	pthread_lock_t small_regions_lock CACHE_ALIGN;
	size_t num_small_regions;
	size_t num_small_regions_dealloc;
	region_hash_generation_t *small_region_generation;
	region_hash_generation_t srg[2];
	unsigned num_small_slots; // NUM_SMALL_SLOTS or NUM_SMALL_SLOTS_LARGEMEM
	int num_small_magazines;
	unsigned num_small_magazines_mask;
	int num_small_magazines_mask_shift;
	magazine_t *small_magazines;
#if TARGET_OS_EMBEDDED
	uintptr_t last_small_advise;
#endif

	/* Large objects (strictly above large_threshold) */
	pthread_lock_t large_szone_lock CACHE_ALIGN;
	unsigned num_large_objects_in_use;
	unsigned num_large_entries;
	large_entry_t *large_entries; // hash table of in-use large allocations
	size_t num_bytes_in_large_objects;
#if LARGE_CACHE
	int large_entry_cache_oldest;
	int large_entry_cache_newest;
	large_entry_t large_entry_cache[LARGE_ENTRY_CACHE_SIZE]; // cache of recently freed large entries
	boolean_t large_legacy_reset_mprotect;
	size_t large_entry_cache_reserve_bytes;
	size_t large_entry_cache_reserve_limit;
	size_t large_entry_cache_bytes;
#endif
	unsigned is_largemem; // machine qualifies for the *_LARGEMEM thresholds
	unsigned large_threshold; // LARGE_THRESHOLD or LARGE_THRESHOLD_LARGEMEM
	unsigned vm_copy_threshold; // VM_COPY_THRESHOLD or VM_COPY_THRESHOLD_LARGEMEM
	uintptr_t cookie; // NOTE(review): entropy cookie, presumably for free-list checksumming — confirm
	region_t initial_tiny_regions[INITIAL_NUM_REGIONS]; // initial region-hash storage
	region_t initial_small_regions[INITIAL_NUM_REGIONS];
	struct szone_s *helper_zone; // NOTE(review): appears to back the purgeable zone — confirm
	boolean_t flotsam_enabled; // see SZONE_FLOTSAM_THRESHOLD_LOW/HIGH
} szone_t;
#define SZONE_PAGED_SIZE ((sizeof(szone_t) + vm_page_size - 1) & ~ (vm_page_size - 1))
#if DEBUG_MALLOC || DEBUG_CLIENT
static void szone_sleep(void);
#endif
__private_extern__ void malloc_error_break(void);
static NOINLINE void szone_error(szone_t *szone, int is_corruption, const char *msg, const void *ptr, const char *fmt, ...)
__printflike(5, 6);
static void protect(void *address, size_t size, unsigned protection, unsigned debug_flags);
static void *allocate_pages(szone_t *szone, size_t size, unsigned char align, unsigned debug_flags,
int vm_page_label);
static void *allocate_pages_securely(szone_t *szone, size_t size, unsigned char align,
int vm_page_label);
static void deallocate_pages(szone_t *szone, void *addr, size_t size, unsigned debug_flags);
#if TARGET_OS_EMBEDDED
static int madvise_free_range(szone_t *szone, region_t r, uintptr_t pgLo, uintptr_t pgHi, uintptr_t *last);
#else
static int madvise_free_range(szone_t *szone, region_t r, uintptr_t pgLo, uintptr_t pgHi);
#endif
static kern_return_t _szone_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr);
static INLINE mag_index_t mag_get_thread_index(szone_t *szone) ALWAYSINLINE;
static magazine_t *mag_lock_zine_for_region_trailer(szone_t *szone, magazine_t *magazines, region_trailer_t *trailer,
mag_index_t mag_index);
static INLINE rgnhdl_t hash_lookup_region_no_lock(region_t *regions, size_t num_entries, size_t shift, region_t r)
ALWAYSINLINE;
static void hash_region_insert_no_lock(region_t *regions, size_t num_entries, size_t shift, region_t r);
static region_t *hash_regions_alloc_no_lock(szone_t *szone, size_t num_entries);
static region_t *hash_regions_grow_no_lock(szone_t *szone, region_t *regions, size_t old_size,
size_t *mutable_shift, size_t *new_size);
static INLINE uintptr_t free_list_gen_checksum(uintptr_t ptr) ALWAYSINLINE;
static INLINE uintptr_t free_list_checksum_ptr(szone_t *szone, void *p) ALWAYSINLINE;
static INLINE void *free_list_unchecksum_ptr(szone_t *szone, ptr_union *ptr) ALWAYSINLINE;
static unsigned free_list_count(szone_t *szone, free_list_t *ptr);
static INLINE void recirc_list_extract(szone_t *szone, magazine_t *mag_ptr, region_trailer_t *node) ALWAYSINLINE;
static INLINE void recirc_list_splice_last(szone_t *szone, magazine_t *mag_ptr, region_trailer_t *node) ALWAYSINLINE;
static INLINE void recirc_list_splice_first(szone_t *szone, magazine_t *mag_ptr, region_trailer_t *node) ALWAYSINLINE;
static INLINE void BITARRAY_SET(uint32_t *bits, msize_t index) ALWAYSINLINE;
static INLINE void BITARRAY_CLR(uint32_t *bits, msize_t index) ALWAYSINLINE;
static INLINE boolean_t BITARRAY_BIT(uint32_t *bits, msize_t index) ALWAYSINLINE;
static msize_t get_tiny_free_size(const void *ptr);
static msize_t get_tiny_previous_free_msize(const void *ptr);
static INLINE msize_t get_tiny_meta_header(const void *ptr, boolean_t *is_free) ALWAYSINLINE;
static INLINE void set_tiny_meta_header_in_use(const void *ptr, msize_t msize) ALWAYSINLINE;
static INLINE void set_tiny_meta_header_in_use_1(const void *ptr) ALWAYSINLINE;
static INLINE void set_tiny_meta_header_middle(const void *ptr) ALWAYSINLINE;
static INLINE void set_tiny_meta_header_free(const void *ptr, msize_t msize) ALWAYSINLINE;
static INLINE boolean_t tiny_meta_header_is_free(const void *ptr) ALWAYSINLINE;
static INLINE void *tiny_previous_preceding_free(void *ptr, msize_t *prev_msize) ALWAYSINLINE;
static void tiny_free_list_add_ptr(szone_t *szone, magazine_t *tiny_mag_ptr, void *ptr, msize_t msize);
static void tiny_free_list_remove_ptr(szone_t *szone, magazine_t *tiny_mag_ptr, void *ptr, msize_t msize);
static INLINE region_t tiny_region_for_ptr_no_lock(szone_t *szone, const void *ptr) ALWAYSINLINE;
static void tiny_finalize_region(szone_t *szone, magazine_t *tiny_mag_ptr);
static int tiny_free_detach_region(szone_t *szone, magazine_t *tiny_mag_ptr, region_t r);
static size_t tiny_free_reattach_region(szone_t *szone, magazine_t *tiny_mag_ptr, region_t r);
static void tiny_free_scan_madvise_free(szone_t *szone, magazine_t *depot_ptr, region_t r);
static region_t tiny_free_try_depot_unmap_no_lock(szone_t *szone, magazine_t *depot_ptr, region_trailer_t *node);
static boolean_t tiny_free_do_recirc_to_depot(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index);
static region_t tiny_find_msize_region(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index, msize_t msize);
static boolean_t tiny_get_region_from_depot(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index, msize_t msize);
static INLINE boolean_t tiny_free_no_lock(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index, region_t region,
void *ptr, msize_t msize) ALWAYSINLINE;
static void *tiny_malloc_from_region_no_lock(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index,
msize_t msize, void *fresh_region);
static boolean_t tiny_try_realloc_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size);
static boolean_t tiny_check_region(szone_t *szone, region_t region);
static kern_return_t tiny_in_use_enumerator(task_t task, void *context, unsigned type_mask, szone_t *szone,
memory_reader_t reader, vm_range_recorder_t recorder);
static void *tiny_malloc_from_free_list(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index,
msize_t msize);
static INLINE void *tiny_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested) ALWAYSINLINE;
static INLINE void free_tiny(szone_t *szone, void *ptr, region_t tiny_region, size_t known_size) ALWAYSINLINE;
static void print_tiny_free_list(szone_t *szone);
static void print_tiny_region(boolean_t verbose, region_t region, size_t bytes_at_start, size_t bytes_at_end);
static boolean_t tiny_free_list_check(szone_t *szone, grain_t slot);
static INLINE void small_meta_header_set_is_free(msize_t *meta_headers, unsigned index, msize_t msize) ALWAYSINLINE;
static INLINE void small_meta_header_set_in_use(msize_t *meta_headers, msize_t index, msize_t msize) ALWAYSINLINE;
static INLINE void small_meta_header_set_middle(msize_t *meta_headers, msize_t index) ALWAYSINLINE;
static void small_free_list_add_ptr(szone_t *szone, magazine_t *small_mag_ptr, void *ptr, msize_t msize);
static void small_free_list_remove_ptr(szone_t *szone, magazine_t *small_mag_ptr, void *ptr, msize_t msize);
static INLINE region_t small_region_for_ptr_no_lock(szone_t *szone, const void *ptr) ALWAYSINLINE;
static void small_finalize_region(szone_t *szone, magazine_t *small_mag_ptr);
static int small_free_detach_region(szone_t *szone, magazine_t *small_mag_ptr, region_t r);
static size_t small_free_reattach_region(szone_t *szone, magazine_t *small_mag_ptr, region_t r);
static void small_free_scan_madvise_free(szone_t *szone, magazine_t *depot_ptr, region_t r);
static region_t small_free_try_depot_unmap_no_lock(szone_t *szone, magazine_t *depot_ptr, region_trailer_t *node);
static boolean_t small_free_do_recirc_to_depot(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index);
static region_t small_find_msize_region(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index, msize_t msize);
static boolean_t small_get_region_from_depot(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index, msize_t msize);
static INLINE boolean_t small_free_no_lock(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index, region_t region,
void *ptr, msize_t msize) ALWAYSINLINE;
static void *small_malloc_from_region_no_lock(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index,
msize_t msize, void *fresh_region);
static boolean_t small_try_realloc_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size);
static boolean_t small_check_region(szone_t *szone, region_t region);
static kern_return_t small_in_use_enumerator(task_t task, void *context, unsigned type_mask, szone_t *szone,
memory_reader_t reader, vm_range_recorder_t recorder);
static void *small_malloc_from_free_list(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index,
msize_t msize);
static INLINE void *small_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested) ALWAYSINLINE;
static INLINE void free_small(szone_t *szone, void *ptr, region_t small_region, size_t known_size) ALWAYSINLINE;
static void print_small_free_list(szone_t *szone);
static void print_small_region(szone_t *szone, boolean_t verbose, region_t region, size_t bytes_at_start, size_t bytes_at_end);
static boolean_t small_free_list_check(szone_t *szone, grain_t grain);
#if DEBUG_MALLOC
static void large_debug_print(szone_t *szone);
#endif
static large_entry_t *large_entry_for_pointer_no_lock(szone_t *szone, const void *ptr);
static void large_entry_insert_no_lock(szone_t *szone, large_entry_t range);
static INLINE void large_entries_rehash_after_entry_no_lock(szone_t *szone, large_entry_t *entry) ALWAYSINLINE;
static INLINE large_entry_t *large_entries_alloc_no_lock(szone_t *szone, unsigned num) ALWAYSINLINE;
static void large_entries_free_no_lock(szone_t *szone, large_entry_t *entries, unsigned num,
vm_range_t *range_to_deallocate);
static large_entry_t *large_entries_grow_no_lock(szone_t *szone, vm_range_t *range_to_deallocate);
static vm_range_t large_entry_free_no_lock(szone_t *szone, large_entry_t *entry);
static NOINLINE kern_return_t large_in_use_enumerator(task_t task, void *context,
unsigned type_mask, vm_address_t large_entries_address,
unsigned num_entries, memory_reader_t reader,
vm_range_recorder_t recorder);
static void *large_malloc(szone_t *szone, size_t num_pages, unsigned char alignment, boolean_t cleared_requested);
static NOINLINE void free_large(szone_t *szone, void *ptr);
static INLINE int large_try_realloc_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size) ALWAYSINLINE;
static NOINLINE void szone_free(szone_t *szone, void *ptr);
static NOINLINE void *szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested);
static NOINLINE void *szone_malloc(szone_t *szone, size_t size);
static NOINLINE void *szone_calloc(szone_t *szone, size_t num_items, size_t size);
static NOINLINE void *szone_valloc(szone_t *szone, size_t size);
static NOINLINE size_t szone_size_try_large(szone_t *szone, const void *ptr);
static NOINLINE size_t szone_size(szone_t *szone, const void *ptr);
static NOINLINE void *szone_realloc(szone_t *szone, void *ptr, size_t new_size);
static NOINLINE void *szone_memalign(szone_t *szone, size_t alignment, size_t size);
static NOINLINE void szone_free_definite_size(szone_t *szone, void *ptr, size_t size);
static NOINLINE unsigned szone_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count);
static NOINLINE void szone_batch_free(szone_t *szone, void **to_be_freed, unsigned count);
static void szone_destroy(szone_t *szone);
static NOINLINE size_t szone_good_size(szone_t *szone, size_t size);
static NOINLINE boolean_t szone_check_all(szone_t *szone, const char *function);
static boolean_t szone_check(szone_t *szone);
static kern_return_t szone_ptr_in_use_enumerator(task_t task, void *context,
unsigned type_mask, vm_address_t zone_address,
memory_reader_t reader, vm_range_recorder_t recorder);
static NOINLINE void szone_print(szone_t *szone, boolean_t verbose);
static void szone_log(malloc_zone_t *zone, void *log_address);
static void szone_force_lock(szone_t *szone);
static void szone_force_unlock(szone_t *szone);
static boolean_t szone_locked(szone_t *szone);
static void szone_statistics(szone_t *szone, malloc_statistics_t *stats);
static void purgeable_free(szone_t *szone, void *ptr);
static void *purgeable_malloc(szone_t *szone, size_t size);
static void *purgeable_calloc(szone_t *szone, size_t num_items, size_t size);
static void *purgeable_valloc(szone_t *szone, size_t size);
static size_t purgeable_size(szone_t *szone, const void *ptr);
static void *purgeable_realloc(szone_t *szone, void *ptr, size_t new_size);
static void *purgeable_memalign(szone_t *szone, size_t alignment, size_t size);
static void purgeable_free_definite_size(szone_t *szone, void *ptr, size_t size);
static unsigned purgeable_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count);
static void purgeable_batch_free(szone_t *szone, void **to_be_freed, unsigned count);
static void purgeable_destroy(szone_t *szone);
static size_t purgeable_good_size(szone_t *szone, size_t size);
static boolean_t purgeable_check(szone_t *szone);
static kern_return_t purgeable_ptr_in_use_enumerator(task_t task, void *context,
unsigned type_mask, vm_address_t zone_address,
memory_reader_t reader, vm_range_recorder_t recorder);
static void purgeable_print(szone_t *szone, boolean_t verbose);
static void purgeable_log(malloc_zone_t *zone, void *log_address);
static void purgeable_force_lock(szone_t *szone);
static void purgeable_force_unlock(szone_t *szone);
static boolean_t purgeable_locked(szone_t *szone);
static void purgeable_statistics(szone_t *szone, malloc_statistics_t *stats);
static void *frozen_malloc(szone_t *zone, size_t new_size);
static void *frozen_calloc(szone_t *zone, size_t num_items, size_t size);
static void *frozen_valloc(szone_t *zone, size_t new_size);
static void *frozen_realloc(szone_t *zone, void *ptr, size_t new_size);
static void frozen_free(szone_t *zone, void *ptr);
static void frozen_destroy(szone_t *zone);
static volatile uintptr_t entropic_address = 0;
static volatile uintptr_t entropic_limit = 0;
#define ENTROPIC_KABILLION 0x10000000
__private_extern__ uint64_t malloc_entropy[2];
#define SZONE_LOCK(szone) \
do { \
LOCK(szone->large_szone_lock); \
} while (0)
#define SZONE_UNLOCK(szone) \
do { \
UNLOCK(szone->large_szone_lock); \
} while (0)
#define SZONE_TRY_LOCK(szone) \
TRY_LOCK(szone->large_szone_lock);
#define SZONE_MAGAZINE_PTR_LOCK(szone, mag_ptr) \
do { \
LOCK(mag_ptr->magazine_lock); \
} while(0)
#define SZONE_MAGAZINE_PTR_UNLOCK(szone, mag_ptr) \
do { \
UNLOCK(mag_ptr->magazine_lock); \
} while(0)
#define SZONE_MAGAZINE_PTR_TRY_LOCK(szone, mag_ptr) \
TRY_LOCK(mag_ptr->magazine_lock);
#if DEBUG_MALLOC
# define LOG(szone,ptr) \
(szone->log_address && (((uintptr_t)szone->log_address == -1) || \
(szone->log_address == (void *)(ptr))))
#else
# define LOG(szone,ptr) 0
#endif
#if DEBUG_MALLOC || DEBUG_CLIENT
# define CHECK(szone,fun) \
if ((szone)->debug_flags & CHECK_REGIONS) \
szone_check_all(szone, fun)
#else
# define CHECK(szone,fun) \
do {} while (0)
#endif
/*
 * Debugging hook run after an error has been reported: honor the
 * MallocErrorStop / MallocErrorSleep environment variables by stopping
 * (SIGSTOP) or stalling the process so a debugger can attach.
 */
static void
szone_sleep(void)
{
	if (getenv("MallocErrorStop") != NULL) {
		_malloc_printf(ASL_LEVEL_NOTICE, "*** sending SIGSTOP to help debug\n");
		kill(getpid(), SIGSTOP);
		return;
	}
	if (getenv("MallocErrorSleep") != NULL) {
		_malloc_printf(ASL_LEVEL_NOTICE, "*** sleeping to help debug\n");
		sleep(3600); /* one hour */
	}
}
/*
 * Report a malloc error for zone `szone` (which may be NULL, e.g. for
 * errors raised before a zone exists).  The message is built from `msg`,
 * the offending pointer `ptr` (optional), and an optional printf-style
 * prefix (`fmt`, ...).  Depending on the zone's debug flags, the process
 * may be stopped/slept (szone_sleep) or aborted with the message handed
 * to CrashReporter.
 *
 * FIX: szone->debug_flags was previously dereferenced unconditionally in
 * the abort decision even though szone may be NULL (the unlock above is
 * already guarded) — a NULL-pointer dereference on the error path.
 */
static NOINLINE void
szone_error(szone_t *szone, int is_corruption, const char *msg, const void *ptr, const char *fmt, ...)
{
	va_list ap;
	_SIMPLE_STRING b = _simple_salloc();

	if (szone)
		SZONE_UNLOCK(szone); /* drop the zone lock before any printing */
	if (b) {
		if (fmt) {
			va_start(ap, fmt);
			_simple_vsprintf(b, fmt, ap);
			va_end(ap);
		}
		if (ptr) {
			_simple_sprintf(b, "*** error for object %p: %s\n", ptr, msg);
		} else {
			_simple_sprintf(b, "*** error: %s\n", msg);
		}
		malloc_printf("%s*** set a breakpoint in malloc_error_break to debug\n", _simple_string(b));
	} else {
		/* _simple_salloc failed: fall back to direct, non-logging output. */
		if (fmt) {
			va_start(ap, fmt);
			_malloc_vprintf(MALLOC_PRINTF_NOLOG, fmt, ap);
			va_end(ap);
		}
		if (ptr) {
			_malloc_printf(MALLOC_PRINTF_NOLOG, "*** error for object %p: %s\n", ptr, msg);
		} else {
			_malloc_printf(MALLOC_PRINTF_NOLOG, "*** error: %s\n", msg);
		}
		_malloc_printf(MALLOC_PRINTF_NOLOG, "*** set a breakpoint in malloc_error_break to debug\n");
	}
	malloc_error_break();
#if DEBUG_MALLOC
	szone_print(szone, 1);
#endif
	szone_sleep();
	if (szone &&
	    ((is_corruption && (szone->debug_flags & SCALABLE_MALLOC_ABORT_ON_CORRUPTION)) ||
	     (szone->debug_flags & SCALABLE_MALLOC_ABORT_ON_ERROR))) {
		CRSetCrashLogMessage(b ? _simple_string(b) : msg);
		abort(); /* b intentionally not freed: we never return */
	} else if (b) {
		_simple_sfree(b);
	}
}
/*
 * Set the protection of the guard pages bracketing [address, address+size)
 * to `protection` (typically PROT_NONE to arm them).  The prelude and/or
 * postlude guard can be skipped via the corresponding
 * SCALABLE_MALLOC_DONT_PROTECT_* debug flags.  Failures are reported but
 * not fatal.
 *
 * FIX: the failure messages formatted the unsigned `protection` with %p
 * and passed raw integers for the %p address arguments — both undefined
 * behavior for a printf-style varargs call.  protection now uses %u and
 * the addresses are cast to void *.
 */
static void
protect(void *address, size_t size, unsigned protection, unsigned debug_flags)
{
	kern_return_t err;

	if (!(debug_flags & SCALABLE_MALLOC_DONT_PROTECT_PRELUDE)) {
		err = mprotect((void *)((uintptr_t)address - vm_page_size), vm_page_size, protection);
		if (err) {
			malloc_printf("*** can't protect(%u) region for prelude guard page at %p\n",
			    protection, (void *)((uintptr_t)address - (1 << vm_page_shift)));
		}
	}
	if (!(debug_flags & SCALABLE_MALLOC_DONT_PROTECT_POSTLUDE)) {
		err = mprotect((void *)((uintptr_t)address + size), vm_page_size, protection);
		if (err) {
			malloc_printf("*** can't protect(%u) region for postlude guard page at %p\n",
			    protection, (void *)((uintptr_t)address + size));
		}
	}
}
/*
 * Allocate `size` bytes of fresh zero-filled pages with mmap.
 *
 * align          log2 of the requested alignment; 0 means page alignment
 *                only.  When set, an over-sized mapping is made and the
 *                misaligned head/tail are unmapped.
 * debug_flags    SCALABLE_MALLOC_ADD_GUARD_PAGES brackets the result with
 *                guard pages (incompatible with align, see below);
 *                SCALABLE_MALLOC_PURGEABLE requests VM_FLAGS_PURGABLE.
 * vm_page_label  VM tag for the mapping (wrapped by VM_MAKE_TAG).
 *
 * Returns NULL on failure after reporting through szone_error.
 */
static void *
allocate_pages(szone_t *szone, size_t size, unsigned char align, unsigned debug_flags, int vm_page_label)
{
void *vm_addr;
uintptr_t addr = 0, aligned_address;
boolean_t add_guard_pages = debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES;
boolean_t purgeable = debug_flags & SCALABLE_MALLOC_PURGEABLE;
size_t allocation_size = round_page(size);
size_t delta;
int alloc_flags = VM_MAKE_TAG(vm_page_label);
/* Guard pages are incompatible with the head/tail trimming done for
 * alignment, so alignment wins. */
if (align) add_guard_pages = 0; if (!allocation_size) allocation_size = 1 << vm_page_shift;
if (add_guard_pages) allocation_size += 2 * (1 << vm_page_shift);
if (align) allocation_size += (size_t)1 << align;
if (purgeable) alloc_flags |= VM_FLAGS_PURGABLE;
if (allocation_size < size) return NULL; /* size_t overflow from the adjustments above */
/* NOTE(review): alloc_flags (the VM_MAKE_TAG value) is passed in the fd
 * slot — Darwin encodes the VM tag there for MAP_ANON mappings; confirm. */
vm_addr = mmap(0 ,
allocation_size ,
PROT_READ | PROT_WRITE ,
MAP_ANON | MAP_PRIVATE ,
alloc_flags ,
0 );
if ((uintptr_t)vm_addr == -1) {
szone_error(szone, 0, "can't allocate region", NULL, "*** mmap(size=%lu) failed (error code=%d)\n",
allocation_size, errno);
return NULL;
}
addr = (uintptr_t)vm_addr;
if (align) {
/* Round up to the requested alignment, then give the unused head and
 * tail of the over-sized mapping back to the kernel. */
aligned_address = (addr + ((uintptr_t)1 << align) - 1) & ~ (((uintptr_t)1 << align) - 1);
if (aligned_address != addr) {
delta = aligned_address - addr;
if (munmap((void *)addr, delta) == -1)
malloc_printf("*** munmap unaligned header failed with %d\n", errno);
addr = aligned_address;
allocation_size -= delta;
}
if (allocation_size > size) {
if (munmap((void *)(addr + size), allocation_size - size) == -1)
malloc_printf("*** munmap unaligned footer failed with %d\n", errno);
}
}
if (add_guard_pages) {
/* Skip past the leading guard page; protect() arms both guards. */
addr += (uintptr_t)1 << vm_page_shift;
protect((void *)addr, size, PROT_NONE, debug_flags);
}
return (void *)addr;
}
/*
 * Like allocate_pages(), but targets a randomized ("entropic") address
 * window so malloc regions get ASLR.  Falls back to allocate_pages() when
 * the zone has DISABLE_ASLR set.  No guard-page or purgeable support here.
 */
static void *
allocate_pages_securely(szone_t *szone, size_t size, unsigned char align, int vm_page_label)
{
void *vm_addr;
uintptr_t addr, aligned_address;
size_t delta, allocation_size = MAX(round_page(size), vm_page_size);
int alloc_flags = VM_MAKE_TAG(vm_page_label);
if (szone->debug_flags & DISABLE_ASLR)
return allocate_pages(szone, size, align, 0, vm_page_label);
if (align)
allocation_size += (size_t)1 << align;
if (allocation_size < size) return NULL; /* size_t overflow */
retry:
/* entropic_address is only a placement hint for mmap. */
vm_addr = mmap((void *)entropic_address ,
allocation_size ,
PROT_READ | PROT_WRITE ,
MAP_ANON | MAP_PRIVATE ,
alloc_flags ,
0 );
if (MAP_FAILED == vm_addr) {
szone_error(szone, 0, "can't allocate region securely", NULL, "*** mmap(size=%lu) failed (error code=%d)\n",
size, errno);
return NULL;
}
addr = (uintptr_t)vm_addr;
/* Mapping ran past the entropic window: slide the window down by
 * ENTROPIC_KABILLION (u < t guards against wrap-around), drop this
 * mapping, and retry.  The CAS tolerates racing threads. */
if (addr + allocation_size > entropic_limit) { uintptr_t t = entropic_address;
uintptr_t u = t - ENTROPIC_KABILLION;
if (u < t) { munmap((void *)addr, allocation_size);
(void)__sync_bool_compare_and_swap(&entropic_address, t, u); goto retry;
}
}
/* Mapping landed below the hint: opportunistically lower the hint too,
 * keeping future placements spreading downward. */
if (addr < entropic_address) { uintptr_t t = entropic_address;
uintptr_t u = t - ENTROPIC_KABILLION;
if (u < t)
(void)__sync_bool_compare_and_swap(&entropic_address, t, u); }
if (align) {
/* Trim the over-sized mapping down to an aligned [addr, addr+size). */
aligned_address = (addr + ((uintptr_t)1 << align) - 1) & ~ (((uintptr_t)1 << align) - 1);
if (aligned_address != addr) {
delta = aligned_address - addr;
if (munmap((void *)addr, delta) == -1)
malloc_printf("*** munmap unaligned header failed with %d\n", errno);
addr = aligned_address;
allocation_size -= delta;
}
if (allocation_size > size) {
if (munmap((void *)(addr + size), allocation_size - size) == -1)
malloc_printf("*** munmap unaligned footer failed with %d\n", errno);
}
}
return (void *)addr;
}
// Unmap a range previously produced by allocate_pages(). When the zone was
// created with guard pages, the mapping is one page wider on each side, so
// widen the range before unmapping. Reports munmap failure through
// szone_error when a zone is available.
static void
deallocate_pages(szone_t *szone, void *addr, size_t size, unsigned debug_flags)
{
	if (debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) {
		uintptr_t page = (uintptr_t)1 << vm_page_shift;
		addr = (void *)((uintptr_t)addr - page);
		size += 2 * page;
	}
	if (munmap(addr, size) == -1 && szone)
		szone_error(szone, 0, "Can't deallocate_pages region", addr, NULL);
}
// Advise the kernel that the page span [pgLo, pgHi) inside region r no
// longer needs its contents: MADV_FREE on embedded targets,
// MADV_FREE_REUSABLE elsewhere. On embedded, *last memoizes the previous
// span start so back-to-back calls on the same span are skipped.
// Always returns 0; madvise failure is only reported under DEBUG_MADVISE.
static int
#if TARGET_OS_EMBEDDED
madvise_free_range(szone_t *szone, region_t r, uintptr_t pgLo, uintptr_t pgHi, uintptr_t *last)
#else
madvise_free_range(szone_t *szone, region_t r, uintptr_t pgLo, uintptr_t pgHi)
#endif
{
if (pgHi > pgLo) {
size_t len = pgHi - pgLo;
#if DEBUG_MALLOC
// Scribble the pages being discarded so stale reads are conspicuous.
if (szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE)
memset((void *)pgLo, 0xed, len);
#endif
#if TARGET_OS_EMBEDDED
if (last) {
if (*last == pgLo)
return 0;
*last = pgLo;
}
#endif
// Fire the dtrace probe before the actual madvise call.
MAGMALLOC_MADVFREEREGION((void *)szone, (void *)r, (void *)pgLo, len);
#if TARGET_OS_EMBEDDED
if (-1 == madvise((void *)pgLo, len, MADV_FREE)) {
#else
if (-1 == madvise((void *)pgLo, len, MADV_FREE_REUSABLE)) {
#endif
#if DEBUG_MADVISE
szone_error(szone, 0, "madvise_free_range madvise(..., MADV_FREE_REUSABLE) failed",
(void *)pgLo, "length=%d\n", len);
#endif
}
}
return 0;
}
// Default memory "reader" used for in-process introspection: the supplied
// address is already valid in this task, so it is handed back unchanged.
// task and size are unused in the in-process case.
static kern_return_t
_szone_default_reader(task_t task, vm_address_t address, vm_size_t size, void **ptr)
{
	(void)task;
	(void)size;
	*ptr = (void *)address;
	return 0;
}
#if __LP64__
#define HASH_SELF() \
((((uintptr_t)pthread_self()) >> vm_page_shift) * 11400714819323198549ULL) >> (64 - szone->num_tiny_magazines_mask_shift)
#else
#define HASH_SELF() \
((((uintptr_t)pthread_self()) >> vm_page_shift) * 2654435761UL) >> (32 - szone->num_tiny_magazines_mask_shift)
#endif
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
// Choose the magazine for the calling thread. On architectures with a cheap
// cpu_number(), map the current CPU into the magazine range; single-threaded
// processes always use magazine 0.
static INLINE mag_index_t
mag_get_thread_index(szone_t *szone)
{
if (!__is_threaded)
return 0;
else
return cpu_number() & (TINY_MAX_MAGAZINES - 1);
}
#else
#warning deriving magazine index from pthread_self() [want processor number]
// Fallback: derive a stable per-thread magazine index by hashing
// pthread_self(), caching the result in cpu_id_key thread-specific data.
// The cached value is stored biased by +1 so that 0 means "not yet set".
static INLINE mag_index_t
mag_get_thread_index(szone_t *szone)
{
if (!__is_threaded)
return 0;
// No TSD key available: hash on every call.
else if ((pthread_key_t) -1 == szone->cpu_id_key) {
return HASH_SELF();
} else {
mag_index_t idx = (mag_index_t)(intptr_t)pthread_getspecific(szone->cpu_id_key);
if (idx) {
// Remove the +1 bias applied when the value was cached.
return idx - 1;
} else {
// First call on this thread: compute, cache (+1 bias), and return.
idx = HASH_SELF();
pthread_setspecific(szone->cpu_id_key, (const void *)((uintptr_t)idx + 1));
return idx;
}
}
}
#endif
// Lock the magazine that currently owns the region described by trailer.
// A region can migrate between magazines while we block on a lock, so after
// acquiring it we re-read trailer->mag_index and chase the region to its new
// magazine until the owner is stable under the held lock.
// Returns the locked magazine; caller is responsible for unlocking it.
static magazine_t *
mag_lock_zine_for_region_trailer(szone_t *szone, magazine_t *magazines, region_trailer_t *trailer, mag_index_t mag_index)
{
mag_index_t refreshed_index;
magazine_t *mag_ptr = &(magazines[mag_index]);
SZONE_MAGAZINE_PTR_LOCK(szone, mag_ptr);
// Owner changed while we waited: drop this lock and follow the region.
while (mag_index != (refreshed_index = trailer->mag_index)) {
SZONE_MAGAZINE_PTR_UNLOCK(szone, mag_ptr);
mag_index = refreshed_index;
mag_ptr = &(magazines[mag_index]);
SZONE_MAGAZINE_PTR_LOCK(szone, mag_ptr);
}
return mag_ptr;
}
#pragma mark region hash
// Look up region r in the hash ring. Returns a handle to the matching slot,
// or 0 when r is absent (an empty table or an open slot ends the probe).
// Collisions are resolved by linear probing with wrap-around.
static INLINE rgnhdl_t
hash_lookup_region_no_lock(region_t *regions, size_t num_entries, size_t shift, region_t r) {
	size_t probe, home;
	if (!num_entries)
		return 0;
	// Multiplicative (Fibonacci) hash of the region base address.
#if __LP64__
	home = (((uintptr_t)r >> HASH_BLOCKS_ALIGN) * 11400714819323198549ULL) >> (64 - shift);
#else
	home = (((uintptr_t)r >> HASH_BLOCKS_ALIGN) * 2654435761UL) >> (32 - shift);
#endif
	probe = home;
	do {
		rgnhdl_t entry = regions + probe;
		if (*entry == 0)
			return 0;       // open slot: r is not in the ring
		if (*entry == r)
			return entry;   // found it
		if (++probe == num_entries)
			probe = 0;      // wrap
	} while (probe != home);
	return 0;                       // probed the whole ring
}
// Insert region r into the first open or tombstoned slot, probing linearly
// from its hash position. A completely full ring silently drops the insert
// (callers grow the table before that can happen).
static void
hash_region_insert_no_lock(region_t *regions, size_t num_entries, size_t shift, region_t r) {
	size_t probe, home;
	// Same multiplicative hash as hash_lookup_region_no_lock.
#if __LP64__
	home = (((uintptr_t)r >> HASH_BLOCKS_ALIGN) * 11400714819323198549ULL) >> (64 - shift);
#else
	home = (((uintptr_t)r >> HASH_BLOCKS_ALIGN) * 2654435761UL) >> (32 - shift);
#endif
	probe = home;
	do {
		rgnhdl_t entry = regions + probe;
		if (*entry == HASHRING_OPEN_ENTRY || *entry == HASHRING_REGION_DEALLOCATED) {
			*entry = r;
			return;
		}
		if (++probe == num_entries)
			probe = 0;      // wrap
	} while (probe != home);
}
// Allocate a page-rounded array of num_entries region slots for the hash
// ring. Returns NULL if the byte count would overflow size_t (previously
// the unchecked multiplication could wrap and return a too-small table).
static region_t *
hash_regions_alloc_no_lock(szone_t *szone, size_t num_entries)
{
	// Guard the multiplication: num_entries * sizeof(region_t) must fit.
	if (num_entries > SIZE_MAX / sizeof(region_t))
		return NULL;
	size_t size = num_entries * sizeof(region_t);
	return allocate_pages(szone, round_page(size), 0, 0, VM_MEMORY_MALLOC);
}
// Double the region hash ring. Writes the doubled entry count to *new_size
// and the incremented hash shift to *mutable_shift, allocates the larger
// table, and rehashes every live entry (skipping open and tombstoned
// slots). Returns the new table; the caller owns disposal of the old one.
static region_t *
hash_regions_grow_no_lock(szone_t *szone, region_t *regions, size_t old_size, size_t *mutable_shift,
size_t *new_size)
{
	// Publish the new geometry first; the insert below depends on it.
	*new_size = old_size + old_size;
	*mutable_shift = *mutable_shift + 1;
	region_t *fresh = hash_regions_alloc_no_lock(szone, *new_size);
	for (size_t i = 0; i < old_size; ++i) {
		region_t r = regions[i];
		if (r != HASHRING_OPEN_ENTRY && r != HASHRING_REGION_DEALLOCATED)
			hash_region_insert_no_lock(fresh, *new_size, *mutable_shift, r);
	}
	return fresh;
}
// Out-of-line error path: a free-list link failed its nybble checksum,
// which almost always means the client wrote to a block after freeing it.
static NOINLINE void
free_list_checksum_botch(szone_t *szone, free_list_t *ptr)
{
	szone_error(szone, 1,
	    "incorrect checksum for freed object - object was probably modified after being freed.",
	    ptr, NULL);
}
// Generate a 4-bit checksum of a pointer value: sum all of its bytes
// (modulo 256) and keep the low nybble of the sum.
static inline uintptr_t free_list_gen_checksum(uintptr_t ptr)
{
	uint8_t sum = 0;
	unsigned shift;
	// Covers 4 bytes on 32-bit, 8 bytes on 64-bit targets.
	for (shift = 0; shift < 8 * sizeof(uintptr_t); shift += 8)
		sum += (unsigned char)(ptr >> shift);
	return sum & (uintptr_t)0xF;
}
#define NYBBLE 4
#if __LP64__
#define ANTI_NYBBLE (64 - NYBBLE)
#else
#define ANTI_NYBBLE (32 - NYBBLE)
#endif
// Encode a free-list link: rotate the pointer right by one nybble and place
// a 4-bit checksum of (pointer ^ zone cookie) in the vacated top nybble.
static INLINE uintptr_t
free_list_checksum_ptr(szone_t *szone, void *ptr)
{
	uintptr_t raw = (uintptr_t)ptr;
	uintptr_t check = free_list_gen_checksum(raw ^ szone->cookie);
	return (raw >> NYBBLE) | (check << ANTI_NYBBLE);
}
// Decode a checksummed free-list link: rotate left by one nybble to restore
// the pointer (low nybble now holds the stored checksum), then verify the
// checksum against the decoded pointer. On mismatch, report corruption and
// return NULL.
static INLINE void *
free_list_unchecksum_ptr(szone_t *szone, ptr_union *ptr)
{
	uintptr_t rotated = (ptr->u << NYBBLE) | (ptr->u >> ANTI_NYBBLE);
	ptr_union decoded;
	decoded.u = rotated & ~(uintptr_t)0xF;
	if ((rotated & (uintptr_t)0xF) != free_list_gen_checksum(decoded.u ^ szone->cookie)) {
		free_list_checksum_botch(szone, (free_list_t *)ptr);
		return NULL;
	}
	return decoded.p;
}
#undef ANTI_NYBBLE
#undef NYBBLE
// Count the entries on a checksummed singly-linked free list starting at
// ptr. Decoding each 'next' link also validates its checksum.
static unsigned
free_list_count(szone_t *szone, free_list_t *ptr)
{
	unsigned n;
	for (n = 0; ptr != NULL; ++n)
		ptr = free_list_unchecksum_ptr(szone, &ptr->next);
	return n;
}
// Unlink node from the magazine's doubly-linked recirculation list,
// patching firstNode/lastNode when the node was at either end, and
// decrement the entry count.
static INLINE void
recirc_list_extract(szone_t *szone, magazine_t *mag_ptr, region_trailer_t *node)
{
	region_trailer_t *before = node->prev;
	region_trailer_t *after = node->next;
	if (before)
		before->next = after;
	else
		mag_ptr->firstNode = after;   // node was the head
	if (after)
		after->prev = before;
	else
		mag_ptr->lastNode = before;   // node was the tail
	mag_ptr->recirculation_entries--;
}
// Append node at the tail of the magazine's recirculation list, resetting
// its recirc_suitable flag and bumping the entry count.
static INLINE void
recirc_list_splice_last(szone_t *szone, magazine_t *mag_ptr, region_trailer_t *node)
{
	region_trailer_t *tail = mag_ptr->lastNode;
	node->prev = tail;                    // NULL when the list was empty
	if (tail)
		tail->next = node;
	else
		mag_ptr->firstNode = node;    // empty list: node is also the head
	mag_ptr->lastNode = node;
	node->next = NULL;
	node->recirc_suitable = FALSE;
	mag_ptr->recirculation_entries++;
}
// Prepend node at the head of the magazine's recirculation list, resetting
// its recirc_suitable flag and bumping the entry count.
static INLINE void
recirc_list_splice_first(szone_t *szone, magazine_t *mag_ptr, region_trailer_t *node)
{
	region_trailer_t *head = mag_ptr->firstNode;
	node->next = head;                    // NULL when the list was empty
	if (head)
		head->prev = node;
	else
		mag_ptr->lastNode = node;     // empty list: node is also the tail
	mag_ptr->firstNode = node;
	node->prev = NULL;
	node->recirc_suitable = FALSE;
	mag_ptr->recirculation_entries++;
}
#if defined(__LP64__)
#define BITMAPV_SET(bitmap,slot) (bitmap[(slot) >> 5] |= 1 << ((slot) & 31))
#define BITMAPV_CLR(bitmap,slot) (bitmap[(slot) >> 5] &= ~ (1 << ((slot) & 31)))
#define BITMAPV_BIT(bitmap,slot) ((bitmap[(slot) >> 5] >> ((slot) & 31)) & 1)
#define BITMAPV_CTZ(bitmap) (__builtin_ctzl(bitmap))
#else
#define BITMAPV_SET(bitmap,slot) (bitmap[0] |= 1 << (slot))
#define BITMAPV_CLR(bitmap,slot) (bitmap[0] &= ~ (1 << (slot)))
#define BITMAPV_BIT(bitmap,slot) ((bitmap[0] >> (slot)) & 1)
#define BITMAPV_CTZ(bitmap) (__builtin_ctz(bitmap))
#endif
#define BITMAPN_SET(bitmap,slot) (bitmap[(slot) >> 5] |= 1 << ((slot) & 31))
#define BITMAPN_CLR(bitmap,slot) (bitmap[(slot) >> 5] &= ~ (1 << ((slot) & 31)))
#define BITMAPN_BIT(bitmap,slot) ((bitmap[(slot) >> 5] >> ((slot) & 31)) & 1)
#define BITMAP32_CTZ(bitmap) (__builtin_ctz(bitmap[0]))
// Set bit 'idx' in a bit array whose 32-bit words are interleaved in pairs
// (stride of 2 words per 32 indices — header and in-use words alternate).
static INLINE void
BITARRAY_SET(uint32_t *map, msize_t idx)
{
	map[(idx >> 5) << 1] |= (1 << (idx & 31));
}
// Clear bit 'idx' in an interleaved bit array (2-word stride per 32 bits).
static INLINE void
BITARRAY_CLR(uint32_t *map, msize_t idx)
{
	map[(idx >> 5) << 1] &= ~(1 << (idx & 31));
}
// Read bit 'idx' from an interleaved bit array (2-word stride per 32 bits);
// returns 0 or 1.
static INLINE boolean_t
BITARRAY_BIT(uint32_t *map, msize_t idx)
{
	return (map[(idx >> 5) << 1] >> (idx & 31)) & 1;
}
#if 0
static INLINE void bitarray_mclr(uint32_t *bits, unsigned start, unsigned end) ALWAYSINLINE;
static INLINE void
bitarray_mclr(uint32_t *bits, unsigned start, unsigned end)
{
uint32_t *addr = bits + ((start >> 5) << 1);
uint32_t span = end - start;
start = start & 31;
end = start + span;
if (end > 31) {
addr[0] &= (0xFFFFFFFFU >> (31 - start)) >> 1;
addr[2] &= (0xFFFFFFFFU << (end - 32));
} else {
unsigned mask = (0xFFFFFFFFU >> (31 - start)) >> 1;
mask |= (0xFFFFFFFFU << end);
addr[0] &= mask;
}
}
#endif
// Return the msize (size in TINY_QUANTUM units) of the free block at ptr.
// When the following quantum's header bit is clear, the size was recorded
// inline at ptr (TINY_FREE_SIZE); otherwise the block is a single quantum.
static msize_t
get_tiny_free_size(const void *ptr)
{
	void *next_block = (void *)((uintptr_t)ptr + TINY_QUANTUM);
	void *region_end = TINY_REGION_END(TINY_REGION_FOR_PTR(ptr));
	// At the region boundary there is no following quantum to inspect.
	if (next_block >= region_end)
		return 1;
	if (BITARRAY_BIT(TINY_BLOCK_HEADER_FOR_PTR(next_block), TINY_INDEX_FOR_PTR(next_block)))
		return 1;
	return TINY_FREE_SIZE(ptr);
}
// Return the msize of the block immediately preceding ptr, or 0 when ptr
// is the start of its region. A set header bit on the preceding quantum
// means that block is exactly one quantum; otherwise its msize was stashed
// just before ptr (TINY_PREVIOUS_MSIZE).
static msize_t
get_tiny_previous_free_msize(const void *ptr)
{
	if (ptr == TINY_REGION_FOR_PTR(ptr))
		return 0;       // no predecessor at the region start
	void *prev_block = (void *)((uintptr_t)ptr - TINY_QUANTUM);
	if (BITARRAY_BIT(TINY_BLOCK_HEADER_FOR_PTR(prev_block), TINY_INDEX_FOR_PTR(prev_block)))
		return 1;       // preceding quantum is itself a block start
	return TINY_PREVIOUS_MSIZE(ptr);
}
// Return the msize (size in TINY_QUANTUM units) of the block containing ptr
// and set *is_free. Returns 0 when ptr is not the start of a block.
// In-use block sizes are not stored anywhere; they are recovered by scanning
// the header bitmap for the next set bit.
static INLINE msize_t
get_tiny_meta_header(const void *ptr, boolean_t *is_free)
{
uint32_t *block_header;
msize_t index;
block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
index = TINY_INDEX_FOR_PTR(ptr);
// Bits are interleaved in 32-bit word pairs: word 2k holds header bits,
// word 2k+1 the matching in-use bits.
msize_t midx = (index >> 5) << 1;
uint32_t mask = 1 << (index & 31);
*is_free = 0;
// Header bit clear: ptr is not the start of any block.
if (0 == (block_header[midx] & mask)) return 0;
// Block start with in-use bit clear: a free block; its size is recorded.
if (0 == (block_header[midx + 1] & mask)) { *is_free = 1;
return get_tiny_free_size(ptr);
}
// In use: distance to the next set header bit gives the msize.
#if defined(__LP64__)
// Assemble a 64-bit window of header bits starting at this block's bit.
uint32_t *addr = ((uint32_t *)block_header) + ((index >> 5) << 1);
uint32_t bitidx = index & 31;
uint64_t word_lo = addr[0];
uint64_t word_mid = addr[2];
uint64_t word_hi = addr[4];
uint64_t word_lomid = (word_lo >> bitidx) | (word_mid << (32 - bitidx));
uint64_t word = bitidx ? word_lomid | (word_hi << (64 - bitidx)) : word_lomid;
// Shift our own bit out; ffs of the remainder is the block length.
uint32_t result = __builtin_ffsl(word >> 1);
#else
// 32-bit: a two-word window of header bits suffices.
uint32_t *addr = ((uint32_t *)block_header) + ((index >> 5) << 1);
uint32_t bitidx = index & 31;
uint32_t word = bitidx ? (addr[0] >> bitidx) | (addr[2] << (32 - bitidx)) : addr[0];
uint32_t result = __builtin_ffs(word >> 1);
#endif
return result;
}
// Mark the block at ptr, msize quanta long, as allocated: set the header
// and in-use bits for the first quantum, clear both bits for the msize-1
// interior quanta, and set the header bit of the quantum just past the
// block so its length can later be recovered by bit scanning.
static INLINE void
set_tiny_meta_header_in_use(const void *ptr, msize_t msize)
{
uint32_t *block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
msize_t index = TINY_INDEX_FOR_PTR(ptr);
msize_t clr_msize = msize - 1;
// Interleaved layout: word 2k = header bits, word 2k+1 = in-use bits.
msize_t midx = (index >> 5) << 1;
uint32_t val = (1 << (index & 31));
#if DEBUG_MALLOC
if (msize >= NUM_TINY_SLOTS)
malloc_printf("set_tiny_meta_header_in_use() invariant broken %p %d\n", ptr, msize);
if ((unsigned)index + (unsigned)msize > 0x10000)
malloc_printf("set_tiny_meta_header_in_use() invariant broken (2) %p %d\n", ptr, msize);
#endif
// First quantum: block start, in use.
block_header[midx] |= val; block_header[midx + 1] |= val;
// Clear the bits of the clr_msize interior quanta. The run can straddle
// up to three 32-bit word pairs; pick the masking strategy accordingly.
index++;
midx = (index >> 5) << 1;
unsigned start = index & 31;
unsigned end = start + clr_msize;
#if defined(__LP64__)
if (end > 63) { // run spans three word pairs
unsigned mask0 = (0xFFFFFFFFU >> (31 - start)) >> 1;
unsigned mask1 = (0xFFFFFFFFU << (end - 64));
block_header[midx + 0] &= mask0; block_header[midx + 1] &= mask0; block_header[midx + 2] = 0; block_header[midx + 3] = 0; block_header[midx + 4] &= mask1; block_header[midx + 5] &= mask1; } else
#endif
if (end > 31) { // run spans two word pairs
unsigned mask0 = (0xFFFFFFFFU >> (31 - start)) >> 1;
unsigned mask1 = (0xFFFFFFFFU << (end - 32));
block_header[midx + 0] &= mask0;
block_header[midx + 1] &= mask0;
block_header[midx + 2] &= mask1;
block_header[midx + 3] &= mask1;
} else { // run fits in a single word pair
unsigned mask = (0xFFFFFFFFU >> (31 - start)) >> 1;
mask |= (0xFFFFFFFFU << end);
block_header[midx + 0] &= mask;
block_header[midx + 1] &= mask;
}
// Set the header bit of the quantum just past this block.
index += clr_msize;
midx = (index >> 5) << 1;
val = (1 << (index & 31));
block_header[midx] |= val;
#if DEBUG_MALLOC
// Read the metadata back and verify it round-trips.
{
boolean_t ff;
msize_t mf;
mf = get_tiny_meta_header(ptr, &ff);
if (msize != mf) {
malloc_printf("setting header for tiny in_use %p : %d\n", ptr, msize);
malloc_printf("reading header for tiny %p : %d %d\n", ptr, mf, ff);
}
}
#endif
}
// Fast path of set_tiny_meta_header_in_use for a one-quantum block: set the
// header and in-use bits at ptr, then set the next quantum's header bit so
// the block length reads back as 1.
static INLINE void
set_tiny_meta_header_in_use_1(const void *ptr) {
	uint32_t *block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
	msize_t index = TINY_INDEX_FOR_PTR(ptr);
	msize_t word = (index >> 5) << 1;
	uint32_t bit = (1 << (index & 31));
	block_header[word] |= bit;        // header: block starts here
	block_header[word + 1] |= bit;    // in-use
	index++;
	word = (index >> 5) << 1;
	bit = (1 << (index & 31));
	block_header[word] |= bit;        // terminate: next quantum is a start
}
// Clear both the header and in-use bits for ptr's quantum so it reads as
// the interior of some larger block.
static INLINE void
set_tiny_meta_header_middle(const void *ptr)
{
	uint32_t *hdr = TINY_BLOCK_HEADER_FOR_PTR(ptr);
	msize_t idx = TINY_INDEX_FOR_PTR(ptr);
	BITARRAY_CLR(hdr, idx);
	BITARRAY_CLR(TINY_INUSE_FOR_HEADER(hdr), idx);
}
// Mark the block at ptr as the start of a free block: header bit set,
// in-use bit cleared. For multi-quantum blocks the msize is recorded both
// at the start (TINY_FREE_SIZE) and just before the following block
// (TINY_PREVIOUS_MSIZE) to support backward coalescing.
static INLINE void
set_tiny_meta_header_free(const void *ptr, msize_t msize)
{
uint32_t *block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
msize_t index = TINY_INDEX_FOR_PTR(ptr);
msize_t midx = (index >> 5) << 1;
uint32_t val = (1 << (index & 31));
#if DEBUG_MALLOC
if ((unsigned)index + (unsigned)msize > 0x10000) {
malloc_printf("setting header for tiny free %p msize too large: %d\n", ptr, msize);
}
#endif
// Header bit on, in-use bit off: a free block start.
block_header[midx] |= val; block_header[midx + 1] &= ~val;
if (msize > 1) {
void *follower = FOLLOWING_TINY_PTR(ptr, msize);
TINY_PREVIOUS_MSIZE(follower) = msize;
TINY_FREE_SIZE(ptr) = msize;
}
// NOTE(review): msize == 0 appears to mark the special whole-region-free
// case; the zero size is still recorded inline — confirm against callers.
if (msize == 0) {
TINY_FREE_SIZE(ptr) = msize;
}
#if DEBUG_MALLOC
// Verify the metadata reads back as a free block of the same size.
boolean_t ff;
msize_t mf = get_tiny_meta_header(ptr, &ff);
if ((msize != mf) || !ff) {
malloc_printf("setting header for tiny free %p : %u\n", ptr, msize);
malloc_printf("reading header for tiny %p : %u %u\n", ptr, mf, ff);
}
#endif
}
// A quantum is the start of a free block iff its header bit is set and its
// in-use bit is clear. Returns 0 for in-use blocks and block interiors.
static INLINE boolean_t
tiny_meta_header_is_free(const void *ptr)
{
	uint32_t *hdr = TINY_BLOCK_HEADER_FOR_PTR(ptr);
	msize_t idx = TINY_INDEX_FOR_PTR(ptr);
	return BITARRAY_BIT(hdr, idx) && !BITARRAY_BIT(TINY_INUSE_FOR_HEADER(hdr), idx);
}
// If the block immediately preceding ptr is free, return its address and
// store its msize through prev_msize; otherwise return NULL. The candidate
// is accepted only when all metadata agrees: it must be a block start
// (header bit set), free (in-use bit clear), and its recorded free size
// must match the distance back from ptr.
static INLINE void *
tiny_previous_preceding_free(void *ptr, msize_t *prev_msize)
{
	uint32_t *block_header = TINY_BLOCK_HEADER_FOR_PTR(ptr);
	uint32_t *in_use = TINY_INUSE_FOR_HEADER(block_header);
	msize_t index = TINY_INDEX_FOR_PTR(ptr);
	if (!index)
		return NULL;    // ptr is the first quantum of its region
	msize_t previous_msize = get_tiny_previous_free_msize(ptr);
	if (previous_msize > index)
		return NULL;    // recorded size is implausible
	msize_t previous_index = index - previous_msize;
	void *previous_ptr = (void *)((uintptr_t)TINY_REGION_FOR_PTR(ptr) + TINY_BYTES_FOR_MSIZE(previous_index));
	if (!BITARRAY_BIT(block_header, previous_index))
		return NULL;    // not a block start
	if (BITARRAY_BIT(in_use, previous_index))
		return NULL;    // block is allocated
	if (get_tiny_free_size(previous_ptr) != previous_msize)
		return NULL;    // sizes disagree
	*prev_msize = previous_msize;
	return previous_ptr;
}
// Push a free block onto the magazine's free list for its size class.
// Blocks of msize 0 or >= NUM_TINY_SLOTS share the final catch-all slot.
// List links are stored checksummed (free_list_checksum_ptr). The caller
// holds the magazine lock.
static void
tiny_free_list_add_ptr(szone_t *szone, magazine_t *tiny_mag_ptr, void *ptr, msize_t msize)
{
grain_t slot = (!msize || (msize >= NUM_TINY_SLOTS)) ? NUM_TINY_SLOTS - 1 : msize - 1;
free_list_t *free_ptr = ptr;
free_list_t *free_head = tiny_mag_ptr->mag_free_list[slot];
#if DEBUG_MALLOC
if (LOG(szone,ptr)) {
malloc_printf("in %s, ptr=%p, msize=%d\n", __FUNCTION__, ptr, msize);
}
if (((uintptr_t)ptr) & (TINY_QUANTUM - 1)) {
szone_error(szone, 1, "tiny_free_list_add_ptr: Unaligned ptr", ptr, NULL);
}
#endif
// Record the free-block metadata (header/in-use bits, inline sizes).
set_tiny_meta_header_free(ptr, msize);
if (free_head) {
#if DEBUG_MALLOC
// A list head must have no predecessor and must itself be free.
if (free_list_unchecksum_ptr(szone, &free_head->previous)) {
szone_error(szone, 1, "tiny_free_list_add_ptr: Internal invariant broken (free_head->previous)", ptr,
"ptr=%p slot=%d free_head=%p previous=%p\n", ptr, slot, (void *)free_head, free_head->previous.p);
}
if (! tiny_meta_header_is_free(free_head)) {
szone_error(szone, 1, "tiny_free_list_add_ptr: Internal invariant broken (free_head is not a free pointer)", ptr,
"ptr=%p slot=%d free_head=%p\n", ptr, slot, (void *)free_head);
}
#endif
free_head->previous.u = free_list_checksum_ptr(szone, free_ptr);
} else {
// First entry in this slot: flag it in the occupancy bitmap.
BITMAPV_SET(tiny_mag_ptr->mag_bitmap, slot);
}
// The new block becomes the list head.
free_ptr->previous.u = free_list_checksum_ptr(szone, NULL);
free_ptr->next.u = free_list_checksum_ptr(szone, free_head);
tiny_mag_ptr->mag_free_list[slot] = free_ptr;
}
// Unlink a free block from its size-class free list. The checksummed
// previous/next links are decoded (and thereby validated) first; a NULL
// previous means the block is the list head. The caller holds the
// magazine lock.
static void
tiny_free_list_remove_ptr(szone_t *szone, magazine_t *tiny_mag_ptr, void *ptr, msize_t msize)
{
grain_t slot = (!msize || (msize >= NUM_TINY_SLOTS)) ? NUM_TINY_SLOTS - 1 : msize - 1;
free_list_t *free_ptr = ptr, *next, *previous;
next = free_list_unchecksum_ptr(szone, &free_ptr->next);
previous = free_list_unchecksum_ptr(szone, &free_ptr->previous);
#if DEBUG_MALLOC
if (LOG(szone,ptr)) {
malloc_printf("In %s, ptr=%p, msize=%d\n", __FUNCTION__, ptr, msize);
}
#endif
if (!previous) {
// Removing the head: repoint the slot and, if the list empties,
// clear the slot's bit in the occupancy bitmap.
#if DEBUG_MALLOC
if (tiny_mag_ptr->mag_free_list[slot] != ptr) {
szone_error(szone, 1, "tiny_free_list_remove_ptr: Internal invariant broken (tiny_mag_ptr->mag_free_list[slot])", ptr,
"ptr=%p slot=%d msize=%d tiny_mag_ptr->mag_free_list[slot]=%p\n",
ptr, slot, msize, (void *)tiny_mag_ptr->mag_free_list[slot]);
return;
}
#endif
tiny_mag_ptr->mag_free_list[slot] = next;
if (!next) BITMAPV_CLR(tiny_mag_ptr->mag_bitmap, slot);
} else {
// Interior removal: the stored links are already checksummed, so
// they can be copied verbatim.
previous->next = free_ptr->next;
}
if (next) {
next->previous = free_ptr->previous;
}
}
// Map an arbitrary client pointer to the tiny region containing it by
// consulting the region hash ring. Returns NULL when no region matches.
static INLINE region_t
tiny_region_for_ptr_no_lock(szone_t *szone, const void *ptr)
{
	rgnhdl_t slot = hash_lookup_region_no_lock(szone->tiny_region_generation->hashed_regions,
	    szone->tiny_region_generation->num_regions_allocated,
	    szone->tiny_region_generation->num_regions_allocated_shift,
	    TINY_REGION_FOR_PTR(ptr));
	return slot ? *slot : NULL;
}
// Fold the magazine's bump-allocation areas of mag_last_region back into
// the free lists: mag_bytes_free_at_end covers untouched space at the end
// of the region and, with ASLR, mag_bytes_free_at_start covers slack at the
// start. Both are coalesced with an adjacent free neighbor when possible.
// Afterwards the magazine has no designated "last" region.
static void
tiny_finalize_region(szone_t *szone, magazine_t *tiny_mag_ptr) {
void *last_block, *previous_block;
uint32_t *last_header;
msize_t last_msize, previous_msize, last_index;
if (tiny_mag_ptr->mag_bytes_free_at_end) {
last_block = (void *)
((uintptr_t)TINY_REGION_END(tiny_mag_ptr->mag_last_region) - tiny_mag_ptr->mag_bytes_free_at_end);
last_msize = TINY_MSIZE_FOR_BYTES(tiny_mag_ptr->mag_bytes_free_at_end);
last_header = TINY_BLOCK_HEADER_FOR_PTR(last_block);
last_index = TINY_INDEX_FOR_PTR(last_block);
// NOTE(review): clears the header bit just past the free-at-end area;
// the free-list add below rewrites this block's metadata.
if (last_index != (NUM_TINY_BLOCKS - 1))
BITARRAY_CLR(last_header, (last_index + 1));
// Coalesce backward with a free block immediately preceding the area.
previous_block = tiny_previous_preceding_free(last_block, &previous_msize);
if (previous_block) {
set_tiny_meta_header_middle(last_block);
tiny_free_list_remove_ptr(szone, tiny_mag_ptr, previous_block, previous_msize);
last_block = previous_block;
last_msize += previous_msize;
}
tiny_free_list_add_ptr(szone, tiny_mag_ptr, last_block, last_msize);
tiny_mag_ptr->mag_bytes_free_at_end = 0;
}
#if ASLR_INTERNAL
if (tiny_mag_ptr->mag_bytes_free_at_start) {
last_block = TINY_REGION_ADDRESS(tiny_mag_ptr->mag_last_region);
last_msize = TINY_MSIZE_FOR_BYTES(tiny_mag_ptr->mag_bytes_free_at_start);
void *next_block = (void *) ((uintptr_t)last_block + tiny_mag_ptr->mag_bytes_free_at_start);
set_tiny_meta_header_middle((uintptr_t)next_block - TINY_QUANTUM);
// Coalesce forward with a free block just after the start area.
if (tiny_meta_header_is_free(next_block)) {
msize_t next_msize = get_tiny_free_size(next_block);
set_tiny_meta_header_middle(next_block);
tiny_free_list_remove_ptr(szone, tiny_mag_ptr, next_block, next_msize);
last_msize += next_msize;
}
tiny_free_list_add_ptr(szone, tiny_mag_ptr, last_block, last_msize);
tiny_mag_ptr->mag_bytes_free_at_start = 0;
}
#endif
tiny_mag_ptr->mag_last_region = NULL;
}
// Walk every block of region r, pulling free blocks off this magazine's
// free lists (the block metadata itself is left intact) and counting the
// blocks still allocated to clients. Returns that in-use count.
// Caller holds the magazine lock.
static int
tiny_free_detach_region(szone_t *szone, magazine_t *tiny_mag_ptr, region_t r) {
	uintptr_t start = (uintptr_t)TINY_REGION_ADDRESS(r);
	uintptr_t limit = (uintptr_t)TINY_REGION_END(r);
	int in_use_count = 0;
	for (uintptr_t cursor = start; cursor < limit; ) {
		boolean_t is_free;
		msize_t msize = get_tiny_meta_header((void *)cursor, &is_free);
		// First block free with msize 0: degenerate whole-region case.
		if (is_free && !msize && (cursor == start))
			break;
		if (!msize) {
#if DEBUG_MALLOC
			malloc_printf("*** tiny_free_detach_region error with %p: msize=%d is_free =%d\n",
			    (void *)cursor, msize, is_free);
#endif
			break;  // corrupt metadata: stop rather than loop forever
		}
		if (is_free)
			tiny_free_list_remove_ptr(szone, tiny_mag_ptr, (void *)cursor, msize);
		else
			in_use_count++;
		cursor += TINY_BYTES_FOR_MSIZE(msize);
	}
	return in_use_count;
}
// Inverse of tiny_free_detach_region: walk region r, re-adding its free
// blocks to this magazine's free lists, and total the bytes of blocks
// still allocated to clients. Returns that in-use byte count.
// Caller holds the magazine lock.
static size_t
tiny_free_reattach_region(szone_t *szone, magazine_t *tiny_mag_ptr, region_t r) {
	uintptr_t start = (uintptr_t)TINY_REGION_ADDRESS(r);
	uintptr_t limit = (uintptr_t)TINY_REGION_END(r);
	size_t bytes_in_use = 0;
	for (uintptr_t cursor = start; cursor < limit; ) {
		boolean_t is_free;
		msize_t msize = get_tiny_meta_header((void *)cursor, &is_free);
		// First block free with msize 0: degenerate whole-region case.
		if (is_free && !msize && (cursor == start))
			break;
		if (!msize) {
#if DEBUG_MALLOC
			malloc_printf("*** tiny_free_reattach_region error with %p: msize=%d is_free =%d\n",
			    (void *)cursor, msize, is_free);
#endif
			break;  // corrupt metadata: stop rather than loop forever
		}
		if (is_free)
			tiny_free_list_add_ptr(szone, tiny_mag_ptr, (void *)cursor, msize);
		else
			bytes_in_use += TINY_BYTES_FOR_MSIZE(msize);
		cursor += TINY_BYTES_FOR_MSIZE(msize);
	}
	return bytes_in_use;
}
typedef struct {
uint8_t pnum, size;
} tiny_pg_pair_t;
// Scan a Depot region for free blocks and madvise their page-aligned
// interiors back to the kernel. Advisory spans are collected under the
// Depot lock first; the lock is then dropped — with the region pinned so it
// cannot be unmapped — while the madvise syscalls run.
static void NOINLINE
tiny_free_scan_madvise_free(szone_t *szone, magazine_t *depot_ptr, region_t r) {
uintptr_t start = (uintptr_t)TINY_REGION_ADDRESS(r);
uintptr_t current = start;
uintptr_t limit = (uintptr_t)TINY_REGION_END(r);
boolean_t is_free;
msize_t msize;
// One (page-number, page-count) pair per potential free span.
tiny_pg_pair_t advisory[((TINY_REGION_PAYLOAD_BYTES + vm_page_size - 1) >> vm_page_shift) >> 1]; int advisories = 0;
while (current < limit) {
msize = get_tiny_meta_header((void *)current, &is_free);
if (is_free && !msize && (current == start)) {
// First block free with msize 0: treat the whole region as free.
#if DEBUG_MALLOC
malloc_printf("*** tiny_free_scan_madvise_free first block is all free! %p: msize=%d is_free =%d\n",
(void *)current, msize, is_free);
#endif
// Keep the free-list links and inline msize words resident by
// rounding the span inward past them.
uintptr_t pgLo = round_page(start + sizeof(free_list_t) + sizeof(msize_t));
uintptr_t pgHi = trunc_page(start + TINY_REGION_SIZE - sizeof(msize_t));
if (pgLo < pgHi) {
advisory[advisories].pnum = (pgLo - start) >> vm_page_shift;
advisory[advisories].size = (pgHi - pgLo) >> vm_page_shift;
advisories++;
}
break;
}
if (!msize) {
#if DEBUG_MALLOC
malloc_printf("*** tiny_free_scan_madvise_free error with %p: msize=%d is_free =%d\n",
(void *)current, msize, is_free);
#endif
break;
}
if (is_free) {
// Same inward rounding: spare the block's metadata words.
uintptr_t pgLo = round_page(current + sizeof(free_list_t) + sizeof(msize_t));
uintptr_t pgHi = trunc_page(current + TINY_BYTES_FOR_MSIZE(msize) - sizeof(msize_t));
if (pgLo < pgHi) {
advisory[advisories].pnum = (pgLo - start) >> vm_page_shift;
advisory[advisories].size = (pgHi - pgLo) >> vm_page_shift;
advisories++;
}
}
current += TINY_BYTES_FOR_MSIZE(msize);
}
if (advisories > 0) {
int i;
// Pin the region so it cannot leave the Depot while we are unlocked.
OSAtomicIncrement32Barrier(&(REGION_TRAILER_FOR_TINY_REGION(r)->pinned_to_depot));
SZONE_MAGAZINE_PTR_UNLOCK(szone, depot_ptr);
for (i = 0; i < advisories; ++i) {
uintptr_t addr = (advisory[i].pnum << vm_page_shift) + start;
size_t size = advisory[i].size << vm_page_shift;
#if TARGET_OS_EMBEDDED
madvise_free_range(szone, r, addr, addr + size, NULL);
#else
madvise_free_range(szone, r, addr, addr + size);
#endif
}
SZONE_MAGAZINE_PTR_LOCK(szone, depot_ptr);
OSAtomicDecrement32Barrier(&(REGION_TRAILER_FOR_TINY_REGION(r)->pinned_to_depot));
}
}
// If the Depot region owning 'node' is entirely free, not pinned by a
// concurrent madvise pass, and the Depot retains ample spare regions,
// detach it and return it so the caller can unmap it (after dropping the
// Depot lock). Returns NULL when the region must stay. Caller holds the
// Depot lock.
static region_t
tiny_free_try_depot_unmap_no_lock(szone_t *szone, magazine_t *depot_ptr, region_trailer_t *node)
{
// Keep a headroom of regions in the Depot (2 per magazine) rather than
// returning memory to the kernel too eagerly.
if (0 < node->bytes_used ||
0 < node->pinned_to_depot ||
depot_ptr->recirculation_entries < (szone->num_tiny_magazines * 2)) {
return NULL;
}
recirc_list_extract(szone, depot_ptr, node);
region_t sparse_region = TINY_REGION_FOR_PTR(node);
int objects_in_use = tiny_free_detach_region(szone, depot_ptr, sparse_region);
if (0 == objects_in_use) {
// Tombstone the hash-ring slot (rather than emptying it) so probe
// chains through this slot remain intact.
rgnhdl_t pSlot = hash_lookup_region_no_lock(szone->tiny_region_generation->hashed_regions,
szone->tiny_region_generation->num_regions_allocated,
szone->tiny_region_generation->num_regions_allocated_shift, sparse_region);
if (NULL == pSlot) {
szone_error(szone, 1, "tiny_free_try_depot_unmap_no_lock hash lookup failed:", NULL, "%p\n", sparse_region);
return NULL;
}
*pSlot = HASHRING_REGION_DEALLOCATED;
depot_ptr->num_bytes_in_magazine -= TINY_REGION_PAYLOAD_BYTES;
__sync_fetch_and_add( &(szone->num_tiny_regions_dealloc), 1);
// Fire the dtrace probe; the caller performs the actual unmap.
MAGMALLOC_DEALLOCREGION((void *)szone, (void *)sparse_region, TINY_REGION_SIZE);
return sparse_region;
} else {
// bytes_used said empty but the walk found live objects: accounting
// is corrupt.
szone_error(szone, 1, "tiny_free_try_depot_unmap_no_lock objects_in_use not zero:", NULL, "%d\n", objects_in_use);
return NULL;
}
}
// Migrate the first recirculation-suitable region of this magazine to the
// Depot so its free space becomes available to other magazines, its clean
// pages can be madvised away, and the region possibly unmapped entirely.
// Returns TRUE when nothing was moved (the magazine lock is still held);
// returns FALSE after a handoff, in which case the magazine lock was
// dropped during the transfer.
static boolean_t
tiny_free_do_recirc_to_depot(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index)
{
region_trailer_t *node = tiny_mag_ptr->firstNode;
while (node && !node->recirc_suitable) {
node = node->next;
}
if (NULL == node) {
#if DEBUG_MALLOC
malloc_printf("*** tiny_free_do_recirc_to_depot end of list\n");
#endif
return TRUE; }
region_t sparse_region = TINY_REGION_FOR_PTR(node);
// The chosen region may still be the bump-allocation target; finalize it
// so all of its space is on the free lists before detaching.
if (sparse_region == tiny_mag_ptr->mag_last_region && (tiny_mag_ptr->mag_bytes_free_at_end || tiny_mag_ptr->mag_bytes_free_at_start)) {
tiny_finalize_region(szone, tiny_mag_ptr);
}
// Pull the region's free blocks out of this magazine's lists.
recirc_list_extract(szone, tiny_mag_ptr, node);
int objects_in_use = tiny_free_detach_region(szone, tiny_mag_ptr, sparse_region);
magazine_t *depot_ptr = &(szone->tiny_magazines[DEPOT_MAGAZINE_INDEX]);
SZONE_MAGAZINE_PTR_LOCK(szone,depot_ptr);
MAGAZINE_INDEX_FOR_TINY_REGION(sparse_region) = DEPOT_MAGAZINE_INDEX;
node->pinned_to_depot = 0;
// Re-add the free blocks under the Depot and move the accounting over.
size_t bytes_inplay = tiny_free_reattach_region(szone, depot_ptr, sparse_region);
tiny_mag_ptr->mag_num_bytes_in_objects -= bytes_inplay;
tiny_mag_ptr->num_bytes_in_magazine -= TINY_REGION_PAYLOAD_BYTES;
tiny_mag_ptr->mag_num_objects -= objects_in_use;
SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
depot_ptr->mag_num_bytes_in_objects += bytes_inplay;
depot_ptr->num_bytes_in_magazine += TINY_REGION_PAYLOAD_BYTES;
depot_ptr->mag_num_objects += objects_in_use;
recirc_list_splice_last(szone, depot_ptr, node);
MAGMALLOC_RECIRCREGION((void *)szone, (int)mag_index, (void *)sparse_region, TINY_REGION_SIZE,
(int)BYTES_USED_FOR_TINY_REGION(sparse_region));
// Give the region's clean pages back to the kernel, then try to unmap
// it outright if it turned out to be completely free.
tiny_free_scan_madvise_free(szone, depot_ptr, sparse_region);
region_t r_dealloc = tiny_free_try_depot_unmap_no_lock(szone, depot_ptr, node);
SZONE_MAGAZINE_PTR_UNLOCK(szone,depot_ptr);
if (r_dealloc)
deallocate_pages(szone, r_dealloc, TINY_REGION_SIZE, 0);
return FALSE; }
// Find a region in this magazine containing a free block of at least msize
// quanta: check the exact-size slot first, then use the occupancy bitmap to
// jump to the next larger populated slot, and finally consult the catch-all
// last slot. Returns the region of the block found, or NULL. Does not
// modify any free list. Caller holds the magazine lock.
static region_t
tiny_find_msize_region(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index, msize_t msize)
{
free_list_t *ptr;
grain_t slot = msize - 1;
free_list_t **free_list = tiny_mag_ptr->mag_free_list;
free_list_t **the_slot = free_list + slot;
free_list_t **limit;
#if defined(__LP64__)
uint64_t bitmap;
#else
uint32_t bitmap;
#endif
CHECK_MAGAZINE_PTR_LOCKED(szone, tiny_mag_ptr, __PRETTY_FUNCTION__);
// Exact fit?
ptr = *the_slot;
if (ptr)
return TINY_REGION_FOR_PTR(ptr);
// Mask off slots smaller than requested and scan for the next set bit.
#if defined(__LP64__)
bitmap = ((uint64_t *)(tiny_mag_ptr->mag_bitmap))[0] & ~ ((1ULL << slot) - 1);
#else
bitmap = tiny_mag_ptr->mag_bitmap[0] & ~ ((1 << slot) - 1);
#endif
if (!bitmap)
return NULL;
slot = BITMAPV_CTZ(bitmap);
limit = free_list + NUM_TINY_SLOTS - 1;
free_list += slot;
// A populated slot below the catch-all satisfies the request directly.
if (free_list < limit) {
ptr = *free_list;
if (ptr)
return TINY_REGION_FOR_PTR(ptr);
else {
// Bitmap said the slot was populated but the list is empty.
#if DEBUG_MALLOC
malloc_printf("in tiny_find_msize_region(), mag_bitmap out of sync, slot=%d\n",slot);
#endif
}
}
// Fall back to the final slot, which holds the largest free blocks.
ptr = *limit;
if (ptr)
return TINY_REGION_FOR_PTR(ptr);
return NULL;
}
// Try to adopt a region holding a free block of at least msize quanta from
// the Depot into the caller's magazine (which the caller has locked).
// Spins (with yields) while the candidate region is pinned by a concurrent
// madvise pass. Returns 1 if a region was adopted, 0 otherwise.
static boolean_t
tiny_get_region_from_depot(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index, msize_t msize)
{
magazine_t *depot_ptr = &(szone->tiny_magazines[DEPOT_MAGAZINE_INDEX]);
// With a single magazine there is no Depot to borrow from.
if (szone->num_tiny_magazines == 1) return 0;
#if DEBUG_MALLOC
if (DEPOT_MAGAZINE_INDEX == mag_index) {
szone_error(szone, 1, "tiny_get_region_from_depot called for magazine index -1", NULL, NULL);
return 0;
}
#endif
SZONE_MAGAZINE_PTR_LOCK(szone,depot_ptr);
region_trailer_t *node;
region_t sparse_region;
while (1) {
sparse_region = tiny_find_msize_region(szone, depot_ptr, DEPOT_MAGAZINE_INDEX, msize);
// Depot has nothing suitable.
if (NULL == sparse_region) { SZONE_MAGAZINE_PTR_UNLOCK(szone,depot_ptr);
return 0;
}
node = REGION_TRAILER_FOR_TINY_REGION(sparse_region);
if (0 >= node->pinned_to_depot)
break;
// Region is pinned by an unlocked madvise pass; yield and retry.
SZONE_MAGAZINE_PTR_UNLOCK(szone,depot_ptr);
pthread_yield_np();
SZONE_MAGAZINE_PTR_LOCK(szone,depot_ptr);
}
// Move the region and its accounting from the Depot to this magazine.
recirc_list_extract(szone, depot_ptr, node);
int objects_in_use = tiny_free_detach_region(szone, depot_ptr, sparse_region);
MAGAZINE_INDEX_FOR_TINY_REGION(sparse_region) = mag_index;
node->pinned_to_depot = 0;
size_t bytes_inplay = tiny_free_reattach_region(szone, tiny_mag_ptr, sparse_region);
depot_ptr->mag_num_bytes_in_objects -= bytes_inplay;
depot_ptr->num_bytes_in_magazine -= TINY_REGION_PAYLOAD_BYTES;
depot_ptr->mag_num_objects -= objects_in_use;
tiny_mag_ptr->mag_num_bytes_in_objects += bytes_inplay;
tiny_mag_ptr->num_bytes_in_magazine += TINY_REGION_PAYLOAD_BYTES;
tiny_mag_ptr->mag_num_objects += objects_in_use;
recirc_list_splice_first(szone, tiny_mag_ptr, node);
SZONE_MAGAZINE_PTR_UNLOCK(szone,depot_ptr);
// Tell the kernel we intend to reuse pages previously marked reusable.
#if TARGET_OS_EMBEDDED
if (node->failedREUSE) {
#else
if (node->failedREUSE ||
-1 == madvise((void *)sparse_region, TINY_REGION_PAYLOAD_BYTES, MADV_FREE_REUSE)) {
#endif
#if DEBUG_MADVISE
szone_error(szone, 0, "tiny_get_region_from_depot madvise(..., MADV_FREE_REUSE) failed",
sparse_region, "length=%d\n", TINY_REGION_PAYLOAD_BYTES);
#endif
// Remember the failure so we stop retrying MADV_FREE_REUSE here.
node->failedREUSE = TRUE;
}
MAGMALLOC_DEPOTREGION((void *)szone, (int)mag_index, (void *)sparse_region, TINY_REGION_SIZE,
(int)BYTES_USED_FOR_TINY_REGION(sparse_region));
return 1;
}
// Tuning knobs for region recirculation decisions.
// DENSITY_THRESHOLD(a) == a - a/4, i.e. 75% of a.
// NOTE(review): K appears unused below -- the 3/2 headroom factor in
// tiny_free_no_lock is written out literally; confirm before removing.
#define K 1.5 // headroom measured in number of 1Mb regions
#define DENSITY_THRESHOLD(a) \
((a) - ((a) >> 2))
// Return a tiny block (ptr, msize) in `region` to its magazine, coalescing
// with free neighbors. Caller holds tiny_mag_ptr's magazine lock on entry.
// Returns TRUE if the lock is still held on exit; returns FALSE on the path
// that unmaps an empty Depot region (which releases the lock itself).
// For the recirc path the result of tiny_free_do_recirc_to_depot is passed
// through -- presumably the same TRUE/FALSE lock convention; confirm there.
static INLINE boolean_t
tiny_free_no_lock(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index, region_t region, void *ptr,
msize_t msize)
{
// Remember the caller's block before coalescing moves `ptr` backwards.
void *original_ptr = ptr;
size_t original_size = TINY_BYTES_FOR_MSIZE(msize);
void *next_block = ((unsigned char *)ptr + original_size);
msize_t previous_msize, next_msize;
void *previous;
free_list_t *big_free_block;
free_list_t *after_next_block;
free_list_t *before_next_block;
#if DEBUG_MALLOC
if (LOG(szone,ptr)) {
malloc_printf("in tiny_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
}
if (!msize) {
szone_error(szone, 1, "trying to free tiny block that is too small", ptr,
"in tiny_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
}
#endif
// Backward coalesce: if the block immediately before us is free, pull it
// off its free list and extend our block to cover it.
previous = tiny_previous_preceding_free(ptr, &previous_msize);
if (previous) {
#if DEBUG_MALLOC
if (LOG(szone, ptr) || LOG(szone,previous)) {
malloc_printf("in tiny_free_no_lock(), coalesced backwards for %p previous=%p\n", ptr, previous);
}
#endif
// Our old header is now interior to the merged block.
set_tiny_meta_header_middle(ptr);
tiny_free_list_remove_ptr(szone, tiny_mag_ptr, previous, previous_msize);
ptr = previous;
msize += previous_msize;
}
// Forward coalesce with the following block if it is free and in-region.
if ((next_block < TINY_REGION_END(region)) && tiny_meta_header_is_free(next_block)) {
next_msize = get_tiny_free_size(next_block);
#if DEBUG_MALLOC
if (LOG(szone, ptr) || LOG(szone, next_block)) {
malloc_printf("in tiny_free_no_lock(), for ptr=%p, msize=%d coalesced forward=%p next_msize=%d\n",
ptr, msize, next_block, next_msize);
}
#endif
// If the next block lives on the oversize (last) slot, splice `ptr` into
// the list in the next block's place instead of a remove/re-add pair:
// the merged block stays on that same list.
if (next_msize >= NUM_TINY_SLOTS) {
msize += next_msize;
big_free_block = (free_list_t *)next_block;
after_next_block = free_list_unchecksum_ptr(szone, &big_free_block->next);
before_next_block = free_list_unchecksum_ptr(szone, &big_free_block->previous);
if (!before_next_block) {
// next_block was the list head; the merged block takes its place.
tiny_mag_ptr->mag_free_list[NUM_TINY_SLOTS-1] = ptr;
} else {
before_next_block->next.u = free_list_checksum_ptr(szone, ptr);
}
if (after_next_block) {
after_next_block->previous.u = free_list_checksum_ptr(szone, ptr);
}
((free_list_t *)ptr)->previous = big_free_block->previous;
((free_list_t *)ptr)->next = big_free_block->next;
set_tiny_meta_header_middle(big_free_block);
set_tiny_meta_header_free(ptr, msize);
goto tiny_free_ending;
}
tiny_free_list_remove_ptr(szone, tiny_mag_ptr, next_block, next_msize);
set_tiny_meta_header_middle(next_block); msize += next_msize;
}
// Scribble freed memory when requested (0x55 pattern aids debugging).
if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && msize && (msize >= TINY_QUANTUM))
memset(ptr, 0x55, TINY_BYTES_FOR_MSIZE(msize));
tiny_free_list_add_ptr(szone, tiny_mag_ptr, ptr, msize);
tiny_free_ending:
// Accounting: magazine- and region-level usage drop by the original size.
tiny_mag_ptr->mag_num_objects--;
tiny_mag_ptr->mag_num_bytes_in_objects -= original_size;
region_trailer_t *node = REGION_TRAILER_FOR_TINY_REGION(region);
size_t bytes_used = node->bytes_used - original_size;
node->bytes_used = bytes_used;
#if !TARGET_OS_EMBEDDED // Always madvise for embedded platforms
if (szone->num_tiny_magazines == 1) {
// Single-magazine configuration: no Depot, nothing more to do.
} else if (DEPOT_MAGAZINE_INDEX != mag_index) {
// Ordinary magazine: mark a sparse region eligible for recirculation,
// and hand the whole magazine's slack to the Depot when the magazine
// has > 1.5 regions of headroom and is below the density threshold.
if (bytes_used < DENSITY_THRESHOLD(TINY_REGION_PAYLOAD_BYTES)) {
node->recirc_suitable = TRUE;
} else {
}
size_t a = tiny_mag_ptr->num_bytes_in_magazine; size_t u = tiny_mag_ptr->mag_num_bytes_in_objects;
if (a - u > ((3 * TINY_REGION_PAYLOAD_BYTES) / 2) && u < DENSITY_THRESHOLD(a)) {
return tiny_free_do_recirc_to_depot(szone, tiny_mag_ptr, mag_index);
}
} else {
#endif
// Depot (or embedded) path: madvise whole pages of the freed span back
// to the kernel. Compute the page-aligned span that is safely inside
// the free block (its free-list header and trailing msize must stay).
uintptr_t safe_ptr = (uintptr_t)ptr + sizeof(free_list_t) + sizeof(msize_t);
uintptr_t round_safe = round_page(safe_ptr);
uintptr_t safe_extent = (uintptr_t)ptr + TINY_BYTES_FOR_MSIZE(msize) - sizeof(msize_t);
uintptr_t trunc_extent = trunc_page(safe_extent);
if (round_safe < trunc_extent) { uintptr_t lo = trunc_page((uintptr_t)original_ptr);
uintptr_t hi = round_page((uintptr_t)original_ptr + original_size);
// Temporarily pull the block off the free list and mark it in-use so
// no one else touches it while we drop the lock around madvise.
tiny_free_list_remove_ptr(szone, tiny_mag_ptr, ptr, msize);
set_tiny_meta_header_in_use(ptr, msize);
// Pin the region so it cannot be unmapped while unlocked.
OSAtomicIncrement32Barrier(&(node->pinned_to_depot));
SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
#if TARGET_OS_EMBEDDED
madvise_free_range(szone, region, MAX(round_safe, lo), MIN(trunc_extent, hi), &szone->last_tiny_advise);
#else
madvise_free_range(szone, region, MAX(round_safe, lo), MIN(trunc_extent, hi));
#endif
SZONE_MAGAZINE_PTR_LOCK(szone, tiny_mag_ptr);
OSAtomicDecrement32Barrier(&(node->pinned_to_depot));
set_tiny_meta_header_free(ptr, msize);
tiny_free_list_add_ptr(szone, tiny_mag_ptr, ptr, msize);
}
#if !TARGET_OS_EMBEDDED
// If the Depot region is now completely empty and unpinned, unmap it.
if (0 < bytes_used || 0 < node->pinned_to_depot) {
} else {
region_t r_dealloc = tiny_free_try_depot_unmap_no_lock(szone, tiny_mag_ptr, node);
SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
if (r_dealloc)
deallocate_pages(szone, r_dealloc, TINY_REGION_SIZE, 0);
// Lock already dropped: tell the caller not to unlock again.
return FALSE; }
}
#endif
return TRUE; }
// Adopt a freshly allocated, aligned region into `tiny_mag_ptr` (owned by
// mag_index) and carve the first allocation of `msize` quanta out of it.
// Caller holds the magazine lock. Returns the new allocation's pointer.
static void *
tiny_malloc_from_region_no_lock(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index,
msize_t msize, void * aligned_address)
{
void *ptr;
// Close out the previous "last region": fold its free-at-end/start slack
// into real free blocks before we retarget mag_last_region below.
if (tiny_mag_ptr->mag_bytes_free_at_end || tiny_mag_ptr->mag_bytes_free_at_start)
tiny_finalize_region(szone, tiny_mag_ptr);
// Pre-mark the header bits past NUM_TINY_BLOCKS in the final 32-bit header
// word (when NUM_TINY_BLOCKS is not a multiple of 32) so scans terminate.
((tiny_region_t)aligned_address)->pairs[CEIL_NUM_TINY_BLOCKS_WORDS-1].header =
(NUM_TINY_BLOCKS & 31) ? (0xFFFFFFFFU << (NUM_TINY_BLOCKS & 31)) : 0;
((tiny_region_t)aligned_address)->pairs[CEIL_NUM_TINY_BLOCKS_WORDS-1].inuse = 0;
// Insert the region into the region hash, growing the table via the
// generation mechanism (readers follow tiny_region_generation locklessly,
// hence the memory barrier after publishing the new generation).
LOCK(szone->tiny_regions_lock);
if (szone->tiny_region_generation->num_regions_allocated < (2 * szone->num_tiny_regions)) {
region_t *new_regions;
size_t new_size;
size_t new_shift = szone->tiny_region_generation->num_regions_allocated_shift; new_regions = hash_regions_grow_no_lock(szone, szone->tiny_region_generation->hashed_regions,
szone->tiny_region_generation->num_regions_allocated,
&new_shift,
&new_size);
szone->tiny_region_generation->nextgen->hashed_regions = new_regions;
szone->tiny_region_generation->nextgen->num_regions_allocated = new_size;
szone->tiny_region_generation->nextgen->num_regions_allocated_shift = new_shift;
szone->tiny_region_generation = szone->tiny_region_generation->nextgen;
OSMemoryBarrier();
}
MAGAZINE_INDEX_FOR_TINY_REGION(aligned_address) = mag_index;
hash_region_insert_no_lock(szone->tiny_region_generation->hashed_regions,
szone->tiny_region_generation->num_regions_allocated,
szone->tiny_region_generation->num_regions_allocated_shift,
aligned_address);
szone->num_tiny_regions++;
UNLOCK(szone->tiny_regions_lock);
tiny_mag_ptr->mag_last_region = aligned_address;
BYTES_USED_FOR_TINY_REGION(aligned_address) = TINY_BYTES_FOR_MSIZE(msize);
#if ASLR_INTERNAL
// Randomize where the first allocation lands within the region.
int offset_msize = malloc_entropy[0] & TINY_ENTROPY_MASK;
#if DEBUG_MALLOC
if (getenv("MallocASLRForce")) offset_msize = strtol(getenv("MallocASLRForce"), NULL, 0) & TINY_ENTROPY_MASK;
if (getenv("MallocASLRPrint")) malloc_printf("Region: %p offset: %d\n", aligned_address, offset_msize);
#endif
#else
int offset_msize = 0;
#endif
ptr = (void *)((uintptr_t) aligned_address + TINY_BYTES_FOR_MSIZE(offset_msize));
set_tiny_meta_header_in_use(ptr, msize);
tiny_mag_ptr->mag_num_objects++;
tiny_mag_ptr->mag_num_bytes_in_objects += TINY_BYTES_FOR_MSIZE(msize);
tiny_mag_ptr->num_bytes_in_magazine += TINY_REGION_PAYLOAD_BYTES;
// Place an in-use marker after the allocation so the remainder reads as a
// "bump allocation" area tracked by mag_bytes_free_at_end.
set_tiny_meta_header_in_use_1((void *)((uintptr_t)ptr + TINY_BYTES_FOR_MSIZE(msize)));
tiny_mag_ptr->mag_bytes_free_at_end = TINY_BYTES_FOR_MSIZE(NUM_TINY_BLOCKS - msize - offset_msize);
#if ASLR_INTERNAL
// The skipped prefix is tracked as free-at-start; mark its boundary block.
tiny_mag_ptr->mag_bytes_free_at_start = TINY_BYTES_FOR_MSIZE(offset_msize);
if (offset_msize) {
set_tiny_meta_header_in_use_1((void *)((uintptr_t)ptr - TINY_QUANTUM));
}
#else
tiny_mag_ptr->mag_bytes_free_at_start = 0;
#endif
// New regions join the tail of the magazine's recirculation list.
recirc_list_splice_last(szone, tiny_mag_ptr, REGION_TRAILER_FOR_TINY_REGION(aligned_address));
#if DEBUG_MALLOC
if (LOG(szone,ptr)) {
malloc_printf("in tiny_malloc_from_region_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
}
#endif
return ptr;
}
// Shrink an allocation in place: carve the tail beyond new_good_size into a
// separate in-use block and free it through szone_free. Always returns ptr.
// Caller does NOT hold the magazine lock; it is taken and released here.
static INLINE void *
tiny_try_shrink_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_good_size)
{
msize_t new_msize = TINY_MSIZE_FOR_BYTES(new_good_size);
msize_t mshrinkage = TINY_MSIZE_FOR_BYTES(old_size) - new_msize;
if (mshrinkage) {
void *q = (void *)((uintptr_t)ptr + TINY_BYTES_FOR_MSIZE(new_msize));
// Lock the magazine that currently owns ptr's region.
magazine_t *tiny_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->tiny_magazines,
REGION_TRAILER_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr)),
MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr)));
// Mark the tail as a real in-use block (and bump the object count) so
// the szone_free below sees a normal allocation to release.
set_tiny_meta_header_in_use(q, mshrinkage);
tiny_mag_ptr->mag_num_objects++;
SZONE_MAGAZINE_PTR_UNLOCK(szone,tiny_mag_ptr);
szone_free(szone, q); }
return ptr;
}
// Try to grow an allocation in place by consuming the (free) block that
// immediately follows it. Returns 1 on success, 0 if the next block is
// absent/in use/too small. Takes and releases the owning magazine's lock.
static INLINE boolean_t
tiny_try_realloc_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size)
{
msize_t index;
msize_t old_msize;
unsigned next_index;
void *next_block;
boolean_t is_free;
msize_t next_msize, coalesced_msize, leftover_msize;
void *leftover;
index = TINY_INDEX_FOR_PTR(ptr);
old_msize = TINY_MSIZE_FOR_BYTES(old_size);
next_index = index + old_msize;
// The block after us would fall outside the region: cannot grow.
if (next_index >= NUM_TINY_BLOCKS) {
return 0;
}
next_block = (char *)ptr + old_size;
magazine_t *tiny_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->tiny_magazines,
REGION_TRAILER_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr)),
MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr)));
is_free = tiny_meta_header_is_free(next_block);
if (!is_free) {
SZONE_MAGAZINE_PTR_UNLOCK(szone,tiny_mag_ptr);
return 0; }
next_msize = get_tiny_free_size(next_block);
// Even merged with the free neighbor we would be too small.
if (old_size + TINY_BYTES_FOR_MSIZE(next_msize) < new_size) {
SZONE_MAGAZINE_PTR_UNLOCK(szone,tiny_mag_ptr);
return 0; }
tiny_free_list_remove_ptr(szone, tiny_mag_ptr, next_block, next_msize);
// Take only as many quanta as needed (rounded up); return the remainder
// of the neighbor to the free list.
set_tiny_meta_header_middle(next_block); coalesced_msize = TINY_MSIZE_FOR_BYTES(new_size - old_size + TINY_QUANTUM - 1);
leftover_msize = next_msize - coalesced_msize;
if (leftover_msize) {
leftover = (void *)((uintptr_t)next_block + TINY_BYTES_FOR_MSIZE(coalesced_msize));
tiny_free_list_add_ptr(szone, tiny_mag_ptr, leftover, leftover_msize);
}
set_tiny_meta_header_in_use(ptr, old_msize + coalesced_msize);
#if DEBUG_MALLOC
if (LOG(szone,ptr)) {
malloc_printf("in tiny_try_realloc_in_place(), ptr=%p, msize=%d\n", ptr, old_msize + coalesced_msize);
}
#endif
tiny_mag_ptr->mag_num_bytes_in_objects += TINY_BYTES_FOR_MSIZE(coalesced_msize);
// Region got denser; re-evaluate its recirculation eligibility.
region_trailer_t *node = REGION_TRAILER_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr));
size_t bytes_used = node->bytes_used + TINY_BYTES_FOR_MSIZE(coalesced_msize);
node->bytes_used = bytes_used;
if (bytes_used < DENSITY_THRESHOLD(TINY_REGION_PAYLOAD_BYTES)) {
} else {
node->recirc_suitable = FALSE;
}
SZONE_MAGAZINE_PTR_UNLOCK(szone,tiny_mag_ptr);
CHECK(szone, __PRETTY_FUNCTION__);
return 1;
}
// Walk every block in `region`, verifying the metadata invariants (sizes,
// free-list pointers, end markers). Returns 1 if the region is consistent,
// 0 after printing a diagnostic. Caller holds the owning magazine's lock
// (asserted via CHECK_MAGAZINE_PTR_LOCKED in DEBUG builds).
static boolean_t
tiny_check_region(szone_t *szone, region_t region)
{
uintptr_t start, ptr, region_end;
boolean_t prev_free = 0;
boolean_t is_free;
msize_t msize;
free_list_t *free_head;
void *follower, *previous, *next;
mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(region);
magazine_t *tiny_mag_ptr = &(szone->tiny_magazines[mag_index]);
CHECK_MAGAZINE_PTR_LOCKED(szone, tiny_mag_ptr, __PRETTY_FUNCTION__);
start = (uintptr_t)TINY_REGION_ADDRESS(region);
ptr = start;
// The magazine's last region has untracked slack at both ends; skip the
// free-at-start prefix and verify its single-quantum boundary marker.
if (region == tiny_mag_ptr->mag_last_region) {
ptr += tiny_mag_ptr->mag_bytes_free_at_start;
if (tiny_mag_ptr->mag_bytes_free_at_start) {
msize = get_tiny_meta_header((void *)(ptr - TINY_QUANTUM), &is_free);
if (is_free || (msize != 1)) {
malloc_printf("*** invariant broken for leader block %p - %d %d\n", ptr - TINY_QUANTUM, msize, is_free);
}
}
}
region_end = (uintptr_t)TINY_REGION_END(region);
if (region == tiny_mag_ptr->mag_last_region)
region_end -= tiny_mag_ptr->mag_bytes_free_at_end;
while (ptr < region_end) {
msize = get_tiny_meta_header((void *)ptr, &is_free);
// A free block with msize 0 at the very start means the whole region
// is one big free block: trivially consistent.
if (is_free && !msize && (ptr == start)) {
return 1;
}
if (!msize) {
malloc_printf("*** invariant broken for tiny block %p this msize=%d - size is too small\n",
ptr, msize);
return 0;
}
if (!is_free) {
prev_free = 0;
// In-use blocks larger than the largest slot should not exist.
if (msize > (NUM_TINY_SLOTS - 1)) {
malloc_printf("*** invariant broken for %p this tiny msize=%d - size is too large\n",
ptr, msize);
return 0;
}
ptr += TINY_BYTES_FOR_MSIZE(msize);
} else {
#if !RELAXED_INVARIANT_CHECKS
// Two adjacent free blocks should have been coalesced.
if (prev_free) {
malloc_printf("*** invariant broken for free block %p this tiny msize=%d: two free blocks in a row\n",
ptr, msize);
return 0;
}
#endif // RELAXED_INVARIANT_CHECKS
prev_free = 1;
// Validate the free block's doubly-linked list neighbors.
free_head = (free_list_t *)ptr;
previous = free_list_unchecksum_ptr(szone, &free_head->previous);
next = free_list_unchecksum_ptr(szone, &free_head->next);
if (previous && !tiny_meta_header_is_free(previous)) {
malloc_printf("*** invariant broken for %p (previous %p is not a free pointer)\n",
ptr, previous);
return 0;
}
if (next && !tiny_meta_header_is_free(next)) {
malloc_printf("*** invariant broken for %p (next in free list %p is not a free pointer)\n",
ptr, next);
return 0;
}
// The block after a free block records the free block's msize; check it.
follower = FOLLOWING_TINY_PTR(ptr, msize);
if (((uintptr_t)follower != region_end) && (get_tiny_previous_free_msize(follower) != msize)) {
malloc_printf("*** invariant broken for tiny free %p followed by %p in region [%p-%p] "
"(end marker incorrect) should be %d; in fact %d\n",
ptr, follower, TINY_REGION_ADDRESS(region), region_end, msize, get_tiny_previous_free_msize(follower));
return 0;
}
ptr = (uintptr_t)follower;
}
}
// The walk must land exactly on the (possibly trimmed) region end.
if (ptr != region_end) {
malloc_printf("*** invariant broken for region end %p - %p\n", ptr, region_end);
return 0;
}
// Verify the single-quantum marker that caps the free-at-end area.
if (region == tiny_mag_ptr->mag_last_region) {
if (tiny_mag_ptr->mag_bytes_free_at_end) {
msize = get_tiny_meta_header((void *)ptr, &is_free);
if (is_free || (msize != 1)) {
malloc_printf("*** invariant broken for blocker block %p - %d %d\n", ptr, msize, is_free);
}
}
}
return 1;
}
// Introspection entry point: enumerate tiny-zone ranges of a (possibly
// remote) task, reporting admin/pointer regions and, when requested,
// every in-use block. All target memory is accessed through `reader`;
// results are delivered via `recorder`. Returns 0 or a kern_return_t error.
//
// BUGFIX: the reader call for the region table read `(void **)®ions`,
// a mis-encoded `&regions` (the `&reg` had been turned into the `®`
// character). Restored the address-of expression so the file compiles.
static kern_return_t
tiny_in_use_enumerator(task_t task, void *context, unsigned type_mask, szone_t *szone,
memory_reader_t reader, vm_range_recorder_t recorder)
{
size_t num_regions;
size_t index;
region_t *regions;
vm_range_t buffer[MAX_RECORDER_BUFFER];
unsigned count = 0;
kern_return_t err;
region_t region;
vm_range_t range;
vm_range_t admin_range;
vm_range_t ptr_range;
unsigned char *mapped_region;
uint32_t *block_header;
uint32_t *in_use;
unsigned block_index;
unsigned block_limit;
boolean_t is_free;
msize_t msize;
void *mapped_ptr;
unsigned bit;
magazine_t *tiny_mag_base = NULL;
region_hash_generation_t *trg_ptr;
// Map the current region-hash generation, then its region table.
err = reader(task, (vm_address_t)szone->tiny_region_generation, sizeof(region_hash_generation_t), (void **)&trg_ptr);
if (err) return err;
num_regions = trg_ptr->num_regions_allocated;
err = reader(task, (vm_address_t)trg_ptr->hashed_regions, sizeof(region_t) * num_regions, (void **)&regions);
if (err) return err;
if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) {
// Need magazine state (mag_last_free caches) to classify blocks.
err = reader(task, (vm_address_t)(szone->tiny_magazines),
szone->num_tiny_magazines*sizeof(magazine_t),(void **)&tiny_mag_base);
if (err) return err;
}
for (index = 0; index < num_regions; ++index) {
region = regions[index];
if (HASHRING_OPEN_ENTRY != region && HASHRING_REGION_DEALLOCATED != region) {
range.address = (vm_address_t)TINY_REGION_ADDRESS(region);
range.size = (vm_size_t)TINY_REGION_SIZE;
if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) {
admin_range.address = range.address + TINY_METADATA_START;
admin_range.size = TINY_METADATA_SIZE;
recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &admin_range, 1);
}
if (type_mask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) {
ptr_range.address = range.address;
ptr_range.size = NUM_TINY_BLOCKS * TINY_QUANTUM;
recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1);
}
if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) {
void *mag_last_free;
vm_address_t mag_last_free_ptr = 0;
msize_t mag_last_free_msize = 0;
err = reader(task, range.address, range.size, (void **)&mapped_region);
if (err)
return err;
// The owning magazine's mag_last_free cache holds a block that is
// logically free but still marked in-use; record its address/msize
// (packed into the pointer's low bits) so we can skip it below.
mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(mapped_region);
magazine_t *tiny_mag_ptr = tiny_mag_base + mag_index;
if (DEPOT_MAGAZINE_INDEX != mag_index) {
mag_last_free = tiny_mag_ptr->mag_last_free;
if (mag_last_free) {
mag_last_free_ptr = (uintptr_t) mag_last_free & ~(TINY_QUANTUM - 1);
mag_last_free_msize = (uintptr_t) mag_last_free & (TINY_QUANTUM - 1);
}
} else {
// Depot region: any magazine's cache may point into it.
for (mag_index = 0; mag_index < szone->num_tiny_magazines; mag_index++) {
if ((void *)range.address == (tiny_mag_base + mag_index)->mag_last_free_rgn) {
mag_last_free = (tiny_mag_base + mag_index)->mag_last_free;
if (mag_last_free) {
mag_last_free_ptr = (uintptr_t) mag_last_free & ~(TINY_QUANTUM - 1);
mag_last_free_msize = (uintptr_t) mag_last_free & (TINY_QUANTUM - 1);
}
}
}
}
block_header = (uint32_t *)(mapped_region + TINY_METADATA_START + sizeof(region_trailer_t));
in_use = TINY_INUSE_FOR_HEADER(block_header);
block_index = 0;
block_limit = NUM_TINY_BLOCKS;
if (region == tiny_mag_ptr->mag_last_region) {
// Exclude the untracked bump areas at both ends of the last region.
block_index += TINY_MSIZE_FOR_BYTES(tiny_mag_ptr->mag_bytes_free_at_start);
block_limit -= TINY_MSIZE_FOR_BYTES(tiny_mag_ptr->mag_bytes_free_at_end);
}
while (block_index < block_limit) {
vm_size_t block_offset = TINY_BYTES_FOR_MSIZE(block_index);
is_free = !BITARRAY_BIT(in_use, block_index);
if (is_free) {
mapped_ptr = mapped_region + block_offset;
// A free block's size lives in its body unless it is a lone quantum.
if (!BITARRAY_BIT(block_header, (block_index+1)))
msize = TINY_FREE_SIZE(mapped_ptr);
else
msize = 1;
} else if (range.address + block_offset != mag_last_free_ptr) {
// In-use: size is the distance to the next header bit.
msize = 1;
bit = block_index + 1;
while (! BITARRAY_BIT(block_header, bit)) {
bit++;
msize ++;
}
buffer[count].address = range.address + block_offset;
buffer[count].size = TINY_BYTES_FOR_MSIZE(msize);
count++;
if (count >= MAX_RECORDER_BUFFER) {
recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
count = 0;
}
} else {
// The cached mag_last_free block: skip it, it is not in use.
msize = mag_last_free_msize;
}
// A zero msize would loop forever; treat corrupt metadata as failure.
if (!msize)
return KERN_FAILURE;
block_index += msize;
}
if (count) {
recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
count = 0;
}
}
}
}
return 0;
}
// Allocate `msize` quanta from the magazine's free lists, trying in order:
// the exact-size slot, any larger slot (via the occupancy bitmap), the
// oversize slot, and finally the bump areas at the end/start of the last
// region. Returns NULL if nothing fits. Caller holds the magazine lock.
static void *
tiny_malloc_from_free_list(szone_t *szone, magazine_t *tiny_mag_ptr, mag_index_t mag_index, msize_t msize)
{
free_list_t *ptr;
msize_t this_msize;
grain_t slot = msize - 1;
free_list_t **free_list = tiny_mag_ptr->mag_free_list;
free_list_t **the_slot = free_list + slot;
free_list_t *next;
free_list_t **limit;
#if defined(__LP64__)
uint64_t bitmap;
#else
uint32_t bitmap;
#endif
msize_t leftover_msize;
free_list_t *leftover_ptr;
CHECK_MAGAZINE_PTR_LOCKED(szone, tiny_mag_ptr, __PRETTY_FUNCTION__);
// 1) Exact fit: pop the head of the slot for this msize.
ptr = *the_slot;
if (ptr) {
next = free_list_unchecksum_ptr(szone, &ptr->next);
if (next) {
next->previous = ptr->previous;
} else {
// Slot is now empty; clear its bit in the occupancy bitmap.
BITMAPV_CLR(tiny_mag_ptr->mag_bitmap, slot);
}
*the_slot = next;
this_msize = msize;
#if DEBUG_MALLOC
if (LOG(szone, ptr)) {
malloc_printf("in tiny_malloc_from_free_list(), exact match ptr=%p, this_msize=%d\n", ptr, this_msize);
}
#endif
goto return_tiny_alloc;
}
// 2) Mask off slots smaller than requested and find the next occupied one.
#if defined(__LP64__)
bitmap = ((uint64_t *)(tiny_mag_ptr->mag_bitmap))[0] & ~ ((1ULL << slot) - 1);
#else
bitmap = tiny_mag_ptr->mag_bitmap[0] & ~ ((1 << slot) - 1);
#endif
if (!bitmap)
goto try_tiny_malloc_from_end;
slot = BITMAPV_CTZ(bitmap);
limit = free_list + NUM_TINY_SLOTS - 1;
free_list += slot;
if (free_list < limit) {
// Found a larger block in a regular slot; split off the remainder.
ptr = *free_list;
if (ptr) {
next = free_list_unchecksum_ptr(szone, &ptr->next);
*free_list = next;
if (next) {
next->previous = ptr->previous;
} else {
BITMAPV_CLR(tiny_mag_ptr->mag_bitmap, slot);
}
this_msize = get_tiny_free_size(ptr);
goto add_leftover_and_proceed;
}
#if DEBUG_MALLOC
malloc_printf("in tiny_malloc_from_free_list(), mag_bitmap out of sync, slot=%d\n",slot);
#endif
}
// 3) Oversize slot (blocks with msize >= NUM_TINY_SLOTS). The head block
// is carved in place when its remainder still belongs on this list.
ptr = *limit;
if (ptr) {
this_msize = get_tiny_free_size(ptr);
next = free_list_unchecksum_ptr(szone, &ptr->next);
if (this_msize - msize >= NUM_TINY_SLOTS) {
// Remainder is still oversize: substitute it for ptr as list head,
// preserving ptr's (checksummed) links.
leftover_msize = this_msize - msize;
leftover_ptr = (free_list_t *)((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize));
*limit = leftover_ptr;
if (next) {
next->previous.u = free_list_checksum_ptr(szone, leftover_ptr);
}
leftover_ptr->previous = ptr->previous;
leftover_ptr->next = ptr->next;
set_tiny_meta_header_free(leftover_ptr, leftover_msize);
#if DEBUG_MALLOC
if (LOG(szone,ptr)) {
malloc_printf("in tiny_malloc_from_free_list(), last slot ptr=%p, msize=%d this_msize=%d\n",
ptr, msize, this_msize);
}
#endif
this_msize = msize;
goto return_tiny_alloc;
}
// Remainder would be a regular-size block: unlink and re-add below.
if (next) {
next->previous = ptr->previous;
}
*limit = next;
goto add_leftover_and_proceed;
}
try_tiny_malloc_from_end:
// 4) Bump-allocate from the free space at the end of the last region.
if (tiny_mag_ptr->mag_bytes_free_at_end >= TINY_BYTES_FOR_MSIZE(msize)) {
ptr = (free_list_t *)((uintptr_t)TINY_REGION_END(tiny_mag_ptr->mag_last_region) -
tiny_mag_ptr->mag_bytes_free_at_end);
tiny_mag_ptr->mag_bytes_free_at_end -= TINY_BYTES_FOR_MSIZE(msize);
if (tiny_mag_ptr->mag_bytes_free_at_end) {
// Keep a boundary marker after the shrunken free-at-end area.
set_tiny_meta_header_in_use_1((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize));
}
this_msize = msize;
#if DEBUG_MALLOC
if (LOG(szone, ptr)) {
malloc_printf("in tiny_malloc_from_free_list(), from end ptr=%p, msize=%d\n", ptr, msize);
}
#endif
goto return_tiny_alloc;
}
#if ASLR_INTERNAL
// 5) Likewise from the ASLR free space at the start of the last region.
if (tiny_mag_ptr->mag_bytes_free_at_start >= TINY_BYTES_FOR_MSIZE(msize)) {
ptr = (free_list_t *)(TINY_REGION_ADDRESS(tiny_mag_ptr->mag_last_region) +
tiny_mag_ptr->mag_bytes_free_at_start - TINY_BYTES_FOR_MSIZE(msize));
tiny_mag_ptr->mag_bytes_free_at_start -= TINY_BYTES_FOR_MSIZE(msize);
if (tiny_mag_ptr->mag_bytes_free_at_start) {
set_tiny_meta_header_in_use_1((unsigned char *)ptr - TINY_QUANTUM);
}
this_msize = msize;
#if DEBUG_MALLOC
if (LOG(szone, ptr)) {
malloc_printf("in tiny_malloc_from_free_list(), from start ptr=%p, msize=%d\n", ptr, msize);
}
#endif
goto return_tiny_alloc;
}
#endif
return NULL;
add_leftover_and_proceed:
// Return the unused tail of an oversized block to the free lists.
if (!this_msize || (this_msize > msize)) {
leftover_msize = this_msize - msize;
leftover_ptr = (free_list_t *)((unsigned char *)ptr + TINY_BYTES_FOR_MSIZE(msize));
#if DEBUG_MALLOC
if (LOG(szone,ptr)) {
malloc_printf("in tiny_malloc_from_free_list(), adding leftover ptr=%p, this_msize=%d\n", ptr, this_msize);
}
#endif
tiny_free_list_add_ptr(szone, tiny_mag_ptr, leftover_ptr, leftover_msize);
this_msize = msize;
}
return_tiny_alloc:
tiny_mag_ptr->mag_num_objects++;
tiny_mag_ptr->mag_num_bytes_in_objects += TINY_BYTES_FOR_MSIZE(this_msize);
// Region got denser; re-evaluate its recirculation eligibility.
region_trailer_t *node = REGION_TRAILER_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr));
size_t bytes_used = node->bytes_used + TINY_BYTES_FOR_MSIZE(this_msize);
node->bytes_used = bytes_used;
if (bytes_used < DENSITY_THRESHOLD(TINY_REGION_PAYLOAD_BYTES)) {
} else {
node->recirc_suitable = FALSE;
}
#if DEBUG_MALLOC
if (LOG(szone,ptr)) {
malloc_printf("in tiny_malloc_from_free_list(), ptr=%p, this_msize=%d, msize=%d\n", ptr, this_msize, msize);
}
#endif
if (this_msize > 1)
set_tiny_meta_header_in_use(ptr, this_msize);
else
set_tiny_meta_header_in_use_1(ptr);
return ptr;
}
#undef DENSITY_THRESHOLD
#undef K
// Top-level tiny allocation: try the per-magazine last-free cache, then the
// free lists, then the Depot, then a fresh region. Zeroes the memory when
// cleared_requested. Takes/releases the thread's magazine lock internally.
static INLINE void *
tiny_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested)
{
void *ptr;
mag_index_t mag_index = mag_get_thread_index(szone);
magazine_t *tiny_mag_ptr = &(szone->tiny_magazines[mag_index]);
#if DEBUG_MALLOC
if (DEPOT_MAGAZINE_INDEX == mag_index) {
szone_error(szone, 1, "malloc called for magazine index -1", NULL, NULL);
return(NULL);
}
if (!msize) {
szone_error(szone, 1, "invariant broken (!msize) in allocation (region)", NULL, NULL);
return(NULL);
}
#endif
SZONE_MAGAZINE_PTR_LOCK(szone, tiny_mag_ptr);
#if TINY_CACHE
// Fast path: the last-free cache packs the block's msize into the low
// (sub-quantum) bits of the pointer; a match means same-size reuse.
ptr = tiny_mag_ptr->mag_last_free;
if ((((uintptr_t)ptr) & (TINY_QUANTUM - 1)) == msize) {
tiny_mag_ptr->mag_last_free = NULL;
tiny_mag_ptr->mag_last_free_rgn = NULL;
SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
CHECK(szone, __PRETTY_FUNCTION__);
ptr = (void *)((uintptr_t)ptr & ~ (TINY_QUANTUM - 1));
if (cleared_requested) {
memset(ptr, 0, TINY_BYTES_FOR_MSIZE(msize));
}
#if DEBUG_MALLOC
if (LOG(szone,ptr)) {
malloc_printf("in tiny_malloc_should_clear(), tiny cache ptr=%p, msize=%d\n", ptr, msize);
}
#endif
return ptr;
}
#endif
while (1) {
// Normal path: carve from this magazine's free lists.
ptr = tiny_malloc_from_free_list(szone, tiny_mag_ptr, mag_index, msize);
if (ptr) {
SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
CHECK(szone, __PRETTY_FUNCTION__);
if (cleared_requested) {
memset(ptr, 0, TINY_BYTES_FOR_MSIZE(msize));
}
return ptr;
}
// Try pulling a recirculated region out of the Depot, then retry.
if (tiny_get_region_from_depot(szone, tiny_mag_ptr, mag_index, msize)) {
ptr = tiny_malloc_from_free_list(szone, tiny_mag_ptr, mag_index, msize);
if (ptr) {
SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
CHECK(szone, __PRETTY_FUNCTION__);
if (cleared_requested) {
memset(ptr, 0, TINY_BYTES_FOR_MSIZE(msize));
}
return ptr;
}
}
// Last resort: allocate a brand-new region. alloc_underway serializes
// region allocation per magazine; others spin/yield until it clears.
if (!tiny_mag_ptr->alloc_underway) {
void *fresh_region;
tiny_mag_ptr->alloc_underway = TRUE;
OSMemoryBarrier();
// Drop the lock across the (slow) VM allocation.
SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
fresh_region = allocate_pages_securely(szone, TINY_REGION_SIZE, TINY_BLOCKS_ALIGN, VM_MEMORY_MALLOC_TINY);
SZONE_MAGAZINE_PTR_LOCK(szone, tiny_mag_ptr);
MAGMALLOC_ALLOCREGION((void *)szone, (int)mag_index, fresh_region, TINY_REGION_SIZE);
if (!fresh_region) { tiny_mag_ptr->alloc_underway = FALSE;
OSMemoryBarrier();
SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
return NULL;
}
ptr = tiny_malloc_from_region_no_lock(szone, tiny_mag_ptr, mag_index, msize, fresh_region);
tiny_mag_ptr->alloc_underway = FALSE;
OSMemoryBarrier();
SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
CHECK(szone, __PRETTY_FUNCTION__);
return ptr;
} else {
// Another thread is allocating a region for this magazine; yield
// and retry so we can use what it produces.
SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
pthread_yield_np();
SZONE_MAGAZINE_PTR_LOCK(szone, tiny_mag_ptr);
}
}
}
// Report a double free of a tiny block and abort via szone_error.
// Releases the owning magazine's lock first so the error path is not
// blocked on it. NOTE(review): one caller (free_tiny's non-cache path)
// invokes this before taking that lock -- confirm unlocking an unheld
// magazine lock is benign on all supported lock implementations.
static NOINLINE void
free_tiny_botch(szone_t *szone, free_list_t *ptr)
{
mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr));
magazine_t *tiny_mag_ptr = &(szone->tiny_magazines[mag_index]);
SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
szone_error(szone, 1, "double free", ptr, NULL);
}
// Free a tiny-zone pointer. If known_size is non-zero it is trusted;
// otherwise the size is read from the block's metadata (detecting double
// frees). Small blocks are parked in the magazine's one-deep last-free
// cache; the previously cached block (if any) is actually freed instead.
static INLINE void
free_tiny(szone_t *szone, void *ptr, region_t tiny_region, size_t known_size)
{
msize_t msize;
boolean_t is_free;
mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(tiny_region);
magazine_t *tiny_mag_ptr = &(szone->tiny_magazines[mag_index]);
if (known_size) {
msize = TINY_MSIZE_FOR_BYTES(known_size + TINY_QUANTUM - 1);
} else {
msize = get_tiny_meta_header(ptr, &is_free);
if (is_free) {
// Block is already free: double free.
free_tiny_botch(szone, ptr);
return;
}
}
#if DEBUG_MALLOC
if (!msize) {
malloc_printf("*** free_tiny() block in use is too large: %p\n", ptr);
return;
}
#endif
SZONE_MAGAZINE_PTR_LOCK(szone, tiny_mag_ptr);
#if TINY_CACHE
if (DEPOT_MAGAZINE_INDEX != mag_index) {
// Only msize values that fit in the pointer's low bits are cacheable.
if (msize < TINY_QUANTUM) { void *ptr2 = tiny_mag_ptr->mag_last_free; region_t rgn2 = tiny_mag_ptr->mag_last_free_rgn;
// Freeing the currently cached block again is a double free.
if (ptr == (void *)((uintptr_t)ptr2 & ~ (TINY_QUANTUM - 1))) {
free_tiny_botch(szone, ptr);
return;
}
if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && msize)
memset(ptr, 0x55, TINY_BYTES_FOR_MSIZE(msize));
// Swap: cache this block (msize packed into low bits), and fall
// through to truly free the previously cached one, if any.
tiny_mag_ptr->mag_last_free = (void *)(((uintptr_t)ptr) | msize);
tiny_mag_ptr->mag_last_free_rgn = tiny_region;
if (!ptr2) {
SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
CHECK(szone, __PRETTY_FUNCTION__);
return;
}
msize = (uintptr_t)ptr2 & (TINY_QUANTUM - 1);
ptr = (void *)(((uintptr_t)ptr2) & ~(TINY_QUANTUM - 1));
tiny_region = rgn2;
}
}
#endif
// The region's owning magazine can change (recirculation) between our
// read and the lock acquisition; re-check and chase it until stable.
region_trailer_t *trailer = REGION_TRAILER_FOR_TINY_REGION(tiny_region);
mag_index_t refreshed_index;
while (mag_index != (refreshed_index = trailer->mag_index)) {
SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
mag_index = refreshed_index;
tiny_mag_ptr = &(szone->tiny_magazines[mag_index]);
SZONE_MAGAZINE_PTR_LOCK(szone, tiny_mag_ptr);
}
// tiny_free_no_lock returns TRUE when we still hold the lock.
if (tiny_free_no_lock(szone, tiny_mag_ptr, mag_index, tiny_region, ptr, msize))
SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
CHECK(szone, __PRETTY_FUNCTION__);
}
// Debug aid: print, per magazine, the population of each tiny free-list
// slot. Iteration starts at -1 -- the Depot magazine lives at index -1 of
// the tiny_magazines array (DEPOT_MAGAZINE_INDEX).
static void
print_tiny_free_list(szone_t *szone)
{
free_list_t *ptr;
_SIMPLE_STRING b = _simple_salloc();
mag_index_t mag_index;
if (b) {
_simple_sappend(b, "tiny free sizes:\n");
for (mag_index = -1; mag_index < szone->num_tiny_magazines; mag_index++) {
grain_t slot = 0;
_simple_sprintf(b,"\tMagazine %d: ", mag_index);
while (slot < NUM_TINY_SLOTS) {
ptr = szone->tiny_magazines[mag_index].mag_free_list[slot];
if (ptr) {
// Last slot holds all blocks >= its size, hence the ">=" prefix.
_simple_sprintf(b, "%s%y[%d]; ", (slot == NUM_TINY_SLOTS-1) ? ">=" : "",
(slot+1)*TINY_QUANTUM, free_list_count(szone, ptr));
}
slot++;
}
_simple_sappend(b,"\n");
}
_malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%s\n", _simple_string(b));
_simple_sfree(b);
}
}
// Debug aid: walk `region` and print a summary -- allocations in use, bytes
// used, untouched bump areas, and page bytes reclaimable (or already
// MADV_FREE'd, for Depot regions). `bytes_at_start`/`bytes_at_end` are the
// magazine's free-at-start/free-at-end slack when this is the last region.
static void
print_tiny_region(boolean_t verbose, region_t region, size_t bytes_at_start, size_t bytes_at_end)
{
unsigned counts[1024];
unsigned in_use = 0;
uintptr_t start = (uintptr_t)TINY_REGION_ADDRESS(region);
// BUGFIX: the scan must begin past the free space at the *start* of the
// region; this previously added bytes_at_end, misaligning the walk
// whenever the two slack values differed.
uintptr_t current = start + bytes_at_start;
uintptr_t limit = (uintptr_t)TINY_REGION_END(region) - bytes_at_end;
boolean_t is_free;
msize_t msize;
unsigned ci;
_SIMPLE_STRING b;
uintptr_t pgTot = 0;
if (region == HASHRING_REGION_DEALLOCATED) {
if ((b = _simple_salloc()) != NULL) {
_simple_sprintf(b, "Tiny region [unknown address] was returned to the OS\n");
_malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%s\n", _simple_string(b));
_simple_sfree(b);
}
return;
}
memset(counts, 0, sizeof(counts));
while (current < limit) {
msize = get_tiny_meta_header((void *)current, &is_free);
// A free block with msize 0 at the very start means the whole region is
// one big free block. (BUGFIX: was bitwise `&`, now logical `&&` to
// match the identical test in tiny_check_region; same value here since
// both operands are 0/1, but the idiom was wrong.)
if (is_free && !msize && (current == start)) {
uintptr_t pgLo = round_page(start + sizeof(free_list_t) + sizeof(msize_t));
uintptr_t pgHi = trunc_page(start + TINY_REGION_SIZE - sizeof(msize_t));
if (pgLo < pgHi) {
pgTot += (pgHi - pgLo);
}
break;
}
if (!msize) {
malloc_printf("*** error with %p: msize=%d\n", (void *)current, (unsigned)msize);
break;
}
if (!is_free) {
// NOTE(review): tiny_check_region uses (NUM_TINY_SLOTS - 1) as the
// in-use size bound -- confirm which bound is intended here.
if (msize > NUM_TINY_SLOTS)
malloc_printf("*** error at %p msize for in_use is %d\n", (void *)current, msize);
if (msize < 1024)
counts[msize]++;
in_use++;
} else {
// Count the whole pages inside this free block (its free-list header
// and trailing msize word must survive, hence the insets).
uintptr_t pgLo = round_page(current + sizeof(free_list_t) + sizeof(msize_t));
uintptr_t pgHi = trunc_page(current + TINY_BYTES_FOR_MSIZE(msize) - sizeof(msize_t));
if (pgLo < pgHi) {
pgTot += (pgHi - pgLo);
}
}
current += TINY_BYTES_FOR_MSIZE(msize);
}
if ((b = _simple_salloc()) != NULL) {
_simple_sprintf(b, "Tiny region [%p-%p, %y] \t", (void *)start, TINY_REGION_END(region), (int)TINY_REGION_SIZE);
_simple_sprintf(b, "Magazine=%d \t", MAGAZINE_INDEX_FOR_TINY_REGION(region));
_simple_sprintf(b, "Allocations in use=%d \t Bytes in use=%ly \t", in_use, BYTES_USED_FOR_TINY_REGION(region));
if (bytes_at_end || bytes_at_start)
_simple_sprintf(b, "Untouched=%ly ", bytes_at_end + bytes_at_start);
if (DEPOT_MAGAZINE_INDEX == MAGAZINE_INDEX_FOR_TINY_REGION(region)) {
_simple_sprintf(b, "Advised MADV_FREE=%ly", pgTot);
} else {
_simple_sprintf(b, "Fragments subject to reclamation=%ly", pgTot);
}
if (verbose && in_use) {
_simple_sappend(b, "\n\tSizes in use: ");
for (ci = 0; ci < 1024; ci++)
if (counts[ci])
_simple_sprintf(b, "%d[%d] ", TINY_BYTES_FOR_MSIZE(ci), counts[ci]);
}
_malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%s\n", _simple_string(b));
_simple_sfree(b);
}
}
// Verify every magazine's free list for the given slot: each entry must be
// marked free, quantum-aligned, belong to a known region, and have a
// correct back-pointer. Returns 1 if all lists are consistent, 0 after a
// diagnostic. Index -1 is the Depot magazine.
static boolean_t
tiny_free_list_check(szone_t *szone, grain_t slot)
{
mag_index_t mag_index;
for (mag_index = -1; mag_index < szone->num_tiny_magazines; mag_index++) {
magazine_t *tiny_mag_ptr = &(szone->tiny_magazines[mag_index]);
SZONE_MAGAZINE_PTR_LOCK(szone, tiny_mag_ptr);
unsigned count = 0;
free_list_t *ptr = szone->tiny_magazines[mag_index].mag_free_list[slot];
boolean_t is_free;
free_list_t *previous = NULL;
while (ptr) {
is_free = tiny_meta_header_is_free(ptr);
if (! is_free) {
malloc_printf("*** in-use ptr in free list slot=%d count=%d ptr=%p\n", slot, count, ptr);
SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
return 0;
}
if (((uintptr_t)ptr) & (TINY_QUANTUM - 1)) {
malloc_printf("*** unaligned ptr in free list slot=%d count=%d ptr=%p\n", slot, count, ptr);
SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
return 0;
}
if (!tiny_region_for_ptr_no_lock(szone, ptr)) {
malloc_printf("*** ptr not in szone slot=%d count=%d ptr=%p\n", slot, count, ptr);
SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
return 0;
}
// Links are stored checksummed; decode before comparing.
if (free_list_unchecksum_ptr(szone, &ptr->previous) != previous) {
malloc_printf("*** previous incorrectly set slot=%d count=%d ptr=%p\n", slot, count, ptr);
SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
return 0;
}
previous = ptr;
ptr = free_list_unchecksum_ptr(szone, &ptr->next);
count++;
}
SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
}
return 1;
}
// Mark the small block at `index` free: its meta-header entry holds the
// block's msize with the SMALL_IS_FREE flag set.
static INLINE void
small_meta_header_set_is_free(msize_t *meta_headers, unsigned index, msize_t msize)
{
    msize_t *entry = &meta_headers[index];
    *entry = msize | SMALL_IS_FREE;
}
// Mark the small block at `index` in use: its meta-header entry holds the
// bare msize (no flag bits).
static INLINE void
small_meta_header_set_in_use(msize_t *meta_headers, msize_t index, msize_t msize)
{
    msize_t *entry = &meta_headers[index];
    *entry = msize;
}
// Mark the small block at `index` as interior to another block by zeroing
// its meta-header entry.
static INLINE void
small_meta_header_set_middle(msize_t *meta_headers, msize_t index)
{
    msize_t *entry = &meta_headers[index];
    *entry = 0;
}
// Push a small free block (ptr, msize) onto the head of the appropriate
// free-list slot, marking it free in the meta-headers and recording its
// msize at the block's tail for backward coalescing. Caller holds the
// magazine lock. List links are stored checksummed.
static void
small_free_list_add_ptr(szone_t *szone, magazine_t *small_mag_ptr, void *ptr, msize_t msize)
{
// Oversized blocks all share the last slot.
grain_t slot = (msize <= szone->num_small_slots) ? msize - 1 : szone->num_small_slots - 1;
free_list_t *free_ptr = ptr;
free_list_t *free_head = small_mag_ptr->mag_free_list[slot];
void *follower;
#if DEBUG_MALLOC
if (LOG(szone,ptr)) {
malloc_printf("in %s, ptr=%p, msize=%d\n", __FUNCTION__, ptr, msize);
}
if (((uintptr_t)ptr) & (SMALL_QUANTUM - 1)) {
szone_error(szone, 1, "small_free_list_add_ptr: Unaligned ptr", ptr, NULL);
}
#endif
small_meta_header_set_is_free(SMALL_META_HEADER_FOR_PTR(ptr), SMALL_META_INDEX_FOR_PTR(ptr), msize);
if (free_head) {
#if DEBUG_MALLOC
// The list head must have no predecessor and must itself be free.
if (free_list_unchecksum_ptr(szone, &free_head->previous)) {
szone_error(szone, 1, "small_free_list_add_ptr: Internal invariant broken (free_head->previous)", ptr,
"ptr=%p slot=%d free_head=%p previous=%p\n", ptr, slot, (void *)free_head, free_head->previous.p);
}
if (!SMALL_PTR_IS_FREE(free_head)) {
szone_error(szone, 1, "small_free_list_add_ptr: Internal invariant broken (free_head is not a free pointer)", ptr,
"ptr=%p slot=%d free_head=%p\n", ptr, slot, (void *)free_head);
}
#endif
free_head->previous.u = free_list_checksum_ptr(szone, free_ptr);
} else {
// First entry in this slot: set its bit in the occupancy bitmap.
BITMAPN_SET(small_mag_ptr->mag_bitmap, slot);
}
free_ptr->previous.u = free_list_checksum_ptr(szone, NULL);
free_ptr->next.u = free_list_checksum_ptr(szone, free_head);
small_mag_ptr->mag_free_list[slot] = free_ptr;
// Store our msize just past the block so the next block can coalesce back.
follower = (void *)((uintptr_t)ptr + SMALL_BYTES_FOR_MSIZE(msize));
SMALL_PREVIOUS_MSIZE(follower) = msize;
}
/*
 * small_free_list_remove_ptr: unlink the block [ptr, msize quanta] from its
 * size-class free list. Magazine lock held by caller. When splicing, the
 * already-checksummed prev/next unions are copied verbatim, so no
 * re-checksumming is needed.
 */
static void
small_free_list_remove_ptr(szone_t *szone, magazine_t *small_mag_ptr, void *ptr, msize_t msize)
{
// Oversized blocks all live in the final slot.
grain_t slot = (msize <= szone->num_small_slots) ? msize - 1 : szone->num_small_slots - 1;
free_list_t *free_ptr = ptr, *next, *previous;
next = free_list_unchecksum_ptr(szone, &free_ptr->next);
previous = free_list_unchecksum_ptr(szone, &free_ptr->previous);
#if DEBUG_MALLOC
if (LOG(szone,ptr)) {
malloc_printf("In %s, ptr=%p, msize=%d\n", __FUNCTION__, ptr, msize);
}
#endif
if (!previous) {
// No predecessor: ptr must be the list head.
#if DEBUG_MALLOC
if (small_mag_ptr->mag_free_list[slot] != ptr) {
szone_error(szone, 1, "small_free_list_remove_ptr: Internal invariant broken (small_mag_ptr->mag_free_list[slot])", ptr,
"ptr=%p slot=%d msize=%d small_mag_ptr->mag_free_list[slot]=%p\n",
ptr, slot, msize, (void *)small_mag_ptr->mag_free_list[slot]);
return;
}
#endif
small_mag_ptr->mag_free_list[slot] = next;
// Last block in this size class: clear the slot's non-empty bit.
if (!next) BITMAPN_CLR(small_mag_ptr->mag_bitmap, slot);
} else {
// Splice our (checksummed) next link into the predecessor.
previous->next = free_ptr->next;
}
if (next) {
// Splice our (checksummed) previous link into the successor.
next->previous = free_ptr->previous;
}
}
/*
 * small_region_for_ptr_no_lock: return the small region containing `ptr`,
 * or NULL when `ptr` does not fall inside any region in the szone's small
 * region hash. Reads the hash without taking small_regions_lock (callers
 * rely on the generation scheme for safety).
 */
static INLINE region_t
small_region_for_ptr_no_lock(szone_t *szone, const void *ptr)
{
	rgnhdl_t r = hash_lookup_region_no_lock(szone->small_region_generation->hashed_regions,
			szone->small_region_generation->num_regions_allocated,
			szone->small_region_generation->num_regions_allocated_shift,
			SMALL_REGION_FOR_PTR(ptr));
	// Was "return r ? *r : r;" — returning the rgnhdl_t itself as a region_t
	// relied on an implicit pointer conversion that only happened to work
	// because r is NULL on that path. Be explicit.
	return r ? *r : NULL;
}
/*
 * small_finalize_region: convert the magazine's "bytes free at end" (and,
 * with ASLR, "bytes free at start") bookkeeping for mag_last_region into
 * ordinary free-list blocks, coalescing with an adjacent free block where
 * possible, then forget the last region. Magazine lock held by caller.
 */
static void
small_finalize_region(szone_t *szone, magazine_t *small_mag_ptr) {
void *last_block, *previous_block;
msize_t last_msize, previous_msize, last_index;
// Turn the unclaimed tail of the region into a proper free block.
if (small_mag_ptr->mag_bytes_free_at_end) {
last_block = SMALL_REGION_END(small_mag_ptr->mag_last_region) - small_mag_ptr->mag_bytes_free_at_end;
last_msize = SMALL_MSIZE_FOR_BYTES(small_mag_ptr->mag_bytes_free_at_end);
last_index = SMALL_META_INDEX_FOR_PTR(last_block);
previous_msize = SMALL_PREVIOUS_MSIZE(last_block);
// If the block immediately preceding the tail is free, absorb it so we
// don't leave two adjacent free blocks.
if (last_index && (previous_msize <= last_index)) {
previous_block = (void *)((uintptr_t)last_block - SMALL_BYTES_FOR_MSIZE(previous_msize));
if (*SMALL_METADATA_FOR_PTR(previous_block) == (previous_msize | SMALL_IS_FREE)) {
msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(last_block);
small_meta_header_set_middle(meta_headers, last_index);
small_free_list_remove_ptr(szone, small_mag_ptr, previous_block, previous_msize);
last_block = (void *)((uintptr_t)last_block - SMALL_BYTES_FOR_MSIZE(previous_msize));
last_msize += previous_msize;
}
}
small_free_list_add_ptr(szone, small_mag_ptr, last_block, last_msize);
small_mag_ptr->mag_bytes_free_at_end = 0;
}
#if ASLR_INTERNAL
// Likewise for the ASLR slide space at the start of the region.
if (small_mag_ptr->mag_bytes_free_at_start) {
last_block = SMALL_REGION_ADDRESS(small_mag_ptr->mag_last_region);
last_msize = SMALL_MSIZE_FOR_BYTES(small_mag_ptr->mag_bytes_free_at_start);
void *next_block = (void *) ((uintptr_t)last_block + small_mag_ptr->mag_bytes_free_at_start);
// If the block immediately after the start-space is free, absorb it.
if (SMALL_PTR_IS_FREE(next_block)) {
msize_t next_msize = SMALL_PTR_SIZE(next_block);
small_meta_header_set_middle(SMALL_META_HEADER_FOR_PTR(next_block), SMALL_META_INDEX_FOR_PTR(next_block));
small_free_list_remove_ptr(szone, small_mag_ptr, next_block, next_msize);
last_msize += next_msize;
}
small_free_list_add_ptr(szone, small_mag_ptr, last_block, last_msize);
small_mag_ptr->mag_bytes_free_at_start = 0;
}
#endif
small_mag_ptr->mag_last_region = NULL;
}
/*
 * small_free_detach_region: walk every block in region r and unhook each
 * free block from this magazine's free lists. Metadata headers are left in
 * place, so the region can later be reattached to another magazine with
 * small_free_reattach_region. Returns the number of blocks still in use.
 * Magazine lock held by caller.
 */
static int
small_free_detach_region(szone_t *szone, magazine_t *small_mag_ptr, region_t r) {
	uintptr_t current = (uintptr_t)SMALL_REGION_ADDRESS(r);
	uintptr_t limit = (uintptr_t)SMALL_REGION_END(r);
	msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(current);
	int total_alloc = 0;

	while (current < limit) {
		msize_t msize_and_free = meta_headers[SMALL_META_INDEX_FOR_PTR(current)];
		boolean_t is_free = msize_and_free & SMALL_IS_FREE;
		msize_t msize = msize_and_free & ~ SMALL_IS_FREE;
		if (msize == 0) {
			// A zero msize means corrupt metadata; stop the walk.
#if DEBUG_MALLOC
			malloc_printf("*** small_free_detach_region error with %p: msize=%d is_free =%d\n",
					(void *)current, msize, is_free);
#endif
			break;
		}
		if (is_free) {
			small_free_list_remove_ptr(szone, small_mag_ptr, (void *)current, msize);
		} else {
			total_alloc++;
		}
		current += SMALL_BYTES_FOR_MSIZE(msize);
	}
	return total_alloc;
}
/*
 * small_free_reattach_region: inverse of small_free_detach_region — walk
 * region r and push each free block onto this magazine's free lists.
 * Returns the total number of bytes in blocks still in use. Magazine lock
 * held by caller.
 */
static size_t
small_free_reattach_region(szone_t *szone, magazine_t *small_mag_ptr, region_t r) {
	uintptr_t current = (uintptr_t)SMALL_REGION_ADDRESS(r);
	uintptr_t limit = (uintptr_t)SMALL_REGION_END(r);
	msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(current);
	size_t total_alloc = 0;

	while (current < limit) {
		msize_t msize_and_free = meta_headers[SMALL_META_INDEX_FOR_PTR(current)];
		boolean_t is_free = msize_and_free & SMALL_IS_FREE;
		msize_t msize = msize_and_free & ~ SMALL_IS_FREE;
		if (msize == 0) {
			// A zero msize means corrupt metadata; stop the walk.
#if DEBUG_MALLOC
			malloc_printf("*** small_free_reattach_region error with %p: msize=%d is_free =%d\n",
					(void *)current, msize, is_free);
#endif
			break;
		}
		if (is_free) {
			small_free_list_add_ptr(szone, small_mag_ptr, (void *)current, msize);
		} else {
			total_alloc += SMALL_BYTES_FOR_MSIZE(msize);
		}
		current += SMALL_BYTES_FOR_MSIZE(msize);
	}
	return total_alloc;
}
// One madvise advisory for small_free_scan_madvise_free: a run of `size`
// pages starting `pnum` pages into the region (both counted in vm pages,
// relative to the region base). uint16_t is wide enough for a small region's
// page count.
typedef struct {
uint16_t pnum, size;
} small_pg_pair_t;
/*
 * small_free_scan_madvise_free: scan a depot region and madvise away the
 * whole pages covered by its free blocks. Called with the depot lock held;
 * the lock is dropped around the madvise_free_range calls, with the region
 * pinned to the depot so it cannot be unmapped or re-homed meanwhile.
 */
static void NOINLINE
small_free_scan_madvise_free(szone_t *szone, magazine_t *depot_ptr, region_t r) {
uintptr_t start = (uintptr_t)SMALL_REGION_ADDRESS(r);
uintptr_t current = start;
uintptr_t limit = (uintptr_t)SMALL_REGION_END(r);
msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(start);
// Free blocks cannot be adjacent (they would have been coalesced), so at
// most every other page-run is free: half the region's page count worth of
// (pnum, size) pairs suffices.
small_pg_pair_t advisory[((SMALL_REGION_PAYLOAD_BYTES + vm_page_size - 1) >> vm_page_shift) >> 1]; int advisories = 0;
// First pass: collect the page ranges to advise, under the depot lock.
while (current < limit) {
unsigned index = SMALL_META_INDEX_FOR_PTR(current);
msize_t msize_and_free = meta_headers[index];
boolean_t is_free = msize_and_free & SMALL_IS_FREE;
msize_t msize = msize_and_free & ~ SMALL_IS_FREE;
// A free block with msize 0 at the very start marks the whole region as
// free: advise everything except the leading free-list header and the
// trailing msize marker, then stop.
if (is_free && !msize && (current == start)) {
#if DEBUG_MALLOC
malloc_printf("*** small_free_scan_madvise_free first block is all free! %p: msize=%d is_free =%d\n",
(void *)current, msize, is_free);
#endif
uintptr_t pgLo = round_page(start + sizeof(free_list_t) + sizeof(msize_t));
uintptr_t pgHi = trunc_page(start + SMALL_REGION_SIZE - sizeof(msize_t));
if (pgLo < pgHi) {
advisory[advisories].pnum = (pgLo - start) >> vm_page_shift;
advisory[advisories].size = (pgHi - pgLo) >> vm_page_shift;
advisories++;
}
break;
}
if (!msize) {
// Zero msize elsewhere means corrupt metadata; stop the walk.
#if DEBUG_MALLOC
malloc_printf("*** small_free_scan_madvise_free error with %p: msize=%d is_free =%d\n",
(void *)current, msize, is_free);
#endif
break;
}
if (is_free) {
// Keep the block's free-list header and trailing msize resident;
// only whole pages strictly inside the block are advised.
uintptr_t pgLo = round_page(current + sizeof(free_list_t) + sizeof(msize_t));
uintptr_t pgHi = trunc_page(current + SMALL_BYTES_FOR_MSIZE(msize) - sizeof(msize_t));
if (pgLo < pgHi) {
advisory[advisories].pnum = (pgLo - start) >> vm_page_shift;
advisory[advisories].size = (pgHi - pgLo) >> vm_page_shift;
advisories++;
}
}
current += SMALL_BYTES_FOR_MSIZE(msize);
}
// Second pass: issue the advisories with the depot lock dropped, pinning
// the region so it stays in the depot while unlocked.
if (advisories > 0) {
int i;
OSAtomicIncrement32Barrier(&(REGION_TRAILER_FOR_SMALL_REGION(r)->pinned_to_depot));
SZONE_MAGAZINE_PTR_UNLOCK(szone, depot_ptr);
for (i = 0; i < advisories; ++i) {
uintptr_t addr = (advisory[i].pnum << vm_page_shift) + start;
size_t size = advisory[i].size << vm_page_shift;
#if TARGET_OS_EMBEDDED
madvise_free_range(szone, r, addr, addr + size, NULL);
#else
madvise_free_range(szone, r, addr, addr + size);
#endif
}
SZONE_MAGAZINE_PTR_LOCK(szone, depot_ptr);
OSAtomicDecrement32Barrier(&(REGION_TRAILER_FOR_SMALL_REGION(r)->pinned_to_depot));
}
}
/*
 * small_free_try_depot_unmap_no_lock: retire a completely-empty depot
 * region. Declines (returns NULL) when the region still holds live bytes,
 * is pinned by an in-flight madvise, or the depot would fall below its
 * reserve of 2 * num_small_magazines cached regions. On success the region
 * is removed from the recirc list and tombstoned in the region hash, and is
 * returned for the caller to deallocate after dropping the depot lock.
 */
static region_t
small_free_try_depot_unmap_no_lock(szone_t *szone, magazine_t *depot_ptr, region_trailer_t *node)
{
if (0 < node->bytes_used ||
0 < node->pinned_to_depot ||
depot_ptr->recirculation_entries < (szone->num_small_magazines * 2)) {
return NULL;
}
// Disconnect the region from the depot's recirculation list.
recirc_list_extract(szone, depot_ptr, node);
region_t sparse_region = SMALL_REGION_FOR_PTR(node);
// With bytes_used == 0 every block should be free; pull them all off the
// depot's free lists.
int objects_in_use = small_free_detach_region(szone, depot_ptr, sparse_region);
if (0 == objects_in_use) {
// Tombstone the hash entry so lookups skip it but probing still works.
rgnhdl_t pSlot = hash_lookup_region_no_lock(szone->small_region_generation->hashed_regions,
szone->small_region_generation->num_regions_allocated,
szone->small_region_generation->num_regions_allocated_shift, sparse_region);
if (NULL == pSlot) {
szone_error(szone, 1, "small_free_try_depot_unmap_no_lock hash lookup failed:", NULL, "%p\n", sparse_region);
return NULL;
}
*pSlot = HASHRING_REGION_DEALLOCATED;
depot_ptr->num_bytes_in_magazine -= SMALL_REGION_PAYLOAD_BYTES;
__sync_fetch_and_add( &(szone->num_small_regions_dealloc), 1);
MAGMALLOC_DEALLOCREGION((void *)szone, (void *)sparse_region, SMALL_REGION_SIZE); return sparse_region;
} else {
szone_error(szone, 1, "small_free_try_depot_unmap_no_lock objects_in_use not zero:", NULL, "%d\n", objects_in_use);
return NULL;
}
}
/*
 * small_free_do_recirc_to_depot: hand the first recirculation-suitable
 * region of this magazine over to the depot, madvise its free space, and
 * unmap it if it turns out to be completely empty. Called with
 * small_mag_ptr locked. Returns FALSE after the magazine lock has been
 * dropped, TRUE when the caller still holds it.
 */
static boolean_t
small_free_do_recirc_to_depot(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index)
{
// The recirc list is scanned from the front; take the first region that
// was marked suitable (sufficiently sparse) by small_free_no_lock.
region_trailer_t *node = small_mag_ptr->firstNode;
while (node && !node->recirc_suitable) {
node = node->next;
}
if (NULL == node) {
#if DEBUG_MALLOC
malloc_printf("*** small_free_do_recirc_to_depot end of list\n");
#endif
// Nothing to move; magazine lock still held.
return TRUE; }
region_t sparse_region = SMALL_REGION_FOR_PTR(node);
// Convert mag_last_region's free-at-end/start space into real blocks first.
if (sparse_region == small_mag_ptr->mag_last_region && (small_mag_ptr->mag_bytes_free_at_end || small_mag_ptr->mag_bytes_free_at_start)) {
small_finalize_region(szone, small_mag_ptr);
}
// Detach all of the region's free blocks from this magazine's lists while
// we still hold its lock.
recirc_list_extract(szone, small_mag_ptr, node);
int objects_in_use = small_free_detach_region(szone, small_mag_ptr, sparse_region);
// Take the depot lock (acquired while still holding the magazine lock) and
// re-home the region there.
magazine_t *depot_ptr = &(szone->small_magazines[DEPOT_MAGAZINE_INDEX]);
SZONE_MAGAZINE_PTR_LOCK(szone,depot_ptr);
MAGAZINE_INDEX_FOR_SMALL_REGION(sparse_region) = DEPOT_MAGAZINE_INDEX;
node->pinned_to_depot = 0;
size_t bytes_inplay = small_free_reattach_region(szone, depot_ptr, sparse_region);
// Transfer the accounting from this magazine to the depot; the magazine
// lock is dropped here and NOT retaken (hence the FALSE return).
small_mag_ptr->mag_num_bytes_in_objects -= bytes_inplay;
small_mag_ptr->num_bytes_in_magazine -= SMALL_REGION_PAYLOAD_BYTES;
small_mag_ptr->mag_num_objects -= objects_in_use;
SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
depot_ptr->mag_num_bytes_in_objects += bytes_inplay;
depot_ptr->num_bytes_in_magazine += SMALL_REGION_PAYLOAD_BYTES;
depot_ptr->mag_num_objects += objects_in_use;
recirc_list_splice_last(szone, depot_ptr, node);
MAGMALLOC_RECIRCREGION((void *)szone, (int)mag_index, (void *)sparse_region, SMALL_REGION_SIZE,
(int)BYTES_USED_FOR_SMALL_REGION(sparse_region));
// Give the region's free pages back to the VM.
small_free_scan_madvise_free(szone, depot_ptr, sparse_region);
// If the region ended up entirely empty, unmap it outright.
region_t r_dealloc = small_free_try_depot_unmap_no_lock(szone, depot_ptr, node);
SZONE_MAGAZINE_PTR_UNLOCK(szone,depot_ptr);
if (r_dealloc)
deallocate_pages(szone, r_dealloc, SMALL_REGION_SIZE, 0);
return FALSE; }
/*
 * small_find_msize_region: locate some region in this magazine whose free
 * lists contain a block able to satisfy an allocation of `msize` quanta.
 * Returns the region, or NULL when no free block is large enough. Only
 * finds the block — nothing is dequeued. Magazine lock held by caller.
 * Fix: the out-of-sync diagnostic named small_malloc_from_free_list()
 * (copy-paste); it now names this function.
 */
static region_t
small_find_msize_region(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index, msize_t msize)
{
free_list_t *ptr;
// Exact size classes occupy slots 0..num_small_slots-2; the last slot
// holds everything larger.
grain_t slot = (msize <= szone->num_small_slots) ? msize - 1 : szone->num_small_slots - 1;
free_list_t **free_list = small_mag_ptr->mag_free_list;
free_list_t **the_slot = free_list + slot;
free_list_t **limit;
unsigned bitmap;
CHECK_MAGAZINE_PTR_LOCKED(szone, small_mag_ptr, __PRETTY_FUNCTION__);
// Try the exact-fit slot first.
ptr = *the_slot;
if (ptr)
return SMALL_REGION_FOR_PTR(ptr);
// Otherwise scan the non-empty-slot bitmap for the next larger class.
if (szone->is_largemem) {
unsigned idx = slot >> 5;
bitmap = 0;
unsigned mask = ~ ((1 << (slot & 31)) - 1);
for ( ; idx < SMALL_BITMAP_WORDS; ++idx ) {
bitmap = small_mag_ptr->mag_bitmap[idx] & mask;
if (bitmap != 0)
break;
mask = ~0U;
}
if ((bitmap == 0) && (idx == SMALL_BITMAP_WORDS))
return NULL;
slot = BITMAP32_CTZ((&bitmap)) + (idx * 32);
} else {
bitmap = small_mag_ptr->mag_bitmap[0] & ~ ((1 << slot) - 1);
if (!bitmap)
return NULL;
slot = BITMAP32_CTZ((&bitmap));
}
limit = free_list + szone->num_small_slots - 1;
free_list += slot;
if (free_list < limit) {
ptr = *free_list;
if (ptr)
return SMALL_REGION_FOR_PTR(ptr);
else {
#if DEBUG_MALLOC
malloc_printf("in small_find_msize_region(), mag_bitmap out of sync, slot=%d\n",slot);
#endif
}
}
// Fall back to the catch-all last slot.
ptr = *limit;
if (ptr)
return SMALL_REGION_FOR_PTR(ptr);
return NULL;
}
/*
 * small_get_region_from_depot: try to adopt a depot region whose free lists
 * can satisfy an allocation of `msize` quanta into this magazine. Returns 1
 * on success, 0 when the depot has nothing usable. Called with
 * small_mag_ptr locked; the depot lock is taken while holding it.
 */
static boolean_t
small_get_region_from_depot(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index, msize_t msize)
{
magazine_t *depot_ptr = &(szone->small_magazines[DEPOT_MAGAZINE_INDEX]);
// Single-magazine configurations keep no depot.
if (szone->num_small_magazines == 1) return 0;
#if DEBUG_MALLOC
if (DEPOT_MAGAZINE_INDEX == mag_index) {
szone_error(szone, 1, "small_get_region_from_depot called for magazine index -1", NULL, NULL);
return 0;
}
#endif
SZONE_MAGAZINE_PTR_LOCK(szone,depot_ptr);
region_trailer_t *node;
region_t sparse_region;
// Find a suitable region; if it is pinned (an madvise is in flight with
// the depot lock dropped), yield and retry until it is unpinned.
while (1) {
sparse_region = small_find_msize_region(szone, depot_ptr, DEPOT_MAGAZINE_INDEX, msize);
if (NULL == sparse_region) { SZONE_MAGAZINE_PTR_UNLOCK(szone,depot_ptr);
return 0;
}
node = REGION_TRAILER_FOR_SMALL_REGION(sparse_region);
if (0 >= node->pinned_to_depot)
break;
SZONE_MAGAZINE_PTR_UNLOCK(szone,depot_ptr);
pthread_yield_np();
SZONE_MAGAZINE_PTR_LOCK(szone,depot_ptr);
}
// Move the region: detach its free blocks from the depot, relabel it with
// our magazine index, and reattach the blocks to this magazine.
recirc_list_extract(szone, depot_ptr, node);
int objects_in_use = small_free_detach_region(szone, depot_ptr, sparse_region);
MAGAZINE_INDEX_FOR_SMALL_REGION(sparse_region) = mag_index;
node->pinned_to_depot = 0;
size_t bytes_inplay = small_free_reattach_region(szone, small_mag_ptr, sparse_region);
// Transfer accounting from the depot to this magazine.
depot_ptr->mag_num_bytes_in_objects -= bytes_inplay;
depot_ptr->num_bytes_in_magazine -= SMALL_REGION_PAYLOAD_BYTES;
depot_ptr->mag_num_objects -= objects_in_use;
small_mag_ptr->mag_num_bytes_in_objects += bytes_inplay;
small_mag_ptr->num_bytes_in_magazine += SMALL_REGION_PAYLOAD_BYTES;
small_mag_ptr->mag_num_objects += objects_in_use;
recirc_list_splice_first(szone, small_mag_ptr, node);
SZONE_MAGAZINE_PTR_UNLOCK(szone,depot_ptr);
// Tell the VM we intend to reuse pages previously marked MADV_FREE; once
// REUSE fails for a region we stop trying (failedREUSE latches TRUE).
#if TARGET_OS_EMBEDDED
if (node->failedREUSE) {
#else
if (node->failedREUSE ||
-1 == madvise((void *)sparse_region, SMALL_REGION_PAYLOAD_BYTES, MADV_FREE_REUSE)) {
#endif
#if DEBUG_MADVISE
szone_error(szone, 0, "small_get_region_from_depot madvise(..., MADV_FREE_REUSE) failed",
sparse_region, "length=%d\n", SMALL_REGION_PAYLOAD_BYTES);
#endif
node->failedREUSE = TRUE;
}
MAGMALLOC_DEPOTREGION((void *)szone, (int)mag_index, (void *)sparse_region, SMALL_REGION_SIZE,
(int)BYTES_USED_FOR_SMALL_REGION(sparse_region));
return 1;
}
// K: emptiness headroom a magazine may accumulate before recirculating a
// region to the depot, in units of small regions. NOTE(review): the
// recirculation test in small_free_no_lock hard-codes this constant as
// (3 * SMALL_REGION_PAYLOAD_BYTES) / 2 rather than using K directly.
#define K 1.5 // headroom measured in number of 8Mb regions
// DENSITY_THRESHOLD(a) is 3/4 of (a): usage below three quarters of the
// spanned bytes makes a region or magazine "sparse".
#define DENSITY_THRESHOLD(a) \
((a) - ((a) >> 2))
/*
 * small_free_no_lock: return the block [ptr, msize quanta] in `region` to
 * its magazine, coalescing with free neighbors. Called with small_mag_ptr
 * locked; the lock may be dropped and retaken around madvise. Returns FALSE
 * when the magazine lock has already been released (region recirculated to
 * the depot or deallocated), TRUE when the caller must still unlock.
 */
static INLINE boolean_t
small_free_no_lock(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index, region_t region, void *ptr, msize_t msize)
{
msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr);
unsigned index = SMALL_META_INDEX_FOR_PTR(ptr);
// Remember the original extent: accounting uses it, and coalescing below
// may move ptr/msize.
void *original_ptr = ptr;
size_t original_size = SMALL_BYTES_FOR_MSIZE(msize);
unsigned char *next_block = ((unsigned char *)ptr + original_size);
msize_t next_index = index + msize;
msize_t previous_msize, next_msize;
void *previous;
#if DEBUG_MALLOC
if (LOG(szone,ptr)) {
malloc_printf("in small_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
}
if (!msize) {
szone_error(szone, 1, "trying to free small block that is too small", ptr,
"in small_free_no_lock(), ptr=%p, msize=%d\n", ptr, msize);
}
#endif
// Coalesce backwards: the preceding block's msize trails just before us.
if (index && (SMALL_PREVIOUS_MSIZE(ptr) <= index)) {
previous_msize = SMALL_PREVIOUS_MSIZE(ptr);
if (meta_headers[index - previous_msize] == (previous_msize | SMALL_IS_FREE)) {
previous = (void *)((uintptr_t)ptr - SMALL_BYTES_FOR_MSIZE(previous_msize));
#if DEBUG_MALLOC
if (LOG(szone, ptr) || LOG(szone,previous)) {
malloc_printf("in small_free_no_lock(), coalesced backwards for %p previous=%p\n", ptr, previous);
}
#endif
small_free_list_remove_ptr(szone, small_mag_ptr, previous, previous_msize);
small_meta_header_set_middle(meta_headers, index);
ptr = previous;
msize += previous_msize;
index -= previous_msize;
}
}
// Coalesce forwards with the next block if it is free.
if ((next_block < SMALL_REGION_END(region)) && (meta_headers[next_index] & SMALL_IS_FREE)) {
next_msize = meta_headers[next_index] & ~ SMALL_IS_FREE;
#if DEBUG_MALLOC
if (LOG(szone,ptr))
malloc_printf("In small_free_no_lock(), for ptr=%p, msize=%d coalesced next block=%p next_msize=%d\n",
ptr, msize, next_block, next_msize);
#endif
small_free_list_remove_ptr(szone, small_mag_ptr, next_block, next_msize);
small_meta_header_set_middle(meta_headers, next_index);
msize += next_msize;
}
if (szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) {
if (!msize) {
szone_error(szone, 1, "incorrect size information - block header was damaged", ptr, NULL);
} else {
// Scribble over the freed (possibly coalesced) block.
memset(ptr, 0x55, SMALL_BYTES_FOR_MSIZE(msize));
}
}
small_free_list_add_ptr(szone, small_mag_ptr, ptr, msize);
small_mag_ptr->mag_num_objects--;
// Accounting uses the original (uncoalesced) size.
small_mag_ptr->mag_num_bytes_in_objects -= original_size;
region_trailer_t *node = REGION_TRAILER_FOR_SMALL_REGION(region);
size_t bytes_used = node->bytes_used - original_size;
node->bytes_used = bytes_used;
#if !TARGET_OS_EMBEDDED // Always madvise for embedded platforms
// Single-magazine szones never recirculate.
if (szone->num_small_magazines == 1) {
} else if (DEPOT_MAGAZINE_INDEX != mag_index) {
// Ordinary magazine: mark sparsely-used regions as recirculation
// candidates, and push one to the depot when the magazine carries more
// than 1.5 regions of free headroom and is below 75% density.
if (bytes_used < DENSITY_THRESHOLD(SMALL_REGION_PAYLOAD_BYTES)) {
node->recirc_suitable = TRUE;
} else {
}
size_t a = small_mag_ptr->num_bytes_in_magazine; size_t u = small_mag_ptr->mag_num_bytes_in_objects;
if (a - u > ((3 * SMALL_REGION_PAYLOAD_BYTES) / 2) && u < DENSITY_THRESHOLD(a)) {
// May drop the magazine lock and return FALSE.
return small_free_do_recirc_to_depot(szone, small_mag_ptr, mag_index);
}
} else {
#endif
// Depot (or embedded) path: madvise the interior pages of the freed span,
// keeping the free-list header and trailing msize resident.
uintptr_t safe_ptr = (uintptr_t)ptr + sizeof(free_list_t) + sizeof(msize_t);
uintptr_t round_safe = round_page(safe_ptr);
uintptr_t safe_extent = (uintptr_t)ptr + SMALL_BYTES_FOR_MSIZE(msize) - sizeof(msize_t);
uintptr_t trunc_extent = trunc_page(safe_extent);
if (round_safe < trunc_extent) { uintptr_t lo = trunc_page((uintptr_t)original_ptr);
uintptr_t hi = round_page((uintptr_t)original_ptr + original_size);
// Temporarily take the block off the free list and mark it in-use so no
// one can claim it while the magazine lock is dropped for madvise; pin
// the region so it cannot be unmapped meanwhile.
small_free_list_remove_ptr(szone, small_mag_ptr, ptr, msize);
small_meta_header_set_in_use(meta_headers, index, msize);
OSAtomicIncrement32Barrier(&(node->pinned_to_depot));
SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
#if TARGET_OS_EMBEDDED
madvise_free_range(szone, region, MAX(round_safe, lo), MIN(trunc_extent, hi), &szone->last_small_advise);
#else
madvise_free_range(szone, region, MAX(round_safe, lo), MIN(trunc_extent, hi));
#endif
SZONE_MAGAZINE_PTR_LOCK(szone, small_mag_ptr);
OSAtomicDecrement32Barrier(&(node->pinned_to_depot));
// Restore the block to the free list.
small_meta_header_set_is_free(meta_headers, index, msize);
small_free_list_add_ptr(szone, small_mag_ptr, ptr, msize);
}
#if !TARGET_OS_EMBEDDED
// If the depot region is now completely empty and unpinned, unmap it;
// that path drops the magazine lock and returns FALSE.
if (0 < bytes_used || 0 < node->pinned_to_depot) {
} else {
region_t r_dealloc = small_free_try_depot_unmap_no_lock(szone, small_mag_ptr, node);
SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
if (r_dealloc)
deallocate_pages(szone, r_dealloc, SMALL_REGION_SIZE, 0);
return FALSE; }
}
#endif
return TRUE; }
/*
 * small_malloc_from_region_no_lock: carve the first allocation (msize
 * quanta) out of the brand-new region `aligned_address`, register the
 * region in the szone's region hash, and adopt it as the magazine's
 * mag_last_region with the remainder tracked as bytes-free-at-end (and,
 * with ASLR, bytes-free-at-start). Magazine lock held by caller; briefly
 * takes small_regions_lock. Returns the allocated block.
 */
static void *
small_malloc_from_region_no_lock(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index,
msize_t msize, void *aligned_address)
{
void *ptr;
// Retire the previous last region's leftover-space bookkeeping first.
if (small_mag_ptr->mag_bytes_free_at_end || small_mag_ptr->mag_bytes_free_at_start)
small_finalize_region(szone, small_mag_ptr);
// Grow the region hash when fewer than half its slots would stay open,
// publishing the new generation before use (OSMemoryBarrier).
LOCK(szone->small_regions_lock);
if (szone->small_region_generation->num_regions_allocated < (2 * szone->num_small_regions)) {
region_t *new_regions;
size_t new_size;
size_t new_shift = szone->small_region_generation->num_regions_allocated_shift; new_regions = hash_regions_grow_no_lock(szone, szone->small_region_generation->hashed_regions,
szone->small_region_generation->num_regions_allocated,
&new_shift,
&new_size);
szone->small_region_generation->nextgen->hashed_regions = new_regions;
szone->small_region_generation->nextgen->num_regions_allocated = new_size;
szone->small_region_generation->nextgen->num_regions_allocated_shift = new_shift;
szone->small_region_generation = szone->small_region_generation->nextgen;
OSMemoryBarrier();
}
MAGAZINE_INDEX_FOR_SMALL_REGION(aligned_address) = mag_index;
hash_region_insert_no_lock(szone->small_region_generation->hashed_regions,
szone->small_region_generation->num_regions_allocated,
szone->small_region_generation->num_regions_allocated_shift,
aligned_address);
szone->num_small_regions++;
UNLOCK(szone->small_regions_lock);
small_mag_ptr->mag_last_region = aligned_address;
BYTES_USED_FOR_SMALL_REGION(aligned_address) = SMALL_BYTES_FOR_MSIZE(msize);
#if ASLR_INTERNAL
// Random slide (in quanta) applied to the first allocation in the region.
int offset_msize = malloc_entropy[1] & SMALL_ENTROPY_MASK;
#if DEBUG_MALLOC
if (getenv("MallocASLRForce")) offset_msize = strtol(getenv("MallocASLRForce"), NULL, 0) & SMALL_ENTROPY_MASK;
if (getenv("MallocASLRPrint")) malloc_printf("Region: %p offset: %d\n", aligned_address, offset_msize);
#endif
#else
int offset_msize = 0;
#endif
ptr = (void *)((uintptr_t) aligned_address + SMALL_BYTES_FOR_MSIZE(offset_msize));
small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(ptr), offset_msize, msize);
small_mag_ptr->mag_num_objects++;
small_mag_ptr->mag_num_bytes_in_objects += SMALL_BYTES_FOR_MSIZE(msize);
small_mag_ptr->num_bytes_in_magazine += SMALL_REGION_PAYLOAD_BYTES;
// Record the trailing space as one non-free span in the headers; its size
// is tracked separately in mag_bytes_free_at_end.
small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(ptr), offset_msize + msize, NUM_SMALL_BLOCKS - msize - offset_msize);
small_mag_ptr->mag_bytes_free_at_end = SMALL_BYTES_FOR_MSIZE(NUM_SMALL_BLOCKS - msize - offset_msize);
#if ASLR_INTERNAL
// Likewise the slide space at the start of the region.
small_mag_ptr->mag_bytes_free_at_start = SMALL_BYTES_FOR_MSIZE(offset_msize);
if (offset_msize) {
small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(ptr), 0, offset_msize);
}
#else
small_mag_ptr->mag_bytes_free_at_start = 0;
#endif
recirc_list_splice_last(szone, small_mag_ptr, REGION_TRAILER_FOR_SMALL_REGION(aligned_address));
return ptr;
}
/*
 * small_try_shrink_in_place: trim the allocation at `ptr` down to
 * new_good_size by carving the tail off as a separate in-use block and
 * freeing that tail. The block never moves; ptr is always returned.
 */
static INLINE void *
small_try_shrink_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_good_size)
{
	msize_t new_msize = SMALL_MSIZE_FOR_BYTES(new_good_size);
	msize_t mshrinkage = SMALL_MSIZE_FOR_BYTES(old_size) - new_msize;

	if (!mshrinkage)
		return ptr;

	void *q = (void *)((uintptr_t)ptr + SMALL_BYTES_FOR_MSIZE(new_msize));
	magazine_t *small_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->small_magazines,
			REGION_TRAILER_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr)),
			MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr)));

	// Shrink the original block's header, then mark the tail as its own
	// in-use block. The extra object count balances the decrement done by
	// szone_free when it releases the tail.
	small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(ptr), SMALL_META_INDEX_FOR_PTR(ptr), new_msize);
	small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(q), SMALL_META_INDEX_FOR_PTR(q), mshrinkage);
	small_mag_ptr->mag_num_objects++;
	SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
	szone_free(szone, q);
	return ptr;
}
/*
 * small_try_realloc_in_place: try to grow the allocation at `ptr` from
 * old_size to new_size by consuming (part of) the free block that follows
 * it in the same region. Returns 1 on success, 0 when the next block is
 * absent, in use, or too small.
 * Fix: the DEBUG_MALLOC "exceeded msize" diagnostic passed one argument for
 * two conversions — %p consumed new_msize and %d read a garbage vararg
 * (undefined behavior); `ptr` is now supplied for %p.
 */
static INLINE boolean_t
small_try_realloc_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size)
{
msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr);
unsigned index;
msize_t old_msize, new_msize;
unsigned next_index;
void *next_block;
msize_t next_msize_and_free;
boolean_t is_free;
msize_t next_msize, leftover_msize;
void *leftover;
index = SMALL_META_INDEX_FOR_PTR(ptr);
old_msize = SMALL_MSIZE_FOR_BYTES(old_size);
// Round the requested size up to a whole number of quanta.
new_msize = SMALL_MSIZE_FOR_BYTES(new_size + SMALL_QUANTUM - 1);
next_index = index + old_msize;
if (next_index >= NUM_SMALL_BLOCKS) {
return 0;
}
next_block = (char *)ptr + old_size;
#if DEBUG_MALLOC
if ((uintptr_t)next_block & (SMALL_QUANTUM - 1)) {
szone_error(szone, 1, "internal invariant broken in realloc(next_block)", next_block, NULL);
}
if (meta_headers[index] != old_msize)
malloc_printf("*** small_try_realloc_in_place incorrect old %d %d\n",
meta_headers[index], old_msize);
#endif
magazine_t *small_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->small_magazines,
REGION_TRAILER_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr)),
MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr)));
// The following block must be free and large enough to reach new_msize.
next_msize_and_free = meta_headers[next_index];
is_free = next_msize_and_free & SMALL_IS_FREE;
if (!is_free) {
SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
return 0; }
next_msize = next_msize_and_free & ~ SMALL_IS_FREE;
if (old_msize + next_msize < new_msize) {
SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
return 0; }
// Absorb the neighbor; return any surplus to the free list.
small_free_list_remove_ptr(szone, small_mag_ptr, next_block, next_msize);
small_meta_header_set_middle(meta_headers, next_index);
leftover_msize = old_msize + next_msize - new_msize;
if (leftover_msize) {
leftover = (unsigned char *)ptr + SMALL_BYTES_FOR_MSIZE(new_msize);
small_free_list_add_ptr(szone, small_mag_ptr, leftover, leftover_msize);
}
#if DEBUG_MALLOC
if (SMALL_BYTES_FOR_MSIZE(new_msize) > szone->large_threshold) {
malloc_printf("*** realloc in place for %p exceeded msize=%d\n", ptr, new_msize);
}
#endif
small_meta_header_set_in_use(meta_headers, index, new_msize);
#if DEBUG_MALLOC
if (LOG(szone,ptr)) {
malloc_printf("in small_try_realloc_in_place(), ptr=%p, msize=%d\n", ptr, *SMALL_METADATA_FOR_PTR(ptr));
}
#endif
small_mag_ptr->mag_num_bytes_in_objects += SMALL_BYTES_FOR_MSIZE(new_msize - old_msize);
// Update region density; a dense region is no longer a recirculation
// candidate.
region_trailer_t *node = REGION_TRAILER_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr));
size_t bytes_used = node->bytes_used + SMALL_BYTES_FOR_MSIZE(new_msize - old_msize);
node->bytes_used = bytes_used;
if (bytes_used < DENSITY_THRESHOLD(SMALL_REGION_PAYLOAD_BYTES)) {
} else {
node->recirc_suitable = FALSE;
}
SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
CHECK(szone, __PRETTY_FUNCTION__);
return 1;
}
/*
 * small_check_region: consistency-check every block in `region`: header
 * sizes, free-list links (checksummed), and trailing "previous msize"
 * markers. Prints a diagnostic and returns 0 on the first violation;
 * returns 1 when the region looks sane. Magazine lock held by caller.
 */
static boolean_t
small_check_region(szone_t *szone, region_t region)
{
unsigned char *ptr = SMALL_REGION_ADDRESS(region);
msize_t *meta_headers = SMALL_META_HEADER_FOR_PTR(ptr);
unsigned char *region_end = SMALL_REGION_END(region);
msize_t prev_free = 0;
unsigned index;
msize_t msize_and_free;
msize_t msize;
free_list_t *free_head;
void *previous, *next;
msize_t *follower;
mag_index_t mag_index = MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr));
magazine_t *small_mag_ptr = &(szone->small_magazines[mag_index]);
CHECK_MAGAZINE_PTR_LOCKED(szone, small_mag_ptr, __PRETTY_FUNCTION__);
// Skip the unclaimed space at the start/end of the magazine's last region.
if (region == small_mag_ptr->mag_last_region) {
ptr += small_mag_ptr->mag_bytes_free_at_start;
region_end -= small_mag_ptr->mag_bytes_free_at_end;
}
while (ptr < region_end) {
index = SMALL_META_INDEX_FOR_PTR(ptr);
msize_and_free = meta_headers[index];
if (!(msize_and_free & SMALL_IS_FREE)) {
// In-use block: the recorded size just has to be plausible.
msize = msize_and_free;
if (!msize) {
malloc_printf("*** invariant broken: null msize ptr=%p num_small_regions=%d end=%p\n",
ptr, szone->num_small_regions, region_end);
return 0;
}
#if !RELAXED_INVARIANT_CHECKS
if (SMALL_BYTES_FOR_MSIZE(msize) > szone->large_threshold) {
malloc_printf("*** invariant broken for %p this small msize=%d - size is too large\n",
ptr, msize_and_free);
return 0;
}
#endif // RELAXED_INVARIANT_CHECKS
ptr += SMALL_BYTES_FOR_MSIZE(msize);
prev_free = 0;
} else {
// Free block: validate its list links and trailing msize marker.
msize = msize_and_free & ~ SMALL_IS_FREE;
free_head = (free_list_t *)ptr;
follower = (msize_t *)FOLLOWING_SMALL_PTR(ptr, msize);
if (!msize) {
malloc_printf("*** invariant broken for free block %p this msize=%d\n", ptr, msize);
return 0;
}
#if !RELAXED_INVARIANT_CHECKS
// Adjacent free blocks should have been coalesced.
if (prev_free) {
malloc_printf("*** invariant broken for %p (2 free in a row)\n", ptr);
return 0;
}
#endif
previous = free_list_unchecksum_ptr(szone, &free_head->previous);
next = free_list_unchecksum_ptr(szone, &free_head->next);
if (previous && !SMALL_PTR_IS_FREE(previous)) {
malloc_printf("*** invariant broken for %p (previous %p is not a free pointer)\n",
ptr, free_head->previous);
return 0;
}
if (next && !SMALL_PTR_IS_FREE(next)) {
malloc_printf("*** invariant broken for %p (next is not a free pointer)\n", ptr);
return 0;
}
// The msize stored just before the following block must match ours.
if (SMALL_PREVIOUS_MSIZE(follower) != msize) {
malloc_printf("*** invariant broken for small free %p followed by %p in region [%p-%p] "
"(end marker incorrect) should be %d; in fact %d\n",
ptr, follower, SMALL_REGION_ADDRESS(region), region_end, msize, SMALL_PREVIOUS_MSIZE(follower));
return 0;
}
ptr = (unsigned char *)follower;
prev_free = SMALL_IS_FREE;
}
}
return 1;
}
/*
 * small_in_use_enumerator: malloc zone introspection callback. Reads the
 * target task's small-region hash through `reader` and reports, per
 * type_mask, the metadata (admin) ranges, the pointer-bearing ranges, and
 * each individual in-use block via `recorder`.
 * Fix: the reader call for the region array contained the mis-encoded
 * token "®ions" where "&regions" was intended; restored so the code
 * compiles.
 */
static kern_return_t
small_in_use_enumerator(task_t task, void *context, unsigned type_mask, szone_t *szone,
memory_reader_t reader, vm_range_recorder_t recorder)
{
size_t num_regions;
size_t index;
region_t *regions;
vm_range_t buffer[MAX_RECORDER_BUFFER];
unsigned count = 0;
kern_return_t err;
region_t region;
vm_range_t range;
vm_range_t admin_range;
vm_range_t ptr_range;
unsigned char *mapped_region;
msize_t *block_header;
unsigned block_index;
unsigned block_limit;
msize_t msize_and_free;
msize_t msize;
magazine_t *small_mag_base = NULL;
region_hash_generation_t *srg_ptr;
err = reader(task, (vm_address_t)szone->small_region_generation, sizeof(region_hash_generation_t), (void **)&srg_ptr);
if (err) return err;
num_regions = srg_ptr->num_regions_allocated;
err = reader(task, (vm_address_t)srg_ptr->hashed_regions, sizeof(region_t) * num_regions, (void **)&regions);
if (err) return err;
if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) {
// Map the magazines so each region's cached mag_last_free block can be
// excluded from the in-use report below.
err = reader(task, (vm_address_t)(szone->small_magazines),
szone->num_small_magazines*sizeof(magazine_t),(void **)&small_mag_base);
if (err) return err;
}
for (index = 0; index < num_regions; ++index) {
region = regions[index];
// Skip empty hash slots and tombstones.
if (HASHRING_OPEN_ENTRY != region && HASHRING_REGION_DEALLOCATED != region) {
range.address = (vm_address_t)SMALL_REGION_ADDRESS(region);
range.size = SMALL_REGION_SIZE;
if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) {
admin_range.address = range.address + SMALL_METADATA_START;
admin_range.size = SMALL_METADATA_SIZE;
recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &admin_range, 1);
}
if (type_mask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) {
ptr_range.address = range.address;
ptr_range.size = NUM_SMALL_BLOCKS * SMALL_QUANTUM;
recorder(task, context, MALLOC_PTR_REGION_RANGE_TYPE, &ptr_range, 1);
}
if (type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE) {
void *mag_last_free;
vm_address_t mag_last_free_ptr = 0;
msize_t mag_last_free_msize = 0;
err = reader(task, range.address, range.size, (void **)&mapped_region);
if (err)
return err;
// A magazine's cached last-free block still has an in-use header;
// find it so it is not reported as live. Its pointer and msize are
// packed into one word (msize in the low quantum bits).
mag_index_t mag_index = MAGAZINE_INDEX_FOR_SMALL_REGION(mapped_region);
magazine_t *small_mag_ptr = small_mag_base + mag_index;
if (DEPOT_MAGAZINE_INDEX != mag_index) {
mag_last_free = small_mag_ptr->mag_last_free;
if (mag_last_free) {
mag_last_free_ptr = (uintptr_t) mag_last_free & ~(SMALL_QUANTUM - 1);
mag_last_free_msize = (uintptr_t) mag_last_free & (SMALL_QUANTUM - 1);
}
} else {
// Depot region: any magazine's cached block may live here.
for (mag_index = 0; mag_index < szone->num_small_magazines; mag_index++) {
if ((void *)range.address == (small_mag_base + mag_index)->mag_last_free_rgn) {
mag_last_free = (small_mag_base + mag_index)->mag_last_free;
if (mag_last_free) {
mag_last_free_ptr = (uintptr_t) mag_last_free & ~(SMALL_QUANTUM - 1);
mag_last_free_msize = (uintptr_t) mag_last_free & (SMALL_QUANTUM - 1);
}
}
}
}
block_header = (msize_t *)(mapped_region + SMALL_METADATA_START + sizeof(region_trailer_t));
block_index = 0;
block_limit = NUM_SMALL_BLOCKS;
// Exclude the unclaimed space at the start/end of the last region.
if (region == small_mag_ptr->mag_last_region) {
block_index += SMALL_MSIZE_FOR_BYTES(small_mag_ptr->mag_bytes_free_at_start);
block_limit -= SMALL_MSIZE_FOR_BYTES(small_mag_ptr->mag_bytes_free_at_end);
}
while (block_index < block_limit) {
msize_and_free = block_header[block_index];
msize = msize_and_free & ~ SMALL_IS_FREE;
if (! (msize_and_free & SMALL_IS_FREE) &&
range.address + SMALL_BYTES_FOR_MSIZE(block_index) != mag_last_free_ptr) {
buffer[count].address = range.address + SMALL_BYTES_FOR_MSIZE(block_index);
buffer[count].size = SMALL_BYTES_FOR_MSIZE(msize);
count++;
if (count >= MAX_RECORDER_BUFFER) {
recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
count = 0;
}
}
// A zero msize would loop forever; bail out on corrupt metadata.
if (!msize)
return KERN_FAILURE;
block_index += msize;
}
if (count) {
recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE, buffer, count);
count = 0;
}
}
}
}
return 0;
}
/*
 * Carve a block of `msize` SMALL quanta out of the magazine's free lists.
 *
 * Search order:
 *   1. exact-fit slot for `msize`;
 *   2. next-larger occupied slot found via the occupancy bitmap;
 *   3. the catch-all last slot (blocks >= num_small_slots quanta), splitting
 *      off the remainder when it is itself catch-all sized;
 *   4. the untouched space at the end (and, with ASLR_INTERNAL, the start)
 *      of the magazine's last region.
 *
 * Returns a pointer to the block marked in-use, or NULL if nothing fits.
 * The caller must hold the magazine lock (checked in DEBUG_MALLOC builds).
 *
 * Fix vs. previous revision: the `SMALL_PREVIOUS_MSIZE(...) = leftover_msize;`
 * statement and the `#if DEBUG_MALLOC` directive were fused onto one physical
 * line; a preprocessor directive must begin its own line, so they are split.
 */
static void *
small_malloc_from_free_list(szone_t *szone, magazine_t *small_mag_ptr, mag_index_t mag_index, msize_t msize)
{
	free_list_t *ptr;
	msize_t this_msize;
	grain_t slot = (msize <= szone->num_small_slots) ? msize - 1 : szone->num_small_slots - 1;
	free_list_t **free_list = small_mag_ptr->mag_free_list;
	free_list_t **the_slot = free_list + slot;
	free_list_t *next;
	free_list_t **limit;
	unsigned bitmap;
	msize_t leftover_msize;
	free_list_t *leftover_ptr;

	CHECK_MAGAZINE_PTR_LOCKED(szone, small_mag_ptr, __PRETTY_FUNCTION__);

	/* 1. Exact fit: pop the head of this msize's slot. */
	ptr = *the_slot;
	if (ptr) {
		next = free_list_unchecksum_ptr(szone, &ptr->next);
		if (next) {
			next->previous = ptr->previous;
		} else {
			/* Slot is now empty: clear its occupancy bit. */
			BITMAPN_CLR(small_mag_ptr->mag_bitmap, slot);
		}
		*the_slot = next;
		this_msize = msize;
		goto return_small_alloc;
	}

	/* 2. Find the next-larger occupied slot via the bitmap. */
	if (szone->is_largemem) {
		/* Multi-word bitmap: scan from the word containing `slot` upward. */
		unsigned idx = slot >> 5;
		bitmap = 0;
		unsigned mask = ~ ((1 << (slot & 31)) - 1);
		for ( ; idx < SMALL_BITMAP_WORDS; ++idx ) {
			bitmap = small_mag_ptr->mag_bitmap[idx] & mask;
			if (bitmap != 0)
				break;
			mask = ~0U;
		}
		if ((bitmap == 0) && (idx == SMALL_BITMAP_WORDS))
			goto try_small_from_end;
		slot = BITMAP32_CTZ((&bitmap)) + (idx * 32);
	} else {
		bitmap = small_mag_ptr->mag_bitmap[0] & ~ ((1 << slot) - 1);
		if (!bitmap)
			goto try_small_from_end;
		slot = BITMAP32_CTZ((&bitmap));
	}
	limit = free_list + szone->num_small_slots - 1;
	free_list += slot;

	if (free_list < limit) {
		ptr = *free_list;
		if (ptr) {
			next = free_list_unchecksum_ptr(szone, &ptr->next);
			*free_list = next;
			if (next) {
				next->previous = ptr->previous;
			} else {
				BITMAPN_CLR(small_mag_ptr->mag_bitmap, slot);
			}
			this_msize = SMALL_PTR_SIZE(ptr);
			goto add_leftover_and_proceed;
		}
#if DEBUG_MALLOC
		malloc_printf("in small_malloc_from_free_list(), mag_bitmap out of sync, slot=%d\n",slot);
#endif
	}

	/*
	 * 3. Catch-all slot: holds blocks of num_small_slots quanta or more.
	 * If the head block is so much larger that the remainder would itself
	 * belong in the catch-all slot, split in place and leave the tail on
	 * the list (avoids a remove + re-add).
	 */
	ptr = *limit;
	if (ptr) {
		this_msize = SMALL_PTR_SIZE(ptr);
		next = free_list_unchecksum_ptr(szone, &ptr->next);
		if (this_msize - msize >= szone->num_small_slots) {
			leftover_msize = this_msize - msize;
			leftover_ptr = (free_list_t *)((unsigned char *)ptr + SMALL_BYTES_FOR_MSIZE(msize));

			/* Splice the leftover tail into the list where `ptr` was. */
			*limit = leftover_ptr;
			if (next) {
				next->previous.u = free_list_checksum_ptr(szone, leftover_ptr);
			}
			leftover_ptr->previous = ptr->previous;
			leftover_ptr->next = ptr->next;
			small_meta_header_set_is_free(SMALL_META_HEADER_FOR_PTR(leftover_ptr),
			    SMALL_META_INDEX_FOR_PTR(leftover_ptr), leftover_msize);
			/* Record the free block's size at its far end for coalescing. */
			SMALL_PREVIOUS_MSIZE(FOLLOWING_SMALL_PTR(leftover_ptr, leftover_msize)) = leftover_msize;
#if DEBUG_MALLOC
			if (LOG(szone,ptr)) {
				malloc_printf("in small_malloc_from_free_list(), last slot ptr=%p, msize=%d this_msize=%d\n", ptr, msize, this_msize);
			}
#endif
			this_msize = msize;
			goto return_small_alloc;
		}
		if (next) {
			next->previous = ptr->previous;
		}
		*limit = next;
		goto add_leftover_and_proceed;
	}

try_small_from_end:
	/* 4a. Untouched space at the end of the magazine's last region. */
	if (small_mag_ptr->mag_bytes_free_at_end >= SMALL_BYTES_FOR_MSIZE(msize)) {
		ptr = (free_list_t *)(SMALL_REGION_END(small_mag_ptr->mag_last_region) -
		    small_mag_ptr->mag_bytes_free_at_end);
		small_mag_ptr->mag_bytes_free_at_end -= SMALL_BYTES_FOR_MSIZE(msize);
		if (small_mag_ptr->mag_bytes_free_at_end) {
			/* Mark the remaining tail so the metadata stays consistent. */
			small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(ptr),
			    SMALL_META_INDEX_FOR_PTR((unsigned char *)ptr + SMALL_BYTES_FOR_MSIZE(msize)),
			    SMALL_MSIZE_FOR_BYTES(small_mag_ptr->mag_bytes_free_at_end));
		}
		this_msize = msize;
		goto return_small_alloc;
	}
#if ASLR_INTERNAL
	/* 4b. Untouched space at the start of the last region (ASLR slide gap). */
	if (small_mag_ptr->mag_bytes_free_at_start >= SMALL_BYTES_FOR_MSIZE(msize)) {
		ptr = (free_list_t *)(SMALL_REGION_ADDRESS(small_mag_ptr->mag_last_region) +
		    small_mag_ptr->mag_bytes_free_at_start - SMALL_BYTES_FOR_MSIZE(msize));
		small_mag_ptr->mag_bytes_free_at_start -= SMALL_BYTES_FOR_MSIZE(msize);
		if (small_mag_ptr->mag_bytes_free_at_start) {
			small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(ptr), 0, SMALL_MSIZE_FOR_BYTES(small_mag_ptr->mag_bytes_free_at_start));
		}
		this_msize = msize;
		goto return_small_alloc;
	}
#endif
	return NULL;

add_leftover_and_proceed:
	/* Block taken from a larger slot: return the surplus to the free lists. */
	if (this_msize > msize) {
		leftover_msize = this_msize - msize;
		leftover_ptr = (free_list_t *)((unsigned char *)ptr + SMALL_BYTES_FOR_MSIZE(msize));
#if DEBUG_MALLOC
		if (LOG(szone,ptr)) {
			malloc_printf("in small_malloc_from_free_list(), adding leftover ptr=%p, this_msize=%d\n", ptr, this_msize);
		}
#endif
		small_free_list_add_ptr(szone, small_mag_ptr, leftover_ptr, leftover_msize);
		this_msize = msize;
	}

return_small_alloc:
	small_mag_ptr->mag_num_objects++;
	small_mag_ptr->mag_num_bytes_in_objects += SMALL_BYTES_FOR_MSIZE(this_msize);

	/* Update the region's density bookkeeping; a dense region is no longer
	 * a candidate for recirculation to the depot. */
	region_trailer_t *node = REGION_TRAILER_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr));
	size_t bytes_used = node->bytes_used + SMALL_BYTES_FOR_MSIZE(this_msize);
	node->bytes_used = bytes_used;
	if (bytes_used >= DENSITY_THRESHOLD(SMALL_REGION_PAYLOAD_BYTES)) {
		node->recirc_suitable = FALSE;
	}
#if DEBUG_MALLOC
	if (LOG(szone,ptr)) {
		malloc_printf("in small_malloc_from_free_list(), ptr=%p, this_msize=%d, msize=%d\n", ptr, this_msize, msize);
	}
#endif
	small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(ptr), SMALL_META_INDEX_FOR_PTR(ptr), this_msize);
	return ptr;
}
#undef DENSITY_THRESHOLD
#undef K
/*
 * Allocate a block of `msize` SMALL quanta for the calling thread, zeroing it
 * when `cleared_requested` is set.  Fast path is the per-magazine one-entry
 * last-free cache; otherwise the magazine free lists, then the depot, then a
 * freshly mapped region.  Returns NULL only if a fresh region cannot be
 * mapped.  Takes and releases the thread's magazine lock internally.
 */
static INLINE void *
small_malloc_should_clear(szone_t *szone, msize_t msize, boolean_t cleared_requested)
{
	void *ptr;
	mag_index_t mag_index = mag_get_thread_index(szone);
	magazine_t *small_mag_ptr = &(szone->small_magazines[mag_index]);

	SZONE_MAGAZINE_PTR_LOCK(szone, small_mag_ptr);

#if SMALL_CACHE
	/* mag_last_free packs (block address | msize) into one word; the low
	 * bits equal msize only on an exact size match. */
	ptr = (void *)small_mag_ptr->mag_last_free;
	if ((((uintptr_t)ptr) & (SMALL_QUANTUM - 1)) == msize) {
		/* Exact-size hit: hand back the most recently freed block. */
		small_mag_ptr->mag_last_free = NULL;
		small_mag_ptr->mag_last_free_rgn = NULL;
		SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
		CHECK(szone, __PRETTY_FUNCTION__);
		ptr = (void *)((uintptr_t)ptr & ~ (SMALL_QUANTUM - 1));
		if (cleared_requested) {
			memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize));
		}
		return ptr;
	}
#endif

	while (1) {
		ptr = small_malloc_from_free_list(szone, small_mag_ptr, mag_index, msize);
		if (ptr) {
			SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
			CHECK(szone, __PRETTY_FUNCTION__);
			if (cleared_requested) {
				memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize));
			}
			return ptr;
		}

		/* Free lists empty: try to pull a region over from the depot. */
		if (small_get_region_from_depot(szone, small_mag_ptr, mag_index, msize)) {
			ptr = small_malloc_from_free_list(szone, small_mag_ptr, mag_index, msize);
			if (ptr) {
				SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
				CHECK(szone, __PRETTY_FUNCTION__);
				if (cleared_requested) {
					memset(ptr, 0, SMALL_BYTES_FOR_MSIZE(msize));
				}
				return ptr;
			}
		}

		if (!small_mag_ptr->alloc_underway) {
			void *fresh_region;

			/* Mark this magazine as mid-allocation, then drop the lock
			 * while the (possibly slow) region mapping happens. */
			small_mag_ptr->alloc_underway = TRUE;
			OSMemoryBarrier();
			SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
			fresh_region = allocate_pages_securely(szone, SMALL_REGION_SIZE, SMALL_BLOCKS_ALIGN, VM_MEMORY_MALLOC_SMALL);
			SZONE_MAGAZINE_PTR_LOCK(szone, small_mag_ptr);

			/* DTrace probe. */
			MAGMALLOC_ALLOCREGION((void *)szone, (int)mag_index, fresh_region, SMALL_REGION_SIZE);

			if (!fresh_region) {
				small_mag_ptr->alloc_underway = FALSE;
				OSMemoryBarrier();
				SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
				return NULL;
			}
			/* No memset here: presumably freshly mapped pages are already
			 * zero-filled, satisfying cleared_requested — TODO confirm. */
			ptr = small_malloc_from_region_no_lock(szone, small_mag_ptr, mag_index, msize, fresh_region);
			small_mag_ptr->alloc_underway = FALSE;
			OSMemoryBarrier();
			SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
			CHECK(szone, __PRETTY_FUNCTION__);
			return ptr;
		} else {
			/* Another thread is already mapping a region for this
			 * magazine: yield and retry the free lists. */
			SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
			pthread_yield_np();
			SZONE_MAGAZINE_PTR_LOCK(szone, small_mag_ptr);
		}
	}
}
/*
 * Report a double free of a SMALL block.  Entered with the owning magazine's
 * lock held; the lock is dropped before raising the (possibly aborting)
 * zone error.
 */
static NOINLINE void
free_small_botch(szone_t *szone, free_list_t *ptr)
{
	region_t region = SMALL_REGION_FOR_PTR(ptr);
	mag_index_t owner = MAGAZINE_INDEX_FOR_SMALL_REGION(region);
	magazine_t *owner_mag = &(szone->small_magazines[owner]);

	SZONE_MAGAZINE_PTR_UNLOCK(szone, owner_mag);
	szone_error(szone, 1, "double free", ptr, NULL);
}
/*
 * Free a SMALL block back to its owning magazine.  `known_size`, when
 * non-zero, is trusted in place of the block's metadata (szone_free_definite_size
 * path).  With SMALL_CACHE, the block is first parked in the magazine's
 * one-entry last-free cache and the previously cached block (if any) is the
 * one actually freed.
 */
static INLINE void
free_small(szone_t *szone, void *ptr, region_t small_region, size_t known_size)
{
	msize_t msize;
	mag_index_t mag_index = MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr));
	magazine_t *small_mag_ptr = &(szone->small_magazines[mag_index]);

	if (known_size) {
		msize = SMALL_MSIZE_FOR_BYTES(known_size + SMALL_QUANTUM - 1);
	} else {
		msize = SMALL_PTR_SIZE(ptr);
		if (SMALL_PTR_IS_FREE(ptr)) {
			/* Metadata already says free: double free. */
			free_small_botch(szone, ptr);
			return;
		}
	}

	SZONE_MAGAZINE_PTR_LOCK(szone, small_mag_ptr);

#if SMALL_CACHE
	/* Depot owns orphaned regions; its blocks bypass the last-free cache. */
	if (DEPOT_MAGAZINE_INDEX != mag_index) {
		void *ptr2 = small_mag_ptr->mag_last_free; /* packed (address | msize) */
		region_t rgn2 = small_mag_ptr->mag_last_free_rgn;

		/* Freeing the block already in the cache is a double free. */
		if (ptr == (void *)((uintptr_t)ptr2 & ~ (SMALL_QUANTUM - 1))) {
			free_small_botch(szone, ptr);
			return;
		}

		if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && msize)
			memset(ptr, 0x55, SMALL_BYTES_FOR_MSIZE(msize));

		/* Park the incoming block in the cache... */
		small_mag_ptr->mag_last_free = (void *)(((uintptr_t)ptr) | msize);
		small_mag_ptr->mag_last_free_rgn = small_region;

		if (!ptr2) {
			/* Cache was empty: nothing further to free. */
			SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
			CHECK(szone, __PRETTY_FUNCTION__);
			return;
		}

		/* ...and continue by freeing the block it displaced. */
		msize = (uintptr_t)ptr2 & (SMALL_QUANTUM - 1);
		ptr = (void *)(((uintptr_t)ptr2) & ~(SMALL_QUANTUM - 1));
		small_region = rgn2;
	}
#endif

	/* The region may have migrated to another magazine (e.g. via the depot);
	 * chase the trailer's mag_index until we hold the right lock. */
	region_trailer_t *trailer = REGION_TRAILER_FOR_SMALL_REGION(small_region);
	mag_index_t refreshed_index;
	while (mag_index != (refreshed_index = trailer->mag_index)) {
		SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
		mag_index = refreshed_index;
		small_mag_ptr = &(szone->small_magazines[mag_index]);
		SZONE_MAGAZINE_PTR_LOCK(szone, small_mag_ptr);
	}

	/* small_free_no_lock's return value appears to indicate whether the lock
	 * is still held and must be dropped here — TODO confirm. */
	if (small_free_no_lock(szone, small_mag_ptr, mag_index, small_region, ptr, msize))
		SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);

	CHECK(szone, __PRETTY_FUNCTION__);
}
/*
 * Debug aid: print, for every small magazine (including the depot at
 * index -1), the population of each free-list slot.
 */
static void
print_small_free_list(szone_t *szone)
{
	_SIMPLE_STRING buf = _simple_salloc();
	mag_index_t m;
	grain_t slot;

	if (!buf)
		return;

	_simple_sappend(buf, "small free sizes:\n");
	/* Index -1 is the depot magazine. */
	for (m = -1; m < szone->num_small_magazines; m++) {
		_simple_sprintf(buf, "\tMagazine %d: ", m);
		for (slot = 0; slot < szone->num_small_slots; slot++) {
			free_list_t *head = szone->small_magazines[m].mag_free_list[slot];
			if (!head)
				continue;
			/* The final slot is a catch-all for larger blocks. */
			_simple_sprintf(buf, "%s%y[%d]; ",
			    (slot == szone->num_small_slots - 1) ? ">=" : "",
			    (slot + 1) * SMALL_QUANTUM, free_list_count(szone, head));
		}
		_simple_sappend(buf, "\n");
	}
	_malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%s\n", _simple_string(buf));
	_simple_sfree(buf);
}
/*
 * Debug aid: summarize one SMALL region — allocations in use, bytes used,
 * reclaimable whole pages inside free blocks, and (verbose) a size histogram.
 * `bytes_at_start`/`bytes_at_end` are the untouched spans of the magazine's
 * last region and are skipped by the walk.
 */
static void
print_small_region(szone_t *szone, boolean_t verbose, region_t region, size_t bytes_at_start, size_t bytes_at_end)
{
	unsigned counts[1024];	/* histogram of in-use block sizes, indexed by msize */
	unsigned in_use = 0;
	uintptr_t start = (uintptr_t)SMALL_REGION_ADDRESS(region);
	uintptr_t current = start + bytes_at_start;
	uintptr_t limit = (uintptr_t)SMALL_REGION_END(region) - bytes_at_end;
	msize_t msize_and_free;
	msize_t msize;
	unsigned ci;
	_SIMPLE_STRING b;
	uintptr_t pgTot = 0;	/* whole pages inside free blocks (reclaimable) */

	if (region == HASHRING_REGION_DEALLOCATED) {
		if ((b = _simple_salloc()) != NULL) {
			_simple_sprintf(b, "Small region [unknown address] was returned to the OS\n");
			_malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%s\n", _simple_string(b));
			_simple_sfree(b);
		}
		return;
	}

	memset(counts, 0, sizeof(counts));
	/* Walk the region block by block via the metadata headers. */
	while (current < limit) {
		msize_and_free = *SMALL_METADATA_FOR_PTR(current);
		msize = msize_and_free & ~ SMALL_IS_FREE;
		if (!msize) {
			/* A zero msize means corrupt metadata; stop the walk. */
			malloc_printf("*** error with %p: msize=%d\n", (void *)current, (unsigned)msize);
			break;
		}
		if (!(msize_and_free & SMALL_IS_FREE)) {
			/* In-use block. */
			if (msize < 1024)
				counts[msize]++;
			in_use++;
		} else {
			/* Free block: count the whole pages it spans, excluding the
			 * free-list linkage at its head and the trailing msize. */
			uintptr_t pgLo = round_page(current + sizeof(free_list_t) + sizeof(msize_t));
			uintptr_t pgHi = trunc_page(current + SMALL_BYTES_FOR_MSIZE(msize) - sizeof(msize_t));
			if (pgLo < pgHi) {
				pgTot += (pgHi - pgLo);
			}
		}
		current += SMALL_BYTES_FOR_MSIZE(msize);
	}
	if ((b = _simple_salloc()) != NULL) {
		_simple_sprintf(b, "Small region [%p-%p, %y] \t", (void *)start, SMALL_REGION_END(region), (int)SMALL_REGION_SIZE);
		_simple_sprintf(b, "Magazine=%d \t", MAGAZINE_INDEX_FOR_SMALL_REGION(region));
		_simple_sprintf(b, "Allocations in use=%d \t Bytes in use=%ly \t", in_use, BYTES_USED_FOR_SMALL_REGION(region));
		if (bytes_at_end || bytes_at_start)
			_simple_sprintf(b, "Untouched=%ly ", bytes_at_end + bytes_at_start);
		if (DEPOT_MAGAZINE_INDEX == MAGAZINE_INDEX_FOR_SMALL_REGION(region)) {
			_simple_sprintf(b, "Advised MADV_FREE=%ly", pgTot);
		} else {
			_simple_sprintf(b, "Fragments subject to reclamation=%ly", pgTot);
		}
		if (verbose && in_use) {
			_simple_sappend(b, "\n\tSizes in use: ");
			for (ci = 0; ci < 1024; ci++)
				if (counts[ci])
					_simple_sprintf(b, "%d[%d] ", SMALL_BYTES_FOR_MSIZE(ci), counts[ci]);
		}
		_malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%s\n", _simple_string(b));
		_simple_sfree(b);
	}
}
/*
 * Consistency check: walk free-list `slot` of every small magazine
 * (including the depot at index -1) and verify that each node is marked
 * free, quantum-aligned, inside a known region, and correctly back-linked.
 * Returns 1 when every list is consistent, 0 (after a diagnostic) otherwise.
 */
static boolean_t
small_free_list_check(szone_t *szone, grain_t slot)
{
	mag_index_t m;

	for (m = -1; m < szone->num_small_magazines; m++) {
		magazine_t *mag = &(szone->small_magazines[m]);
		free_list_t *prev = NULL;
		free_list_t *cur;
		unsigned chain_pos = 0;

		SZONE_MAGAZINE_PTR_LOCK(szone, mag);
		cur = szone->small_magazines[m].mag_free_list[slot];
		while (cur) {
			const char *complaint = NULL;
			msize_t meta = *SMALL_METADATA_FOR_PTR(cur);

			if (!(meta & SMALL_IS_FREE))
				complaint = "*** in-use ptr in free list slot=%d count=%d ptr=%p\n";
			else if (((uintptr_t)cur) & (SMALL_QUANTUM - 1))
				complaint = "*** unaligned ptr in free list slot=%d count=%d ptr=%p\n";
			else if (!small_region_for_ptr_no_lock(szone, cur))
				complaint = "*** ptr not in szone slot=%d count=%d ptr=%p\n";
			else if (free_list_unchecksum_ptr(szone, &cur->previous) != prev)
				complaint = "*** previous incorrectly set slot=%d count=%d ptr=%p\n";

			if (complaint) {
				malloc_printf(complaint, slot, chain_pos, cur);
				SZONE_MAGAZINE_PTR_UNLOCK(szone, mag);
				return 0;
			}
			prev = cur;
			cur = free_list_unchecksum_ptr(szone, &cur->next);
			chain_pos++;
		}
		SZONE_MAGAZINE_PTR_UNLOCK(szone, mag);
	}
	return 1;
}
#pragma mark large allocator
#if DEBUG_MALLOC
/* Debug aid: dump every live large entry as "index: address(size); ". */
static void
large_debug_print(szone_t *szone)
{
	_SIMPLE_STRING buf = _simple_salloc();
	unsigned i;

	if (!buf)
		return;
	for (i = 0; i < szone->num_large_entries; i++) {
		large_entry_t *e = szone->large_entries + i;
		if (e->address)
			_simple_sprintf(buf, "%d: %p(%y); ", i, e->address, e->size);
	}
	_malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX, "%s\n", _simple_string(buf));
	_simple_sfree(buf);
}
#endif
/*
 * Look up `ptr` in the large-entry hash table (open addressing, linear
 * probing, hashed by page number).  Returns the matching entry, or NULL when
 * the probe chain hits an empty slot or wraps without a match.  Caller holds
 * the zone lock.
 */
static large_entry_t *
large_entry_for_pointer_no_lock(szone_t *szone, const void *ptr)
{
	unsigned table_size = szone->num_large_entries;
	unsigned probe, home;

	if (!table_size)
		return NULL;

	home = ((uintptr_t)ptr >> vm_page_shift) % table_size;
	probe = home;
	do {
		large_entry_t *slot = szone->large_entries + probe;
		if (slot->address == (vm_address_t)ptr)
			return slot;
		if (slot->address == 0)
			return NULL;	/* empty slot terminates the probe chain */
		if (++probe == table_size)
			probe = 0;
	} while (probe != home);
	return NULL;
}
/*
 * Insert `range` into the large-entry hash table at the first empty slot on
 * its probe chain.  Caller holds the zone lock and guarantees the table is
 * not full (it is grown at 3/4 occupancy before insertions).
 */
static void
large_entry_insert_no_lock(szone_t *szone, large_entry_t range)
{
	unsigned table_size = szone->num_large_entries;
	unsigned home = (((uintptr_t)(range.address)) >> vm_page_shift) % table_size;
	unsigned probe = home;

	do {
		large_entry_t *slot = szone->large_entries + probe;
		if (slot->address == 0) {
			*slot = range;
			return;
		}
		if (++probe == table_size)
			probe = 0;
	} while (probe != home);
	/* Unreachable while the growth invariant holds: silently drops if full. */
}
/*
 * After `entry` has been cleared, re-insert every entry on the probe chain
 * that follows it, so linear probing can still find them (standard
 * open-addressing deletion).  Caller holds the zone lock.
 */
static INLINE void
large_entries_rehash_after_entry_no_lock(szone_t *szone, large_entry_t *entry)
{
	unsigned table_size = szone->num_large_entries;
	unsigned hole = entry - szone->large_entries;
	unsigned probe = hole;

	do {
		if (++probe == table_size)
			probe = 0;
		large_entry_t displaced = szone->large_entries[probe];
		if (displaced.address == 0)
			return;		/* empty slot: end of the probe chain */
		/* Vacate the slot, then reinsert so the entry settles at its
		 * correct probe position. */
		szone->large_entries[probe].address = (vm_address_t)0;
		szone->large_entries[probe].size = 0;
		szone->large_entries[probe].did_madvise_reusable = FALSE;
		large_entry_insert_no_lock(szone, displaced);
	} while (probe != hole);
}
/*
 * Allocate page-rounded storage for `num` large entries straight from the
 * VM (the table cannot live in the zone it indexes).
 */
static INLINE large_entry_t *
large_entries_alloc_no_lock(szone_t *szone, unsigned num)
{
	size_t bytes = num * sizeof(large_entry_t);

	return allocate_pages(szone, round_page(bytes), 0, 0, VM_MEMORY_MALLOC_LARGE);
}
/*
 * Retire an old entry table.  The pages are not unmapped here; the caller
 * receives the page-rounded range and deallocates it after dropping the zone
 * lock.
 */
static void
large_entries_free_no_lock(szone_t *szone, large_entry_t *entries, unsigned num, vm_range_t *range_to_deallocate)
{
	size_t bytes = num * sizeof(large_entry_t);

	range_to_deallocate->address = (vm_address_t)entries;
	range_to_deallocate->size = round_page(bytes);
}
/*
 * Grow the large-entry hash table (roughly doubling it) and rehash every
 * live entry into the new table.  On success returns the new table and sets
 * *range_to_deallocate to the old table's pages (or a zero range when there
 * was none) for the caller to unmap after releasing the zone lock.  Returns
 * NULL, leaving the zone untouched, if the new table cannot be allocated.
 */
static large_entry_t *
large_entries_grow_no_lock(szone_t *szone, vm_range_t *range_to_deallocate)
{
	unsigned old_count = szone->num_large_entries;
	large_entry_t *old_table = szone->large_entries;
	/* First table fills one page; afterwards grow 2n+1 to keep it odd. */
	unsigned new_count = old_count ? old_count * 2 + 1
	    : ((vm_page_size / sizeof(large_entry_t)) - 1);
	large_entry_t *new_table = large_entries_alloc_no_lock(szone, new_count);
	unsigned i;

	if (!new_table)
		return NULL;

	szone->num_large_entries = new_count;
	szone->large_entries = new_table;

	for (i = old_count; i-- > 0; ) {
		large_entry_t moved = old_table[i];
		if (moved.address)
			large_entry_insert_no_lock(szone, moved);
	}

	if (old_table) {
		large_entries_free_no_lock(szone, old_table, old_count, range_to_deallocate);
	} else {
		range_to_deallocate->address = (vm_address_t)0;
		range_to_deallocate->size = 0;
	}
	return new_table;
}
/*
 * Remove `entry` from the large table and return the VM range the caller
 * should deallocate (widened to cover guard pages when those are enabled).
 * Caller holds the zone lock; the actual unmap happens after it is dropped.
 */
static vm_range_t
large_entry_free_no_lock(szone_t *szone, large_entry_t *entry)
{
	vm_range_t range;

	range.address = entry->address;
	range.size = entry->size;

	if (szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) {
		/* Make the guard pages writable again, then widen the range by one
		 * page on each side so they are unmapped with the block. */
		protect((void *)range.address, range.size, PROT_READ | PROT_WRITE, szone->debug_flags);
		range.address -= vm_page_size;
		range.size += 2 * vm_page_size;
	}

	entry->address = 0;
	entry->size = 0;
	entry->did_madvise_reusable = FALSE;
	/* Keep linear probing intact after this deletion. */
	large_entries_rehash_after_entry_no_lock(szone, entry);
#if DEBUG_MALLOC
	if (large_entry_for_pointer_no_lock(szone, (void *)range.address)) {
		malloc_printf("*** freed entry %p still in use; num_large_entries=%d\n",
		    range.address, szone->num_large_entries);
		large_debug_print(szone);
		szone_sleep();
	}
#endif
	return range;
}
/*
 * Introspection: enumerate the large allocations of (possibly another) task.
 * `reader` maps the target task's entry table into this address space;
 * matching ranges are reported through `recorder` in batches of up to
 * MAX_RECORDER_BUFFER.  Returns a kern_return_t error from the reader, else 0.
 */
static NOINLINE kern_return_t
large_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t large_entries_address,
	unsigned num_entries, memory_reader_t reader, vm_range_recorder_t recorder)
{
	large_entry_t *entries;
	kern_return_t err;

	err = reader(task, large_entries_address, sizeof(large_entry_t) * num_entries, (void **)&entries);
	if (err)
		return err;

	if (type_mask & MALLOC_ADMIN_REGION_RANGE_TYPE) {
		/* The entry table itself is allocator metadata. */
		vm_range_t admin;
		admin.address = large_entries_address;
		admin.size = round_page(num_entries * sizeof(large_entry_t));
		recorder(task, context, MALLOC_ADMIN_REGION_RANGE_TYPE, &admin, 1);
	}

	if (type_mask & (MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE)) {
		vm_range_t buffer[MAX_RECORDER_BUFFER];
		unsigned count = 0;
		unsigned i = num_entries;

		while (i--) {
			large_entry_t e = entries[i];
			if (!e.address)
				continue;
			buffer[count].address = e.address;
			buffer[count].size = e.size;
			count++;
			if (count >= MAX_RECORDER_BUFFER) {
				recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE,
				    buffer, count);
				count = 0;
			}
		}
		if (count) {
			recorder(task, context, MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE,
			    buffer, count);
		}
	}
	return 0;
}
/*
 * Allocate `num_pages` pages (optionally aligned to 1<<alignment bytes),
 * first trying the large-entry "death row" cache of recently freed blocks,
 * then mapping fresh pages.  Returns NULL on VM exhaustion.  When a cached
 * block that was madvised MADV_FREE_REUSABLE cannot be reclaimed with
 * MADV_FREE_REUSE, it is discarded and the code falls through to a fresh
 * mapping.
 */
static void *
large_malloc(szone_t *szone, size_t num_pages, unsigned char alignment,
	boolean_t cleared_requested)
{
	void *addr;
	vm_range_t range_to_deallocate;
	size_t size;
	large_entry_t large_entry;

	if (!num_pages)
		num_pages = 1;
	size = (size_t)num_pages << vm_page_shift;

	range_to_deallocate.size = 0;
	range_to_deallocate.address = 0;

#if LARGE_CACHE
	if (size < LARGE_CACHE_SIZE_ENTRY_LIMIT) {
		SZONE_LOCK(szone);
		int i, best = -1, idx = szone->large_entry_cache_newest, stop_idx = szone->large_entry_cache_oldest;
		size_t best_size = SIZE_T_MAX;

		/* Scan the death-row ring newest -> oldest for the smallest
		 * adequately sized (and suitably aligned) cached block. */
		while (1) {
			size_t this_size = szone->large_entry_cache[idx].size;
			addr = (void *)szone->large_entry_cache[idx].address;

			if (0 == alignment || 0 == (((uintptr_t) addr) & (((uintptr_t) 1 << alignment) - 1))) {
				if (size == this_size) {
					/* Exact fit: stop looking. */
					best = idx;
					best_size = this_size;
					break;
				}
				if (size <= this_size && this_size < best_size) {
					best = idx;
					best_size = this_size;
				}
			}
			if (idx == stop_idx)
				break;
			if (idx)
				idx--;
			else
				idx = LARGE_ENTRY_CACHE_SIZE - 1;
		}

		/* Use the cached block only if the waste is less than the request. */
		if (best > -1 && (best_size - size) < size) {
			addr = (void *)szone->large_entry_cache[best].address;
			boolean_t was_madvised_reusable = szone->large_entry_cache[best].did_madvise_reusable;

			/* Remove entry `best` from the ring, compacting toward the
			 * chosen slot; three cases by ring layout. */
			if (szone->large_entry_cache_oldest < szone->large_entry_cache_newest) {
				/* Contiguous, not wrapped. */
				for (i = best; i < szone->large_entry_cache_newest; ++i)
					szone->large_entry_cache[i] = szone->large_entry_cache[i + 1];
				szone->large_entry_cache_newest--;
			} else if (szone->large_entry_cache_newest < szone->large_entry_cache_oldest) {
				/* Wrapped around the end of the array. */
				if (best <= szone->large_entry_cache_newest) {
					for (i = best; i < szone->large_entry_cache_newest; ++i)
						szone->large_entry_cache[i] = szone->large_entry_cache[i + 1];
					if (0 < szone->large_entry_cache_newest)
						szone->large_entry_cache_newest--;
					else
						szone->large_entry_cache_newest = LARGE_ENTRY_CACHE_SIZE - 1;
				} else {
					for ( i = best; i > szone->large_entry_cache_oldest; --i)
						szone->large_entry_cache[i] = szone->large_entry_cache[i - 1];
					if (szone->large_entry_cache_oldest < LARGE_ENTRY_CACHE_SIZE - 1)
						szone->large_entry_cache_oldest++;
					else
						szone->large_entry_cache_oldest = 0;
				}
			} else {
				/* Single-entry ring: just clear the slot. */
				szone->large_entry_cache[best].address = 0;
				szone->large_entry_cache[best].size = 0;
				szone->large_entry_cache[best].did_madvise_reusable = FALSE;
			}

			/* Keep the hash table below ~3/4 occupancy. */
			if ((szone->num_large_objects_in_use + 1) * 4 > szone->num_large_entries) {
				large_entry_t *entries = large_entries_grow_no_lock(szone, &range_to_deallocate);
				if (entries == NULL) {
					SZONE_UNLOCK(szone);
					return NULL;
				}
			}

			large_entry.address = (vm_address_t)addr;
			large_entry.size = best_size;
			large_entry.did_madvise_reusable = FALSE;
			large_entry_insert_no_lock(szone, large_entry);

			szone->num_large_objects_in_use ++;
			szone->num_bytes_in_large_objects += best_size;
			if (!was_madvised_reusable)
				szone->large_entry_cache_reserve_bytes -= best_size;

			szone->large_entry_cache_bytes -= best_size;

			if (szone->flotsam_enabled && szone->large_entry_cache_bytes < SZONE_FLOTSAM_THRESHOLD_LOW) {
				szone->flotsam_enabled = FALSE;
			}

			SZONE_UNLOCK(szone);

			if (range_to_deallocate.size) {
				/* Unmap the old hash table outside the lock. */
				deallocate_pages(szone, (void *)range_to_deallocate.address, range_to_deallocate.size, 0);
			}

#if TARGET_OS_EMBEDDED
#endif
			if (was_madvised_reusable && -1 == madvise(addr, size, MADV_FREE_REUSE)) {
#if DEBUG_MADVISE
				szone_error(szone, 0, "large_malloc madvise(..., MADV_FREE_REUSE) failed",
				    addr, "length=%d\n", size);
#endif
				/* Reclaim failed: back out the accounting, discard the
				 * block, and fall through to a fresh mapping. */
				SZONE_LOCK(szone);
				szone->num_large_objects_in_use--;
				szone->num_bytes_in_large_objects -= large_entry.size;

				large_entry_t *entry = large_entry_for_pointer_no_lock(szone, addr);
				if (NULL == entry) {
					szone_error(szone, 1, "entry for pointer being discarded from death-row vanished", addr, NULL);
					SZONE_UNLOCK(szone);
				} else {
					range_to_deallocate = large_entry_free_no_lock(szone, entry);
					SZONE_UNLOCK(szone);
					if (range_to_deallocate.size) {
						deallocate_pages(szone, (void *)range_to_deallocate.address, range_to_deallocate.size, 0);
					}
				}
			} else {
				/* Cache hit: contents are stale, so honor cleared_requested. */
				if (cleared_requested) {
					memset(addr, 0, size);
				}
				return addr;
			}
		} else {
			SZONE_UNLOCK(szone);
		}
	}

	range_to_deallocate.size = 0;
	range_to_deallocate.address = 0;
#endif

	/* Slow path: map fresh pages (zero-filled by the VM). */
	addr = allocate_pages(szone, size, alignment, szone->debug_flags, VM_MEMORY_MALLOC_LARGE);
	if (addr == NULL) {
		return NULL;
	}

	SZONE_LOCK(szone);
	if ((szone->num_large_objects_in_use + 1) * 4 > szone->num_large_entries) {
		large_entry_t *entries = large_entries_grow_no_lock(szone, &range_to_deallocate);
		if (entries == NULL) {
			SZONE_UNLOCK(szone);
			return NULL;
		}
	}

	large_entry.address = (vm_address_t)addr;
	large_entry.size = size;
	large_entry.did_madvise_reusable = FALSE;
	large_entry_insert_no_lock(szone, large_entry);

	szone->num_large_objects_in_use ++;
	szone->num_bytes_in_large_objects += size;
	SZONE_UNLOCK(szone);

	if (range_to_deallocate.size) {
		deallocate_pages(szone, (void *)range_to_deallocate.address, range_to_deallocate.size, 0);
	}
	return addr;
}
/*
 * Free a large allocation.  Small-enough blocks are parked on the "death
 * row" ring cache instead of being unmapped, evicting the oldest cached
 * block when the ring is full; blocks over the reserve limit are madvised
 * MADV_FREE_REUSABLE while cached.  Raises a zone error on an unknown
 * pointer or a block already on death row.
 */
static NOINLINE void
free_large(szone_t *szone, void *ptr)
{
	large_entry_t *entry;
	vm_range_t vm_range_to_deallocate;

	SZONE_LOCK(szone);
	entry = large_entry_for_pointer_no_lock(szone, ptr);
	if (entry) {
#if LARGE_CACHE
#ifndef MADV_CAN_REUSE
#define MADV_CAN_REUSE 9
#endif
		/* Cache only blocks under the size limit whose VM region supports
		 * the reuse protocol (probed with MADV_CAN_REUSE). */
		if (entry->size < LARGE_CACHE_SIZE_ENTRY_LIMIT &&
		    -1 != madvise((void *)(entry->address), entry->size, MADV_CAN_REUSE)) {
			int idx = szone->large_entry_cache_newest, stop_idx = szone->large_entry_cache_oldest;
			large_entry_t this_entry = *entry;	/* copy: lock is dropped below */
			boolean_t reusable = TRUE;
			boolean_t should_madvise = szone->large_entry_cache_reserve_bytes + this_entry.size > szone->large_entry_cache_reserve_limit;

			/* Walk the ring newest -> oldest; a duplicate means double free. */
			while (1) {
				if (szone->large_entry_cache[idx].address == entry->address) {
					szone_error(szone, 1, "pointer being freed already on death-row", ptr, NULL);
					SZONE_UNLOCK(szone);
					return;
				}
				if (idx == stop_idx)
					break;
				if (idx)
					idx--;
				else
					idx = LARGE_ENTRY_CACHE_SIZE - 1;
			}

			/* Drop the lock around the syscalls below. */
			SZONE_UNLOCK(szone);

			if (szone->debug_flags & SCALABLE_MALLOC_PURGEABLE) {
				/* Re-pin purgeable memory before caching it. */
				int state = VM_PURGABLE_NONVOLATILE;
				if (KERN_SUCCESS != vm_purgable_control(mach_task_self(), this_entry.address, VM_PURGABLE_SET_STATE, &state)) {
					malloc_printf("*** can't vm_purgable_control(..., VM_PURGABLE_SET_STATE) for large freed block at %p\n",
					    this_entry.address);
					reusable = FALSE;
				}
			}

			if (szone->large_legacy_reset_mprotect) {
				/* Legacy apps may have changed protections; reset them. */
				int err = mprotect((void *)(this_entry.address), this_entry.size, PROT_READ | PROT_WRITE);
				if (err) {
					malloc_printf("*** can't reset protection for large freed block at %p\n", this_entry.address);
					reusable = FALSE;
				}
			}

			if (should_madvise) {
				/* Over the reserve budget: let the kernel reclaim the pages
				 * while the block sits in the cache.  (DTrace probe first.) */
				MAGMALLOC_MADVFREEREGION((void *)szone, (void *)0, (void *)(this_entry.address), this_entry.size);
#if TARGET_OS_EMBEDDED
#endif
				if (-1 == madvise((void *)(this_entry.address), this_entry.size, MADV_FREE_REUSABLE)) {
#if DEBUG_MADVISE
					szone_error(szone, 0, "free_large madvise(..., MADV_FREE_REUSABLE) failed",
					    (void *)this_entry.address, "length=%d\n", this_entry.size);
#endif
					reusable = FALSE;
				}
			}

			SZONE_LOCK(szone);
			/* Re-validate: the entry table may have changed while unlocked. */
			entry = large_entry_for_pointer_no_lock(szone, ptr);
			if (NULL == entry) {
				szone_error(szone, 1, "entry for pointer being freed from death-row vanished", ptr, NULL);
				SZONE_UNLOCK(szone);
				return;
			}

			if (reusable) {
				int idx = szone->large_entry_cache_newest;
				vm_address_t addr;
				size_t adjsize;

				/* Decide whether adding to the ring evicts the oldest entry. */
				if (szone->large_entry_cache_newest == szone->large_entry_cache_oldest &&
				    0 == szone->large_entry_cache[idx].address) {
					/* Ring is empty. */
					addr = 0;
					adjsize = 0;
				} else {
					if (idx == LARGE_ENTRY_CACHE_SIZE - 1)
						idx = 0;
					else
						idx++;
					if (idx == szone->large_entry_cache_oldest) {
						/* Ring full: evict the oldest entry. */
						addr = szone->large_entry_cache[idx].address;
						adjsize = szone->large_entry_cache[idx].size;
						szone->large_entry_cache_bytes -= adjsize;
						if (!szone->large_entry_cache[idx].did_madvise_reusable)
							szone->large_entry_cache_reserve_bytes -= adjsize;
					} else {
						addr = 0;
						adjsize = 0;
					}
				}

				if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE))
					memset((void *)(entry->address), 0x55, entry->size);

				entry->did_madvise_reusable = should_madvise;
				if (!should_madvise)
					szone->large_entry_cache_reserve_bytes += entry->size;

				szone->large_entry_cache_bytes += entry->size;
				if (!szone->flotsam_enabled && szone->large_entry_cache_bytes > SZONE_FLOTSAM_THRESHOLD_HIGH) {
					szone->flotsam_enabled = TRUE;
				}

				szone->large_entry_cache[idx] = *entry;
				szone->large_entry_cache_newest = idx;

				szone->num_large_objects_in_use--;
				szone->num_bytes_in_large_objects -= entry->size;

				/* Remove from the hash table; the returned range is ignored
				 * because the pages stay mapped while cached. */
				(void)large_entry_free_no_lock(szone, entry);

				if (0 == addr) {
					/* No eviction: done. */
					SZONE_UNLOCK(szone);
					return;
				}

				/* Advance oldest past the evicted slot, then unmap the
				 * evicted block outside the lock. */
				if (szone->large_entry_cache_oldest == LARGE_ENTRY_CACHE_SIZE - 1)
					szone->large_entry_cache_oldest = 0;
				else
					szone->large_entry_cache_oldest++;

				SZONE_UNLOCK(szone);
				deallocate_pages(szone, (void *)addr, (size_t)adjsize, 0);
				return;
			} else {
				/* Not reusable: fall through to an ordinary unmap below. */
			}
		}
#endif
		szone->num_large_objects_in_use--;
		szone->num_bytes_in_large_objects -= entry->size;
		vm_range_to_deallocate = large_entry_free_no_lock(szone, entry);
	} else {
#if DEBUG_MALLOC
		large_debug_print(szone);
#endif
		szone_error(szone, 1, "pointer being freed was not allocated", ptr, NULL);
		SZONE_UNLOCK(szone);
		return;
	}
	SZONE_UNLOCK(szone);
	CHECK(szone, __PRETTY_FUNCTION__);
	if (vm_range_to_deallocate.address) {
#if DEBUG_MALLOC
		if (large_entry_for_pointer_no_lock(szone, (void *)vm_range_to_deallocate.address)) {
			malloc_printf("*** invariant broken: %p still in use num_large_entries=%d\n",
			    vm_range_to_deallocate.address, szone->num_large_entries);
			large_debug_print(szone);
			szone_sleep();
		}
#endif
		deallocate_pages(szone, (void *)vm_range_to_deallocate.address, (size_t)vm_range_to_deallocate.size, 0);
	}
}
/*
 * Shrink a large allocation in place by trimming its tail.  Updates the
 * entry's recorded size and the zone byte count, then unmaps the trimmed
 * pages.  Always returns `ptr` (even when the entry lookup fails, after
 * raising a zone error).
 */
static INLINE void *
large_try_shrink_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_good_size)
{
	size_t excess = old_size - new_good_size;

	if (excess == 0)
		return ptr;

	SZONE_LOCK(szone);
	large_entry_t *entry = large_entry_for_pointer_no_lock(szone, ptr);
	if (!entry) {
		szone_error(szone, 1, "large entry reallocated is not properly in table", ptr, NULL);
		SZONE_UNLOCK(szone);
		return ptr;
	}
	entry->address = (vm_address_t)ptr;
	entry->size = new_good_size;
	szone->num_bytes_in_large_objects -= excess;
	SZONE_UNLOCK(szone);

	/* Unmap the trimmed tail outside the lock. */
	deallocate_pages(szone, (void *)((uintptr_t)ptr + new_good_size), excess, 0);
	return ptr;
}
/*
 * Try to grow a large allocation in place by vm_allocate'ing the pages
 * immediately after it.  Returns 1 on success (entry and byte counts
 * updated), 0 when the next address is already a tracked allocation, the VM
 * refuses the placement, or the entry has vanished.
 */
static INLINE int
large_try_realloc_in_place(szone_t *szone, void *ptr, size_t old_size, size_t new_size)
{
	vm_address_t addr = (vm_address_t)ptr + old_size;
	large_entry_t *large_entry;
	kern_return_t err;

	SZONE_LOCK(szone);
	large_entry = large_entry_for_pointer_no_lock(szone, (void *)addr);
	SZONE_UNLOCK(szone);

	if (large_entry) {
		/* The space past this block is another tracked allocation. */
		return 0;
	}

	/* NOTE(review): the lock is dropped before vm_allocate, so another
	 * thread could in principle claim the adjacent range in the window —
	 * presumably vm_allocate failing (non-anywhere placement) covers that;
	 * confirm. */
	new_size = round_page(new_size);
	err = vm_allocate(mach_task_self(), &addr, new_size - old_size, VM_MAKE_TAG(VM_MEMORY_REALLOC));
	if (err != KERN_SUCCESS) {
		return 0;
	}

	SZONE_LOCK(szone);
	large_entry = large_entry_for_pointer_no_lock(szone, ptr);
	if (!large_entry) {
		szone_error(szone, 1, "large entry reallocated is not properly in table", ptr, NULL);
		SZONE_UNLOCK(szone);
		return 0;
	}

	large_entry->address = (vm_address_t)ptr;
	large_entry->size = new_size;
	szone->num_bytes_in_large_objects += new_size - old_size;
	SZONE_UNLOCK(szone);

	return 1;
}
/*
 * Zone entry point for free().  Routes the pointer to the tiny, small, or
 * large allocator by region lookup, validating alignment and rejecting
 * pointers into allocator metadata along the way.  free(NULL) is a no-op.
 */
static NOINLINE void
szone_free(szone_t *szone, void *ptr)
{
	region_t region;

#if DEBUG_MALLOC
	if (LOG(szone, ptr))
		malloc_printf("in szone_free with %p\n", ptr);
#endif
	if (!ptr)
		return;

	/* Every allocation is at least TINY_QUANTUM aligned. */
	if ((uintptr_t)ptr & (TINY_QUANTUM - 1)) {
		szone_error(szone, 1, "Non-aligned pointer being freed", ptr, NULL);
		return;
	}

	region = tiny_region_for_ptr_no_lock(szone, ptr);
	if (region != NULL) {
		if (TINY_INDEX_FOR_PTR(ptr) >= NUM_TINY_BLOCKS) {
			szone_error(szone, 1, "Pointer to metadata being freed", ptr, NULL);
			return;
		}
		free_tiny(szone, ptr, region, 0);
		return;
	}

	/* Not tiny: small blocks need the stricter SMALL_QUANTUM alignment. */
	if ((uintptr_t)ptr & (SMALL_QUANTUM - 1)) {
		szone_error(szone, 1, "Non-aligned pointer being freed (2)", ptr, NULL);
		return;
	}

	region = small_region_for_ptr_no_lock(szone, ptr);
	if (region != NULL) {
		if (SMALL_META_INDEX_FOR_PTR(ptr) >= NUM_SMALL_BLOCKS) {
			szone_error(szone, 1, "Pointer to metadata being freed (2)", ptr, NULL);
			return;
		}
		free_small(szone, ptr, region, 0);
		return;
	}

	/* Only page-aligned pointers can be large allocations. */
	if ((uintptr_t)ptr & (vm_page_size - 1)) {
		szone_error(szone, 1, "non-page-aligned, non-allocated pointer being freed", ptr, NULL);
		return;
	}
	free_large(szone, ptr);
}
/*
 * Zone entry point for free() when the caller also supplies the allocation
 * size.  The size selects the allocator directly, skipping the region
 * lookups szone_free() must do; alignment and metadata checks still apply.
 */
static NOINLINE void
szone_free_definite_size(szone_t *szone, void *ptr, size_t size)
{
#if DEBUG_MALLOC
	if (LOG(szone, ptr))
		malloc_printf("in szone_free_definite_size with %p\n", ptr);

	if (0 == size) {
		szone_error(szone, 1, "pointer of size zero being freed", ptr, NULL);
		return;
	}
#endif
	if (!ptr)
		return;

	if ((uintptr_t)ptr & (TINY_QUANTUM - 1)) {
		szone_error(szone, 1, "Non-aligned pointer being freed", ptr, NULL);
		return;
	}

	/* Tiny-sized? */
	if (size <= (NUM_TINY_SLOTS - 1)*TINY_QUANTUM) {
		if (TINY_INDEX_FOR_PTR(ptr) >= NUM_TINY_BLOCKS) {
			szone_error(szone, 1, "Pointer to metadata being freed", ptr, NULL);
			return;
		}
		free_tiny(szone, ptr, TINY_REGION_FOR_PTR(ptr), size);
		return;
	}

	if ((uintptr_t)ptr & (SMALL_QUANTUM - 1)) {
		szone_error(szone, 1, "Non-aligned pointer being freed (2)", ptr, NULL);
		return;
	}

	/* Small-sized? */
	if (size <= szone->large_threshold) {
		if (SMALL_META_INDEX_FOR_PTR(ptr) >= NUM_SMALL_BLOCKS) {
			szone_error(szone, 1, "Pointer to metadata being freed (2)", ptr, NULL);
			return;
		}
		free_small(szone, ptr, SMALL_REGION_FOR_PTR(ptr), size);
		return;
	}

	/* Must be large. */
	if ((uintptr_t)ptr & (vm_page_size - 1)) {
		szone_error(szone, 1, "non-page-aligned, non-allocated pointer being freed", ptr, NULL);
		return;
	}
	free_large(szone, ptr);
}
/*
 * Central allocation dispatcher: route a request of `size` bytes to the tiny,
 * small, or large sub-allocator by the zone's size thresholds.
 * cleared_requested non-zero means the caller requires zero-filled memory
 * (the calloc path); the sub-allocators honor that flag.
 * Returns NULL on failure.
 */
static NOINLINE void *
szone_malloc_should_clear(szone_t *szone, size_t size, boolean_t cleared_requested)
{
    void *ptr;
    msize_t msize;

    if (size <= (NUM_TINY_SLOTS - 1)*TINY_QUANTUM) {
        /* Round up to whole tiny quanta; a zero-byte request still gets one quantum. */
        msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1);
        if (!msize)
            msize = 1;
        ptr = tiny_malloc_should_clear(szone, msize, cleared_requested);
    } else if (size <= szone->large_threshold) {
        msize = SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1);
        if (!msize)
            msize = 1;
        ptr = small_malloc_should_clear(szone, msize, cleared_requested);
    } else {
        /* size > large_threshold here, so num_pages == 0 indicates round_page
           overflowed — fail rather than allocate a wrapped-around size. */
        size_t num_pages = round_page(size) >> vm_page_shift;
        if (num_pages == 0)
            ptr = 0;
        else
            ptr = large_malloc(szone, num_pages, 0, cleared_requested);
    }
#if DEBUG_MALLOC
    if (LOG(szone, ptr))
        malloc_printf("szone_malloc returned %p\n", ptr);
#endif
    /* Scribble non-zeroed allocations with 0xaa to expose clients that read
       uninitialized memory. */
    if ((szone->debug_flags & SCALABLE_MALLOC_DO_SCRIBBLE) && ptr && !cleared_requested && size)
        memset(ptr, 0xaa, size);
    return ptr;
}
/*
 * malloc entry point for this zone: a plain allocation with no zero-fill
 * requirement.  All routing logic lives in szone_malloc_should_clear.
 */
static NOINLINE void *
szone_malloc(szone_t *szone, size_t size)
{
    return szone_malloc_should_clear(szone, size, 0);
}
/*
 * calloc entry point: allocate num_items * size bytes, zero-filled.
 *
 * The multiplication is overflow-checked before use.  Fast path: when
 * num_items <= 1, or when neither operand has bits in the upper half of a
 * size_t, the product cannot overflow and no wide multiply is needed.
 * Otherwise the product is recomputed at double width and rejected (NULL)
 * if the high half is non-zero.
 */
static NOINLINE void *
szone_calloc(szone_t *szone, size_t num_items, size_t size)
{
    size_t total_bytes = num_items * size;

    /* num_items <= 1 can never overflow. */
    if (num_items > 1) {
#if __LP64__
        /* Overflow is only possible if at least one operand has high-32 bits set. */
        if ((num_items | size) & 0xffffffff00000000ul) {
            /* 64-bit operands: check the high 64 bits of the 128-bit product. */
            __uint128_t product = ((__uint128_t)num_items) * ((__uint128_t)size);
            if ((uint64_t)(product >> 64)) return NULL;
        }
#else
        if ((num_items | size) & 0xffff0000ul) {
            /* 32-bit operands: check the high 32 bits of the 64-bit product. */
            uint64_t product = ((uint64_t)num_items) * ((uint64_t)size);
            if ((uint32_t)(product >> 32)) return NULL;
        }
#endif
    }

    /* cleared_requested = 1: caller gets zero-filled memory. */
    return szone_malloc_should_clear(szone, total_bytes, 1);
}
/*
 * valloc entry point: page-aligned allocation.  Requests at or below the
 * large threshold go through szone_memalign asking for page alignment;
 * larger requests go straight to large_malloc, which is page-granular by
 * construction.
 */
static NOINLINE void *
szone_valloc(szone_t *szone, size_t size)
{
    void *ptr;

    if (size <= szone->large_threshold) {
        ptr = szone_memalign(szone, vm_page_size, size);
    } else {
        size_t num_pages;

        num_pages = round_page(size) >> vm_page_shift;
        ptr = large_malloc(szone, num_pages, 0, 0);
    }

#if DEBUG_MALLOC
    if (LOG(szone, ptr))
        malloc_printf("szone_valloc returned %p\n", ptr);
#endif
    return ptr;
}
/*
 * Look up `ptr` in the zone's large-entry table and return the recorded
 * allocation size, or 0 when no large entry covers it.  The "_no_lock"
 * table probe requires the caller to hold the zone lock, which we take
 * and release here.
 */
static NOINLINE size_t
szone_size_try_large(szone_t *szone, const void *ptr)
{
    large_entry_t *found;
    size_t result = 0;

    SZONE_LOCK(szone);
    found = large_entry_for_pointer_no_lock(szone, ptr);
    if (found)
        result = found->size;
    SZONE_UNLOCK(szone);

#if DEBUG_MALLOC
    if (LOG(szone, ptr)) {
        malloc_printf("szone_size for %p returned %d\n", ptr, (unsigned)result);
    }
#endif
    return result;
}
/*
 * malloc_size() for this zone: return the usable size of an allocated block,
 * or 0 when the pointer was not allocated (live) from this zone.  Misaligned
 * pointers, free blocks, and pointers into region metadata all report 0.
 *
 * Blocks parked in a magazine's one-deep "last free" cache are logically
 * free even though their in-use metadata may still say otherwise, so they
 * must also report 0; for the Depot magazine every per-CPU magazine's cache
 * is scanned.
 */
static NOINLINE size_t
szone_size(szone_t *szone, const void *ptr)
{
    boolean_t is_free;
    msize_t msize, msize_and_free;

    if (!ptr)
        return 0;
#if DEBUG_MALLOC
    if (LOG(szone, ptr)) {
        malloc_printf("in szone_size for %p (szone=%p)\n", ptr, szone);
    }
#endif

    /* Not even tiny-aligned: cannot be ours. */
    if ((uintptr_t)ptr & (TINY_QUANTUM - 1))
        return 0;

    if (tiny_region_for_ptr_no_lock(szone, ptr)) {
        if (TINY_INDEX_FOR_PTR(ptr) >= NUM_TINY_BLOCKS)
            return 0;
        msize = get_tiny_meta_header(ptr, &is_free);
        if (is_free)
            return 0;
#if TINY_CACHE
        {
            mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(ptr));
            if (DEPOT_MAGAZINE_INDEX != mag_index) {
                magazine_t *tiny_mag_ptr = &(szone->tiny_magazines[mag_index]);

                /* mag_last_free packs extra state in the low quantum bits, so
                   mask before comparing.  Only small blocks (msize below
                   TINY_QUANTUM) are eligible for this cache — presumably
                   because msize shares those low bits; confirm against the
                   tiny cache store site. */
                if (msize < TINY_QUANTUM && ptr == (void *)((uintptr_t)(tiny_mag_ptr->mag_last_free) & ~ (TINY_QUANTUM - 1)))
                    return 0;
            } else {
                /* Region lives in the Depot: the block could sit in any
                   magazine's last-free cache. */
                for (mag_index = 0; mag_index < szone->num_tiny_magazines; mag_index++) {
                    magazine_t *tiny_mag_ptr = &(szone->tiny_magazines[mag_index]);

                    if (msize < TINY_QUANTUM && ptr == (void *)((uintptr_t)(tiny_mag_ptr->mag_last_free) & ~ (TINY_QUANTUM - 1)))
                        return 0;
                }
            }
        }
#endif
        return TINY_BYTES_FOR_MSIZE(msize);
    }

    if ((uintptr_t)ptr & (SMALL_QUANTUM - 1))
        return 0;

    if (small_region_for_ptr_no_lock(szone, ptr)) {
        if (SMALL_META_INDEX_FOR_PTR(ptr) >= NUM_SMALL_BLOCKS)
            return 0;
        msize_and_free = *SMALL_METADATA_FOR_PTR(ptr);
        if (msize_and_free & SMALL_IS_FREE)
            return 0;
#if SMALL_CACHE
        {
            mag_index_t mag_index = MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(ptr));
            if (DEPOT_MAGAZINE_INDEX != mag_index) {
                magazine_t *small_mag_ptr = &(szone->small_magazines[mag_index]);

                if (ptr == (void *)((uintptr_t)(small_mag_ptr->mag_last_free) & ~ (SMALL_QUANTUM - 1)))
                    return 0;
            } else {
                for (mag_index = 0; mag_index < szone->num_small_magazines; mag_index++) {
                    magazine_t *small_mag_ptr = &(szone->small_magazines[mag_index]);

                    if (ptr == (void *)((uintptr_t)(small_mag_ptr->mag_last_free) & ~ (SMALL_QUANTUM - 1)))
                        return 0;
                }
            }
        }
#endif
        return SMALL_BYTES_FOR_MSIZE(msize_and_free);
    }

    /* Large blocks are page-aligned; anything else cannot be a large entry. */
    if ((uintptr_t)ptr & (vm_page_size - 1))
        return 0;

    return szone_size_try_large(szone, ptr);
}
/*
 * realloc entry point.  Strategy, in order of preference:
 *   1. NULL ptr behaves as malloc; new_size 0 frees and returns a minimal
 *      allocation (so the result is always a valid pointer).
 *   2. If the rounded ("good") new size equals the current size, keep the
 *      block as-is.
 *   3. When old and new sizes fall in the same allocator class, try to
 *      shrink in place (only when shrinking to half or less — smaller
 *      shrinks aren't worth the metadata churn) or grow in place.
 *   4. Otherwise allocate fresh, copy the overlapping bytes, and free the
 *      original.  Copies at or above vm_copy_threshold try the VM-level
 *      page remap first, falling back to memcpy if vm_copy fails.
 */
static NOINLINE void *
szone_realloc(szone_t *szone, void *ptr, size_t new_size)
{
    size_t old_size, new_good_size, valid_size;
    void *new_ptr;

#if DEBUG_MALLOC
    if (LOG(szone, ptr)) {
        malloc_printf("in szone_realloc for %p, %d\n", ptr, (unsigned)new_size);
    }
#endif
    if (NULL == ptr) {
        /* realloc(NULL, size) is malloc(size). */
        return szone_malloc(szone, new_size);
    } else if (0 == new_size) {
        /* realloc(ptr, 0): free, but still hand back a distinct live pointer. */
        szone_free(szone, ptr);
        return szone_malloc(szone, 1);
    }

    old_size = szone_size(szone, ptr);
    if (!old_size) {
        szone_error(szone, 1, "pointer being reallocated was not allocated", ptr, NULL);
        return NULL;
    }

    new_good_size = szone_good_size(szone, new_size);
    if (new_good_size == old_size) {
        /* Already exactly the right rounded size. */
        return ptr;
    }

    if (new_good_size <= (NUM_TINY_SLOTS - 1) * TINY_QUANTUM) {
        /* New size is tiny; in-place tricks only apply if old is tiny too. */
        if (old_size <= (NUM_TINY_SLOTS - 1) * TINY_QUANTUM) {
            if (new_good_size <= (old_size >> 1)) {
                /* Shrinking by at least half: give the tail back. */
                return tiny_try_shrink_in_place(szone, ptr, old_size, new_good_size);
            } else if (new_good_size <= old_size) {
                /* Modest shrink: not worth it, keep the block. */
                return ptr;
            } else if (tiny_try_realloc_in_place(szone, ptr, old_size, new_good_size)) {
                return ptr;
            }
        }
    } else if (new_good_size <= szone->large_threshold) {
        /* New size is small-class; same dance when old is small-class too. */
        if ((NUM_TINY_SLOTS - 1) * TINY_QUANTUM < old_size && old_size <= szone->large_threshold) {
            if (new_good_size <= (old_size >> 1)) {
                return small_try_shrink_in_place(szone, ptr, old_size, new_good_size);
            } else if (new_good_size <= old_size) {
                return ptr;
            } else if (small_try_realloc_in_place(szone, ptr, old_size, new_good_size)) {
                return ptr;
            }
        }
    } else if (!(szone->debug_flags & SCALABLE_MALLOC_PURGEABLE) && (old_size > szone->large_threshold) &&
               (new_good_size > szone->large_threshold)) {
        /* Large-to-large, and not a purgeable zone (purgeable state would be
           lost by in-place resizing). */
        if (new_good_size <= (old_size >> 1)) {
            return large_try_shrink_in_place(szone, ptr, old_size, new_good_size);
        } else if (new_good_size <= old_size) {
            return ptr;
        } else if (large_try_realloc_in_place(szone, ptr, old_size, new_good_size)) {
            return ptr;
        }
    }

    /* Cross-class resize.  A big shrink (to half or less) falls through to a
       real reallocation; a modest shrink keeps the existing block. */
    if (new_good_size <= (old_size >> 1)) {
        /* Intentionally empty: proceed to allocate-copy-free below. */
    } else if (new_good_size <= old_size) {
        return ptr;
    }

    new_ptr = szone_malloc(szone, new_size);
    if (new_ptr == NULL)
        return NULL;

    /* Copy only the bytes valid in both blocks.  vm_copy returns non-zero on
       failure, in which case fall back to memcpy. */
    valid_size = MIN(old_size, new_size);
    if ((valid_size < szone->vm_copy_threshold) ||
        vm_copy(mach_task_self(), (vm_address_t)ptr, valid_size, (vm_address_t)new_ptr))
        memcpy(new_ptr, ptr, valid_size);
    szone_free(szone, ptr);

#if DEBUG_MALLOC
    if (LOG(szone, ptr)) {
        malloc_printf("szone_realloc returned %p for %d\n", new_ptr, (unsigned)new_size);
    }
#endif
    return new_ptr;
}
/*
 * memalign for this zone: return a block of `size` bytes aligned to
 * `alignment`.  alignment is presumably a power of two validated by the
 * posix_memalign/malloc_zone_memalign wrapper — confirm at the call site.
 *
 * Strategy for tiny/small classes: over-allocate a span of size+alignment-1
 * bytes, then carve it into [pad][aligned block][waste] by rewriting the
 * region metadata so pad and waste become independent blocks that can be
 * freed back, leaving only the aligned block live.  Classes whose natural
 * alignment already satisfies the request skip the carving entirely.
 */
static NOINLINE void *
szone_memalign(szone_t *szone, size_t alignment, size_t size)
{
    /* Reject requests where size + alignment would wrap. */
    if ((size + alignment) < size) return NULL;

    size_t span = size + alignment - 1;

    if (alignment <= TINY_QUANTUM) {
        /* Every block is at least TINY_QUANTUM aligned already. */
        return szone_malloc(szone, size);

    } else if (span <= (NUM_TINY_SLOTS - 1)*TINY_QUANTUM) {
        msize_t mspan = TINY_MSIZE_FOR_BYTES(span + TINY_QUANTUM - 1);
        void *p = szone_malloc(szone, span);

        if (NULL == p)
            return NULL;

        size_t offset = ((uintptr_t) p) & (alignment - 1);    /* p % alignment */
        size_t pad = (0 == offset) ? 0 : alignment - offset;  /* p + pad is aligned */

        msize_t msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1);
        msize_t mpad = TINY_MSIZE_FOR_BYTES(pad + TINY_QUANTUM - 1);
        msize_t mwaste = mspan - msize - mpad;

        if (mpad > 0) {
            void *q = (void *)(((uintptr_t) p) + pad);

            /* Take the magazine lock while rewriting block metadata:
               marking q in-use splits the span into a pad prefix at p and
               the aligned block at q, so the subsequent szone_free(p)
               releases only the prefix. */
            magazine_t *tiny_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->tiny_magazines,
                                                                        REGION_TRAILER_FOR_TINY_REGION(TINY_REGION_FOR_PTR(p)),
                                                                        MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(p)));
            set_tiny_meta_header_in_use(q, msize);
            tiny_mag_ptr->mag_num_objects++;

            /* Mark the start of the trailing waste so q's block ends at the
               right place before the waste is carved off below. */
            if (mwaste > 0)
                BITARRAY_SET(TINY_INUSE_FOR_HEADER(TINY_BLOCK_HEADER_FOR_PTR(q)), TINY_INDEX_FOR_PTR(q) + msize);
            SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);

            szone_free(szone, p); /* releases just the pad prefix */
            p = q;
        }
        if (mwaste > 0) {
            void *q = (void *)(((uintptr_t) p) + TINY_BYTES_FOR_MSIZE(msize));

            /* Give the trailing waste its own in-use header, then free it. */
            magazine_t *tiny_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->tiny_magazines,
                                                                        REGION_TRAILER_FOR_TINY_REGION(TINY_REGION_FOR_PTR(p)),
                                                                        MAGAZINE_INDEX_FOR_TINY_REGION(TINY_REGION_FOR_PTR(p)));
            set_tiny_meta_header_in_use(q, mwaste);
            tiny_mag_ptr->mag_num_objects++;
            SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);

            szone_free(szone, q);
        }
        return p;

    } else if ((NUM_TINY_SLOTS - 1)*TINY_QUANTUM < size && alignment <= SMALL_QUANTUM) {
        /* Small-class allocations are naturally SMALL_QUANTUM aligned. */
        return szone_malloc(szone, size);

    } else if (span <= szone->large_threshold) {
        if (size <= (NUM_TINY_SLOTS - 1)*TINY_QUANTUM) {
            /* Bump a tiny-sized request into the small class so the carving
               below can use small metadata; recompute the span to match. */
            size = (NUM_TINY_SLOTS - 1)*TINY_QUANTUM + TINY_QUANTUM;
            span = size + alignment - 1;
        }

        msize_t mspan = SMALL_MSIZE_FOR_BYTES(span + SMALL_QUANTUM - 1);
        void *p = szone_malloc(szone, span);

        if (NULL == p)
            return NULL;

        size_t offset = ((uintptr_t) p) & (alignment - 1);
        size_t pad = (0 == offset) ? 0 : alignment - offset;

        msize_t msize = SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1);
        msize_t mpad = SMALL_MSIZE_FOR_BYTES(pad + SMALL_QUANTUM - 1);
        msize_t mwaste = mspan - msize - mpad;

        if (mpad > 0) {
            void *q = (void *)(((uintptr_t) p) + pad);

            /* Split the span into [pad at p][block+waste at q], then free
               just the pad prefix. */
            magazine_t *small_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->small_magazines,
                                                                         REGION_TRAILER_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(p)),
                                                                         MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(p)));
            small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(p), SMALL_META_INDEX_FOR_PTR(p), mpad);
            small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(q), SMALL_META_INDEX_FOR_PTR(q), msize + mwaste);
            small_mag_ptr->mag_num_objects++;
            SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);

            szone_free(szone, p);
            p = q;
        }
        if (mwaste > 0) {
            void *q = (void *)(((uintptr_t) p) + SMALL_BYTES_FOR_MSIZE(msize));

            /* Shrink p to msize and free the trailing waste block. */
            magazine_t *small_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->small_magazines,
                                                                         REGION_TRAILER_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(p)),
                                                                         MAGAZINE_INDEX_FOR_SMALL_REGION(SMALL_REGION_FOR_PTR(p)));
            small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(p), SMALL_META_INDEX_FOR_PTR(p), msize);
            small_meta_header_set_in_use(SMALL_META_HEADER_FOR_PTR(q), SMALL_META_INDEX_FOR_PTR(q), mwaste);
            small_mag_ptr->mag_num_objects++;
            SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);

            szone_free(szone, q);
        }
        return p;

    } else if (szone->large_threshold < size && alignment <= vm_page_size) {
        /* Large blocks are page-aligned already. */
        return szone_malloc(szone, size);

    } else {
        /* Large allocation with stronger-than-page alignment: large_malloc
           takes the alignment as a shift count. */
        size_t num_pages = round_page(MAX(szone->large_threshold + 1, size)) >> vm_page_shift;
        void *p;

        if (num_pages == 0)  /* overflowed */
            p = NULL;
        else
            p = large_malloc(szone, num_pages, MAX(vm_page_shift, __builtin_ctz(alignment)), 0);
        return p;
    }
}
/*
 * Allocate up to `count` blocks of `size` bytes each, storing them into
 * `results`.  Only tiny-class sizes are batched (anything bigger returns 0
 * and the caller falls back to one-at-a-time malloc).  Returns the number of
 * blocks actually obtained — possibly fewer than requested, since only the
 * current thread's magazine free list is drained under a single lock hold.
 */
static NOINLINE unsigned
szone_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count)
{
    msize_t msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1);
    unsigned found = 0;
    mag_index_t mag_index = mag_get_thread_index(szone);
    magazine_t *tiny_mag_ptr = &(szone->tiny_magazines[mag_index]);

    /* Batch fills are tiny-only. */
    if (size > (NUM_TINY_SLOTS - 1)*TINY_QUANTUM)
        return 0;

    /* Zero-byte requests still consume one quantum. */
    if (!msize)
        msize = 1;

    CHECK(szone, __PRETTY_FUNCTION__);

    /* One lock acquisition amortized over the whole batch. */
    SZONE_MAGAZINE_PTR_LOCK(szone, tiny_mag_ptr);
    while (found < count) {
        void *ptr = tiny_malloc_from_free_list(szone, tiny_mag_ptr, mag_index, msize);
        if (!ptr)
            break;

        *results++ = ptr;
        found++;
    }
    SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
    return found;
}
/*
 * Free a batch of pointers.  Fast path: consecutive pointers from the same
 * tiny region are freed under a single cached magazine lock, NULLing each
 * slot as it is handled.  The fast path bails out (break) on anything it
 * cannot handle cheaply — a non-tiny pointer, a metadata-range index, or an
 * already-free block — and the cleanup loop at the bottom routes every
 * remaining non-NULL slot through the full szone_free, which performs
 * complete validation and error reporting.
 */
static NOINLINE void
szone_batch_free(szone_t *szone, void **to_be_freed, unsigned count)
{
    unsigned cc = 0;
    void *ptr;
    region_t tiny_region = NULL;
    boolean_t is_free;
    msize_t msize;
    magazine_t *tiny_mag_ptr = NULL;
    mag_index_t mag_index = -1;

    if (!count)
        return;

    CHECK(szone, __PRETTY_FUNCTION__);
    while (cc < count) {
        ptr = to_be_freed[cc];
        if (ptr) {
            /* Region changed (or first iteration): swap the cached lock. */
            if (NULL == tiny_region || tiny_region != TINY_REGION_FOR_PTR(ptr)) {
                if (tiny_mag_ptr) {
                    SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
                    tiny_mag_ptr = NULL;
                }

                tiny_region = tiny_region_for_ptr_no_lock(szone, ptr);

                if (tiny_region) {
                    tiny_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->tiny_magazines,
                                                                    REGION_TRAILER_FOR_TINY_REGION(tiny_region),
                                                                    MAGAZINE_INDEX_FOR_TINY_REGION(tiny_region));
                    mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(tiny_region);
                }
            }
            if (tiny_region) {
                /* Suspicious pointers fall through to the szone_free loop
                   below, which reports the error properly. */
                if (TINY_INDEX_FOR_PTR(ptr) >= NUM_TINY_BLOCKS)
                    break;
                msize = get_tiny_meta_header(ptr, &is_free);
                if (is_free)
                    break;

                /* tiny_free_no_lock returning false means it already dropped
                   the magazine lock (NOTE(review): presumably because the
                   region was recirculated — confirm against its definition),
                   so forget the cached lock/region. */
                if (!tiny_free_no_lock(szone, tiny_mag_ptr, mag_index, tiny_region, ptr, msize)) {
                    tiny_mag_ptr = NULL;
                    tiny_region = NULL;
                }
                to_be_freed[cc] = NULL;
            } else {
                /* Not a tiny pointer: leave it for the slow loop. */
                break;
            }
        }
        cc++;
    }

    if (tiny_mag_ptr) {
        SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
        tiny_mag_ptr = NULL;
    }

    CHECK(szone, __PRETTY_FUNCTION__);
    /* Slow path: anything the fast loop did not consume. */
    while (count--) {
        ptr = to_be_freed[count];
        if (ptr)
            szone_free(szone, ptr);
    }
}
/*
 * Tear down the whole zone and return every page to the system.  No locking
 * discipline beyond the large-cache snapshot: the caller must guarantee no
 * other thread is using the zone.
 *
 * Order: drain the large "death row" cache, free live large entries and the
 * large-entry table, free all tiny/small regions via the region hash rings,
 * free any dynamically grown hash rings, delete the per-CPU pthread key,
 * free the magazine arrays (index -1 is the Depot, hence the [-1] base),
 * and finally the szone structure itself.
 */
static void
szone_destroy(szone_t *szone)
{
    size_t index;
    large_entry_t *large;
    vm_range_t range_to_deallocate;

#if LARGE_CACHE
    SZONE_LOCK(szone);

    /* Stop pressure-relief from racing with teardown, snapshot the cache,
       and empty it under the lock; pages are released after unlocking. */
    szone->flotsam_enabled = FALSE;

    int idx = szone->large_entry_cache_oldest, idx_max = szone->large_entry_cache_newest;
    large_entry_t local_entry_cache[LARGE_ENTRY_CACHE_SIZE];

    memcpy((void *)local_entry_cache, (void *)szone->large_entry_cache, sizeof(local_entry_cache));

    szone->large_entry_cache_oldest = szone->large_entry_cache_newest = 0;
    szone->large_entry_cache[0].address = 0x0;
    szone->large_entry_cache[0].size = 0;
    szone->large_entry_cache_bytes = 0;
    szone->large_entry_cache_reserve_bytes = 0;

    SZONE_UNLOCK(szone);

    /* Ring buffer walk from oldest to newest, wrapping at the end... */
    while (idx != idx_max) {
        deallocate_pages(szone, (void *) local_entry_cache[idx].address, local_entry_cache[idx].size, 0);
        if (++idx == LARGE_ENTRY_CACHE_SIZE) idx = 0;
    }
    /* ...plus the newest slot itself, when occupied. */
    if (0 != local_entry_cache[idx].address && 0 != local_entry_cache[idx].size) {
        deallocate_pages(szone, (void *) local_entry_cache[idx].address, local_entry_cache[idx].size, 0);
    }
#endif

    /* Release all live large allocations, then the entry table they lived in. */
    index = szone->num_large_entries;
    while (index--) {
        large = szone->large_entries + index;
        if (large->address) {
            deallocate_pages(szone, (void *)(large->address), large->size, szone->debug_flags);
        }
    }
    large_entries_free_no_lock(szone, szone->large_entries, szone->num_large_entries, &range_to_deallocate);
    if (range_to_deallocate.size)
        deallocate_pages(szone, (void *)range_to_deallocate.address, (size_t)range_to_deallocate.size, 0);

    /* Release every tiny/small region still present in the hash rings,
       skipping empty and tombstoned slots. */
    for (index = 0; index < szone->tiny_region_generation->num_regions_allocated; ++index)
        if ((HASHRING_OPEN_ENTRY != szone->tiny_region_generation->hashed_regions[index]) &&
            (HASHRING_REGION_DEALLOCATED != szone->tiny_region_generation->hashed_regions[index]))
            deallocate_pages(szone, szone->tiny_region_generation->hashed_regions[index], TINY_REGION_SIZE, 0);

    for (index = 0; index < szone->small_region_generation->num_regions_allocated; ++index)
        if ((HASHRING_OPEN_ENTRY != szone->small_region_generation->hashed_regions[index]) &&
            (HASHRING_REGION_DEALLOCATED != szone->small_region_generation->hashed_regions[index]))
            deallocate_pages(szone, szone->small_region_generation->hashed_regions[index], SMALL_REGION_SIZE, 0);

    /* If the hash rings outgrew the embedded initial arrays, they were
       page-allocated and must be released too. */
    if (szone->tiny_region_generation->hashed_regions != szone->initial_tiny_regions) {
        size_t size = round_page(szone->tiny_region_generation->num_regions_allocated * sizeof(region_t));
        deallocate_pages(szone, szone->tiny_region_generation->hashed_regions, size, 0);
    }
    if (szone->small_region_generation->hashed_regions != szone->initial_small_regions) {
        size_t size = round_page(szone->small_region_generation->num_regions_allocated * sizeof(region_t));
        deallocate_pages(szone, szone->small_region_generation->hashed_regions, size, 0);
    }

    if (szone->cpu_id_key != (pthread_key_t) -1)
        (void)pthread_key_delete(szone->cpu_id_key);

    /* Magazine arrays start at the Depot magazine at index -1; the guard-page
       flag must match how they were allocated. */
    deallocate_pages(szone, (void *)&(szone->tiny_magazines[-1]), TINY_MAGAZINE_PAGED_SIZE, SCALABLE_MALLOC_ADD_GUARD_PAGES);
    deallocate_pages(szone, (void *)&(szone->small_magazines[-1]), SMALL_MAGAZINE_PAGED_SIZE, SCALABLE_MALLOC_ADD_GUARD_PAGES);

    deallocate_pages(szone, (void *)szone, SZONE_PAGED_SIZE, 0);
}
/*
 * malloc_good_size() for this zone: the number of bytes that would actually
 * be allocated for a request of `size` bytes.  Tiny and small requests round
 * up to their quantum (minimum one quantum); large requests round up to a
 * page.  Returns (size_t)-1 when rounding to a page would overflow.
 */
static NOINLINE size_t
szone_good_size(szone_t *szone, size_t size)
{
    msize_t msize;

    if (size <= (NUM_TINY_SLOTS - 1) * TINY_QUANTUM) {
        msize = TINY_MSIZE_FOR_BYTES(size + TINY_QUANTUM - 1);
        if (!msize)
            msize = 1;
        return TINY_BYTES_FOR_MSIZE(msize);
    }

    if (size <= szone->large_threshold) {
        msize = SMALL_MSIZE_FOR_BYTES(size + SMALL_QUANTUM - 1);
        if (!msize)
            msize = 1;
        return SMALL_BYTES_FOR_MSIZE(msize);
    }

    /* round_page wrapped below the input: overflow. */
    if (size > round_page(size))
        return (size_t)(-1LL);

#if DEBUG_MALLOC
    if (size == 0)
        malloc_printf("szone_good_size() invariant broken %y\n", size);
#endif
    return round_page(size);
}
/* Tunables for periodic heap verification (see szone_check):
 * szone_check_counter increments on every check call; verification is
 * skipped until the counter reaches szone_check_start, and thereafter runs
 * only on every szone_check_modulo-th call. */
unsigned szone_check_counter = 0;
unsigned szone_check_start = 0;
unsigned szone_check_modulo = 1;
/*
 * Full heap verification: walk every tiny and small region (each checked
 * under its magazine lock) and every tiny/small free list.  On the first
 * inconsistency, clear CHECK_REGIONS so the zone stops re-checking, report
 * via szone_error, and return 0.  Returns 1 when everything is consistent.
 * `function` names the caller for the diagnostic messages.
 */
static NOINLINE boolean_t
szone_check_all(szone_t *szone, const char *function)
{
    size_t index;

    /* Tiny regions, skipping tombstoned hash slots. */
    for (index = 0; index < szone->tiny_region_generation->num_regions_allocated; ++index) {
        region_t tiny = szone->tiny_region_generation->hashed_regions[index];

        if (HASHRING_REGION_DEALLOCATED == tiny)
            continue;

        if (tiny) {
            magazine_t *tiny_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->tiny_magazines,
                                                                        REGION_TRAILER_FOR_TINY_REGION(tiny), MAGAZINE_INDEX_FOR_TINY_REGION(tiny));

            if (!tiny_check_region(szone, tiny)) {
                SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
                szone->debug_flags &= ~ CHECK_REGIONS;
                szone_error(szone, 1, "check: tiny region incorrect", NULL,
                            "*** tiny region %ld incorrect szone_check_all(%s) counter=%d\n",
                            index, function, szone_check_counter);
                return 0;
            }
            SZONE_MAGAZINE_PTR_UNLOCK(szone, tiny_mag_ptr);
        }
    }
    /* Tiny free lists, one per slot. */
    for (index = 0; index < NUM_TINY_SLOTS; ++index) {
        if (!tiny_free_list_check(szone, index)) {
            szone->debug_flags &= ~ CHECK_REGIONS;
            szone_error(szone, 1, "check: tiny free list incorrect", NULL,
                        "*** tiny free list incorrect (slot=%ld) szone_check_all(%s) counter=%d\n",
                        index, function, szone_check_counter);
            return 0;
        }
    }

    /* Small regions. */
    for (index = 0; index < szone->small_region_generation->num_regions_allocated; ++index) {
        region_t small = szone->small_region_generation->hashed_regions[index];

        if (HASHRING_REGION_DEALLOCATED == small)
            continue;

        if (small) {
            magazine_t *small_mag_ptr = mag_lock_zine_for_region_trailer(szone, szone->small_magazines,
                                                                         REGION_TRAILER_FOR_SMALL_REGION(small), MAGAZINE_INDEX_FOR_SMALL_REGION(small));

            if (!small_check_region(szone, small)) {
                SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
                szone->debug_flags &= ~ CHECK_REGIONS;
                szone_error(szone, 1, "check: small region incorrect", NULL,
                            "*** small region %ld incorrect szone_check_all(%s) counter=%d\n",
                            index, function, szone_check_counter);
                return 0;
            }
            SZONE_MAGAZINE_PTR_UNLOCK(szone, small_mag_ptr);
        }
    }
    /* Small free lists: slot count is per-zone (largemem zones have more). */
    for (index = 0; index < szone->num_small_slots; ++index) {
        if (!small_free_list_check(szone, index)) {
            szone->debug_flags &= ~ CHECK_REGIONS;
            szone_error(szone, 1, "check: small free list incorrect", NULL,
                        "*** small free list incorrect (slot=%ld) szone_check_all(%s) counter=%d\n",
                        index, function, szone_check_counter);
            return 0;
        }
    }

    return 1;
}
/*
 * Introspection "check" hook: rate-limited full-heap verification.
 * Every call bumps the global counter; a note is logged every 10000 calls.
 * Verification is skipped until szone_check_start and then performed only
 * on every szone_check_modulo-th call; skipped calls report success.
 */
static boolean_t
szone_check(szone_t *szone)
{
    ++szone_check_counter;
    if (0 == (szone_check_counter % 10000))
        _malloc_printf(ASL_LEVEL_NOTICE, "at szone_check counter=%d\n", szone_check_counter);

    if ((szone_check_counter < szone_check_start) ||
        (szone_check_counter % szone_check_modulo))
        return 1;

    return szone_check_all(szone, "");
}
/*
 * Introspection enumerator: report all in-use pointers/ranges of a zone that
 * may live in ANOTHER task.  All zone memory is accessed through `reader`
 * (defaulting to _szone_default_reader), never dereferenced directly.
 * Delegates to the tiny, small, and large enumerators in turn, stopping at
 * the first failure and returning its kern_return_t.
 */
static kern_return_t
szone_ptr_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t zone_address,
                            memory_reader_t reader, vm_range_recorder_t recorder)
{
    szone_t *szone;
    kern_return_t err;

    if (!reader) reader = _szone_default_reader;

    /* Map the remote szone_t into our address space first. */
    err = reader(task, zone_address, sizeof(szone_t), (void **)&szone);
    if (err) return err;

    err = tiny_in_use_enumerator(task, context, type_mask, szone, reader, recorder);
    if (err) return err;

    err = small_in_use_enumerator(task, context, type_mask, szone, reader, recorder);
    if (err) return err;

    err = large_in_use_enumerator(task, context, type_mask,
                                  (vm_address_t)szone->large_entries, szone->num_large_entries, reader, recorder);
    return err;
}
/*
 * Fill `info_to_fill` with up to `count` statistics words.  Layout (see the
 * consumers in szone_print):
 *   [0] total blocks in use     [1] total bytes in use
 *   [2] "touched" bytes         [3] bytes allocated
 *   [4]/[5]  tiny blocks/bytes  [6]/[7]  small blocks/bytes
 *   [8]/[9]  large blocks/bytes [10]/[11] huge blocks/bytes (always 0)
 *   [12] debug flags
 * Magazine loops start at -1 to include the Depot magazine.  No locks are
 * taken; the numbers are a best-effort snapshot.
 */
void
scalable_zone_info(malloc_zone_t *zone, unsigned *info_to_fill, unsigned count)
{
    szone_t *szone = (void *)zone;
    unsigned info[13];

    /* s accumulates unused bytes across BOTH tiny and small magazines; it is
       subtracted from allocated bytes to produce the "touched" figure. */
    size_t s = 0;
    unsigned t = 0;
    size_t u = 0;
    mag_index_t mag_index;

    for (mag_index = -1; mag_index < szone->num_tiny_magazines; mag_index++) {
        s += szone->tiny_magazines[mag_index].mag_bytes_free_at_start;
        s += szone->tiny_magazines[mag_index].mag_bytes_free_at_end;
        t += szone->tiny_magazines[mag_index].mag_num_objects;
        u += szone->tiny_magazines[mag_index].mag_num_bytes_in_objects;
    }

    info[4] = t;
    info[5] = u;

    for (t = 0, u = 0, mag_index = -1; mag_index < szone->num_small_magazines; mag_index++) {
        s += szone->small_magazines[mag_index].mag_bytes_free_at_start;
        s += szone->small_magazines[mag_index].mag_bytes_free_at_end;
        t += szone->small_magazines[mag_index].mag_num_objects;
        u += szone->small_magazines[mag_index].mag_num_bytes_in_objects;
    }

    info[6] = t;
    info[7] = u;

    info[8] = szone->num_large_objects_in_use;
    info[9] = szone->num_bytes_in_large_objects;

    /* "Huge" category is vestigial and always zero. */
    info[10] = 0;
    info[11] = 0;

    info[12] = szone->debug_flags;

    info[0] = info[4] + info[6] + info[8] + info[10];
    info[1] = info[5] + info[7] + info[9] + info[11];

    info[3] = (szone->num_tiny_regions - szone->num_tiny_regions_dealloc) * TINY_REGION_SIZE +
              (szone->num_small_regions - szone->num_small_regions_dealloc) * SMALL_REGION_SIZE + info[9] + info[11];

    info[2] = info[3] - s;

    memcpy(info_to_fill, info, sizeof(unsigned)*count);
}
/*
 * Introspection "print" hook: dump a human-readable summary of the zone —
 * aggregate statistics, then every tiny and small region (with the last
 * region's free-at-start/free-at-end bytes so block accounting adds up),
 * and, when `verbose`, the free lists as well.
 */
static NOINLINE void
szone_print(szone_t *szone, boolean_t verbose)
{
    unsigned info[13];
    size_t index;
    region_t region;

    scalable_zone_info((void *)szone, info, 13);
    _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX,
                   "Scalable zone %p: inUse=%u(%y) touched=%y allocated=%y flags=%d\n",
                   szone, info[0], info[1], info[2], info[3], info[12]);
    _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX,
                   "\ttiny=%u(%y) small=%u(%y) large=%u(%y) huge=%u(%y)\n",
                   info[4], info[5], info[6], info[7], info[8], info[9], info[10], info[11]);
    /* Tiny regions. */
    _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX,
                   "%lu tiny regions:\n", szone->num_tiny_regions);
    if (szone->num_tiny_regions_dealloc)
        _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX,
                       "[%lu tiny regions have been vm_deallocate'd]\n", szone->num_tiny_regions_dealloc);
    for (index = 0; index < szone->tiny_region_generation->num_regions_allocated; ++index) {
        region = szone->tiny_region_generation->hashed_regions[index];
        if (HASHRING_OPEN_ENTRY != region && HASHRING_REGION_DEALLOCATED != region) {
            mag_index_t mag_index = MAGAZINE_INDEX_FOR_TINY_REGION(region);

            /* Only the magazine's current last region carries unclaimed
               free-at-start/free-at-end bytes. */
            print_tiny_region(verbose, region,
                              (region == szone->tiny_magazines[mag_index].mag_last_region) ?
                              szone->tiny_magazines[mag_index].mag_bytes_free_at_start : 0,
                              (region == szone->tiny_magazines[mag_index].mag_last_region) ?
                              szone->tiny_magazines[mag_index].mag_bytes_free_at_end : 0);
        }
    }
    if (verbose)
        print_tiny_free_list(szone);
    /* Small regions. */
    _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX,
                   "%lu small regions:\n", szone->num_small_regions);
    if (szone->num_small_regions_dealloc)
        _malloc_printf(MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX,
                       "[%lu small regions have been vm_deallocate'd]\n", szone->num_small_regions_dealloc);
    for (index = 0; index < szone->small_region_generation->num_regions_allocated; ++index) {
        region = szone->small_region_generation->hashed_regions[index];
        if (HASHRING_OPEN_ENTRY != region && HASHRING_REGION_DEALLOCATED != region) {
            mag_index_t mag_index = MAGAZINE_INDEX_FOR_SMALL_REGION(region);

            print_small_region(szone, verbose, region,
                               (region == szone->small_magazines[mag_index].mag_last_region) ?
                               szone->small_magazines[mag_index].mag_bytes_free_at_start : 0,
                               (region == szone->small_magazines[mag_index].mag_last_region) ?
                               szone->small_magazines[mag_index].mag_bytes_free_at_end : 0);
        }
    }
    if (verbose)
        print_small_free_list(szone);
}
/*
 * Introspection hook: record the address whose malloc activity should be
 * logged for this zone (consumed by the LOG() debug machinery).
 */
static void
szone_log(malloc_zone_t *zone, void *log_address)
{
    ((szone_t *)zone)->log_address = log_address;
}
/*
 * Acquire every lock in the zone — NOTE(review): presumably the fork()
 * prepare handler; confirm against the registration site.  Lock order is
 * fixed and must mirror szone_force_unlock in reverse: tiny magazines
 * 0..n-1, tiny Depot, small magazines 0..n-1, small Depot, then the zone
 * lock.  (The Depot lives at index DEPOT_MAGAZINE_INDEX, outside the 0..n-1
 * range, hence the separate lock.)
 */
static void
szone_force_lock(szone_t *szone)
{
    mag_index_t i;

    for (i = 0; i < szone->num_tiny_magazines; ++i) {
        SZONE_MAGAZINE_PTR_LOCK(szone, (&(szone->tiny_magazines[i])));
    }
    SZONE_MAGAZINE_PTR_LOCK(szone, (&(szone->tiny_magazines[DEPOT_MAGAZINE_INDEX])));

    for (i = 0; i < szone->num_small_magazines; ++i) {
        SZONE_MAGAZINE_PTR_LOCK(szone, (&(szone->small_magazines[i])));
    }
    SZONE_MAGAZINE_PTR_LOCK(szone, (&(szone->small_magazines[DEPOT_MAGAZINE_INDEX])));

    SZONE_LOCK(szone);
}
/*
 * Release every lock taken by szone_force_lock, in reverse order: zone lock
 * first, then all small magazines, then all tiny magazines.  The loops start
 * at -1 so the Depot magazine (index -1) is released along with the regular
 * magazines.
 */
static void
szone_force_unlock(szone_t *szone)
{
    mag_index_t i;

    SZONE_UNLOCK(szone);

    for (i = -1; i < szone->num_small_magazines; ++i) {
        SZONE_MAGAZINE_PTR_UNLOCK(szone, (&(szone->small_magazines[i])));
    }

    for (i = -1; i < szone->num_tiny_magazines; ++i) {
        SZONE_MAGAZINE_PTR_UNLOCK(szone, (&(szone->tiny_magazines[i])));
    }
}
/*
 * Introspection hook: report whether ANY of the zone's locks is currently
 * held.  Each lock is probed with try-lock; success means it was free, so it
 * is immediately released and probing continues.  The first try-lock failure
 * returns 1 ("locked").  Returns 0 only when every lock (zone lock, all
 * small magazines incl. Depot at -1, all tiny magazines incl. Depot) was
 * observed free.
 */
static boolean_t
szone_locked(szone_t *szone)
{
    mag_index_t i;
    int tookLock;

    tookLock = SZONE_TRY_LOCK(szone);
    if (tookLock == 0)
        return 1;
    SZONE_UNLOCK(szone);

    for (i = -1; i < szone->num_small_magazines; ++i) {
        tookLock = SZONE_MAGAZINE_PTR_TRY_LOCK(szone, (&(szone->small_magazines[i])));
        if (tookLock == 0)
            return 1;
        SZONE_MAGAZINE_PTR_UNLOCK(szone, (&(szone->small_magazines[i])));
    }

    for (i = -1; i < szone->num_tiny_magazines; ++i) {
        tookLock = SZONE_MAGAZINE_PTR_TRY_LOCK(szone, (&(szone->tiny_magazines[i])));
        if (tookLock == 0)
            return 1;
        SZONE_MAGAZINE_PTR_UNLOCK(szone, (&(szone->tiny_magazines[i])));
    }
    return 0;
}
/*
 * Memory-pressure hook: drain the large-entry "death row" cache back to the
 * system and return the number of bytes released.  `goal` is reported to the
 * MAGMALLOC_PRESSURERELIEF probe but does not bound the drain — the whole
 * cache is always emptied.  The cache is snapshotted and reset under the
 * zone lock; the actual page deallocations happen after unlocking.  Zones
 * with flotsam disabled release nothing.
 */
static size_t
szone_pressure_relief(szone_t *szone, size_t goal)
{
#if LARGE_CACHE
    if (!szone->flotsam_enabled)
        return 0;

    SZONE_LOCK(szone);

    /* Snapshot the ring buffer and reset it while holding the lock. */
    int idx = szone->large_entry_cache_oldest, idx_max = szone->large_entry_cache_newest;
    large_entry_t local_entry_cache[LARGE_ENTRY_CACHE_SIZE];

    memcpy((void *)local_entry_cache, (void *)szone->large_entry_cache, sizeof(local_entry_cache));

    szone->large_entry_cache_oldest = szone->large_entry_cache_newest = 0;
    szone->large_entry_cache[0].address = 0x0;
    szone->large_entry_cache[0].size = 0;
    szone->large_entry_cache_bytes = 0;
    szone->large_entry_cache_reserve_bytes = 0;

    szone->flotsam_enabled = FALSE;

    SZONE_UNLOCK(szone);

    /* Walk the snapshot from oldest to newest, wrapping at the ring end... */
    size_t total = 0;

    while (idx != idx_max) {
        deallocate_pages(szone, (void *) local_entry_cache[idx].address, local_entry_cache[idx].size, 0);
        total += local_entry_cache[idx].size;
        if (++idx == LARGE_ENTRY_CACHE_SIZE) idx = 0;
    }
    /* ...and include the newest slot itself when occupied. */
    if (0 != local_entry_cache[idx].address && 0 != local_entry_cache[idx].size) {
        deallocate_pages(szone, (void *) local_entry_cache[idx].address, local_entry_cache[idx].size, 0);
        total += local_entry_cache[idx].size;
    }

    MAGMALLOC_PRESSURERELIEF((void *)szone, goal, total); /* DTrace USDT probe */
    return total;
#else
    return 0;
#endif
}
/*
 * Per-subzone statistics: subzone 0 = tiny, 1 = small, 2 = large, 3 = huge
 * (vestigial, always zero).  Fills `stats` and returns 1 for a known
 * subzone, 0 otherwise.  Magazine loops start at -1 to include the Depot.
 * No locks are taken; values are a best-effort snapshot.
 */
boolean_t
scalable_zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats, unsigned subzone)
{
    szone_t *szone = (szone_t *)zone;

    switch (subzone) {
        case 0:
        {
            /* Tiny: s = unclaimed free bytes, t = live blocks, u = live bytes. */
            size_t s = 0;
            unsigned t = 0;
            size_t u = 0;
            mag_index_t mag_index;

            for (mag_index = -1; mag_index < szone->num_tiny_magazines; mag_index++) {
                s += szone->tiny_magazines[mag_index].mag_bytes_free_at_start;
                s += szone->tiny_magazines[mag_index].mag_bytes_free_at_end;
                t += szone->tiny_magazines[mag_index].mag_num_objects;
                u += szone->tiny_magazines[mag_index].mag_num_bytes_in_objects;
            }

            stats->blocks_in_use = t;
            stats->size_in_use = u;
            stats->size_allocated = (szone->num_tiny_regions - szone->num_tiny_regions_dealloc) * TINY_REGION_SIZE;
            stats->max_size_in_use = stats->size_allocated - s;
            return 1;
        }
        case 1:
        {
            /* Small: same accounting as tiny. */
            size_t s = 0;
            unsigned t = 0;
            size_t u = 0;
            mag_index_t mag_index;

            for (mag_index = -1; mag_index < szone->num_small_magazines; mag_index++) {
                s += szone->small_magazines[mag_index].mag_bytes_free_at_start;
                s += szone->small_magazines[mag_index].mag_bytes_free_at_end;
                t += szone->small_magazines[mag_index].mag_num_objects;
                u += szone->small_magazines[mag_index].mag_num_bytes_in_objects;
            }

            stats->blocks_in_use = t;
            stats->size_in_use = u;
            stats->size_allocated = (szone->num_small_regions - szone->num_small_regions_dealloc) * SMALL_REGION_SIZE;
            stats->max_size_in_use = stats->size_allocated - s;
            return 1;
        }
        case 2:
            /* Large allocations are exact-fit: in-use == allocated. */
            stats->blocks_in_use = szone->num_large_objects_in_use;
            stats->size_in_use = szone->num_bytes_in_large_objects;
            stats->max_size_in_use = stats->size_allocated = stats->size_in_use;
            return 1;
        case 3:
            /* Huge subzone no longer exists; report zeros for compatibility. */
            stats->blocks_in_use = 0;
            stats->size_in_use = 0;
            stats->max_size_in_use = stats->size_allocated = 0;
            return 1;
    }
    return 0;
}
/*
 * Aggregate zone statistics across tiny, small, and large sub-allocators.
 * Magazine loops start at -1 to include the Depot.  The "+ 0" terms are
 * NOTE(review): apparently vestigial — likely left from a removed "huge"
 * category; they have no effect.  No locks are taken; values are a
 * best-effort snapshot.
 */
static void
szone_statistics(szone_t *szone, malloc_statistics_t *stats)
{
    size_t large;

    /* s = unclaimed free bytes, t = live blocks, u = live bytes,
       accumulated across both tiny and small magazines. */
    size_t s = 0;
    unsigned t = 0;
    size_t u = 0;
    mag_index_t mag_index;

    for (mag_index = -1; mag_index < szone->num_tiny_magazines; mag_index++) {
        s += szone->tiny_magazines[mag_index].mag_bytes_free_at_start;
        s += szone->tiny_magazines[mag_index].mag_bytes_free_at_end;
        t += szone->tiny_magazines[mag_index].mag_num_objects;
        u += szone->tiny_magazines[mag_index].mag_num_bytes_in_objects;
    }

    for (mag_index = -1; mag_index < szone->num_small_magazines; mag_index++) {
        s += szone->small_magazines[mag_index].mag_bytes_free_at_start;
        s += szone->small_magazines[mag_index].mag_bytes_free_at_end;
        t += szone->small_magazines[mag_index].mag_num_objects;
        u += szone->small_magazines[mag_index].mag_num_bytes_in_objects;
    }

    large = szone->num_bytes_in_large_objects + 0;

    stats->blocks_in_use = t + szone->num_large_objects_in_use + 0;
    stats->size_in_use = u + large;
    stats->max_size_in_use = stats->size_allocated =
        (szone->num_tiny_regions - szone->num_tiny_regions_dealloc) * TINY_REGION_SIZE +
        (szone->num_small_regions - szone->num_small_regions_dealloc) * SMALL_REGION_SIZE + large;
    /* max_size_in_use excludes bytes never yet handed out. */
    stats->max_size_in_use -= s;
}
/*
 * Legacy malloc shim: old clients expect large allocations to come back
 * zero-filled, so route anything above the large threshold through calloc;
 * smaller requests take the normal malloc path.
 */
static void *
legacy_zeroing_large_malloc(szone_t *szone, size_t size) {
    if (size <= LARGE_THRESHOLD)
        return szone_malloc(szone, size);
    return szone_calloc(szone, 1, size); /* large: force zero-fill */
}
/*
 * Legacy valloc shim: old clients expect valloc'd memory to be zero-filled,
 * so clear the block explicitly before returning it.
 *
 * Fix: the original called memset(p, 0, size) unconditionally, which is
 * undefined behavior (NULL dereference) when szone_valloc fails; guard the
 * memset and propagate NULL to the caller instead.
 */
static void *
legacy_zeroing_large_valloc(szone_t *szone, size_t size) {
    void *p = szone_valloc(szone, size);

    if (p)
        memset(p, 0, size);
    return p;
}
/*
 * Swap a zone's malloc/valloc entry points for the legacy zeroing variants.
 * The basic_zone function table is normally kept read-only (to harden
 * against heap-corruption attacks overwriting function pointers), so it must
 * be made writable around the update and re-protected afterward.
 * NOTE(review): the mprotect return values are ignored — failure would leave
 * the table unprotected or the write crashing; confirm this is acceptable.
 */
void zeroify_scalable_zone(malloc_zone_t *zone)
{
    szone_t *szone = (szone_t *)zone;

    if (szone) {
        mprotect(szone, sizeof(szone->basic_zone), PROT_READ | PROT_WRITE);
        szone->basic_zone.malloc = (void *)legacy_zeroing_large_malloc;
        szone->basic_zone.valloc = (void *)legacy_zeroing_large_valloc;
        mprotect(szone, sizeof(szone->basic_zone), PROT_READ);
    }
}
/* Introspection vtable handed to malloc_zone_t consumers (vmmap, heap,
 * malloc_zone_statistics, fork handlers).  Field order follows
 * malloc_introspection_t; the trailing NULLs are unimplemented later-version
 * slots. */
static const struct malloc_introspection_t szone_introspect = {
    (void *)szone_ptr_in_use_enumerator,  /* enumerator */
    (void *)szone_good_size,              /* good_size */
    (void *)szone_check,                  /* check */
    (void *)szone_print,                  /* print */
    szone_log,                            /* log */
    (void *)szone_force_lock,             /* force_lock */
    (void *)szone_force_unlock,           /* force_unlock */
    (void *)szone_statistics,             /* statistics */
    (void *)szone_locked,                 /* zone_locked */
    NULL, NULL, NULL, NULL,               /* later-version slots, unimplemented */
};
/*
 * create_scalable_zone: construct and initialize a scalable malloc zone.
 *
 * initial_size is currently unused (kept for interface compatibility);
 * debug_flags carries SCALABLE_MALLOC_* options.  Returns the new zone, or
 * NULL if backing pages cannot be allocated.  On return, the page holding
 * the basic_zone function-pointer table has been mprotect'ed read-only as a
 * hardening measure; all mutable zone state lives beyond that page.
 */
malloc_zone_t *
create_scalable_zone(size_t initial_size, unsigned debug_flags)
{
	szone_t *szone;
	uint64_t hw_memsize = 0;

	/* Sanity-check our hard-wired page size/shift against the VM's values. */
	if ((vm_page_size != _vm_page_size) || (vm_page_shift != _vm_page_shift)) {
		malloc_printf("*** FATAL ERROR - machine page size does not match our assumptions.\n");
		exit(-1);
	}

#if defined(__i386__) || defined(__x86_64__)
	/* We read CPU count and memory size from the comm page below; make sure
	   its layout is one we understand. */
	if (_COMM_PAGE_VERSION_REQD > (*((short *) _COMM_PAGE_VERSION))) {
		malloc_printf("*** ERROR - comm page version mismatch.\n");
		exit(-1);
	}
#endif

	/* The zone header itself comes straight from vm_allocate, never from a
	   malloc zone. */
	szone = allocate_pages(NULL, SZONE_PAGED_SIZE, 0, 0, VM_MEMORY_MALLOC);
	if (!szone)
		return NULL;

#if 0
#warning CHECK_REGIONS enabled
	debug_flags |= CHECK_REGIONS;
#endif
#if 0
#warning LOG enabled
	szone->log_address = ~0;
#endif

	/* Link the two tiny region-hash generations into a ring and point the
	   current generation at the embedded initial table. */
	szone->trg[0].nextgen = &(szone->trg[1]);
	szone->trg[1].nextgen = &(szone->trg[0]);
	szone->tiny_region_generation = &(szone->trg[0]);

	szone->tiny_region_generation->hashed_regions = szone->initial_tiny_regions;
	szone->tiny_region_generation->num_regions_allocated = INITIAL_NUM_REGIONS;
	szone->tiny_region_generation->num_regions_allocated_shift = INITIAL_NUM_REGIONS_SHIFT;

	/* Same setup for the small region hash. */
	szone->srg[0].nextgen = &(szone->srg[1]);
	szone->srg[1].nextgen = &(szone->srg[0]);
	szone->small_region_generation = &(szone->srg[0]);

	szone->small_region_generation->hashed_regions = szone->initial_small_regions;
	szone->small_region_generation->num_regions_allocated = INITIAL_NUM_REGIONS;
	szone->small_region_generation->num_regions_allocated_shift = INITIAL_NUM_REGIONS_SHIFT;

	/* Machines with at least 1GB of RAM get the "largemem" tuning. */
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
	if ((hw_memsize = *(uint64_t *)(uintptr_t)_COMM_PAGE_MEMORY_SIZE) >= (1ULL << 30))
#else
	size_t uint64_t_size = sizeof(hw_memsize);

	if (0 == sysctlbyname("hw.memsize", &hw_memsize, &uint64_t_size, 0, 0) &&
	    hw_memsize >= (1ULL << 30))
#endif
	{
		szone->is_largemem = 1;
		szone->num_small_slots = NUM_SMALL_SLOTS_LARGEMEM;
		szone->large_threshold = LARGE_THRESHOLD_LARGEMEM;
		szone->vm_copy_threshold = VM_COPY_THRESHOLD_LARGEMEM;
	} else {
		szone->is_largemem = 0;
		szone->num_small_slots = NUM_SMALL_SLOTS;
		szone->large_threshold = LARGE_THRESHOLD;
		szone->vm_copy_threshold = VM_COPY_THRESHOLD;
	}

#if LARGE_CACHE
	/* Cap the retained-large-entry cache at 1/1024 of physical memory. */
	szone->large_entry_cache_reserve_limit =
	    hw_memsize >> 10;

	/* Binaries linked against a libSystem older than major version 112 get
	   legacy mprotect behavior for cached large blocks. */
	int32_t libSystemVersion = NSVersionOfLinkTimeLibrary("System");
	if ((-1 != libSystemVersion) && ((libSystemVersion >> 16) < 112))
		szone->large_legacy_reset_mprotect = TRUE;
	else
		szone->large_legacy_reset_mprotect = FALSE;
#endif

	/* Seed the free-list checksum cookie from boot-time entropy. */
	szone->cookie = (uintptr_t)malloc_entropy[0];

#if __i386__ || __LP64__ || TARGET_OS_EMBEDDED
#if __i386__
	uintptr_t stackbase = 0x8fe00000;
	int entropic_bits = 3;
#elif __LP64__
	uintptr_t stackbase = USRSTACK64;
	int entropic_bits = 16;
#else
	uintptr_t stackbase = USRSTACK;
	int entropic_bits = 3;
#endif
	/* Randomize region placement only if the executable itself was slid
	   (i.e. ASLR is in effect for this process). */
	if (0 != _dyld_get_image_slide((const struct mach_header*)_NSGetMachExecuteHeader())) {
		if (0 == entropic_address) {
			uintptr_t t = stackbase - MAXSSIZ -
			    ((uintptr_t)(malloc_entropy[1] & ((1 << entropic_bits) - 1)) << SMALL_BLOCKS_ALIGN);
			/* Publish the entropic window exactly once; CAS makes
			   concurrent initializers agree on one value. */
			(void)__sync_bool_compare_and_swap(&entropic_limit, 0, t);
			(void)__sync_bool_compare_and_swap(&entropic_address, 0, t - ENTROPIC_KABILLION);
		}
		debug_flags &= ~DISABLE_ASLR;
	} else {
		debug_flags |= DISABLE_ASLR;
	}
#else
	debug_flags |= DISABLE_ASLR;
#endif

	/* Fill in the public malloc_zone_t vtable... */
	szone->basic_zone.version = 8;
	szone->basic_zone.size = (void *)szone_size;
	szone->basic_zone.malloc = (void *)szone_malloc;
	szone->basic_zone.calloc = (void *)szone_calloc;
	szone->basic_zone.valloc = (void *)szone_valloc;
	szone->basic_zone.free = (void *)szone_free;
	szone->basic_zone.realloc = (void *)szone_realloc;
	szone->basic_zone.destroy = (void *)szone_destroy;
	szone->basic_zone.batch_malloc = (void *)szone_batch_malloc;
	szone->basic_zone.batch_free = (void *)szone_batch_free;
	szone->basic_zone.introspect = (struct malloc_introspection_t *)&szone_introspect;
	szone->basic_zone.memalign = (void *)szone_memalign;
	szone->basic_zone.free_definite_size = (void *)szone_free_definite_size;
	szone->basic_zone.pressure_relief = (void *)szone_pressure_relief;

	szone->basic_zone.reserved1 = 0;
	szone->basic_zone.reserved2 = 0;

	/* ...then write-protect the vtable against stray scribbles.  The fields
	   written below must live past the first page for this to be legal. */
	mprotect(szone, sizeof(szone->basic_zone), PROT_READ);

	szone->debug_flags = debug_flags;
	LOCK_INIT(szone->large_szone_lock);

#if defined(__ppc__) || defined(__ppc64__)
	/* Legacy PPC applications may depend on zeroed zone memory. */
	zeroify_scalable_zone((malloc_zone_t *)szone);
#endif

#if defined(__i386__) || defined(__x86_64__)
	/* x86 reads the CPU number from the comm page; no pthread key needed. */
	szone->cpu_id_key = (pthread_key_t) -1;
#else
	int err;
	if ((err = pthread_key_create(&(szone->cpu_id_key), NULL))) {
		malloc_printf("*** ERROR -pthread_key_create failure err=%d.\n", err);
		szone->cpu_id_key = (pthread_key_t) -1;
	}
#endif

	/* Size the magazine arrays from the configured CPU count. */
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
	int nproc = *(uint8_t *)(uintptr_t)_COMM_PAGE_NCPUS;
#else
	int nproc = sysconf(_SC_NPROCESSORS_CONF);
#endif
	szone->num_tiny_magazines = (nproc > 1) ? MIN(nproc, TINY_MAX_MAGAZINES) : 1;

	magazine_t *tiny_magazines = allocate_pages(NULL, TINY_MAGAZINE_PAGED_SIZE, 0,
	    SCALABLE_MALLOC_ADD_GUARD_PAGES, VM_MEMORY_MALLOC);
	if (NULL == tiny_magazines)
		return NULL;
	/* Index -1 of the array is the Depot magazine. */
	szone->tiny_magazines = &(tiny_magazines[1]);

	/* Find the smallest power of 2 that exceeds (num_tiny_magazines - 1). */
	szone->num_tiny_magazines_mask_shift = 0;
	int i = 1;
	while (i <= (szone->num_tiny_magazines - 1)) {
		szone->num_tiny_magazines_mask_shift++;
		i <<= 1;
	}

	if (i > TINY_MAX_MAGAZINES) {
		malloc_printf("*** FATAL ERROR - magazine mask exceeds allocated magazines.\n");
		exit(-1);
	}
	szone->num_tiny_magazines_mask = i - 1;
#if TARGET_OS_EMBEDDED
	szone->last_tiny_advise = 0;
#endif

	LOCK_INIT(szone->tiny_regions_lock);
	LOCK_INIT(szone->tiny_magazines[DEPOT_MAGAZINE_INDEX].magazine_lock);
	for (i = 0; i < szone->num_tiny_magazines; ++i) {
		LOCK_INIT(szone->tiny_magazines[i].magazine_lock);
	}

	szone->num_small_magazines = (nproc > 1) ? MIN(nproc, SMALL_MAX_MAGAZINES) : 1;

	/* NOTE(review): tiny_magazines is leaked if this allocation fails --
	   benign at process startup, but worth confirming. */
	magazine_t *small_magazines = allocate_pages(NULL, SMALL_MAGAZINE_PAGED_SIZE, 0,
	    SCALABLE_MALLOC_ADD_GUARD_PAGES, VM_MEMORY_MALLOC);
	if (NULL == small_magazines)
		return NULL;
	szone->small_magazines = &(small_magazines[1]);

	/* Find the smallest power of 2 that exceeds (num_small_magazines - 1).
	   BUG FIX: 'i' was previously reused from the tiny LOCK_INIT loop above
	   without being reset, so the small mask/shift were computed from a
	   stale value; restart the scan at 1 exactly as the tiny computation
	   does. */
	szone->num_small_magazines_mask_shift = 0;
	i = 1;
	while (i <= (szone->num_small_magazines - 1)) {
		szone->num_small_magazines_mask_shift++;
		i <<= 1;
	}

	if (i > SMALL_MAX_MAGAZINES) {
		malloc_printf("*** FATAL ERROR - magazine mask exceeds allocated magazines.\n");
		exit(-1);
	}
	szone->num_small_magazines_mask = i - 1;
#if TARGET_OS_EMBEDDED
	szone->last_small_advise = 0;
#endif

	LOCK_INIT(szone->small_regions_lock);
	LOCK_INIT(szone->small_magazines[DEPOT_MAGAZINE_INDEX].magazine_lock);
	for (i = 0; i < szone->num_small_magazines; ++i) {
		LOCK_INIT(szone->small_magazines[i].magazine_lock);
	}

	CHECK(szone, __PRETTY_FUNCTION__);
	return (malloc_zone_t *)szone;
}
/*
 * purgeable_size: report the size of ptr if it is one of this zone's large
 * allocations; returns 0 otherwise (in particular, for pointers owned by the
 * helper zone -- callers must query that zone separately).
 */
static size_t
purgeable_size(szone_t *szone, const void *ptr)
{
return szone_size_try_large(szone, ptr);
}
/*
 * purgeable_malloc: allocate from the purgeable zone.  Requests at or below
 * large_threshold are delegated to the (non-purgeable) helper zone; only
 * genuinely large blocks are owned by this zone.
 */
static void *
purgeable_malloc(szone_t *szone, size_t size)
{
	szone_t *target = (size <= szone->large_threshold) ? szone->helper_zone : szone;
	return szone_malloc(target, size);
}
/*
 * purgeable_calloc: calloc entry point for the purgeable zone.  Checks the
 * num_items * size product for overflow, then routes the request to the
 * helper zone (small) or this zone (large) based on large_threshold.
 */
static void *
purgeable_calloc(szone_t *szone, size_t num_items, size_t size)
{
	size_t total_bytes = num_items * size;

	/* Overflow guard: the cheap mask test skips the wide multiply whenever
	   both operands fit in a half-word, in which case the product cannot
	   overflow. */
	if (num_items > 1) {
#if __LP64__
		if ((num_items | size) & 0xffffffff00000000ul) {
			__uint128_t product = ((__uint128_t)num_items) * ((__uint128_t)size);
			if ((uint64_t)(product >> 64))
				return NULL;
		}
#else
		if ((num_items | size) & 0xffff0000ul) {
			uint64_t product = ((uint64_t)num_items) * ((uint64_t)size);
			if ((uint32_t)(product >> 32))
				return NULL;
		}
#endif
	}

	szone_t *target = (total_bytes <= szone->large_threshold) ? szone->helper_zone : szone;
	return szone_calloc(target, 1, total_bytes);
}
/*
 * purgeable_valloc: page-aligned allocation; small requests go to the helper
 * zone, large ones stay in this zone.
 */
static void *
purgeable_valloc(szone_t *szone, size_t size)
{
	szone_t *target = (size <= szone->large_threshold) ? szone->helper_zone : szone;
	return szone_valloc(target, size);
}
/*
 * purgeable_free: release a block.  Ownership is decided by looking ptr up
 * in this zone's large-entry table (under the zone lock); anything not found
 * there must have come from the helper zone.
 */
static void
purgeable_free(szone_t *szone, void *ptr)
{
	large_entry_t *entry;

	SZONE_LOCK(szone);
	entry = large_entry_for_pointer_no_lock(szone, ptr);
	SZONE_UNLOCK(szone);

	if (!entry) {
		/* Not one of our large blocks: it belongs to the helper zone. */
		szone_free(szone->helper_zone, ptr);
		return;
	}
	free_large(szone, ptr);
}
/*
 * purgeable_free_definite_size: free with a caller-supplied size, which lets
 * us route directly by threshold instead of probing the large-entry table.
 */
static void
purgeable_free_definite_size(szone_t *szone, void *ptr, size_t size)
{
	szone_t *target = (size <= szone->large_threshold) ? szone->helper_zone : szone;
	szone_free_definite_size(target, ptr, size);
}
/*
 * purgeable_realloc: resize ptr, migrating the block between the helper zone
 * (small allocations) and this purgeable zone (large allocations) whenever
 * new_size crosses large_threshold.
 */
static void *
purgeable_realloc(szone_t *szone, void *ptr, size_t new_size)
{
size_t old_size;
/* realloc(NULL, n) behaves as malloc(n). */
if (NULL == ptr) {
return purgeable_malloc(szone, new_size);
} else if (0 == new_size) {
/* realloc(p, 0) frees p and returns a minimal allocation. */
purgeable_free(szone, ptr);
return purgeable_malloc(szone, 1);
}
/* Determine the current size: try our own large table first, then the
   helper zone. */
old_size = purgeable_size(szone, ptr); if (!old_size)
old_size = szone_size(szone->helper_zone, ptr);
if (!old_size) {
szone_error(szone, 1, "pointer being reallocated was not allocated", ptr, NULL);
return NULL;
}
if (old_size <= szone->large_threshold) {
if (new_size <= szone->large_threshold)
/* small -> small: stays entirely within the helper zone. */
return szone_realloc(szone->helper_zone, ptr, new_size);
else {
/* small -> large: copy into a fresh purgeable block, then release
   the helper-zone original. */
void * new_ptr = purgeable_malloc(szone, new_size);
if (new_ptr) {
memcpy(new_ptr, ptr, old_size);
szone_free_definite_size(szone->helper_zone, ptr, old_size);
}
return new_ptr; }
} else {
if (new_size <= szone->large_threshold) {
/* large -> small: move back into the helper zone.  Copying new_size
   bytes is safe because old_size > large_threshold >= new_size. */
void * new_ptr = szone_malloc(szone->helper_zone, new_size);
if (new_ptr) {
memcpy(new_ptr, ptr, new_size);
purgeable_free_definite_size(szone, ptr, old_size);
}
return new_ptr;
} else {
/* large -> large: copy-and-free through a new purgeable block. */
void * new_ptr = purgeable_malloc(szone, new_size);
if (new_ptr) {
memcpy(new_ptr, ptr, MIN(old_size, new_size));
purgeable_free_definite_size(szone, ptr, old_size);
}
return new_ptr; }
}
}
/*
 * purgeable_destroy: tear down the zone.  Releases every live large
 * allocation, then the large-entry table, and finally the zone header.
 * (Helper-zone blocks are not touched; the helper zone is owned elsewhere.)
 */
static void
purgeable_destroy(szone_t *szone)
{
	large_entry_t *large;
	vm_range_t range_to_deallocate;
	size_t idx;

	/* Walk the table backwards, freeing every occupied slot. */
	for (idx = szone->num_large_entries; idx-- > 0; ) {
		large = szone->large_entries + idx;
		if (large->address) {
			deallocate_pages(szone, (void *)(large->address), large->size, szone->debug_flags);
		}
	}

	/* Release the large-entry table itself... */
	large_entries_free_no_lock(szone, szone->large_entries, szone->num_large_entries, &range_to_deallocate);
	if (range_to_deallocate.size)
		deallocate_pages(szone, (void *)range_to_deallocate.address, (size_t)range_to_deallocate.size, 0);

	/* ...and finally the zone structure. */
	deallocate_pages(szone, (void *)szone, SZONE_PAGED_SIZE, 0);
}
/*
 * purgeable_batch_malloc: batch allocations are delegated to the helper zone
 * unconditionally; returns the number of blocks actually allocated.
 */
static unsigned
purgeable_batch_malloc(szone_t *szone, size_t size, void **results, unsigned count)
{
	szone_t *helper = szone->helper_zone;
	return szone_batch_malloc(helper, size, results, count);
}
/*
 * purgeable_batch_free: batch frees are delegated to the helper zone
 * unconditionally, mirroring purgeable_batch_malloc.
 */
static void
purgeable_batch_free(szone_t *szone, void **to_be_freed, unsigned count)
{
	szone_t *helper = szone->helper_zone;
	szone_batch_free(helper, to_be_freed, count);
}
/*
 * purgeable_memalign: aligned allocation; routed by size like the other
 * entry points (small -> helper zone, large -> this zone).
 */
static void *
purgeable_memalign(szone_t *szone, size_t alignment, size_t size)
{
	szone_t *target = (size <= szone->large_threshold) ? szone->helper_zone : szone;
	return szone_memalign(target, alignment, size);
}
/*
 * purgeable_ptr_in_use_enumerator: introspection hook that walks another
 * task's purgeable zone.  Only the large-entry table is enumerated, since
 * that is all this zone owns directly.
 */
static kern_return_t
purgeable_ptr_in_use_enumerator(task_t task, void *context, unsigned type_mask, vm_address_t zone_address,
				memory_reader_t reader, vm_range_recorder_t recorder)
{
	szone_t *szone;
	kern_return_t err;

	if (!reader)
		reader = _szone_default_reader;

	/* Map the remote zone header into our address space. */
	err = reader(task, zone_address, sizeof(szone_t), (void **)&szone);
	if (err)
		return err;

	return large_in_use_enumerator(task, context, type_mask,
				       (vm_address_t)szone->large_entries, szone->num_large_entries,
				       reader, recorder);
}
/*
 * purgeable_good_size: report the rounded allocation size the zone that
 * would service this request (helper for small, this zone for large) would
 * actually hand out.
 */
static size_t
purgeable_good_size(szone_t *szone, size_t size)
{
	szone_t *target = (size <= szone->large_threshold) ? szone->helper_zone : szone;
	return szone_good_size(target, size);
}
/*
 * purgeable_check: zone-consistency hook.  No checking is implemented for
 * the purgeable zone; it always reports the zone as consistent.
 */
static boolean_t
purgeable_check(szone_t *szone)
{
return 1;
}
/*
 * purgeable_print: introspection print hook.  The verbose flag is currently
 * ignored; only the large-object counters are reported, since that is all
 * this zone owns.  (%y is the malloc_printf human-readable-bytes specifier.)
 */
static void
purgeable_print(szone_t *szone, boolean_t verbose)
{
	int pflags = MALLOC_PRINTF_NOLOG | MALLOC_PRINTF_NOPREFIX;

	_malloc_printf(pflags,
		       "Scalable zone %p: inUse=%u(%y) flags=%d\n",
		       szone, szone->num_large_objects_in_use,
		       szone->num_bytes_in_large_objects, szone->debug_flags);
}
/*
 * purgeable_log: record the address whose allocation history should be
 * logged by this zone.
 */
static void
purgeable_log(malloc_zone_t *zone, void *log_address)
{
	((szone_t *)zone)->log_address = log_address;
}
/*
 * purgeable_force_lock: take the zone lock on behalf of fork()/introspection
 * so no other thread holds it across the critical section.
 */
static void
purgeable_force_lock(szone_t *szone)
{
SZONE_LOCK(szone);
}
/*
 * purgeable_force_unlock: counterpart to purgeable_force_lock; releases the
 * zone lock after the critical section.
 */
static void
purgeable_force_unlock(szone_t *szone)
{
SZONE_UNLOCK(szone);
}
/*
 * purgeable_statistics: fill in malloc_statistics_t from the large-object
 * counters.  The zone tracks no finer-grained accounting, so in-use, max
 * in-use, and allocated are all reported as the same byte count.
 */
static void
purgeable_statistics(szone_t *szone, malloc_statistics_t *stats)
{
	size_t large_bytes = szone->num_bytes_in_large_objects;

	stats->blocks_in_use = szone->num_large_objects_in_use;
	stats->size_in_use = large_bytes;
	stats->max_size_in_use = large_bytes;
	stats->size_allocated = large_bytes;
}
/*
 * purgeable_locked: report whether the zone lock is currently held.  If a
 * try-lock fails, someone else holds it; otherwise release what we just
 * took and report unlocked.
 */
static boolean_t
purgeable_locked(szone_t *szone)
{
	if (0 == SZONE_TRY_LOCK(szone))
		return 1;
	SZONE_UNLOCK(szone);
	return 0;
}
/*
 * purgeable_pressure_relief: release cached memory from both this zone and
 * its helper zone; returns the total number of bytes relinquished.
 */
static size_t
purgeable_pressure_relief(szone_t *szone, size_t goal)
{
	size_t released = szone_pressure_relief(szone, goal);
	released += szone_pressure_relief(szone->helper_zone, goal);
	return released;
}
/*
 * Introspection vtable for the purgeable zone.  Slot order is fixed by
 * struct malloc_introspection_t and mirrors szone_introspect above.
 */
static const struct malloc_introspection_t purgeable_introspect = {
(void *)purgeable_ptr_in_use_enumerator, /* enumerator */
(void *)purgeable_good_size, /* good_size */
(void *)purgeable_check, /* check */
(void *)purgeable_print, /* print */
purgeable_log, /* log */
(void *)purgeable_force_lock, /* force_lock */
(void *)purgeable_force_unlock, /* force_unlock */
(void *)purgeable_statistics, /* statistics */
(void *)purgeable_locked, /* zone_locked */
NULL, NULL, NULL, NULL, /* remaining slots unused -- presumably discharge-checking hooks; confirm against malloc_introspection_t */
};
/*
 * create_purgeable_zone: build a zone whose large allocations can be made
 * purgeable.  Requests at or below large_threshold are transparently
 * delegated to malloc_default_zone (stored as the "helper" zone); only
 * genuinely large blocks are owned here.  Returns NULL if the zone pages
 * cannot be allocated.  The basic_zone vtable page is left read-only.
 */
__private_extern__ malloc_zone_t *
create_purgeable_zone(size_t initial_size, malloc_zone_t *malloc_default_zone, unsigned debug_flags)
{
szone_t *szone;
uint64_t hw_memsize = 0;
/* The zone header comes straight from vm_allocate. */
szone = allocate_pages(NULL, SZONE_PAGED_SIZE, 0, 0, VM_MEMORY_MALLOC);
if (!szone)
return NULL;
#if 0
#warning LOG enabled
szone->log_address = ~0;
#endif
/* Physical memory size feeds the large-cache reserve limit below. */
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
hw_memsize = *(uint64_t *)(uintptr_t)_COMM_PAGE_MEMORY_SIZE;
#else
size_t uint64_t_size = sizeof(hw_memsize);
sysctlbyname("hw.memsize", &hw_memsize, &uint64_t_size, 0, 0);
#endif
/* Always use the non-largemem thresholds: anything small enough to need
   slot tuning goes to the helper zone anyway. */
szone->is_largemem = 0;
szone->large_threshold = LARGE_THRESHOLD;
szone->vm_copy_threshold = VM_COPY_THRESHOLD;
#if LARGE_CACHE
/* Cap the retained-large-entry cache at 1/1024 of physical memory. */
szone->large_entry_cache_reserve_limit =
hw_memsize >> 10;
/* Binaries linked against a libSystem older than major version 112 get
   legacy mprotect behavior for cached large blocks. */
int32_t libSystemVersion = NSVersionOfLinkTimeLibrary("System");
if ((-1 != libSystemVersion) && ((libSystemVersion >> 16) < 112) )
szone->large_legacy_reset_mprotect = TRUE;
else
szone->large_legacy_reset_mprotect = FALSE;
#endif
/* Public vtable: purgeable_* wrappers that route by size. */
szone->basic_zone.version = 8;
szone->basic_zone.size = (void *)purgeable_size;
szone->basic_zone.malloc = (void *)purgeable_malloc;
szone->basic_zone.calloc = (void *)purgeable_calloc;
szone->basic_zone.valloc = (void *)purgeable_valloc;
szone->basic_zone.free = (void *)purgeable_free;
szone->basic_zone.realloc = (void *)purgeable_realloc;
szone->basic_zone.destroy = (void *)purgeable_destroy;
szone->basic_zone.batch_malloc = (void *)purgeable_batch_malloc;
szone->basic_zone.batch_free = (void *)purgeable_batch_free;
szone->basic_zone.introspect = (struct malloc_introspection_t *)&purgeable_introspect;
szone->basic_zone.memalign = (void *)purgeable_memalign;
szone->basic_zone.free_definite_size = (void *)purgeable_free_definite_size;
szone->basic_zone.pressure_relief = (void *)purgeable_pressure_relief;
szone->basic_zone.reserved1 = 0;
szone->basic_zone.reserved2 = 0;
/* Write-protect the vtable page; every field written below must live
   beyond it for this to be legal. */
mprotect(szone, sizeof(szone->basic_zone), PROT_READ);
szone->debug_flags = debug_flags | SCALABLE_MALLOC_PURGEABLE;
/* Guard pages are not supported: small blocks live in the helper zone
   and large blocks occupy whole pages of their own. */
if (szone->debug_flags & SCALABLE_MALLOC_ADD_GUARD_PAGES) {
_malloc_printf(ASL_LEVEL_INFO, "purgeable zone does not support guard pages\n");
szone->debug_flags &= ~SCALABLE_MALLOC_ADD_GUARD_PAGES;
}
LOCK_INIT(szone->large_szone_lock);
/* Small requests are forwarded to this zone. */
szone->helper_zone = (struct szone_s *)malloc_default_zone;
CHECK(szone, __PRETTY_FUNCTION__);
return (malloc_zone_t *)szone;
}
/*
 * legacy_valloc: valloc entry point installed by create_legacy_scalable_zone.
 * Rounds the request up to whole pages and satisfies it from the large
 * allocator (final TRUE argument -- presumably "cleared"; see large_malloc).
 */
static NOINLINE void *
legacy_valloc(szone_t *szone, size_t size)
{
	size_t num_pages = round_page(size) >> vm_page_shift;
	void *ptr = large_malloc(szone, num_pages, 0, TRUE);

#if DEBUG_MALLOC
	if (LOG(szone, ptr))
		malloc_printf("legacy_valloc returned %p\n", ptr);
#endif
	return ptr;
}
/*
 * create_legacy_scalable_zone: build a scalable zone, then retune it for
 * pre-largemem clients and install the legacy valloc behavior.
 */
__private_extern__ malloc_zone_t *
create_legacy_scalable_zone(size_t initial_size, unsigned debug_flags)
{
	malloc_zone_t *mzone = create_scalable_zone(initial_size, debug_flags);
	szone_t *zone = (szone_t *)mzone;

	if (!zone)
		return NULL;

	/* Legacy clients expect the original (non-largemem) thresholds. */
	zone->is_largemem = 0;
	zone->num_small_slots = NUM_SMALL_SLOTS;
	zone->large_threshold = LARGE_THRESHOLD;
	zone->vm_copy_threshold = VM_COPY_THRESHOLD;

	/* create_scalable_zone made the vtable page read-only; briefly open it
	   up to install the legacy entry points, then re-protect it. */
	mprotect(zone, sizeof(zone->basic_zone), PROT_READ | PROT_WRITE);
	zone->basic_zone.valloc = (void *)legacy_valloc;
	zone->basic_zone.free_definite_size = NULL;
	mprotect(zone, sizeof(zone->basic_zone), PROT_READ);

	return mzone;
}
/* Version stamp for the freeze-dried malloc state layout below; bumped
   whenever szone_t or this header changes incompatibly. */
#define MALLOC_FREEZEDRY_VERSION 6

/* Snapshot of all malloc zones, produced by malloc_freezedry() and revived
   by malloc_jumpstart(). */
typedef struct {
unsigned version; /* must equal MALLOC_FREEZEDRY_VERSION to be revivable */
unsigned nszones; /* number of entries in szones[] */
szone_t *szones; /* flat copies of each zone's szone_t */
} malloc_frozen;
/*
 * frozen_malloc: malloc entry for a revived (frozen) zone.  New memory is
 * always taken from the live default allocator; the frozen zone's own
 * storage is never modified.
 */
static void *
frozen_malloc(szone_t *zone, size_t new_size)
{
return malloc(new_size);
}
/*
 * frozen_calloc: calloc entry for a revived (frozen) zone; delegates to the
 * live default allocator.
 */
static void *
frozen_calloc(szone_t *zone, size_t num_items, size_t size)
{
return calloc(num_items, size);
}
/*
 * frozen_valloc: valloc entry for a revived (frozen) zone; delegates to the
 * live default allocator.
 */
static void *
frozen_valloc(szone_t *zone, size_t new_size)
{
return valloc(new_size);
}
/*
 * frozen_realloc: realloc entry for a revived (frozen) zone.  Shrinking (or
 * equal-size) requests reuse the frozen block in place; growth copies the
 * old contents into a fresh block from the live default allocator, leaving
 * the frozen original untouched (frozen_free is a no-op).
 */
static void *
frozen_realloc(szone_t *zone, void *ptr, size_t new_size)
{
	size_t old_size = szone_size(zone, ptr);
	void *new_ptr;

	if (new_size <= old_size) {
		return ptr;
	}

	new_ptr = malloc(new_size);
	if (new_ptr == NULL) {
		/* Bug fix: an allocation failure previously fell through to
		   memcpy(NULL, ...) and crashed instead of returning NULL. */
		return NULL;
	}
	if (old_size > 0) {
		memcpy(new_ptr, ptr, old_size);
	}
	return new_ptr;
}
/*
 * frozen_free: frees of blocks in a frozen zone are deliberately ignored;
 * the frozen storage is treated as immortal.
 */
static void
frozen_free(szone_t *zone, void *ptr)
{
}
/*
 * frozen_destroy: destroying a frozen zone is deliberately a no-op; its
 * pages belong to the freeze-dried snapshot, not to this process's heap.
 */
static void
frozen_destroy(szone_t *zone)
{
}
/*
 * malloc_freezedry: snapshot the state of every registered malloc zone into
 * a heap-allocated malloc_frozen blob, returned as an opaque cookie for a
 * later malloc_jumpstart().  Returns 0 on failure -- including allocation
 * failure and the presence of any zone other than "DefaultMallocZone".
 */
uintptr_t
malloc_freezedry(void)
{
	extern unsigned malloc_num_zones;
	extern malloc_zone_t **malloc_zones;
	malloc_frozen *data;
	unsigned i;

	/* Allocate the snapshot header and the per-zone copies.  Bug fix: both
	   results were previously dereferenced without NULL checks. */
	data = (malloc_frozen *) malloc(sizeof(malloc_frozen));
	if (data == NULL)
		return 0;
	data->version = MALLOC_FREEZEDRY_VERSION;
	data->nszones = malloc_num_zones;
	data->szones = (szone_t *) calloc(malloc_num_zones, sizeof(szone_t));
	if (data->szones == NULL) {
		free(data);
		return 0;
	}

	/* Copy out each zone.  Only "DefaultMallocZone" instances are
	   supported; bail out if anything else is registered. */
	for (i = 0; i < malloc_num_zones; i++) {
		if (strcmp(malloc_zones[i]->zone_name, "DefaultMallocZone")) {
			free(data->szones);
			free(data);
			return 0;
		}
		memcpy(&data->szones[i], malloc_zones[i], sizeof(szone_t));
	}

	return ((uintptr_t)data);
}
int
malloc_jumpstart(uintptr_t cookie)
{
malloc_frozen *data = (malloc_frozen *)cookie;
unsigned i;
if (data->version != MALLOC_FREEZEDRY_VERSION) {
return 1;
}
for (i = 0; i < data->nszones; i++) {
data->szones[i].basic_zone.size = (void *) szone_size;
data->szones[i].basic_zone.malloc = (void *) frozen_malloc;
data->szones[i].basic_zone.calloc = (void *) frozen_calloc;
data->szones[i].basic_zone.valloc = (void *) frozen_valloc;
data->szones[i].basic_zone.free = (void *) frozen_free;
data->szones[i].basic_zone.realloc = (void *) frozen_realloc;
data->szones[i].basic_zone.destroy = (void *) frozen_destroy;
data->szones[i].basic_zone.introspect = (struct malloc_introspection_t *)&szone_introspect;
malloc_zone_register(&data->szones[i].basic_zone);
}
return 0;
}