#pragma once
#ifndef __AUTO_ZONE_CORE__
#define __AUTO_ZONE_CORE__
#include "auto_zone.h"
#include "auto_impl_utilities.h"
#include "auto_weak.h"
#include "Bitmap.h"
#include "Configuration.h"
#include "Definitions.h"
#include "Large.h"
#include "Locks.h"
#include "Admin.h"
#include "Region.h"
#include "Statistics.h"
#include "Subzone.h"
#include "SubzonePartition.h"
#include "Thread.h"
#include <algorithm>
#include <cassert>
namespace Auto {
class Monitor;
class ResourceTracker;
class SubzoneBlockRef;
class LargeBlockRef;
typedef PointerArray<VMMemoryAllocator> PointerList;
typedef std::vector<Range, AuxAllocator<Range> > RangeVector;
class ObjectAssociationMap : public PtrPtrMap, public AuxAllocated {};
typedef __gnu_cxx::hash_map<void *, ObjectAssociationMap*, AuxPointerHash, AuxPointerEqual, AuxAllocator<void *> > AssociationsHashMap;
enum State {
idle, scanning, enlivening, finalizing, reclaiming
};
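// Collection-cycle phases: a cycle moves from idle through scanning (finding
// reachable blocks), enlivening (catching references created while the scan
// ran), finalizing, and reclaiming, then returns to idle.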
#define worker_print(fmt, args...)    // worker-thread debug logging; compiled out
class Zone {
#define INVALID_THREAD_KEY_VALUE ((Thread *)-1)
public:
malloc_zone_t basic_zone;
auto_collection_control_t control;
spin_lock_t stats_lock;
usword_t num_weak_refs;
usword_t max_weak_refs;
struct weak_entry_t *weak_refs_table;
spin_lock_t weak_refs_table_lock;
dispatch_queue_t _collection_queue;
uint32_t _collection_count;
uint32_t _collector_disable_count;    // collector is disabled while this is non-zero
uint8_t _pending_collections[AUTO_ZONE_COLLECT_GLOBAL_MODE_COUNT];
pthread_mutex_t _collection_mutex;
bool _compaction_pending;
dispatch_source_t _compaction_timer;
dispatch_time_t _compaction_next_time;
private:
static volatile int32_t _zone_count;
static Zone *_first_zone;
Thread *_registered_threads;
pthread_key_t _registered_threads_key;
pthread_mutex_t _registered_threads_mutex;
bool _enlivening_enabled;
bool _enlivening_complete;
pthread_mutex_t _mark_bits_mutex;
Bitmap _in_subzone;                     // one bit per subzone quantum: set if backed by a subzone
Bitmap _in_large;                       // marks the starting quantum of each Large block
Large *_large_list;
spin_lock_t _large_lock;
PtrHashSet _roots;                      // registered root slots, scanned every cycle
pthread_mutex_t _roots_lock;
RangeVector _datasegments;              // sorted ranges of registered data segments
spin_lock_t _datasegments_lock;
PtrHashSet _zombies;
spin_lock_t _zombies_lock;
Region *_region_list;
spin_lock_t _region_lock;
bool _repair_write_barrier;
Range _coverage;                        // spans all memory managed by this zone
spin_lock_t _coverage_lock;
Statistics _stats;
volatile usword_t _allocation_counter;  // allocation volume since the last collection trigger
volatile usword_t _triggered_threshold;
ResourceTracker *_resource_tracker_list;
pthread_mutex_t _resource_tracker_lock;
PointerList _garbage_list;
size_t _large_garbage_count;
AssociationsHashMap _associations;      // object -> (key -> value) associative references
PtrSizeHashMap _hashes;
pthread_rwlock_t _associations_lock;
volatile enum State _state;
uint64_t _average_collection_time;
volatile int32_t _collection_checking_enabled;
#if UseArena
void *_arena;
void *_large_start;
Bitmap _large_bits;
spin_lock_t _large_bits_lock;
#endif
SubzonePartition _partition;
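// Cooperative worker-thread state, guarded by _worker_lock / _worker_cond.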
pthread_mutex_t _worker_lock;
pthread_cond_t _worker_cond;
usword_t _worker_count;
usword_t _sleeping_workers;
boolean_t _has_work;
boolean_t (*_worker_func)(void *, boolean_t, boolean_t);
void *_worker_arg;
pthread_mutex_t _compaction_lock;
boolean_t _compaction_disabled;
void deallocate_large(Large *large, void *block);
void deallocate_large_internal(Large *large, void *block);
Region *allocate_region();
void *allocate_large(Thread &thread, usword_t &size, const usword_t layout, bool clear, bool refcount_is_one);
inline Large *find_large(void *block) { return Large::large(block); }
void deallocate_small_medium(void *block);
public:
#if UseArena
void *arena_allocate_region(usword_t newsize);
#endif
void *arena_allocate_large(usword_t size);
void arena_deallocate(void *, size_t size);
static inline const usword_t admin_offset() { return align(sizeof(Zone), page_size); }
static inline const usword_t bytes_needed() {
usword_t in_subzone_size = Bitmap::bytes_needed(subzone_quantum_max);
usword_t in_large_size = Bitmap::bytes_needed(allocate_quantum_large_max);
#if UseArena
usword_t arena_size = Bitmap::bytes_needed(allocate_quantum_large_max);
#else
usword_t arena_size = 0;
#endif
return admin_offset() + in_subzone_size + in_large_size + arena_size;
}
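// The zone's bookkeeping lives in one allocation: the Zone object itself
// (rounded up to a whole page by admin_offset()), followed by the _in_subzone
// and _in_large bitmaps (plus the arena bitmap when UseArena is set).
// operator new/delete below size that allocation with bytes_needed().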
inline void *operator new(const size_t size) {
#if DEBUG
void *allocation_address = allocate_guarded_memory(bytes_needed());
#else
void *allocation_address = allocate_memory(bytes_needed());
#endif
if (!allocation_address) error("Cannot allocate zone");
return allocation_address;
}
inline void operator delete(void *zone) {
#if DEBUG
if (zone) deallocate_guarded_memory(zone, bytes_needed());
#else
if (zone) deallocate_memory(zone, bytes_needed());
#endif
}
static void setup_shared();
static pthread_key_t allocate_thread_key();
Zone(pthread_key_t thread_registration_key);
~Zone();
static inline Zone *zone() { return _first_zone; }
inline Thread *threads() { return _registered_threads; }
inline pthread_mutex_t *threads_mutex() { return &_registered_threads_mutex; }
inline Region *region_list() { return _region_list; }
inline spin_lock_t *region_lock() { return &_region_lock; }
inline Large *large_list() { return _large_list; }
inline spin_lock_t *large_lock() { return &_large_lock; }
inline Statistics &statistics() { return _stats; }
inline Range &coverage() { return _coverage; }
inline PointerList &garbage_list() { return _garbage_list; }
inline size_t large_garbage_count() const { return _large_garbage_count; }
dispatch_queue_t collection_queue() const { return _collection_queue; }
inline bool compaction_disabled() const { return _compaction_disabled; }
inline bool compaction_enabled() const { return !_compaction_disabled; }
inline void add_blocks_and_bytes(int64_t block_count, int64_t byte_count) { _stats.add_count(block_count); _stats.add_size(byte_count); }
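// Fast-path TLS lookup: when pthreads exposes direct thread-specific data
// (_pthread_has_direct_tsd()), the switch below turns the key into a
// compile-time constant so _pthread_getspecific_direct() can be inlined;
// otherwise it falls back to an ordinary pthread_getspecific() call.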
inline Thread *current_thread_direct() {
if (_pthread_has_direct_tsd()) {
#define CASE_FOR_DIRECT_KEY(key) case key: return (Thread *)_pthread_getspecific_direct(key)
switch (_registered_threads_key) {
CASE_FOR_DIRECT_KEY(__PTK_FRAMEWORK_GC_KEY0);
CASE_FOR_DIRECT_KEY(__PTK_FRAMEWORK_GC_KEY1);
CASE_FOR_DIRECT_KEY(__PTK_FRAMEWORK_GC_KEY2);
CASE_FOR_DIRECT_KEY(__PTK_FRAMEWORK_GC_KEY3);
CASE_FOR_DIRECT_KEY(__PTK_FRAMEWORK_GC_KEY4);
CASE_FOR_DIRECT_KEY(__PTK_FRAMEWORK_GC_KEY5);
CASE_FOR_DIRECT_KEY(__PTK_FRAMEWORK_GC_KEY6);
CASE_FOR_DIRECT_KEY(__PTK_FRAMEWORK_GC_KEY7);
CASE_FOR_DIRECT_KEY(__PTK_FRAMEWORK_GC_KEY8);
CASE_FOR_DIRECT_KEY(__PTK_FRAMEWORK_GC_KEY9);
default: return NULL;
}
} else {
return (Thread *)pthread_getspecific(_registered_threads_key);
}
}
inline Thread *current_thread() {
Thread *thread = current_thread_direct();
if (__builtin_expect(thread == INVALID_THREAD_KEY_VALUE, 0)) {
auto_fatal("Zone::current_thread(): pthread looked up after unregister. Pthreads static key destructor ordering issue?\n");
}
return thread;
}
inline Thread &registered_thread() {
Thread *thread = current_thread();
if (!thread) {
auto_error(this, "GC operation on unregistered thread. Thread registered implicitly. Break on auto_zone_thread_registration_error() to debug.", NULL);
auto_zone_thread_registration_error();
return register_thread();
}
return *thread;
}
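// A minimal caller sketch (hypothetical helper, not part of this API): public
// entry points resolve the calling thread once and pass it down, e.g.
//
//   void *example_allocate(Auto::Zone *zone, size_t size) {
//       Auto::Thread &thread = zone->registered_thread();  // registers implicitly if needed
//       return zone->block_allocate(thread, size, AUTO_MEMORY_UNSCANNED, true, false);
//   }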
static void destroy_registered_thread(void *key_value);
inline void set_state(enum State ns) { _state = ns; }
inline bool is_state(enum State ns) { return _state == ns; }
inline pthread_mutex_t *roots_lock() { return &_roots_lock; }
inline PtrHashSet &roots() { return _roots; }
inline pthread_rwlock_t *associations_lock() { return &_associations_lock; }
inline AssociationsHashMap &associations() { return _associations; }
#if UseArena
inline void * arena() { return _arena; }
#else
inline void * arena() { return (void *)0; }
#endif
inline void adjust_allocation_counter(usword_t n) { auto_atomic_add(n, &_allocation_counter); }
static inline const usword_t subzone_index(void *address) { return (((usword_t)address & mask(arena_size_log2)) >> subzone_quantum_log2); }
static inline const usword_t subzone_count(const size_t size) { return partition2(size, subzone_quantum_log2); }
inline void activate_subzone(Subzone *subzone) { _in_subzone.set_bit_atomic(subzone_index(subzone)); }
inline bool address_in_arena(const void *address) const {
#if UseArena
return ((usword_t)address & ~mask(arena_size_log2)) == (usword_t)_arena;
#else
return true;
#endif
}
inline const bool in_subzone_memory(void *address) const { return address_in_arena(address) && (bool)_in_subzone.bit(subzone_index(address)); }
inline const bool in_subzone_bitmap(void *address) const { return (bool)_in_subzone.bit(subzone_index(address)); }
inline const bool in_large_memory(void *address) const {
#if UseArena
usword_t arena_q = ((char *)address - (char *)_large_start) >> allocate_quantum_large_log2;
return address_in_arena(address) && (arena_q < allocate_quantum_large_max) && (bool)_large_bits.bit(arena_q);
#else
return address_in_arena(address);
#endif
}
inline const bool in_large_bitmap(void *address) const { return (bool)_in_large.bit(Large::quantum_index(address)); }
static inline const usword_t good_block_size(usword_t size) {
if (size <= allocate_quantum_large) return align2(size, allocate_quantum_medium_log2);
return align2(size, allocate_quantum_small_log2);
}
inline bool is_block(void *address) {
return _coverage.in_range(address) && block_is_start(address);
}
void *block_allocate(Thread &thread, const size_t size, const usword_t layout, const bool clear, bool refcount_is_one);
unsigned batch_allocate(Thread &thread, size_t size, const usword_t layout, const bool clear, const bool refcount_is_one, void **results, unsigned num_requested);
void block_deallocate(SubzoneBlockRef block);
void block_deallocate(LargeBlockRef block);
inline bool block_is_start_large(void *address) {
if (Large::is_start(address)) {
#if UseArena
usword_t arena_q = ((char *)address - (char *)_large_start) >> allocate_quantum_large_log2;
return (arena_q < allocate_quantum_large_max) && in_large_bitmap(address);
#else
return in_large_bitmap(address);
#endif
}
return false;
}
inline bool block_is_start(void *address) {
if (in_subzone_memory(address)) {
usword_t q;
return Subzone::subzone(address)->block_is_start(address, &q);
}
return block_is_start_large(address);
}
Large *block_start_large(void *address);
void *block_start(void *address);
usword_t block_layout(void *address);
void block_set_layout(void *address, const usword_t layout);
private:
inline void close_locks() {
_partition.lock();
}
inline void open_locks() {
_partition.unlock();
}
public:
bool is_locked();
bool add_subzone(Admin *admin);
template <class BlockRef> usword_t block_refcount(BlockRef block) { return block.refcount(); }
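// Retaining a block from refcount 0 to 1 can race with a concurrent
// collection: if threads are currently required to enliven new references
// (needs_enlivening), the block is enlivened under the barrier so the
// collector cannot reclaim it between the refcount store and the end of scan.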
template <class BlockRef> usword_t block_increment_refcount(BlockRef block) {
int refcount;
Thread &thread = registered_thread();
refcount = block.inc_refcount();
if (refcount == 1) {
ConditionBarrier barrier(thread.needs_enlivening());
if (barrier) block.enliven();
}
return refcount;
}
template <class BlockRef> usword_t block_decrement_refcount(BlockRef block) { return block.dec_refcount(); }
inline bool is_local(void *block) {
if (in_subzone_memory(block)) {
Subzone *subzone = Subzone::subzone(block);
return subzone->is_thread_local(subzone->quantum_index_unchecked(block));
}
return false;
}
inline bool block_is_garbage(void *block) {
if (in_subzone_memory(block)) {
Subzone *subzone = Subzone::subzone(block);
usword_t q;
return subzone->block_is_start(block, &q) && subzone->is_garbage(q);
} else if (block_is_start_large(block)) {
return Large::large(block)->is_garbage();
}
return false;
}
void set_associative_ref(void *block, void *key, void *value);
void *get_associative_ref(void *block, void *key);
size_t get_associative_hash(void *block);
void erase_associations_internal(void *block);
void erase_associations(void *block);
void erase_associations_in_range(const Range &r);
void visit_associations_for_key(void *key, boolean_t (^visitor) (void *object, void *value));
void sort_free_lists();
template <class BlockRef> inline void add_root(void *root, BlockRef value) {
Thread &thread = registered_thread();
thread.block_escaped(value);
UnconditionalBarrier barrier(thread.needs_enlivening());
Mutex lock(&_roots_lock);
if (_roots.find(root) == _roots.end()) {
_roots.insert(root);
}
if (barrier) value.enliven();
*(void **)root = value.address();
}
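// A minimal caller sketch (hypothetical): storing a collected block into a
// global slot routes through add_root so the slot is scanned on every cycle;
// for some BlockRef `value` referring to the stored block:
//
//   zone->add_root(&some_global, value);  // also performs the store into some_global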
inline void add_root_no_barrier(void *root) {
Mutex lock(&_roots_lock);
if (_roots.find(root) == _roots.end()) {
_roots.insert(root);
}
}
inline void copy_roots(PointerList &list) {
Mutex lock(&_roots_lock);
usword_t count = _roots.size();
list.clear_count();
list.grow(count);
list.set_count(count);
std::copy(_roots.begin(), _roots.end(), (void**)list.buffer());
}
inline void copy_roots(PtrVector &list) {
Mutex lock(&_roots_lock);
usword_t count = _roots.size();
list.resize(count);
std::copy(_roots.begin(), _roots.end(), list.begin());
}
inline void remove_root(void *root) {
Mutex lock(&_roots_lock);
PtrHashSet::iterator iter = _roots.find(root);
if (iter != _roots.end()) {
_roots.erase(iter);
}
}
inline bool is_root(void *address) {
Mutex lock(&_roots_lock);
PtrHashSet::iterator iter = _roots.find(address);
return (iter != _roots.end());
}
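// RangeLess orders disjoint ranges by address and treats any two overlapping
// ranges as equivalent (neither is "less"). That equivalence is what lets
// is_global_address_nolock() use std::binary_search with a pointer-sized
// probe Range: the probe compares "equal" to the segment that contains it.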
struct RangeLess {
bool operator()(const Range &r1, const Range &r2) const {
return (r1.address() < r2.address()) && (r1.end() <= r2.address());
}
};
inline void add_datasegment(const Range &r) {
SpinLock lock(&_datasegments_lock);
RangeVector::iterator i = std::lower_bound(_datasegments.begin(), _datasegments.end(), r, RangeLess());
_datasegments.insert(i, r);
}
struct RangeExcludes {
Range _range;
RangeExcludes(const Range &r) : _range(r) {}
bool operator()(void *address) { return !_range.in_range(address); }
};
struct RootRemover {
PtrHashSet &_roots;
RootRemover(PtrHashSet &roots) : _roots(roots) {}
void operator()(void *address) {
PtrHashSet::iterator iter = _roots.find(address);
if (iter != _roots.end()) _roots.erase(iter);
}
};
inline void remove_datasegment(const Range &r) {
{
SpinLock lock(&_datasegments_lock);
RangeVector::iterator i = std::lower_bound(_datasegments.begin(), _datasegments.end(), r, RangeLess());
if (i != _datasegments.end()) _datasegments.erase(i);
}
{
Mutex lock(&_roots_lock);
PtrVector rootsToRemove;
std::remove_copy_if(_roots.begin(), _roots.end(), std::back_inserter(rootsToRemove), RangeExcludes(r));
std::for_each(rootsToRemove.begin(), rootsToRemove.end(), RootRemover(_roots));
}
erase_associations_in_range(r);
weak_unregister_data_segment(this, r.address(), r.size());
}
inline void add_datasegment(void *address, size_t size) { add_datasegment(Range(address, size)); }
inline void remove_datasegment(void *address, size_t size) { remove_datasegment(Range(address, size)); }
inline bool is_global_address(void *address) {
SpinLock lock(&_datasegments_lock);
return is_global_address_nolock(address);
}
inline bool is_global_address_nolock(void *address) {
return std::binary_search(_datasegments.begin(), _datasegments.end(), Range(address, sizeof(void*)), RangeLess());
}
#if DEBUG
struct RangePrinter {
void operator() (const Range &r) {
printf("{ address = %p, end = %p }\n", r.address(), r.end());
}
};
inline void print_datasegments() {
SpinLock lock(&_datasegments_lock);
std::for_each(_datasegments.begin(), _datasegments.end(), RangePrinter());
}
void test_datasegments() {
Range r1((void*)0x1000, 512), r2((void*)0xA000, 512);
add_datasegment(r1);
add_datasegment(r2);
print_datasegments();
Range r3(r1), r4(r2);
r3.adjust(r1.size()), r4.adjust(-r2.size());
add_datasegment(r3);
add_datasegment(r4);
print_datasegments();
assert(is_global_address(r1.address()));
assert(is_global_address(displace(r1.address(), 0x10)));
assert(is_global_address(displace(r1.end(), -sizeof(void*))));
assert(is_global_address(displace(r2.address(), 0xA0)));
assert(is_global_address(displace(r3.address(), 0x30)));
assert(is_global_address(displace(r4.address(), 0x40)));
remove_datasegment(r2);
print_datasegments();
assert(!is_global_address(displace(r2.address(), 0xA0)));
remove_datasegment(r1);
assert(!is_global_address(displace(r1.address(), 0x10)));
print_datasegments();
remove_datasegment(r3);
remove_datasegment(r4);
print_datasegments();
}
#endif
inline void erase_weak(void *ptr) {
if (control.weak_layout_for_address) {
const unsigned char* weak_layout = control.weak_layout_for_address((auto_zone_t *)this, ptr);
if (weak_layout) weak_unregister_with_layout(this, (void**)ptr, weak_layout);
}
}
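// Zombie support: blocks that get resurrected after being condemned are
// recorded in _zombies so is_zombie() can identify them later.
// zombify_internal() strips a block's weak references, runs the optional
// control.resurrect callback, and demotes the block to AUTO_OBJECT_UNSCANNED.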
inline void add_zombie(void *address) {
SpinLock lock(&_zombies_lock);
if (_zombies.find(address) == _zombies.end()) {
_zombies.insert(address);
}
}
inline bool is_zombie(void *address) {
SpinLock lock(&_zombies_lock);
PtrHashSet::iterator iter = _zombies.find(address);
return (iter != _zombies.end());
}
inline void clear_zombies() {
SpinLock lock(&_zombies_lock);
_zombies.clear();
}
template <class BlockRef> void zombify_internal(BlockRef block) {
erase_weak(block.address());
if (control.resurrect) control.resurrect((auto_zone_t *)this, block.address());
block.set_layout(AUTO_OBJECT_UNSCANNED);
block.dec_refcount_no_lock();
}
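// Generational write barrier: after the store, the destination's card is
// marked when the stored block is thread-local or newly allocated, so a
// partial (generational) collection can rescan just the dirty cards to find
// old-to-new references. During an enlivening window the stored value is
// also enlivened so the in-flight scan cannot miss it.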
template <class DestBlock, class ValueBlock> void set_write_barrier(Thread &thread, DestBlock dest_block, const void **dest_addr, ValueBlock value_block, const void *value) {
thread.track_local_assignment(dest_block, value_block);
UnconditionalBarrier barrier(thread.needs_enlivening());
if (barrier) value_block.enliven();
*dest_addr = value;
if (value_block.is_thread_local() || value_block.is_new())
dest_block.mark_card(dest_addr);
}
bool set_write_barrier_range(void *address, const usword_t size);
bool set_write_barrier(void *address);
void mark_write_barriers_untouched();
void clear_untouched_write_barriers();
void clear_all_write_barriers();
void reset_all_marks();
void reset_all_pinned();
inline void set_repair_write_barrier(bool repair) { _repair_write_barrier = repair; }
inline bool repair_write_barrier() const { return _repair_write_barrier; }
void set_needs_enlivening();
void enlivening_barrier();
void clear_needs_enlivening();
void collect_begin();
void collect_end(CollectionTimer &timer, size_t bytes_collected);
usword_t purge_free_space();
bool block_collector();
void unblock_collector();
void collect(bool is_partial, void *current_stack_bottom, CollectionTimer &timer);
void collect_partial(void *current_stack_bottom, CollectionTimer &timer);
void collect_full(void *current_stack_bottom, CollectionTimer &timer);
void analyze_heap(const char *path);
void compact_heap();
void disable_compaction();
void set_in_compaction();
void compaction_barrier();
void clear_in_compaction();
void scavenge_blocks();
void invalidate_garbage(const size_t garbage_count, void *garbage[]);
void handle_overretained_garbage(void *block, int rc, auto_memory_type_t layout);
template <class BlockRef> inline void handle_overretained_garbage(BlockRef block) {
handle_overretained_garbage(block.address(), block.refcount(), block.layout());
}
size_t free_garbage(const size_t subzone_garbage_count, void *subzone_garbage[],
const size_t large_garbage_count, void *large_garbage[],
size_t &blocks_freed, size_t &bytes_freed);
void release_pages() {
}
void recycle_threads();
Thread &register_thread();
void unregister_thread();
private:
Thread *firstScannableThread();
Thread *nextScannableThread(Thread *thread);
public:
#ifdef __BLOCKS__
typedef void (^thread_visitor_t) (Thread *thread);
#else
class thread_visitor {
public:
virtual void operator() (Thread *thread) = 0;
};
typedef thread_visitor &thread_visitor_t;
#endif
void scan_registered_threads(thread_visitor_t scanner);
void scan_registered_threads(void (*visitor) (Thread *, void *), void *arg);
void suspend_all_registered_threads();
void resume_all_registered_threads();
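// Work sharing: perform_work_with_helper_threads() publishes a work function
// and wakes the helper threads; registered mutator threads may also pitch in
// via volunteer_for_work() while _has_work is set. do_volunteer_for_work()
// runs the published _worker_func under the _worker_lock bookkeeping.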
void perform_work_with_helper_threads(boolean_t (*work)(void *arg, boolean_t is_dedicated, boolean_t work_to_completion), void *arg);
boolean_t volunteer_for_work(boolean_t work_to_completion) {
if (_has_work > 0) return do_volunteer_for_work(false, work_to_completion);
return false;
}
boolean_t do_volunteer_for_work(boolean_t is_dedicated, boolean_t work_to_completion);
static void worker_thread_loop(void *context, size_t step);
unsigned has_weak_references() { return (num_weak_refs != 0); }
const unsigned char *layout_map_for_block(void *block) {
return control.layout_for_address ? control.layout_for_address((auto_zone_t *)this, block) : NULL;
}
const unsigned char *weak_layout_map_for_block(void *block) {
return control.weak_layout_for_address ? control.weak_layout_for_address((auto_zone_t *)this, block) : NULL;
}
const char *name_for_object(void *object) {
return control.name_for_object ? control.name_for_object((auto_zone_t *)this, object) : "";
}
void *forward_block(Subzone *subzone, usword_t q, void *block);
void move_block(Subzone *subzone, usword_t q, void *block);
void print_all_blocks();
template <class BlockRef> void print_block(BlockRef block, const char *tag);
void malloc_statistics(malloc_statistics_t *stats);
boolean_t should_collect();
void register_resource_tracker(const char *description, boolean_t (^should_collect)(void));
void unregister_resource_tracker(const char *description);
boolean_t resource_tracker_wants_collection();
inline uint32_t collection_checking_enabled() { return _collection_checking_enabled != 0; }
void enable_collection_checking();
void disable_collection_checking();
void track_pointer(void *pointer);
void increment_check_counts();
void clear_garbage_checking_count(void **garbage, size_t count);
void enumerate_uncollected(auto_zone_collection_checking_callback_t callback);
#ifdef __BLOCKS__
void dump_zone(
auto_zone_stack_dump stack_dump,
auto_zone_register_dump register_dump,
auto_zone_node_dump thread_local_node_dump,
auto_zone_root_dump root_dump,
auto_zone_node_dump global_node_dump,
auto_zone_weak_dump weak_dump_entry
);
void visit_zone(auto_zone_visitor_t *visitor);
#endif
};
};
#endif // __AUTO_ZONE_CORE__