#include "Admin.h"
#include "Bitmap.h"
#include "BlockIterator.h"
#include "Configuration.h"
#include "Definitions.h"
#include "Environment.h"
#include "Large.h"
#include "Locks.h"
#include "Range.h"
#include "Region.h"
#include "Statistics.h"
#include "Subzone.h"
#include "Thread.h"
#include "WriteBarrierIterator.h"
#include "ThreadLocalCollector.h"
#include "Zone.h"
#include "auto_weak.h"
#include "auto_trace.h"
#include "auto_dtrace.h"
#include <mach-o/dyld.h>
#include <mach-o/ldsyms.h>
#include <mach-o/dyld_priv.h>
#include <sys/mman.h>
#include <Block.h>
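// Cursor handed to the batch_invalidate callback; tracks progress through the
// garbage list while finalizers are invoked (see foreach_block_do below).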
struct auto_zone_cursor {
auto_zone_t *zone;
size_t garbage_count;
void **garbage;
volatile unsigned index;
size_t block_count;
size_t byte_count;
};
namespace Auto {
#if defined(DEBUG)
#warning DEBUG is set
#endif
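// A ResourceTracker wraps a client-supplied block that is polled to decide
// whether external resource pressure warrants a collection. Trackers form a
// doubly linked list; the description string is stored inline in extra bytes
// allocated past the end of the object (hence the zero-length array member).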
class ResourceTracker : public AuxAllocated {
boolean_t (^_should_collect)(void);
public:
ResourceTracker *_next;
ResourceTracker *_prev;
char _description[0];
ResourceTracker(const char *description, boolean_t (^test)(void), ResourceTracker *next) : _should_collect(Block_copy(test)), _next(next), _prev(NULL) {
strcpy(_description, description);
if (_next)
_next->_prev = this;
}
static ResourceTracker *create_resource_tracker(const char *description, boolean_t (^test)(void), ResourceTracker *next) {
ResourceTracker *tracker = new(strlen(description)+1) ResourceTracker(description, test, next);
return tracker;
}
~ResourceTracker() { Block_release(_should_collect); }
const char *description() { return _description; }
boolean_t probe() { return _should_collect(); }
void unlink() {
if (_next)
_next->_prev = _prev;
if (_prev)
_prev->_next = _next;
}
};
Zone *Zone::_first_zone = NULL;
volatile int32_t Zone::_zone_count = 0;
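// One-time initialization shared by all zones: set up the environment and
// fall back to the default malloc zone for auxiliary allocations.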
void Zone::setup_shared() {
Environment::initialize();
if (!aux_zone && !Zone::zone()) {
aux_zone = malloc_default_zone();
}
}
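// Hand out one of the pthread keys reserved for the GC framework
// (__PTK_FRAMEWORK_GC_KEY0 .. __PTK_FRAMEWORK_GC_KEY9); returns 0 once the
// supply of keys is exhausted.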
pthread_key_t Zone::allocate_thread_key() {
pthread_key_t key = __sync_fetch_and_add(&_zone_count, 1) + __PTK_FRAMEWORK_GC_KEY0;
if (key <= __PTK_FRAMEWORK_GC_KEY9)
return key;
return 0;
}
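// Construct a zone: initialize its locks, the subzone/large bitmaps (placed
// immediately after the Zone object itself), the optional arena, and the
// first region; register dyld TLV handlers so thread-local storage segments
// are added to (and removed from) the scanned data segments.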
Zone::Zone(pthread_key_t thread_registration_key)
: _registered_threads_key(thread_registration_key)
{
ASSERTION(page_size == vm_page_size);
static dispatch_once_t is_auto_initialized = 0;
dispatch_once(&is_auto_initialized, ^{ setup_shared(); });
void *next = displace(this, admin_offset());
_registered_threads = NULL;
pthread_key_init_np(_registered_threads_key, destroy_registered_thread);
pthread_mutexattr_t mutex_attr;
pthread_mutexattr_init(&mutex_attr);
pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_RECURSIVE);
pthread_mutex_init(&_registered_threads_mutex, &mutex_attr);
pthread_mutex_init(&_roots_lock, &mutex_attr);
pthread_mutexattr_destroy(&mutex_attr);
pthread_rwlock_init(&_associations_lock, NULL);
pthread_mutex_init(&_mark_bits_mutex, NULL);
_enlivening_enabled = false;
_enlivening_complete = false;
_in_subzone.initialize(subzone_quantum_max, next);
next = displace(next, Bitmap::bytes_needed(subzone_quantum_max));
_in_large.initialize(allocate_quantum_large_max, next);
next = displace(next, Bitmap::bytes_needed(allocate_quantum_large_max));
#if UseArena
_large_bits.initialize(allocate_quantum_large_max, next);
_large_bits_lock = 0;
next = displace(next, Bitmap::bytes_needed(allocate_quantum_large_max));
_arena = allocate_memory(1ul << arena_size_log2, 1ul << arena_size_log2);
if (!_arena) {
auto_fatal("can't allocate arena for GC\n");
}
_large_start = NULL;
_coverage.set_range(_arena, 1ul << arena_size_log2);
#else
_coverage.set_range((void *)~0, (void *)0);
#endif
_partition.initialize(this);
_large_list = NULL;
_large_lock = 0;
_datasegments_lock = 0;
_zombies_lock = 0;
_region_list = NULL;
_region_lock = 0;
_coverage_lock = 0;
_repair_write_barrier = false;
_state = idle;
_allocation_counter = 0;
_collection_checking_enabled = 0;
allocate_region();
if (_first_zone == NULL)
_first_zone = this;
pthread_mutex_init(&_worker_lock, NULL);
pthread_cond_init(&_worker_cond, NULL);
_has_work = false;
_worker_func = NULL;
_worker_arg = NULL;
_worker_count = 0;
_average_collection_time = 100000;
_sleeping_workers = 0;
_stats.idle_timer().start();
pthread_mutex_init(&_compaction_lock, NULL);
_compaction_disabled = true;
#if TARGET_IPHONE_SIMULATOR
# warning no TLV support on iOS simulator
#else
dyld_register_tlv_state_change_handler(dyld_tlv_state_allocated,
^(enum dyld_tlv_states state, const dyld_tlv_info *info)
{
if (this->current_thread()) {
this->add_datasegment(info->tlv_addr, info->tlv_size);
}
});
dyld_register_tlv_state_change_handler(dyld_tlv_state_deallocated,
^(enum dyld_tlv_states state, const dyld_tlv_info *info)
{
if (this->current_thread()) {
this->remove_datasegment(info->tlv_addr, info->tlv_size);
}
});
#endif
}
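// Tear down the zone: deallocate all large blocks and regions. Registered
// threads are expected to have unregistered themselves by this point.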
Zone::~Zone() {
for (Large *large = _large_list; large; ) {
Large *next = large->next();
large->deallocate(this);
large = next;
}
for (Region *region = _region_list; region != NULL; ) {
Region *next = region->next();
delete region;
region = next;
}
_region_list = NULL;
if (_registered_threads != NULL)
auto_error(this, "~Zone(): registered threads list not empty", NULL);
}
#if UseArena
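// First-fit search of the large-allocation bitmap for a run of free quanta;
// commits the backing memory on success, returns NULL when the arena is full.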
void *Zone::arena_allocate_large(usword_t size) {
size = align2(size, allocate_quantum_large_log2);
usword_t nbits = size >> allocate_quantum_large_log2;
usword_t start = 0;
usword_t end = ((1ul << arena_size_log2) - ((uintptr_t)_large_start - (uintptr_t)_arena)) >> allocate_quantum_large_log2;
if (nbits > (end - start)) {
return NULL;
}
end -= nbits;
SpinLock lock(&_large_bits_lock);
while (start <= end) {
if (_large_bits.bits_are_clear(start, nbits)) {
_large_bits.set_bits(start, nbits);
void *address = displace(_large_start, start << allocate_quantum_large_log2);
commit_memory(address, size);
return address;
}
start += 1;
}
return NULL;
}
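// Carve the single subzone region out of the front of the arena; everything
// past it belongs to the large allocator. Only one region is supported when
// using an arena.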
void *Zone::arena_allocate_region(usword_t newsize) {
if (_large_start) return NULL;
usword_t roundedsize = (newsize + subzone_quantum - 1) & ~(subzone_quantum-1);
_large_start = displace(_arena, roundedsize);
return _arena;
}
void Zone::arena_deallocate(void *address, size_t size) {
size = align2(size, allocate_quantum_large_log2);
usword_t nbits = size >> allocate_quantum_large_log2;
usword_t start = ((char *)address - (char *)_large_start) >> allocate_quantum_large_log2;
SpinLock lock(&_large_bits_lock);
_large_bits.clear_bits(start, nbits);
uncommit_memory(address, size);
}
#else
void *Zone::arena_allocate_large(usword_t size) {
return allocate_memory(size, allocate_quantum_large, VM_MEMORY_MALLOC_LARGE);
}
void Zone::arena_deallocate(void *address, size_t size) {
deallocate_memory(address, size);
}
#endif
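// Return the first region that still has subzones available, or allocate a
// new region, expand the coverage range, and insert it into the
// address-ordered region list.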
Region *Zone::allocate_region() {
SpinLock lock(&_region_lock);
Region *r = _region_list;
while (r) {
if (r->subzones_remaining() != 0)
return r;
r = r->next();
}
Region *region = Region::new_region(this);
if (region) {
{
SpinLock lock(&_coverage_lock);
_coverage.expand_range(*region);
}
if (_region_list == NULL || region->address() < _region_list->address()) {
region->set_next(_region_list);
_region_list = region;
} else {
Region *r = _region_list;
while (r->next() != NULL && r->next()->address() < region->address()) {
r = r->next();
}
region->set_next(r->next());
r->set_next(region);
}
}
return region;
}
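// Allocate a large block. The enlivening condition barrier is held while the
// block is published on the large list so a concurrent collector cannot miss
// it; with an arena, committed memory may be dirty and is zeroed for scanned
// or explicitly cleared layouts.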
void *Zone::allocate_large(Thread &thread, usword_t &size, const usword_t layout, bool clear, bool refcount_is_one) {
Large *large = Large::allocate(this, size, layout, refcount_is_one);
void *address;
{
SpinLock lock(&_large_lock);
ConditionBarrier barrier(thread.needs_enlivening());
if (large) {
address = large->address();
_in_large.set_bit(Large::quantum_index(address));
if (barrier) LargeBlockRef(large).enliven();
large->add(_large_list);
} else {
return NULL;
}
}
size = large->size();
add_blocks_and_bytes(1, size);
#if UseArena
if (clear || !(layout & AUTO_UNSCANNED)) {
bzero(address, size);
}
#endif
{
SpinLock lock(&_coverage_lock);
Range large_range(address, size);
_coverage.expand_range(large_range);
}
adjust_allocation_counter(size);
return address;
}
void Zone::deallocate_large(Large *large, void *block) {
SpinLock lock(&_large_lock);
deallocate_large_internal(large, block);
}
void Zone::deallocate_large_internal(Large *large, void *block) {
large->remove(_large_list);
_in_large.clear_bit(Large::quantum_index(block));
large->deallocate(this);
}
static inline bool locked(spin_lock_t *lock) {
TrySpinLock attempt(lock);
return !attempt;
}
static inline bool locked(pthread_mutex_t *lock) {
TryMutex attempt(lock);
return !attempt;
}
static inline bool locked(pthread_rwlock_t *lock) {
TryWriteLock attempt(lock);
return !attempt;
}
bool Zone::is_locked() {
bool result = (_partition.locked() || locked(&weak_refs_table_lock) || locked(&_large_lock) ||
locked(&_roots_lock) || locked(&_datasegments_lock) || locked(&_zombies_lock) ||
locked(&_region_lock) || locked(&_coverage_lock) ||
locked(&_associations_lock) ||
#if UseArena
locked(&_large_bits_lock) ||
#endif
locked(&_registered_threads_mutex) ||
_has_work );
if (!result) {
Thread *thread = current_thread();
if (thread != NULL) {
LockedBoolean &needs_enlivening = thread->needs_enlivening();
if (locked(&needs_enlivening.lock))
return true;
}
for (Region *region = _region_list; region != NULL; region = region->next()) {
if (locked(region->subzone_lock())) {
return true;
}
}
}
return result;
}
bool Zone::add_subzone(Admin *admin) {
Region *r = allocate_region();
return (r && r->add_subzone(admin));
}
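// Zero a freshly allocated block; blocks of up to 12 words are cleared with
// an unrolled fall-through switch, larger ones fall back to bzero().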
inline void clear_block(void *block, const size_t size) {
void **end = (void **)displace(block, size);
switch (size >> pointer_size_log2) {
case 12: end[-12] = NULL;
case 11: end[-11] = NULL;
case 10: end[-10] = NULL;
case 9: end[-9] = NULL;
case 8: end[-8] = NULL;
case 7: end[-7] = NULL;
case 6: end[-6] = NULL;
case 5: end[-5] = NULL;
case 4: end[-4] = NULL;
case 3: end[-3] = NULL;
case 2: end[-2] = NULL;
case 1: end[-1] = NULL;
case 0: break;
default:
bzero(block, size);
break;
}
}
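// Central allocation path. Small requests go through the per-thread cache
// (running a thread-local collection first when warranted), medium requests
// through the admin's free lists, and anything >= allocate_quantum_large is
// handled by allocate_large(); a collection may be triggered afterwards.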
void *Zone::block_allocate(Thread &thread, const size_t size, const usword_t layout, bool clear, bool refcount_is_one) {
void *block;
usword_t allocated_size = size;
if (!allocated_size) allocated_size = 1;
if (allocated_size < allocate_quantum_large) {
Admin &admin = _partition.admin(allocated_size, layout, refcount_is_one);
bool is_local = false;
if (allocated_size <= (allocate_quantum_small * max_cached_small_multiple)) {
const bool cannotFinalizeNow = false;
if (ThreadLocalCollector::should_collect(this, thread, cannotFinalizeNow)) {
ThreadLocalCollector tlc(this, (void*)auto_get_sp(), thread);
tlc.collect(cannotFinalizeNow);
}
do {
block = admin.thread_cache_allocate(thread, allocated_size, layout, refcount_is_one, is_local);
} while (!block && add_subzone(&admin));
} else {
do {
block = admin.find_allocation(thread, allocated_size, layout, refcount_is_one, is_local);
} while (!block && add_subzone(&admin));
}
#ifdef MEASURE_TLC_STATS
if (is_local) {
_stats.add_local_allocations(1);
} else {
_stats.add_global_allocations(1);
}
#endif
if (block && !is_local) {
adjust_allocation_counter(allocated_size);
}
} else {
block = allocate_large(thread, allocated_size, layout, clear, refcount_is_one);
clear = false;
}
if (block == NULL) return NULL;
if (should_collect()) {
auto_zone_collect((auto_zone_t *)this, AUTO_ZONE_COLLECT_RATIO_COLLECTION|AUTO_ZONE_COLLECT_COALESCE);
}
if (clear) clear_block(block, allocated_size);
if (refcount_is_one)
GARBAGE_COLLECTION_AUTO_REFCOUNT_ONE_ALLOCATION(allocated_size);
#if RECORD_REFCOUNT_STACKS
if (AUTO_RECORD_REFCOUNT_STACKS) {
auto_record_refcount_stack(this, block, 0);
}
#endif
return block;
}
unsigned Zone::batch_allocate(Thread &thread, size_t size, const usword_t layout, bool clear, bool refcount_is_one, void **results, unsigned num_requested) {
usword_t allocated_size = size;
unsigned count = 0;
if (!allocated_size) allocated_size = 1;
if (allocated_size < allocate_quantum_large) {
Admin &admin = _partition.admin(allocated_size, layout, refcount_is_one);
count = admin.batch_allocate(thread, allocated_size, layout, refcount_is_one, clear, results, num_requested);
} else {
// batch allocation of large blocks is not supported
}
if (count == 0) return 0;
adjust_allocation_counter(allocated_size * count);
if (should_collect()) {
auto_zone_collect((auto_zone_t *)this, AUTO_ZONE_COLLECT_RATIO_COLLECTION|AUTO_ZONE_COLLECT_COALESCE);
}
if (count && refcount_is_one && GARBAGE_COLLECTION_AUTO_REFCOUNT_ONE_ALLOCATION_ENABLED()) {
for (unsigned i=0; i<count; i++)
GARBAGE_COLLECTION_AUTO_REFCOUNT_ONE_ALLOCATION(allocated_size);
}
return count;
}
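// Explicit free of a subzone block. Unscanned blocks can be recycled
// immediately unless enlivening is in progress; otherwise the block is merely
// demoted to unscanned and left for the collector to reclaim.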
void Zone::block_deallocate(SubzoneBlockRef block) {
void *address = block.address();
Subzone *subzone = block.subzone();
usword_t q = block.q();
erase_associations(address);
SpinLock adminLock(subzone->admin()->lock());
block.dec_refcount_no_lock();
int layout = subzone->layout(q);
if (layout & AUTO_OBJECT)
erase_weak(address);
if (((layout & AUTO_UNSCANNED) == AUTO_UNSCANNED) && !_enlivening_enabled) {
int64_t block_size = subzone->size(q);
subzone->admin()->deallocate_no_lock(subzone, q, address);
add_blocks_and_bytes(-1, -block_size);
}
else {
subzone->set_layout(q, AUTO_MEMORY_UNSCANNED);
}
}
void Zone::block_deallocate(LargeBlockRef block) {
void *address = block.address();
Large *large = block.large();
int layout = large->layout();
if (layout & AUTO_OBJECT)
erase_weak(address);
large->set_layout(AUTO_MEMORY_UNSCANNED);
if (_collection_queue) {
Zone *zone = this;
dispatch_async(_collection_queue, ^{ zone->deallocate_large(large, address); });
}
}
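// Map an interior pointer to the Large block that contains it, or NULL if the
// address does not fall within any large block.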
Large *Zone::block_start_large(void *address) {
if (_coverage.in_range(address)) {
usword_t q = Large::quantum_index(address);
if (!_in_large.bit(q)) {
q = _in_large.previous_set(q);
if (q == not_found) return NULL;
}
#if UseArena
Large *large = Large::quantum_large(q, _arena);
#else
Large *large = Large::quantum_large(q, (void *)0);
#endif
if (!large->range().in_range(address)) return NULL;
return large;
}
return NULL;
}
void *Zone::block_start(void *address) {
if (in_subzone_memory(address)) {
Subzone *subzone = Subzone::subzone(address);
usword_t q;
return subzone->block_start(address, q);
} else {
Large *large = block_start_large(address);
return large ? large->address() : NULL;
}
}
usword_t Zone::block_layout(void *block) {
if (in_subzone_memory(block)) {
Subzone *subzone = Subzone::subzone(block);
usword_t q;
if (!subzone->block_is_start(block, &q)) return AUTO_TYPE_UNKNOWN;
return subzone->layout(q);
} else if (block_is_start_large(block)) {
Large *large = Large::large(block);
return large->layout();
}
return AUTO_TYPE_UNKNOWN;
}
void Zone::block_set_layout(void *block, const usword_t layout) {
if (in_subzone_memory(block)) {
Subzone *subzone = Subzone::subzone(block);
usword_t q;
if (!subzone->block_is_start(block, &q)) return;
SpinLock lock(subzone->admin()->lock());
subzone->set_layout(q, layout);
} else if (block_is_start_large(block)) {
Large *large = Large::large(block);
large->set_layout(layout);
}
}
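// Associative references: store (block, key) -> value, creating the
// per-block ObjectAssociationMap on demand. The value is enlivened under the
// barrier so a concurrent collector cannot miss it; a NULL value removes any
// existing association.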
void Zone::set_associative_ref(void *block, void *key, void *value) {
if (value) {
Thread &thread = registered_thread();
thread.block_escaped(value);
thread.block_escaped(block);
UnconditionalBarrier barrier(thread.needs_enlivening());
WriteLock lock(&_associations_lock);
AssociationsHashMap::iterator i = _associations.find(block);
ObjectAssociationMap* refs = (i != _associations.end() ? i->second : NULL);
if (refs == NULL) {
refs = new ObjectAssociationMap();
_associations[block] = refs;
}
(*refs)[key] = value;
if (barrier) thread.enliven_block(value);
} else {
WriteLock lock(&_associations_lock);
AssociationsHashMap::iterator i = _associations.find(block);
if (i != _associations.end()) {
ObjectAssociationMap *refs = i->second;
ObjectAssociationMap::iterator j = refs->find(key);
if (j != refs->end()) {
refs->erase(j);
}
}
}
}
void *Zone::get_associative_ref(void *block, void *key) {
ReadLock lock(&_associations_lock);
AssociationsHashMap::iterator i = _associations.find(block);
if (i != _associations.end()) {
ObjectAssociationMap *refs = i->second;
ObjectAssociationMap::iterator j = refs->find(key);
if (j != refs->end()) return j->second;
}
return NULL;
}
size_t Zone::get_associative_hash(void *block) {
{
ReadLock lock(&_associations_lock);
PtrSizeHashMap::iterator i = _hashes.find(block);
if (i != _hashes.end()) return i->second;
}
{
Thread &thread = registered_thread();
thread.block_escaped(block);
WriteLock lock(&_associations_lock);
PtrSizeHashMap::iterator i = _hashes.find(block);
if (i != _hashes.end()) return i->second;
return (_hashes[block] = random());
}
}
inline void Zone::erase_associations_internal(void *block) {
AssociationsHashMap::iterator i = _associations.find(block);
if (i != _associations.end()) {
ObjectAssociationMap *refs = i->second;
_associations.erase(i);
delete refs;
}
PtrSizeHashMap::iterator h = _hashes.find(block);
if (h != _hashes.end()) {
_hashes.erase(h);
}
}
void Zone::erase_associations(void *block) {
WriteLock lock(&_associations_lock);
erase_associations_internal(block);
}
void Zone::erase_associations_in_range(const Range &r) {
WriteLock lock(&_associations_lock);
PtrVector associationsToRemove;
for (AssociationsHashMap::iterator i = _associations.begin(); i != _associations.end(); i++) {
if (r.in_range(i->first)) associationsToRemove.push_back(i->first);
}
for (PtrVector::iterator i = associationsToRemove.begin(); i != associationsToRemove.end(); i++) {
erase_associations_internal(*i);
}
}
void Zone::visit_associations_for_key(void *key, boolean_t (^visitor) (void *object, void *value)) {
ReadLock lock(&_associations_lock);
for (AssociationsHashMap::iterator i = _associations.begin(); i != _associations.end(); i++) {
ObjectAssociationMap *refs = i->second;
ObjectAssociationMap::iterator j = refs->find(key);
if (j != refs->end()) {
if (!visitor(i->first, j->second))
return;
}
}
}
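// Rebuild every admin's free lists in address order: reset them all, then
// sweep each subzone and append a FreeListNode for every free quantum.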
void Zone::sort_free_lists() {
_partition.for_each(^(Admin &admin){
admin.reset_free_list();
});
SpinLock lock(&_region_lock);
for (Region *region = _region_list; region != NULL; region = region->next()) {
SubzoneRangeIterator iterator(region->subzone_range());
while (Subzone *subzone = iterator.next()) {
usword_t n = subzone->allocation_count();
Admin *admin = subzone->admin();
for (usword_t q = 0; q < n; q = subzone->next_quantum(q)) {
if (subzone->is_free(q)) {
void *address = subzone->quantum_address(q);
FreeListNode *node = new(address) FreeListNode();
admin->append_node(node);
}
}
}
}
}
bool Zone::set_write_barrier_range(void *destination, const usword_t size) {
if (in_subzone_memory(destination)) {
Subzone *subzone = Subzone::subzone(destination);
subzone->write_barrier().mark_cards(destination, size);
return true;
} else if (Large *large = block_start_large(destination)) {
if (large->is_scanned()) large->write_barrier().mark_cards(destination, size);
return true;
}
return false;
}
bool Zone::set_write_barrier(void *address) {
if (in_subzone_memory(address)) {
Subzone *subzone = Subzone::subzone(address);
subzone->write_barrier().mark_card(address);
return true;
}
else if (Large *large = block_start_large(address)) {
if (large->is_scanned()) large->write_barrier().mark_card(address);
return true;
}
return false;
}
struct mark_write_barriers_untouched_visitor {
usword_t _count;
mark_write_barriers_untouched_visitor() : _count(0) {}
inline bool visit(Zone *zone, WriteBarrier &wb) {
_count += wb.mark_cards_untouched();
return true;
}
};
void Zone::mark_write_barriers_untouched() {
mark_write_barriers_untouched_visitor visitor;
visitWriteBarriers(this, visitor);
}
struct clear_untouched_write_barriers_visitor {
usword_t _count;
clear_untouched_write_barriers_visitor() : _count(0) {}
inline bool visit(Zone *zone, WriteBarrier &wb) {
_count += wb.clear_untouched_cards();
return true;
}
};
void Zone::clear_untouched_write_barriers() {
clear_untouched_write_barriers_visitor visitor;
visitWriteBarriers(this, visitor);
}
struct clear_all_write_barriers_visitor {
inline bool visit(Zone *zone, WriteBarrier &wb) {
wb.clear();
return true;
}
};
void Zone::clear_all_write_barriers() {
clear_all_write_barriers_visitor visitor;
visitWriteBarriers(this, visitor);
}
void Zone::reset_all_marks() {
for (Region *region = _region_list; region != NULL; region = region->next()) {
region->clear_marks();
}
SpinLock lock(&_large_lock);
for (Large *large = _large_list; large != NULL; large = large->next()) {
large->clear_mark();
}
}
void Zone::reset_all_pinned() {
for (Region *region = _region_list; region != NULL; region = region->next()) {
region->pinned().clear();
}
}
void Zone::malloc_statistics(malloc_statistics_t *stats) {
stats->blocks_in_use = _stats.count();
stats->size_in_use = _stats.size();
stats->max_size_in_use = stats->size_allocated = 0;
{
SpinLock lock(&_large_lock);
Large *l = _large_list;
while (l) {
stats->max_size_in_use += l->size();
stats->size_allocated += l->vm_size();
l = l->next();
}
}
{
SubzonePartition::Lock lock(_partition);
for (Region *region = region_list(); region != NULL; region = region->next()) {
SubzoneRangeIterator iterator(region->subzone_range());
for (Subzone *sz = iterator.next(); sz != NULL; sz = iterator.next()) {
size_t bytes_per_quantum = (1L<<sz->quantum_log2());
stats->max_size_in_use += sz->allocation_count() * bytes_per_quantum;
stats->size_allocated += sz->allocation_limit() * bytes_per_quantum;
}
}
}
}
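// Put every registered thread into enlivening mode so blocks allocated or
// escaping during the scan are recorded for the collector.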
void Zone::set_needs_enlivening() {
close_locks();
Mutex lock(&_registered_threads_mutex);
_enlivening_enabled = true;
for (Thread *thread = _registered_threads; thread != NULL; thread = thread->next()) {
LockedBoolean &needs_enlivening = thread->needs_enlivening();
assert(needs_enlivening.state == false);
SpinLock lock(&needs_enlivening.lock);
needs_enlivening.state = true;
}
open_locks();
}
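// Acquire each registered thread's enlivening lock, stalling those threads'
// allocations until scanning reaches a consistent point.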
void Zone::enlivening_barrier() {
Mutex lock(&_registered_threads_mutex);
for (Thread *thread = _registered_threads; thread != NULL; thread = thread->next()) {
LockedBoolean &needs_enlivening = thread->needs_enlivening();
spin_lock(&needs_enlivening.lock);
}
_enlivening_complete = true;
}
void Zone::clear_needs_enlivening() {
Mutex lock(&_registered_threads_mutex);
_enlivening_enabled = false;
_enlivening_complete = false;
for (Thread *thread = _registered_threads; thread != NULL; thread = thread->next()) {
LockedBoolean &needs_enlivening = thread->needs_enlivening();
assert(needs_enlivening.state && needs_enlivening.lock != 0);
needs_enlivening.state = false;
spin_unlock(&needs_enlivening.lock);
}
}
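// Try to exclude the collector by taking the mark bits and registered
// threads mutexes; returns false (holding neither) if either is busy.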
bool Zone::block_collector() {
if (pthread_mutex_trylock(&_mark_bits_mutex) != 0)
return false;
if (pthread_mutex_trylock(&_registered_threads_mutex) != 0) {
pthread_mutex_unlock(&_mark_bits_mutex);
return false;
}
return true;
}
void Zone::unblock_collector() {
pthread_mutex_unlock(&_registered_threads_mutex);
pthread_mutex_unlock(&_mark_bits_mutex);
}
void Zone::collect_begin() {
usword_t allocated = _allocation_counter;
adjust_allocation_counter(-allocated);
auto_atomic_add(allocated, &_triggered_threshold);
_garbage_list.commit();
}
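// Drive one collection cycle: enable enlivening, scan (partial or full),
// scavenge garbage, clear weak references, maintain the write barrier repair
// state, then release enlivening, reset marks, and purge free space on a
// full collection.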
void Zone::collect(bool is_partial, void *current_stack_bottom, CollectionTimer &timer) {
GARBAGE_COLLECTION_COLLECTION_PHASE_BEGIN((auto_zone_t*)this, AUTO_TRACE_SCANNING_PHASE);
set_needs_enlivening();
pthread_mutex_lock(&_mark_bits_mutex);
recycle_threads();
if (is_partial) collect_partial(current_stack_bottom, timer);
else collect_full(current_stack_bottom, timer);
GARBAGE_COLLECTION_COLLECTION_PHASE_END((auto_zone_t*)this, AUTO_TRACE_SCANNING_PHASE, _stats.blocks_scanned(), _stats.bytes_scanned());
scavenge_blocks();
auto_weak_callback_block_t *callbacks = NULL;
if (has_weak_references()) {
GARBAGE_COLLECTION_COLLECTION_PHASE_BEGIN((auto_zone_t*)this, AUTO_TRACE_WEAK_REFERENCE_PHASE);
uintptr_t weak_referents, weak_references;
callbacks = weak_clear_references(this, _garbage_list.count(), (vm_address_t *)_garbage_list.buffer(), &weak_referents, &weak_references);
GARBAGE_COLLECTION_COLLECTION_PHASE_END((auto_zone_t*)this, AUTO_TRACE_WEAK_REFERENCE_PHASE, (uint64_t)weak_referents, (uint64_t)(weak_references * sizeof(void*)));
}
if (!is_partial) {
if (!_repair_write_barrier) {
_repair_write_barrier = true;
mark_write_barriers_untouched();
}
} else if (_repair_write_barrier) {
clear_untouched_write_barriers();
_repair_write_barrier = false;
}
clear_needs_enlivening();
reset_all_marks();
pthread_mutex_unlock(&_mark_bits_mutex);
weak_call_callbacks(callbacks);
if (!is_partial)
purge_free_space();
}
void Zone::collect_end(CollectionTimer &timer, size_t bytes_collected) {
usword_t triggered = _triggered_threshold;
auto_atomic_add(-triggered, &_triggered_threshold);
_average_collection_time = (_average_collection_time * 7 + timer.total_time().microseconds()) >> 3;
_garbage_list.uncommit();
}
usword_t Zone::purge_free_space() {
SubzonePartition::Lock lock(_partition);
usword_t bytes_purged = _partition.purge_free_space_no_lock();
return bytes_purged;
}
struct scavenge_blocks_visitor {
PointerList& _list;
size_t &_large_count;
scavenge_blocks_visitor(PointerList& list, size_t &large_count) : _list(list), _large_count(large_count) {}
inline bool visit(Zone *zone, Subzone *subzone, usword_t q) {
if (subzone->is_thread_local(q)) return true;
if (subzone->is_new(q)) subzone->mature(q);
if (!subzone->is_marked(q)) {
subzone->mark_global_garbage(q);
_list.add(subzone->quantum_address(q));
}
return true;
}
inline bool visit(Zone *zone, Large *large) {
if (large->is_new()) large->mature();
if (!large->is_marked()) {
large->mark_garbage();
_list.add(large->address());
++_large_count;
}
return true;
}
};
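// Sweep all allocated blocks, maturing young ones and queueing unmarked
// global blocks on the garbage list.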
void Zone::scavenge_blocks() {
_garbage_list.clear_count();
_large_garbage_count = 0;
scavenge_blocks_visitor visitor(_garbage_list, _large_garbage_count);
visitAllocatedBlocks(this, visitor);
#ifdef MEASURE_TLC_STATS
_stats.add_global_collected(_garbage_list.count() - _large_garbage_count);
#endif
}
void Zone::recycle_threads() {
Thread *unbound_threads = NULL;
{
Mutex lock(&_registered_threads_mutex);
Thread::scavenge_threads(&_registered_threads, &unbound_threads);
}
while (unbound_threads != NULL) {
Thread *next = unbound_threads->next();
delete unbound_threads;
unbound_threads = next;
}
}
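// Callback used with control.batch_invalidate: walks the cursor's garbage
// list and applies op to each object block, accumulating block and byte
// statistics along the way.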
static void foreach_block_do(auto_zone_cursor_t cursor, void (*op) (void *ptr, void *data), void *data) {
Zone *azone = (Auto::Zone *)cursor->zone;
while (cursor->index < cursor->garbage_count) {
void *ptr = (void *)cursor->garbage[cursor->index++];
auto_memory_type_t type = auto_zone_get_layout_type((auto_zone_t *)azone, ptr);
if (type & AUTO_OBJECT) {
#if DEBUG
if (ptr == WatchPoint) {
malloc_printf("auto_zone invalidating watchpoint: %p\n", WatchPoint);
}
#endif
op(ptr, data);
cursor->block_count++;
cursor->byte_count += auto_zone_size((auto_zone_t *)azone, ptr);
}
}
}
void Zone::invalidate_garbage(const size_t garbage_count, void *garbage[]) {
#if DEBUG
for (size_t index = 0; index < garbage_count; index++) {
void *ptr = (void *)garbage[index];
auto_block_info_sieve<AUTO_BLOCK_INFO_REFCOUNT> block_info(this, ptr);
if (block_info.refcount() > 0)
malloc_printf("invalidate_garbage: garbage ptr = %p, has non-zero refcount = %d\n", ptr, block_info.refcount());
}
#endif
struct auto_zone_cursor cursor = { (auto_zone_t *)this, garbage_count, garbage, 0, 0, 0 };
if (control.batch_invalidate) {
control.batch_invalidate((auto_zone_t *)this, foreach_block_do, &cursor, sizeof(cursor));
}
}
void Zone::handle_overretained_garbage(void *block, int rc, auto_memory_type_t layout) {
char *name;
if (is_object(layout)) {
if (control.name_for_address) {
name = control.name_for_address((auto_zone_t *)this, (vm_address_t)block, 0);
} else {
name = (char *)"object";
}
} else {
name = (char *)"non-object";
}
malloc_printf("garbage block %p(%s) was over-retained during finalization, refcount = %d\n"
"This could be an unbalanced CFRetain(), or CFRetain() balanced with -release.\n"
"Break on auto_zone_resurrection_error() to debug.\n", block, name, rc);
auto_zone_resurrection_error();
if (Auto::Environment::resurrection_is_fatal) {
auto_fatal("fatal resurrection error for garbage block %p(%s): over-retained during finalization, refcount = %d", block, name, rc);
}
if (is_object(layout) && control.name_for_address) free(name);
}
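// Reclaim garbage blocks whose refcount is zero (erasing weak references and
// associations as needed), zombify blocks registered as zombies, and report
// any over-retained blocks as resurrection errors.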
size_t Zone::free_garbage(const size_t subzone_garbage_count, void *subzone_garbage[],
const size_t large_garbage_count, void *large_garbage[],
size_t &blocks_freed, size_t &bytes_freed) {
blocks_freed = bytes_freed = 0;
if (collection_checking_enabled()) {
clear_garbage_checking_count(subzone_garbage, subzone_garbage_count);
clear_garbage_checking_count(large_garbage, large_garbage_count);
}
size_t subzone_overretained_count = 0;
size_t large_overretained_count = 0;
{
WriteLock lock(associations_lock());
if (subzone_garbage_count) {
SubzonePartition::Lock lock(_partition);
for (size_t index = 0; index < subzone_garbage_count; index++) {
void *block = subzone_garbage[index];
Subzone *subzone = Subzone::subzone(block);
usword_t q = subzone->quantum_index_unchecked(block);
if (!subzone->has_refcount(q)) {
if ((subzone->layout(q) & AUTO_OBJECT)) erase_weak(block);
blocks_freed++;
bytes_freed += subzone->size(q);
if (malloc_logger) malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, uintptr_t(this), uintptr_t(block), 0, 0, 0);
erase_associations_internal(block);
subzone->admin()->deallocate_no_lock(subzone, q, block);
} else if (is_zombie(block)) {
SubzoneBlockRef ref(subzone, q);
zombify_internal(ref);
} else {
subzone_garbage[subzone_overretained_count++] = block;
}
}
}
if (large_garbage_count) {
SpinLock largeLock(&_large_lock);
for (size_t index = 0; index < large_garbage_count; index++) {
void *block = large_garbage[index];
Large *large = Large::large(block);
int rc = large->refcount();
if (rc == 0) {
if (large->layout() & AUTO_OBJECT) erase_weak(block);
blocks_freed++;
bytes_freed += large->size();
if (malloc_logger) malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, uintptr_t(this), uintptr_t(block), 0, 0, 0);
erase_associations_internal(block);
deallocate_large_internal(large, block);
} else if (is_zombie(block)) {
LargeBlockRef ref(large);
zombify_internal(ref);
} else {
large_garbage[large_overretained_count++] = large;
}
}
}
}
for (size_t index = 0; index < subzone_overretained_count; index++) {
SubzoneBlockRef ref(subzone_garbage[index]);
handle_overretained_garbage(ref);
}
for (size_t index = 0; index < large_overretained_count; index++) {
LargeBlockRef ref((Large *)large_garbage[index]);
handle_overretained_garbage(ref);
}
add_blocks_and_bytes(-(int64_t)blocks_freed, -(int64_t)bytes_freed);
return bytes_freed;
}
inline bool is_dispatch_thread() {
return _pthread_getspecific_direct(__PTK_LIBDISPATCH_KEY0) != NULL && !pthread_main_np();
}
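// Bind the calling thread to this zone, creating and linking a Thread object
// on first use; new threads inherit the current enlivening state, any pending
// compaction timer is pushed out indefinitely for dispatch threads, and the
// thread's TLV storage is registered for scanning.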
Thread &Zone::register_thread() {
Thread *thread = current_thread();
if (thread == NULL) {
thread = new Thread(this);
if (_compaction_timer && is_dispatch_thread()) {
if (_compaction_pending) {
if (_compaction_next_time != DISPATCH_TIME_FOREVER)
dispatch_source_set_timer(_compaction_timer, DISPATCH_TIME_FOREVER, 0, 0);
_compaction_pending = false;
}
}
Mutex lock(&_registered_threads_mutex);
thread->set_next(_registered_threads);
LockedBoolean &needs_enlivening = thread->needs_enlivening();
needs_enlivening.state = _enlivening_enabled;
_registered_threads = thread;
if (_enlivening_complete)
spin_lock(&needs_enlivening.lock);
#if ! TARGET_IPHONE_SIMULATOR
dyld_enumerate_tlv_storage(
^(enum dyld_tlv_states state, const dyld_tlv_info *info)
{
this->add_datasegment(info->tlv_addr, info->tlv_size);
});
#endif
}
pthread_setspecific(_registered_threads_key, thread);
return *thread;
}
void Zone::unregister_thread() {
}
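// pthread TLS destructor for registered threads: re-arms the key on each pass
// so later destructors can still find the Thread, unbinding it only once the
// destructor has run PTHREAD_DESTRUCTOR_ITERATIONS times.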
void Zone::destroy_registered_thread(void *key_value) {
if (key_value != INVALID_THREAD_KEY_VALUE) {
Thread *thread = (Thread *)key_value;
Zone *zone = thread->zone();
pthread_key_t thread_key = zone->_registered_threads_key;
if (thread->increment_tsd_count() == PTHREAD_DESTRUCTOR_ITERATIONS) {
thread->unbind();
key_value = INVALID_THREAD_KEY_VALUE;
}
pthread_setspecific(thread_key, key_value);
}
}
inline Thread *Zone::firstScannableThread() {
Mutex lock(&_registered_threads_mutex);
for (Thread *thread = _registered_threads; thread != NULL; thread = thread->next()) {
if (thread->lockForScanning()) return thread;
}
return NULL;
}
inline Thread *Zone::nextScannableThread(Thread *thread) {
Mutex lock(&_registered_threads_mutex);
thread->unlockForScanning();
for (thread = thread->next(); thread != NULL; thread = thread->next()) {
if (thread->lockForScanning()) return thread;
}
return NULL;
}
void Zone::scan_registered_threads(thread_visitor_t visitor) {
for (Thread *thread = firstScannableThread(); thread != NULL; thread = nextScannableThread(thread)) {
visitor(thread);
}
}
#ifndef __BLOCKS__
class Zone_thread_visitor_helper : public Zone::thread_visitor {
void (*_visitor) (Thread *, void *);
void *_arg;
public:
Zone_thread_visitor_helper(void (*visitor) (Thread *, void *), void *arg) : _visitor(visitor), _arg(arg) {}
virtual void operator () (Thread *thread) { _visitor(thread, _arg); }
};
#endif
void Zone::scan_registered_threads(void (*visitor) (Thread *, void *), void *arg) {
#ifdef __BLOCKS__
scan_registered_threads(^(Thread *thread) { visitor(thread, arg); });
#else
Zone_thread_visitor_helper helper(visitor, arg);
scan_registered_threads(helper);
#endif
}
void Zone::suspend_all_registered_threads() {
pthread_mutex_lock(&_registered_threads_mutex);
for (Thread *thread = _registered_threads; thread != NULL; thread = thread->next()) {
thread->suspend();
}
}
void Zone::resume_all_registered_threads() {
for (Thread *thread = _registered_threads; thread != NULL; thread = thread->next()) {
thread->resume();
}
pthread_mutex_unlock(&_registered_threads_mutex);
}
void Zone::worker_thread_loop(void *context, size_t step) {
Zone *zone = (Zone *)context;
zone->do_volunteer_for_work(true, true);
}
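// Publish a work function under _worker_lock, fan it out to roughly half the
// CPUs via dispatch_apply_f, then wait until every helper has drained.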
void Zone::perform_work_with_helper_threads(boolean_t (*work)(void *arg, boolean_t is_dedicated, boolean_t work_to_completion), void *arg) {
pthread_mutex_lock(&_worker_lock);
assert(_worker_count == 0);
assert(_worker_func == NULL);
assert(_worker_arg == NULL);
_worker_arg = arg;
_worker_func = work;
_has_work = true;
pthread_mutex_unlock(&_worker_lock);
dispatch_queue_t q = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH, DISPATCH_QUEUE_OVERCOMMIT);
dispatch_apply_f((auto_ncpus()+1)/2, q, this, worker_thread_loop);
pthread_mutex_lock(&_worker_lock);
while (_worker_count != 0) {
pthread_cond_wait(&_worker_cond, &_worker_lock);
}
_has_work = false;
_worker_arg = NULL;
_worker_func = NULL;
pthread_mutex_unlock(&_worker_lock);
}
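// Helper-thread loop: repeatedly invoke the published work function, waking
// sleeping co-workers when more work appears and parking when none remains,
// until the work is complete or this thread stops volunteering.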
boolean_t Zone::do_volunteer_for_work(boolean_t is_dedicated, boolean_t work_to_completion) {
boolean_t more_work = false;
pthread_mutex_lock(&_worker_lock);
if (_has_work && (_worker_count < (size_t)auto_ncpus())) {
_worker_count++;
worker_print("starting (dedicated = %s, work_to_completion = %s)\n", is_dedicated ? "true" : "false", work_to_completion ? "true" : "false");
do {
pthread_mutex_unlock(&_worker_lock);
more_work = _worker_func(_worker_arg, is_dedicated, work_to_completion);
pthread_mutex_lock(&_worker_lock);
if (more_work) {
if (_sleeping_workers > 0) {
pthread_cond_broadcast(&_worker_cond);
}
} else {
if (work_to_completion) {
if ((_sleeping_workers + 1) < _worker_count) {
_sleeping_workers++;
pthread_cond_wait(&_worker_cond, &_worker_lock);
_sleeping_workers--;
more_work = _has_work;
}
}
}
} while (more_work && work_to_completion);
worker_print("exiting (dedicated = %s, work_to_completion = %s)\n", is_dedicated ? "true" : "false", work_to_completion ? "true" : "false");
if (work_to_completion && _has_work) {
_has_work = false;
}
_worker_count--;
if (_worker_count == _sleeping_workers) {
pthread_cond_broadcast(&_worker_cond);
}
}
pthread_mutex_unlock(&_worker_lock);
return more_work;
}
void Zone::register_resource_tracker(const char *description, boolean_t (^should_collect)(void))
{
Mutex lock(&_resource_tracker_lock);
ResourceTracker *tracker = ResourceTracker::create_resource_tracker(description, should_collect, _resource_tracker_list);
_resource_tracker_list = tracker;
}
void Zone::unregister_resource_tracker(const char *description)
{
Mutex lock(&_resource_tracker_lock);
ResourceTracker *tracker = _resource_tracker_list;
while (tracker && strcmp(tracker->description(), description))
tracker = tracker->_next;
if (tracker) {
if (tracker == _resource_tracker_list)
_resource_tracker_list = tracker->_next;
tracker->unlink();
delete tracker;
}
}
boolean_t Zone::resource_tracker_wants_collection() {
bool collect = false;
Mutex lock(&_resource_tracker_lock);
if (_resource_tracker_list) {
ResourceTracker *tracker = _resource_tracker_list;
while (tracker && !tracker->probe())
tracker = tracker->_next;
if (tracker) {
if (control.log & AUTO_LOG_COLLECTIONS) {
malloc_printf("triggering collection due to external resource tracker: %s\n", tracker->description());
}
collect = true;
}
}
return collect;
}
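// Rate-limited heuristic (evaluated at most once per 10ms via compare-and-
// swap on the last evaluation time) deciding whether to trigger a collection:
// fires when bytes allocated since the last collection exceed the threshold
// and the zone has been idle long enough relative to the target duty cycle,
// or when a registered resource tracker requests it.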
boolean_t Zone::should_collect() {
boolean_t collect = false;
volatile int64_t *last_should_collect_time = _stats.last_should_collect_time();
int64_t start = *last_should_collect_time;
WallClockTimeDataSource wallTime;
int64_t current_time = wallTime.current_time();
if ((wallTime.microseconds_duration(start, current_time) > 10 * 1000 ) &&
OSAtomicCompareAndSwap64(start, current_time, last_should_collect_time)) {
if (_allocation_counter > control.collection_threshold) {
WallClockTimer &idle_timer = _stats.idle_timer();
uint64_t elapsed = idle_timer.elapsed_microseconds();
if (elapsed > 10 * USEC_PER_SEC) {
collect = true;
} else {
double target_duty_cycle = Environment::default_duty_cycle;
uint64_t target_idle_time = ((double)_average_collection_time / target_duty_cycle) * (1.0 - target_duty_cycle);
collect = elapsed > target_idle_time;
}
}
if (!collect) {
collect = resource_tracker_wants_collection();
}
}
return collect;
}
struct print_all_blocks_visitor {
Region *_last_region;
Subzone *_last_subzone;
bool _is_large;
print_all_blocks_visitor() : _last_region(NULL), _last_subzone(NULL), _is_large(false) {}
inline bool visit(Zone *zone, Subzone *subzone, usword_t q) {
if (_last_region != subzone->region()) {
_last_region = subzone->region();
malloc_printf("Region [%p..%p]\n", _last_region->address(), _last_region->end());
}
void *block = subzone->quantum_address(q);
if (subzone->is_start(q)) {
zone->print_block(SubzoneBlockRef(subzone, q), "");
} else {
FreeListNode *node = (FreeListNode *)block;
malloc_printf(" %p(%6d) ### free\n", block, node->size());
}
return true;
}
inline bool visit(Zone *zone, Large *large) {
if (!_is_large) {
malloc_printf("Large Blocks\n");
_is_large = true;
}
zone->print_block(LargeBlockRef(large), "");
return true;
}
};
void Zone::print_all_blocks() {
SpinLock lock(&_region_lock);
print_all_blocks_visitor visitor;
visitAllBlocks(this, visitor);
}
template <class BlockRef> void Zone::print_block(BlockRef block, const char *tag) {
char *name = NULL;
if (block.is_object()) {
if (control.name_for_address) {
name = control.name_for_address((auto_zone_t *)this, (vm_address_t)block.address(), 0);
}
}
char desc[64];
block.get_description(desc, sizeof(desc));
malloc_printf("%s%p(%6d) %s %s %s %s rc(%d) %s %s\n",
tag, block.address(), (unsigned)block.size(),
block.is_scanned() ? "scn" : " ",
block.is_object() ? "obj" : "mem",
block.is_new() ? "new" : " ",
block.is_marked() ? "mark" : " ",
block.refcount(),
desc,
name ? name : "");
if (name) free(name);
}
};