#include "Admin.h"
#include "Bitmap.h"
#include "Configuration.h"
#include "Definitions.h"
#include "Zone.h"
#include "Locks.h"
#include "BlockRef.h"
#include <stdio.h>
#include <sys/mman.h>
namespace Auto {
// Prepare this admin to manage allocations for the given zone, using the
// supplied quantum size (log2) and block layout.
// NOTE(review): _purgeable_subzones and _cache are not touched here;
// presumably the enclosing allocation zero-fills them — confirm.
void Admin::initialize(Zone *zone, const usword_t quantum_log2, const usword_t layout) {
    _zone = zone;
    _quantum_log2 = quantum_log2;
    _layout = layout;
    _active_subzone = NULL;
    _freelist_search_cap = 0;
    _admin_lock = 0;
}
// Returns the number of quanta still unallocated in the active subzone.
// Fix: _active_subzone is nullable (it is set to NULL throughout this file,
// e.g. when a subzone is exhausted), so guard against dereferencing NULL and
// report zero unused quanta in that case instead of crashing.
usword_t Admin::unused_count() {
    Subzone *subzone = _active_subzone;
    if (subzone == NULL) return 0;   // no active subzone => nothing unused
    return subzone->allocation_limit() - subzone->allocation_count();
}
// Total number of bytes sitting on all of this admin's free lists.
// Takes the admin lock for the duration of the scan.
usword_t Admin::free_space() {
    SpinLock lock(&_admin_lock);
    usword_t total = 0;
    for (usword_t slot = 0; slot < AllocationCache::cache_size; ++slot) {
        FreeListNode *node = _cache[slot].head();
        while (node != NULL) {
            total += node->size();
            node = node->next();
        }
    }
    return total;
}
const usword_t min_purgeable_size = Auto::page_size;
// Visits every free span big enough to be worth returning to the OS:
// medium free-list nodes (largest sizes first), large nodes on the
// big-chunks list (slot 0), and the unused tails of purgeable subzones.
// Spans already marked purged are skipped.
// NOTE(review): callers here take _admin_lock first — presumably required.
template <typename PurgeVisitor> void Admin::visit_purgeable_nodes(PurgeVisitor &visitor) {
const usword_t quantum_size = (1 << _quantum_log2);
if (is_medium()) {
// Walk the sized slots from the largest block size downward, stopping
// once blocks are no bigger than a page (too small to purge).
for (usword_t m = maximum_quanta, block_size = quantum_size * maximum_quanta; m > 0 && block_size > min_purgeable_size; --m, block_size -= quantum_size) {
for (FreeListNode *node = _cache[m].head(); node != NULL; node = node->next()) {
if (!node->is_purged()) {
visitor(node);
}
}
}
}
// Slot 0 holds arbitrarily large nodes; filter by size individually.
for (FreeListNode *node = _cache[0].head(); node != NULL; node = node->next()) {
if (node->size() > min_purgeable_size && !node->is_purged()) {
visitor(node);
}
}
// Finally, the unused tails of subzones on the purgeable list.
for (Subzone *subzone = _purgeable_subzones; subzone != NULL; subzone = subzone->nextSubzone()) {
if (!subzone->is_purged()) {
visitor(subzone);
}
}
}
// Thread-safe wrapper: totals purgeable bytes while holding the admin lock.
usword_t Admin::purgeable_free_space() {
    SpinLock lock(&_admin_lock);
    usword_t result = purgeable_free_space_no_lock();
    return result;
}
struct PurgeableFreeSpaceVisitor {
usword_t purgeable_bytes;
PurgeableFreeSpaceVisitor() : purgeable_bytes(0) {}
void operator() (FreeListNode *node) {
Range r(node->purgeable_range());
usword_t r_size = r.size();
if (r_size >= min_purgeable_size) {
purgeable_bytes += r_size;
}
}
void operator() (Subzone *subzone) {
Range r(subzone->purgeable_range());
usword_t r_size = r.size();
if (r_size >= min_purgeable_size) {
purgeable_bytes += r_size;
}
}
};
// Totals purgeable bytes; the caller must already hold _admin_lock.
usword_t Admin::purgeable_free_space_no_lock() {
    PurgeableFreeSpaceVisitor counter;
    visit_purgeable_nodes(counter);
    return counter.purgeable_bytes;
}
// Thread-safe wrapper: purges free space while holding the admin lock.
usword_t Admin::purge_free_space() {
    SpinLock lock(&_admin_lock);
    usword_t purged = purge_free_space_no_lock();
    return purged;
}
struct PurgingVisitor {
usword_t bytes_purged;
usword_t subzone_bytes_purged;
PurgingVisitor() : bytes_purged(0), subzone_bytes_purged(0) {}
void operator() (FreeListNode *node) {
Range r(node->purgeable_range());
usword_t r_size = r.size();
if (r_size >= min_purgeable_size) {
madvise(r.address(), r_size, MADV_FREE_REUSABLE);
node->set_purged(true);
bytes_purged += r_size;
}
}
void operator() (Subzone *subzone) {
Range r(subzone->purgeable_range());
usword_t r_size = r.size();
if (r_size >= min_purgeable_size) {
madvise(r.address(), r_size, MADV_FREE_REUSABLE);
subzone->set_purged(true);
bytes_purged += r_size;
subzone_bytes_purged += r_size;
}
}
};
// Purges all eligible free spans; caller must already hold _admin_lock.
// Returns the number of bytes handed back to the VM system.
usword_t Admin::purge_free_space_no_lock() {
    PurgingVisitor purger;
    visit_purgeable_nodes(purger);
    return purger.bytes_purged;
}
// If this node's pages were previously handed back with MADV_FREE_REUSABLE,
// tell the VM system we are about to reuse them and clear the purged flag.
static void reuse_node(FreeListNode *node) {
    if (!node->is_purged()) return;
    Range range(node->purgeable_range());
    madvise(range.address(), range.size(), MADV_FREE_REUSE);
    node->set_purged(false);
}
// Total bytes held on the large-block free list (_cache[0]) only; unlike
// free_space(), the sized slots are not included.
usword_t Admin::empty_space() {
    SpinLock lock(&_admin_lock);
    usword_t total = 0;
    for (FreeListNode *node = _cache[0].head(); node != NULL; node = node->next()) {
        total += node->size();
    }
    return total;
}
#if DEBUG
// Validates a free-list node's invariants: it lies within the zone's
// coverage, starts on a quantum boundary below the subzone's limit, is
// marked free, has in-range (or NULL) list links, and carries a consistent
// redundant size. On violation, formats a diagnostic and calls auto_fatal.
bool Admin::test_node_integrity(FreeListNode *node) {
bool node_is_valid = false;
const Range &coverage = _zone->coverage();
do {
// The node pointer itself must fall inside the zone.
if (!coverage.in_range(node)) break;
// It must begin exactly on a quantum boundary within its subzone.
Subzone *subzone = Subzone::subzone((void *)node);
usword_t q = subzone->quantum_index(node->address());
if (q >= subzone->allocation_limit()) break;
if (subzone->quantum_address(q) != node->address()) break;
// Its quantum must be marked free.
if (!subzone->is_free(q)) break;
// Both list links must point into the zone (or be NULL).
FreeListNode *next = node->next();
if (next && !coverage.in_range(next)) break;
FreeListNode *prev = node->prev();
if (prev && !coverage.in_range(prev)) break;
// The size recorded at both ends of the node must agree.
if (node->size() != node->size_again()) break;
node_is_valid = true;
} while (0);
if (!node_is_valid) {
// NOTE(review): static buffer is not thread-safe; presumably acceptable
// because auto_fatal never returns — confirm.
static char buffer[256];
if (coverage.in_range(node)) {
snprintf(buffer, sizeof(buffer), "test_node_integrity: FreeListNode %p { _prev = %p, _next = %p, _size = %lu } failed integrity check.\n",
node, node->prev(), node->next(), node->size());
} else {
// Node pointer is out of range: don't dereference it for details.
snprintf(buffer, sizeof(buffer), "test_node_integrity: FreeListNode %p failed integrity check.\n", node);
}
auto_fatal("%s", buffer);
}
return node_is_valid;
}
#else
// Release builds skip free-list node integrity checking entirely.
bool Admin::test_node_integrity(FreeListNode *node) { return true; }
#endif
bool Admin::test_freelist_integrity() {
SpinLock lock(&_admin_lock);
for (usword_t m = 0; m < AllocationCache::cache_size; m++) {
for (FreeListNode *node = _cache[m].head(), *prev_node = NULL; node; node = node->next()) {
Subzone *subzone = Subzone::subzone((void *)node);
usword_t q = subzone->quantum_index(node->address());
if (q >= subzone->allocation_limit()) return false;
if (subzone->quantum_address(q) != node->address()) return false;
if (subzone->is_used(q)) return false;
if (node->prev() != prev_node) return false;
if (node->size() != node->size_again()) return false;
prev_node = node;
}
}
return true;
}
// Pops the head node of free-list slot index, validating it first and
// (for medium admins) reclaiming purged pages before reuse.
// Returns NULL when the slot is empty.
inline FreeListNode *Admin::pop_node(usword_t index) {
    FreeListNode *node = _cache[index].pop();
    if (node == NULL || !test_node_integrity(node)) return NULL;
    if (is_medium()) reuse_node(node);
    return node;
}
// Pushes a free span onto slot index and raises the free-list search cap
// so find_allocation_no_lock() knows to look this high.
inline void Admin::push_node(usword_t index, void *address, usword_t size) {
    _cache[index].push(address, size);
    if (_freelist_search_cap == 0 || _freelist_search_cap < index)
        _freelist_search_cap = index;
}
// Inserts a free span into slot index (address-ordered insert rather than
// a push) and raises the free-list search cap accordingly.
inline void Admin::insert_node(usword_t index, void *address, usword_t size) {
    _cache[index].insert(address, size);
    if (_freelist_search_cap == 0 || _freelist_search_cap < index)
        _freelist_search_cap = index;
}
// Appends an existing node to the free list matching its size and raises
// the free-list search cap accordingly.
void Admin::append_node(FreeListNode *node) {
    const usword_t slot = cache_slot(node->size());
    _cache[slot].append(node);
    if (_freelist_search_cap == 0 || _freelist_search_cap < slot)
        _freelist_search_cap = slot;
}
// Marks an n-quantum block at address as allocated with the given layout,
// retain state, and locality.
void Admin::mark_allocated(void *address, const usword_t n, const usword_t layout, const bool refcount_is_one, const bool is_local) {
    // Clear the first word so stale contents can't masquerade as a pointer.
    *(void **)address = NULL;
    Subzone *subzone = Subzone::subzone(address);
    subzone->allocate(subzone->quantum_index(address), n, layout, refcount_is_one, is_local);
}
// Marks n quanta starting at q as cached (reserved but not yet handed out).
void Admin::mark_cached(Subzone *subzone, usword_t q, const usword_t n)
{
    subzone->cache(q, n);
}
void Admin::mark_cached_range(void *address, usword_t n) {
Subzone *subzone = Subzone::subzone(address);
const usword_t maximum_block_size = maximum_quanta << _quantum_log2;
while (n >= maximum_quanta) {
subzone->cache(subzone->quantum_index(address), maximum_quanta);
address = displace(address, maximum_block_size);
n -= maximum_quanta;
}
if (n) subzone->cache(subzone->quantum_index(address), n);
}
void Admin::activate_purged_subzone() {
ASSERTION(_active_subzone == NULL);
Subzone *subzone = _purgeable_subzones, *prev_subzone = NULL;
while (subzone != NULL) {
usword_t top = subzone->allocation_count();
usword_t unused = subzone->allocation_limit() - top;
if (unused > AllocationCache::cache_size) {
if (prev_subzone)
prev_subzone->setNextSubzone(subzone->nextSubzone());
else
_purgeable_subzones = subzone->nextSubzone();
subzone->setNextSubzone(NULL);
subzone->set_purgeable(false);
_active_subzone = subzone;
if (subzone->is_purged()) {
Range r(subzone->purgeable_range());
madvise(r.address(), r.size(), MADV_FREE_REUSE);
subzone->set_purged(false);
}
break;
}
prev_subzone = subzone;
subzone = subzone->nextSubzone();
}
}
// Finds a block of n quanta, consulting the free lists first and then the
// active subzone. Returns NULL when nothing can satisfy the request.
// NOTE(review): callers in this file hold _admin_lock — presumably required.
inline void *Admin::find_allocation_no_lock(usword_t n) {
// Fast path: an exact-size node on the free list.
FreeListNode *node = pop_node(n);
if (node) {
return node->address();
}
if (Environment::guard_pages) {
// Guard-page mode: carve from the active subzone so each block is
// immediately followed by a protected page.
if (_active_subzone == NULL) return NULL;
const usword_t quantum_size = (1 << _quantum_log2);
Subzone *subzone = _active_subzone;
usword_t first_quantum = subzone->allocation_count(), last_quantum = subzone->allocation_limit();
usword_t available_size = (last_quantum - first_quantum) * quantum_size;
void *slop_address = subzone->quantum_address(first_quantum);
void *guard_address = align_up(slop_address);
usword_t block_size = n << _quantum_log2;
usword_t slop_size = (uintptr_t)guard_address - (uintptr_t)slop_address;
// Push the guard page out until the block fits below it.
while (block_size > slop_size) {
guard_address = displace(guard_address, page_size);
slop_size += page_size;
}
void *end_address = displace(guard_address, page_size);
usword_t total_size = (uintptr_t)end_address - (uintptr_t)slop_address;
if (available_size < total_size) {
// Subzone exhausted: retire it.
set_active_subzone(NULL);
return NULL;
}
subzone->raise_allocation_count(total_size >> _quantum_log2);
// Mark the leading slop quanta (before the block) as cached filler.
usword_t slop_count = ((slop_size - block_size) >> _quantum_log2);
if (slop_count) mark_cached_range(slop_address, slop_count);
guard_page(guard_address);
// The guard page's quanta are also cached so they're never allocated.
mark_cached_range(guard_address, (page_size >> _quantum_log2));
usword_t remainder_size = available_size - total_size;
if (remainder_size < (2 * page_size)) {
set_active_subzone(NULL);
}
// The block sits immediately below the guard page.
// NOTE(review): -block_size on an unsigned value relies on displace()
// handling the wrapped offset as a negative step — confirm.
return displace(guard_address, -block_size);
}
void *address = NULL;
// Search progressively larger sized slots, up to the high-water slot.
for (usword_t i = n + 1; node == NULL && i <= _freelist_search_cap; i++) {
node = pop_node(i);
}
if (!node) {
// Fall back to the big-chunks list (slot 0).
node = pop_node(0);
// All sized slots >= n are now known empty; lower the search cap.
if (_freelist_search_cap >= n)
_freelist_search_cap = n-1; }
if (node) {
// Split the node: keep the first n quanta, return the tail to a list.
address = node->address();
Subzone *subzone = Subzone::subzone(address);
usword_t allocation_size = subzone->quantum_size(n);
ASSERTION(node->size() >= allocation_size);
usword_t remainder_size = node->size() - allocation_size;
if (remainder_size) {
void *remainder_address = displace(address, allocation_size);
usword_t m = cache_slot(remainder_size);
push_node(m, remainder_address, remainder_size);
}
} else if (_active_subzone) {
// Bump-allocate from the active subzone's high-water mark.
usword_t top = _active_subzone->allocation_count();
usword_t unused = _active_subzone->allocation_limit() - top;
ASSERTION(unused >= n);
address = _active_subzone->quantum_address(top);
*(void **)address = NULL;
_active_subzone->raise_allocation_count(n);
unused -= n;
if (unused == 0) {
set_active_subzone(NULL);
} else if (unused < AllocationCache::cache_size) {
// Tail too small to keep the subzone active: push it onto a free
// list and retire the subzone.
push_node(unused, _active_subzone->quantum_address(top+n), _active_subzone->quantum_size(unused));
_active_subzone->raise_allocation_count(unused);
set_active_subzone(NULL);
}
// Replace a retired active subzone with a purged one, if available.
if (_active_subzone == NULL) {
activate_purged_subzone();
}
} else {
return NULL;
}
return address;
}
// Pops nodes from free-list slot cache_index and carves as many
// requested_size-byte blocks from each as possible, storing their
// addresses into results. Returns the number of blocks produced
// (<= num_requested). Unused node tails go back onto the free lists.
// NOTE(review): callers hold _admin_lock — presumably required.
unsigned Admin::batch_allocate_from_freelist_slot_no_lock(usword_t cache_index, usword_t requested_size, const bool clear, void **results, unsigned num_requested) {
unsigned count = 0;
FreeListNode *node;
assert(num_requested > 0);
do {
node = pop_node(cache_index);
if (node) {
void *addr = node->address();
usword_t node_size = node->size();
// How many whole blocks fit in this node, capped by the request.
usword_t alloc_count = node_size / requested_size;
if (alloc_count > num_requested) alloc_count = num_requested;
usword_t alloc_size = alloc_count * requested_size;
// The carved blocks are contiguous, so one bzero covers them all.
if (clear)
bzero(addr, alloc_size);
for (usword_t i=0; i<alloc_count; i++) {
results[count++] = addr;
addr = displace(addr, requested_size);
}
// Return any leftover tail of the node to the appropriate list.
if (node_size > alloc_size) {
usword_t remainder_size = node_size - alloc_size;
usword_t m = cache_slot(remainder_size);
push_node(m, addr, remainder_size);
}
num_requested -= alloc_count;
}
} while (node && num_requested);
return count;
}
// Bump-allocates up to num_requested blocks of size bytes each from the
// unused tail of subzone, storing addresses into results. Returns the
// number allocated. If the remaining tail becomes too small, it is pushed
// onto a free list and the subzone is retired (with a purged replacement
// activated). NOTE(review): callers hold _admin_lock — presumably required.
unsigned Admin::batch_allocate_from_subzone_no_lock(Subzone *subzone, size_t size, const bool clear, void **results, unsigned num_requested) {
usword_t top = subzone->allocation_count();
usword_t unused = subzone->allocation_limit() - top;
// Despite the name, quantum_size is the block length in quanta.
usword_t quantum_size = quantum_count(size);
usword_t available = unused / quantum_size;
unsigned count = 0;
if (available > 0) {
if (available > num_requested)
available = num_requested;
void *address = subzone->quantum_address(top);
do {
results[count++] = address;
address = displace(address, size);
} while (count < available);
// Blocks are contiguous, so a single bzero clears them all.
if (clear)
bzero(results[0], count * size);
subzone->raise_allocation_count(quantum_size*count);
unused -= quantum_size * count;
// Tail too small to stay active: push it onto a free list and treat
// the subzone as fully consumed.
if ((unused > 0) && (unused < AllocationCache::cache_size)) {
push_node(unused, address, subzone->quantum_size(unused));
subzone->raise_allocation_count(unused);
unused = 0;
}
// Retire a fully-consumed active subzone, activating a purged one.
if (unused == 0 && subzone == _active_subzone) {
set_active_subzone(NULL);
activate_purged_subzone();
}
}
return count;
}
// Batch allocation entry point: produces up to num_requested blocks of
// size bytes (size is rounded up to whole quanta and returned through the
// reference), drawing from sized free-list slots, then the large-block
// list, then subzone tails. Every block is marked allocated and enlivened
// when the collector requires it. Returns the number of blocks produced.
// Guard-page mode is unsupported (returns 0).
unsigned Admin::batch_allocate(Thread &thread, size_t &size, const usword_t layout, const bool refcount_is_one, const bool clear, void **results, unsigned num_requested) {
if (Environment::guard_pages) return 0;
usword_t n = quantum_count(size);
size = (n << _quantum_log2);
SpinLock lock(&_admin_lock);
unsigned count = 0;
// Pass 1: exact and larger sized slots.
for (usword_t cache_index = n; cache_index < AllocationCache::cache_size && count < num_requested; ++cache_index) {
count += batch_allocate_from_freelist_slot_no_lock(cache_index, size, clear, &results[count], num_requested - count);
}
// Pass 2: the large-block list (slot 0).
if (count < num_requested) {
count += batch_allocate_from_freelist_slot_no_lock(0, size, clear, &results[count], num_requested - count);
}
// Pass 3: bump-allocate from active (and reactivated purged) subzones.
if (count < num_requested) {
if (!_active_subzone)
activate_purged_subzone();
while (count < num_requested && _active_subzone) {
count += batch_allocate_from_subzone_no_lock(_active_subzone, size, clear, &results[count], num_requested - count);
if (!_active_subzone)
activate_purged_subzone();
}
}
// Mark everything allocated; the barrier keeps a concurrent collector
// from missing blocks created during its enlivening phase.
UnconditionalBarrier barrier(thread.needs_enlivening());
for (usword_t i=0; i<count; i++) {
mark_allocated(results[i], n, layout, refcount_is_one, false);
if (barrier) SubzoneBlockRef(results[i]).enliven();
}
// The results buffer may itself be in the GC heap; record the stores.
_zone->set_write_barrier_range(results, count * sizeof(void *));
_zone->add_blocks_and_bytes(count, count * size);
return count;
}
// Allocates a small block from the calling thread's per-layout free-list
// cache, refilling the cache from this admin (under lock) when empty.
// size is rounded up to a power-of-two quantum count and returned through
// the reference; is_local reports whether the block is thread-local.
void *Admin::thread_cache_allocate(Thread &thread, usword_t &size, const usword_t layout, const bool refcount_is_one, bool &is_local) {
usword_t n = partition2(size, allocate_quantum_small_log2);
FreeList &list = thread.allocation_cache(layout)[n];
if (!list.head()) {
// Cache empty: take the admin lock and batch-refill several nodes.
SpinLock lock(&_admin_lock);
usword_t node_size = (n << allocate_quantum_small_log2);
// Barrier spans the refill so a collector can't miss the new blocks.
ConditionBarrier barrier(thread.needs_enlivening());
int alloc_count;
for (alloc_count = 0; alloc_count < cached_list_node_initial_count; ++alloc_count) {
void *node = find_allocation_no_lock(n);
if (!node) break; Subzone *subzone = Subzone::subzone(node);
usword_t q = subzone->quantum_index_unchecked(node);
// Cached (not allocated) so the collector skips them until handed out.
mark_cached(subzone, q, n);
list.push(node, node_size);
if (barrier) SubzoneBlockRef(subzone, q).enliven();
}
zone()->add_blocks_and_bytes(alloc_count, alloc_count * (1L << allocate_quantum_small_log2) * n);
}
// NOTE(review): when the list is still empty this relies on pop() /
// address() yielding NULL rather than crashing — confirm FreeList semantics.
void *address = list.pop()->address();
if (!address) return NULL;
size = n << allocate_quantum_small_log2; if (refcount_is_one || !Environment::thread_collections) {
// Retained blocks (or builds without thread collection) go global.
mark_allocated(address, n, layout, refcount_is_one, false);
is_local = false;
}
else {
// Otherwise the block starts out thread-local.
mark_allocated(address, n, layout, false, true);
thread.add_local_allocation(address);
is_local = true;
}
return address;
}
// Allocates a single block of size bytes (rounded up to whole quanta,
// returned through the reference) under the admin lock. The block is
// marked globally allocated, or thread-local when thread collection
// applies; is_local reports which. Returns NULL when memory is exhausted.
void *Admin::find_allocation(Thread &thread, usword_t &size, const usword_t layout, const bool refcount_is_one, bool &is_local) {
SpinLock lock(&_admin_lock);
usword_t n = quantum_count(size);
ASSERTION(n < AllocationCache::cache_size);
void *address = find_allocation_no_lock(n);
if (address) {
size = n << _quantum_log2;
// Barrier spans marking so a concurrent collector can't miss the block.
ConditionBarrier barrier(thread.needs_enlivening());
if (refcount_is_one || !Environment::thread_collections) {
// Retained blocks (or builds without thread collection) go global.
mark_allocated(address, n, layout, refcount_is_one, false);
is_local = false;
} else {
// Otherwise the block starts out thread-local.
mark_allocated(address, n, layout, false, true);
thread.add_local_allocation(address);
is_local = true;
}
if (barrier) SubzoneBlockRef(address).enliven();
zone()->add_blocks_and_bytes(1, (1L << _quantum_log2) * n);
}
return address;
}
// Returns the free list (among slot 0 and slots n.._freelist_search_cap)
// whose head node has the lowest address; used to compact blocks downward.
FreeList *Admin::lowest_available_list(usword_t n) {
    // Start with the large-block list as the initial candidate.
    FreeList *best = &_cache[0];
    FreeListNode *best_node = best->head();
    for (usword_t slot = n; slot <= _freelist_search_cap; ++slot) {
        FreeListNode *head = _cache[slot].head();
        if (head == NULL) continue;
        if (best_node == NULL || head->address() < best_node->address()) {
            best = &_cache[slot];
            best_node = head;
        }
    }
    return best;
}
// Compaction helper: tries to allocate a destination for the block at
// (subzone, q) at a strictly lower address. Returns the new destination,
// or block_address unchanged when no lower free block exists.
// NOTE(review): callers hold _admin_lock — presumably required.
void *Admin::allocate_lower_block_no_lock(Subzone *subzone, usword_t q, void *block_address) {
const usword_t size = subzone->size(q);
const unsigned layout = subzone->layout(q);
usword_t n = quantum_count(size);
ASSERTION(n < AllocationCache::cache_size);
// Pick the free list whose head node has the lowest address.
FreeList *list = lowest_available_list(n);
FreeListNode *node = list->head();
// Only relocate if the destination is strictly lower than the source.
if (node == NULL || node->address() > block_address) {
return block_address;
}
// Reclaim purged pages before touching the node's memory.
if (is_medium()) reuse_node(node);
list->pop();
void *address = node->address();
ASSERTION(node->size() >= size);
// Return the unused tail; insert (not push) keeps the list address-sorted.
usword_t remainder_size = node->size() - size;
if (remainder_size) {
void *remainder_address = displace(address, size);
usword_t m = cache_slot(remainder_size);
insert_node(m, remainder_address, remainder_size);
}
// Mark the destination allocated with the same layout as the source.
Subzone *copySubzone = Subzone::subzone(address);
usword_t copyQ = copySubzone->quantum_index_unchecked(address);
copySubzone->allocate(copyQ, n, layout, false, false);
return address;
}
// Relocation helper for heap scrambling: allocates a fresh block of the
// same size and layout anywhere in the heap. Returns the new address, or
// block_address unchanged when no allocation can be found.
void *Admin::allocate_different_block_no_lock(Subzone *subzone, usword_t q, void *block_address) {
    const usword_t size = subzone->size(q);
    const unsigned layout = subzone->layout(q);
    usword_t n = quantum_count(size);
    ASSERTION(n < AllocationCache::cache_size);
    void *address = find_allocation_no_lock(n);
    if (address == NULL) return block_address;
    // Mark the destination allocated with the same layout as the source.
    Subzone *destSubzone = Subzone::subzone(address);
    usword_t destQ = destSubzone->quantum_index_unchecked(address);
    destSubzone->allocate(destQ, n, layout, false, false);
    return address;
}
// Chooses the relocation policy for compaction: heap-scrambling mode moves
// blocks to arbitrary different addresses; normal mode compacts toward
// lower addresses.
void *Admin::allocate_destination_block_no_lock(Subzone *subzone, usword_t q, void *block_address) {
// NOTE(review): the policy is latched into a function-local static on the
// first call; assumes Environment::scramble_heap is set by then — confirm.
static void *(Admin::*block_allocator)(Subzone *subzone, usword_t q, void *block_address) = (Environment::scramble_heap ? &Admin::allocate_different_block_no_lock : &Admin::allocate_lower_block_no_lock);
return (this->*block_allocator)(subzone, q, block_address);
}
// Thread-safe deallocation: acquires the admin lock, then delegates to the
// unlocked worker.
void Admin::deallocate(Subzone *subzone, usword_t q, void *address)
{
    SpinLock lock(&_admin_lock);
    deallocate_no_lock(subzone, q, address);
}
// Frees the block at (subzone, q), coalescing with adjacent free spans.
// A block freed at the subzone's high-water mark lowers the mark and makes
// the subzone eligible for purging; any other block goes onto a free list.
// NOTE(review): callers hold _admin_lock — presumably required.
void Admin::deallocate_no_lock(Subzone *subzone, usword_t q, void *address) {
AUTO_PROBE(auto_probe_admin_deallocate(address));
usword_t n = subzone->length(q);
ASSERTION(!subzone->is_free(q));
// Release-build double-free guard: report and bail out.
if (subzone->is_free(q)) {
malloc_printf("Admin::deallocate: attempting to free already freed block %p\n", address);
return;
}
void *free_address = address;
usword_t free_size = subzone->quantum_size(n);
usword_t next_q = q + n;
usword_t highwater = subzone->allocation_count();
// No coalescing in guard-page mode: block boundaries must be preserved.
if (!Environment::guard_pages) {
// Absorb a free successor block (below the high-water mark).
if (next_q < highwater && subzone->is_free(next_q)) {
FreeListNode *next_node = (FreeListNode *)displace(free_address, free_size);
if (test_node_integrity(next_node)) {
// Reclaim purged pages before touching the node's memory.
if (is_medium()) reuse_node(next_node);
usword_t next_size = next_node->size();
usword_t m = cache_slot(next_size);
_cache[m].remove(next_node);
free_size += next_size;
}
}
// Absorb a free predecessor block.
if (q && subzone->is_free(q - 1)) {
FreeListNode *this_node = (FreeListNode *)address;
FreeListNode *prev_node = this_node->prior_node();
if (test_node_integrity(prev_node)) {
if (is_medium()) reuse_node(prev_node);
free_address = prev_node->address();
usword_t prev_size = prev_node->size();
free_size += prev_size;
usword_t m = cache_slot(prev_size);
_cache[m].remove(prev_node);
}
}
}
// Scribble freed memory to expose use-after-free in debug configurations.
if (Environment::dirty_all_deleted) {
memset(free_address, 0x55, free_size);
}
if (next_q == highwater) {
// Freed at the top: lower the high-water mark instead of listing it...
subzone->lower_allocation_count(quantum_count(free_size));
// ...and make the subzone purgeable, if it isn't active or already listed.
if (subzone != _active_subzone && !subzone->is_purgeable()) {
subzone->setNextSubzone(_purgeable_subzones);
_purgeable_subzones = subzone;
subzone->set_purgeable(true);
}
} else {
// Interior free: push the (possibly coalesced) span onto a free list.
usword_t m = cache_slot(free_size);
push_node(m, free_address, free_size);
}
subzone->deallocate(q, n);
}
};