// ExecutableAllocatorFixedVMPool.cpp
#include "config.h"
#include "ExecutableAllocator.h"
#if ENABLE(ASSEMBLER) && OS(DARWIN) && CPU(X86_64)
#include <errno.h>
#include "TCSpinLock.h"
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <sys/mman.h>
#include <unistd.h>
#include <wtf/AVLTree.h>
#include <wtf/VMTags.h>
using namespace WTF;
namespace JSC {
#define TWO_GB (2u * 1024u * 1024u * 1024u)
#define SIXTEEN_MB (16u * 1024u * 1024u)
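
// INITIAL_PROTECTION_FLAGS is used by the mmap() call in the constructor below,
// but is not defined anywhere in this listing; the fallback here is an assumed
// definition (read/write/execute, so the pool is immediately usable for JIT code).
#ifndef INITIAL_PROTECTION_FLAGS
#define INITIAL_PROTECTION_FLAGS (PROT_READ | PROT_WRITE | PROT_EXEC)
#endif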
// One node of free VM. Nodes of equal size are chained through nextEntry;
// less/greater/balanceFactor link the node into the size-sorted AVL tree below.
struct FreeListEntry {
    FreeListEntry(void* pointer, size_t size)
        : pointer(pointer)
        , size(size)
        , nextEntry(0)
        , less(0)
        , greater(0)
        , balanceFactor(0)
    {
    }

    void* pointer;
    size_t size;
    FreeListEntry* nextEntry;
    FreeListEntry* less;
    FreeListEntry* greater;
    int balanceFactor;
};
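
// Adapter exposing FreeListEntry's tree fields to WTF::AVLTree, keyed on the
// entry's size so the tree can answer "smallest free region of at least N bytes".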
struct AVLTreeAbstractorForFreeList {
    typedef FreeListEntry* handle;
    typedef int32_t size;
    typedef size_t key;

    handle get_less(handle h) { return h->less; }
    void set_less(handle h, handle lh) { h->less = lh; }
    handle get_greater(handle h) { return h->greater; }
    void set_greater(handle h, handle gh) { h->greater = gh; }
    int get_balance_factor(handle h) { return h->balanceFactor; }
    void set_balance_factor(handle h, int bf) { h->balanceFactor = bf; }

    static handle null() { return 0; }

    int compare_key_key(key va, key vb) { return va - vb; }
    int compare_key_node(key k, handle h) { return compare_key_key(k, h->size); }
    int compare_node_node(handle h1, handle h2) { return compare_key_key(h1->size, h2->size); }
};
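
// qsort comparators used by coalesceFreeSpace(): both sort in descending address
// order, so the lowest-addressed region sits at Vector::last() and can be removed cheaply.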
static int reverseSortFreeListEntriesByPointer(const void* leftPtr, const void* rightPtr)
{
    FreeListEntry* left = *(FreeListEntry**)leftPtr;
    FreeListEntry* right = *(FreeListEntry**)rightPtr;
    return (intptr_t)(right->pointer) - (intptr_t)(left->pointer);
}
static int reverseSortCommonSizedAllocations(const void* leftPtr, const void* rightPtr)
{
    void* left = *(void**)leftPtr;
    void* right = *(void**)rightPtr;
    return (intptr_t)right - (intptr_t)left;
}
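
// A simple allocator carved out of one large, fixed virtual memory reservation.
// The whole pool is mmap'ed up front; free space is tracked in two structures: a
// vector of "common"-sized chunks (the size passed in by systemAlloc below,
// JIT_ALLOCATOR_LARGE_ALLOC_SIZE) for cheap reuse, and an AVL tree of other free
// regions sorted by size for best-fit searches. Physical pages are returned to the
// kernel with madvise() while a region is free, and adjacent free regions are
// periodically coalesced.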
class FixedVMPoolAllocator
{
    typedef AVLTree<AVLTreeAbstractorForFreeList, 40> SizeSortedFreeTree;

    // release() tells the kernel that the contents of these pages may be discarded
    // (the address range itself stays reserved); reuse() marks the pages as needed
    // again before they are handed back out. The loops retry while the kernel
    // reports EAGAIN.
#if HAVE(MADV_FREE_REUSE)
    void release(void* position, size_t size)
    {
        while (madvise(position, size, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { }
    }

    void reuse(void* position, size_t size)
    {
        while (madvise(position, size, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { }
    }
#elif HAVE(MADV_DONTNEED)
    void release(void* position, size_t size)
    {
        while (madvise(position, size, MADV_DONTNEED) == -1 && errno == EAGAIN) { }
    }

    void reuse(void*, size_t) { }
#else
    void release(void*, size_t) { }
    void reuse(void*, size_t) { }
#endif
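
    // Record a region as free: common-sized regions go on the fast vector (the node
    // is no longer needed and is deleted); other sizes are chained onto an existing
    // tree node of the same size, or inserted as a new node.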
    void addToFreeList(FreeListEntry* entry)
    {
        ASSERT(!entry->nextEntry);

        if (entry->size == m_commonSize) {
            m_commonSizedAllocations.append(entry->pointer);
            delete entry;
        } else if (FreeListEntry* entryInFreeList = m_freeList.search(entry->size, m_freeList.EQUAL)) {
            entry->nextEntry = entryInFreeList->nextEntry;
            entryInFreeList->nextEntry = entry;
        } else
            m_freeList.insert(entry);
    }
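
    // Merge adjacent free regions. All free list entries and common-sized chunks are
    // pulled into flat vectors, sorted in descending address order, and then walked
    // from the lowest address upwards, absorbing any region that starts exactly
    // where the current one ends. Merged space goes back through addToFreeList();
    // still-isolated common-sized chunks are kept on the fast vector.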
    void coalesceFreeSpace()
    {
        Vector<FreeListEntry*> freeListEntries;
        SizeSortedFreeTree::Iterator iter;
        iter.start_iter_least(m_freeList);

        // Flatten the size-sorted tree (and the chains hanging off each node) into a vector.
        for (FreeListEntry* entry; (entry = *iter); ++iter) {
            FreeListEntry* next;
            do {
                next = entry->nextEntry;
                entry->nextEntry = 0;
                freeListEntries.append(entry);
            } while ((entry = next));
        }
        m_freeList.purge();

        // Sort both lists in descending address order, so the lowest-addressed region is at last().
        qsort(freeListEntries.begin(), freeListEntries.size(), sizeof(FreeListEntry*), reverseSortFreeListEntriesByPointer);
        qsort(m_commonSizedAllocations.begin(), m_commonSizedAllocations.size(), sizeof(void*), reverseSortCommonSizedAllocations);

        Vector<void*> newCommonSizedAllocations;
        while (freeListEntries.size() || m_commonSizedAllocations.size()) {
            // Select the lowest-addressed remaining free region as the seed of the next coalesced block.
            FreeListEntry* coalescionEntry = 0;
            if (m_commonSizedAllocations.size() && (!freeListEntries.size() || (m_commonSizedAllocations.last() < freeListEntries.last()->pointer))) {
                void* begin = m_commonSizedAllocations.last();
                void* end = (void*)((intptr_t)begin + m_commonSize);
                m_commonSizedAllocations.removeLast();

                if (freeListEntries.size() && (freeListEntries.last()->pointer == end)) {
                    // Contiguous with the next free list entry: grow that entry downwards to absorb this chunk.
                    coalescionEntry = freeListEntries.last();
                    freeListEntries.removeLast();
                    coalescionEntry->pointer = (void*)((intptr_t)coalescionEntry->pointer - m_commonSize);
                    coalescionEntry->size += m_commonSize;
                } else if (m_commonSizedAllocations.size() && (m_commonSizedAllocations.last() == end)) {
                    // Contiguous with the next common-sized chunk: merge the two into a new entry.
                    m_commonSizedAllocations.removeLast();
                    coalescionEntry = new FreeListEntry(begin, 2 * m_commonSize);
                } else {
                    // Isolated common-sized chunk; keep it on the fast vector.
                    newCommonSizedAllocations.append(begin);
                    continue;
                }
            } else {
                ASSERT(freeListEntries.size());
                ASSERT(!m_commonSizedAllocations.size() || (freeListEntries.last()->pointer < m_commonSizedAllocations.last()));
                coalescionEntry = freeListEntries.last();
                freeListEntries.removeLast();
            }
            ASSERT(coalescionEntry);

            // Keep absorbing free regions that begin exactly where the current block ends.
            while (true) {
                void* end = (void*)((intptr_t)coalescionEntry->pointer + coalescionEntry->size);

                if (freeListEntries.size() && (freeListEntries.last()->pointer == end)) {
                    FreeListEntry* coalescee = freeListEntries.last();
                    freeListEntries.removeLast();
                    coalescionEntry->size += coalescee->size;
                    delete coalescee;
                } else if (m_commonSizedAllocations.size() && (m_commonSizedAllocations.last() == end)) {
                    m_commonSizedAllocations.removeLast();
                    coalescionEntry->size += m_commonSize;
                } else
                    break;
            }

            addToFreeList(coalescionEntry);
        }

        ASSERT(m_commonSizedAllocations.size() == 0);
        m_commonSizedAllocations.append(newCommonSizedAllocations);
    }
public:
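    // Reserve the entire pool up front. The mmap() hint address is randomized for
    // ASLR: 25 bits of randomness plus (1 << 24), shifted left by 21 bits, which
    // keeps the hint 2MB-aligned and lands it in roughly 0x200000000000..0x5fffffffffff.
    // The fresh reservation is immediately release()d so untouched pages are not
    // charged to the process, and the free tree is seeded with a single entry
    // covering the whole pool.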
    FixedVMPoolAllocator(size_t commonSize, size_t totalHeapSize)
        : m_commonSize(commonSize)
        , m_countFreedSinceLastCoalesce(0)
        , m_totalHeapSize(totalHeapSize)
    {
        intptr_t randomLocation = arc4random() & ((1 << 25) - 1);
        randomLocation += (1 << 24);
        randomLocation <<= 21;

        m_base = mmap(reinterpret_cast<void*>(randomLocation), m_totalHeapSize, INITIAL_PROTECTION_FLAGS, MAP_PRIVATE | MAP_ANON, VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY, 0);
        // mmap() reports failure by returning MAP_FAILED, not a null pointer.
        if (m_base == MAP_FAILED)
            CRASH();

        release(m_base, m_totalHeapSize);
        m_freeList.insert(new FreeListEntry(m_base, m_totalHeapSize));
    }
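
    // Allocate from the pool. Common-sized requests are served from the cached
    // vector when possible; otherwise the tree is searched for the smallest entry
    // of at least the requested size (coalescing and retrying once before
    // CRASH()ing), the entry is split if oversized, and the pages are marked in
    // use again with reuse() before being returned.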
    void* alloc(size_t size)
    {
        void* result;

        if ((size == m_commonSize) && m_commonSizedAllocations.size()) {
            // Fast path: hand out a cached common-sized chunk.
            result = m_commonSizedAllocations.last();
            m_commonSizedAllocations.removeLast();
        } else {
            // Best fit: the smallest free entry at least as large as the request.
            FreeListEntry* entry = m_freeList.search(size, m_freeList.GREATER_EQUAL);
            if (!entry) {
                // Nothing large enough; coalesce adjacent free regions and retry once.
                coalesceFreeSpace();
                entry = m_freeList.search(size, m_freeList.GREATER_EQUAL);
                if (!entry)
                    CRASH();
            }
            ASSERT(entry->size != m_commonSize);

            // Prefer an entry chained off the tree node, so the node itself can stay in the tree.
            if (FreeListEntry* next = entry->nextEntry) {
                entry->nextEntry = next->nextEntry;
                next->nextEntry = 0;
                entry = next;
            } else
                m_freeList.remove(entry->size);

            ASSERT(entry->size >= size);
            result = entry->pointer;

            if (entry->size == size)
                delete entry;
            else {
                // Split the entry; the unused tail goes back on the free list.
                entry->pointer = (void*)((intptr_t)entry->pointer + size);
                entry->size -= size;
                addToFreeList(entry);
            }
        }

        ASSERT(isWithinVMPool(result, size));
        reuse(result, size);
        return result;
    }
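
    // Return a region to the pool. The pages are release()d back to the kernel
    // immediately, the range is recorded as free, and a full coalesce is run after
    // every 16MB of frees.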
    void free(void* pointer, size_t size)
    {
        ASSERT(isWithinVMPool(pointer, size));

        release(pointer, size);

        if (size == m_commonSize)
            m_commonSizedAllocations.append(pointer);
        else
            addToFreeList(new FreeListEntry(pointer, size));

        m_countFreedSinceLastCoalesce += size;
        if (m_countFreedSinceLastCoalesce >= SIXTEEN_MB) {
            m_countFreedSinceLastCoalesce = 0;
            coalesceFreeSpace();
        }
    }
private:
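    // Debug-only sanity check that a pointer/size pair lies entirely within the reservation.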
#ifndef NDEBUG
    bool isWithinVMPool(void* pointer, size_t size)
    {
        return pointer >= m_base && (reinterpret_cast<char*>(pointer) + size <= reinterpret_cast<char*>(m_base) + m_totalHeapSize);
    }
#endif

    const size_t m_commonSize;
    Vector<void*> m_commonSizedAllocations;
    SizeSortedFreeTree m_freeList;
    size_t m_countFreedSinceLastCoalesce;
    void* m_base;
    size_t m_totalHeapSize;
};
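
// Cache the system page size for the allocator; the 'intializePageSize' spelling
// matches the declaration in ExecutableAllocator.h.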
void ExecutableAllocator::intializePageSize()
{
    ExecutableAllocator::pageSize = getpagesize();
}
static FixedVMPoolAllocator* allocator = 0;
static SpinLock spinlock = SPINLOCK_INITIALIZER;
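
// The single 2GB pool is created lazily on first use; all access to it is
// serialized by the spinlock above.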
ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t size)
{
    SpinLockHolder lock_holder(&spinlock);

    if (!allocator)
        allocator = new FixedVMPoolAllocator(JIT_ALLOCATOR_LARGE_ALLOC_SIZE, TWO_GB);
    ExecutablePool::Allocation alloc = {reinterpret_cast<char*>(allocator->alloc(size)), size};
    return alloc;
}
void ExecutablePool::systemRelease(const ExecutablePool::Allocation& allocation)
{
    SpinLockHolder lock_holder(&spinlock);

    ASSERT(allocator);
    allocator->free(allocation.pages, allocation.size);
}
}
#endif // ENABLE(ASSEMBLER) && OS(DARWIN) && CPU(X86_64)