// ExecutableAllocator.cpp
#include "config.h"
#include "ExecutableAllocator.h"

#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
#include "CodeProfiling.h"
#include <limits>
#include <wtf/HashSet.h>
#include <wtf/MetaAllocator.h>
#include <wtf/PageReservation.h>
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
#include <wtf/PassOwnPtr.h>
#endif
#include <wtf/ThreadingPrimitives.h>
#include <wtf/VMTags.h>
#endif
#if ENABLE(ASSEMBLER)
using namespace WTF;
namespace JSC {
#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
// Demand-paged executable-memory allocator. Address space is reserved in
// large chunks (allocateNewSpace) and individual pages are committed and
// decommitted only when the underlying MetaAllocator asks for them. Every
// live allocator registers itself in a mutex-guarded global set so the
// aggregate statistics below can be computed across all instances.
class DemandExecutableAllocator : public MetaAllocator {
public:
    DemandExecutableAllocator()
        : MetaAllocator(32) // 32-byte allocation granule.
    {
        MutexLocker lock(allocatorsMutex());
        allocators().add(this);
    }

    virtual ~DemandExecutableAllocator()
    {
        {
            MutexLocker lock(allocatorsMutex());
            allocators().remove(this);
        }
        // Release every page reservation this allocator made.
        for (unsigned i = 0; i < reservations.size(); ++i)
            reservations.at(i).deallocate();
    }

    // Sum of bytesAllocated() over every live allocator.
    static size_t bytesAllocatedByAllAllocators()
    {
        size_t total = 0;
        MutexLocker lock(allocatorsMutex());
        for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
            total += (*allocator)->bytesAllocated();
        return total;
    }

    // Sum of bytesCommitted() over every live allocator.
    // NOTE(review): this name is misspelled ("Allocactors") but is kept
    // as-is for existing callers; prefer the alias below in new code.
    static size_t bytesCommittedByAllocactors()
    {
        size_t total = 0;
        MutexLocker lock(allocatorsMutex());
        for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
            total += (*allocator)->bytesCommitted();
        return total;
    }

    // Correctly-spelled alias for bytesCommittedByAllocactors().
    static size_t bytesCommittedByAllAllocators() { return bytesCommittedByAllocactors(); }

#if ENABLE(META_ALLOCATOR_PROFILE)
    // Dumps the MetaAllocator profile of every live allocator.
    static void dumpProfileFromAllAllocators()
    {
        MutexLocker lock(allocatorsMutex());
        for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
            (*allocator)->dumpProfile();
    }
#endif

protected:
    // MetaAllocator callback: reserve (not commit) new address space.
    // numPages is rounded up so the reservation is a whole multiple of
    // JIT_ALLOCATOR_LARGE_ALLOC_SIZE, then expressed back in pages.
    virtual void* allocateNewSpace(size_t& numPages)
    {
        size_t newNumPages = (((numPages * pageSize() + JIT_ALLOCATOR_LARGE_ALLOC_SIZE - 1) / JIT_ALLOCATOR_LARGE_ALLOC_SIZE * JIT_ALLOCATOR_LARGE_ALLOC_SIZE) + pageSize() - 1) / pageSize();
        ASSERT(newNumPages >= numPages);
        numPages = newNumPages;
#ifdef EXECUTABLE_MEMORY_LIMIT
        // Refuse to grow past the configured hard cap; returning 0 lets the
        // MetaAllocator report the failure to its caller.
        if (bytesAllocatedByAllAllocators() >= EXECUTABLE_MEMORY_LIMIT)
            return 0;
#endif
        PageReservation reservation = PageReservation::reserve(numPages * pageSize(), OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
        if (!reservation)
            CRASH();
        reservations.append(reservation);
        return reservation.base();
    }

    // MetaAllocator callbacks: commit/decommit one page on demand.
    virtual void notifyNeedPage(void* page)
    {
        OSAllocator::commit(page, pageSize(), EXECUTABLE_POOL_WRITABLE, true);
    }

    virtual void notifyPageIsFree(void* page)
    {
        OSAllocator::decommit(page, pageSize());
    }

private:
    // Every reservation ever made; deallocated in the destructor.
    Vector<PageReservation, 16> reservations;

    // Global registry of live allocators, guarded by allocatorsMutex().
    static HashSet<DemandExecutableAllocator*>& allocators()
    {
        DEFINE_STATIC_LOCAL(HashSet<DemandExecutableAllocator*>, sAllocators, ());
        return sAllocators;
    }

    static Mutex& allocatorsMutex()
    {
        DEFINE_STATIC_LOCAL(Mutex, mutex, ());
        return mutex;
    }
};
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
// In W^X-exclusive builds there is no shared global allocator to set up:
// each ExecutableAllocator instance creates its own DemandExecutableAllocator
// in its constructor (see below), so this is intentionally a no-op.
void ExecutableAllocator::initializeAllocator()
{
}
#else
// Process-wide allocator shared by all ExecutableAllocator instances.
// Created once by initializeAllocator() and never deleted in this file.
static DemandExecutableAllocator* gAllocator;

namespace {
// File-local accessor for the shared allocator.
static inline DemandExecutableAllocator* allocator()
{
    return gAllocator;
}
}
// One-time process setup: create the shared allocator and register it with
// the code profiler. Must run exactly once (asserted) and before any
// ExecutableAllocator is constructed (its constructor asserts allocator()).
void ExecutableAllocator::initializeAllocator()
{
    ASSERT(!gAllocator);
    gAllocator = new DemandExecutableAllocator();
    // Register only after gAllocator is set, so profiling sees a valid pointer.
    CodeProfiling::notifyAllocator(gAllocator);
}
#endif
// In W^X-exclusive builds each instance owns a private allocator (held in
// m_allocator); otherwise all instances share the global created by
// initializeAllocator().
ExecutableAllocator::ExecutableAllocator(JSGlobalData&)
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
    : m_allocator(adoptPtr(new DemandExecutableAllocator()))
#endif
{
    // allocator() must be non-null by now: the global set up by
    // initializeAllocator(), or — presumably via an accessor declared in the
    // header — the per-instance m_allocator in W^X builds (confirm in header).
    ASSERT(allocator());
}
// Nothing to do explicitly: in W^X-exclusive builds the owning smart pointer
// member releases the per-instance allocator; otherwise the shared global
// allocator outlives this object.
ExecutableAllocator::~ExecutableAllocator()
{
}
// Demand allocation performs no up-front reservation that could fail, so an
// ExecutableAllocator is always considered valid.
bool ExecutableAllocator::isValid() const
{
    return true;
}
// Reports pressure once more than half of the configured executable-memory
// budget is allocated across all allocators; when no limit is configured
// there is never pressure.
bool ExecutableAllocator::underMemoryPressure()
{
#ifdef EXECUTABLE_MEMORY_LIMIT
    return DemandExecutableAllocator::bytesAllocatedByAllAllocators() > EXECUTABLE_MEMORY_LIMIT / 2;
#else
    return false;
#endif
}
// Returns a multiplier (>= 1.0) expressing how close executable memory would
// be to the configured limit if addedMemoryUsage extra bytes were allocated.
// The multiplier grows without bound as usage approaches the limit and is
// +infinity at or beyond it; without a configured limit it is always 1.0.
double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage)
{
    double result;
#ifdef EXECUTABLE_MEMORY_LIMIT
    size_t bytesAllocated = DemandExecutableAllocator::bytesAllocatedByAllAllocators() + addedMemoryUsage;
    if (bytesAllocated >= EXECUTABLE_MEMORY_LIMIT)
        bytesAllocated = EXECUTABLE_MEMORY_LIMIT;
    size_t remaining = EXECUTABLE_MEMORY_LIMIT - bytesAllocated;
    // Previously the at-limit case divided by zero and relied on IEEE
    // semantics to yield +inf; return infinity explicitly instead so the
    // intent is visible (same value callers observed before).
    if (!remaining)
        return std::numeric_limits<double>::infinity();
    result = static_cast<double>(EXECUTABLE_MEMORY_LIMIT) / remaining;
#else
    UNUSED_PARAM(addedMemoryUsage);
    result = 1.0;
#endif
    // Clamp: pressure never reduces below neutral.
    if (result < 1.0)
        result = 1.0;
    return result;
}
// Carves sizeInBytes of executable memory out of the demand allocator and
// returns an owning handle. On exhaustion the result is null — unless the
// caller demanded success (JITCompilationMustSucceed), in which case we
// crash rather than hand back a null handle.
PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(JSGlobalData&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
{
    RefPtr<ExecutableMemoryHandle> result = allocator()->allocate(sizeInBytes, ownerUID);
    if (!result && effort == JITCompilationMustSucceed)
        CRASH();
    // release() transfers ownership to the returned PassRefPtr.
    return result.release();
}
// Total bytes currently committed across every live demand allocator.
// (The callee's name is misspelled in DemandExecutableAllocator; the call is
// kept as-is to match the declared name.)
size_t ExecutableAllocator::committedByteCount()
{
    return DemandExecutableAllocator::bytesCommittedByAllocactors();
}
#if ENABLE(META_ALLOCATOR_PROFILE)
// Dumps the MetaAllocator profile of every live demand allocator.
void ExecutableAllocator::dumpProfile()
{
    DemandExecutableAllocator::dumpProfileFromAllAllocators();
}
#endif
#endif // ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
#if OS(WINDOWS)
#error "ASSEMBLER_WX_EXCLUSIVE not yet supported on this platform."
#endif
// Changes the protection of the pages spanned by [start, start + size):
// read+write when transitioning to Writable, read+execute otherwise.
// mprotect operates on whole pages, so the range is widened to page
// boundaries before the call.
void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSetting setting)
{
    size_t pageSize = WTF::pageSize();

    // Round the start of the range down to its page boundary...
    intptr_t beginAddress = reinterpret_cast<intptr_t>(start);
    intptr_t pageBegin = beginAddress & ~(pageSize - 1);

    // ...and widen the length to absorb the head slack and cover a whole
    // number of pages.
    size_t widenedSize = size + (beginAddress - pageBegin);
    widenedSize = (widenedSize + pageSize - 1) & ~(pageSize - 1);

    int protection = (setting == Writable) ? PROTECTION_FLAGS_RW : PROTECTION_FLAGS_RX;
    mprotect(reinterpret_cast<void*>(pageBegin), widenedSize, protection);
}
#endif
}
#endif // ENABLE(ASSEMBLER)