// ExecutableAllocatorFixedVMPool.cpp
#include "config.h"
#include "ExecutableAllocator.h"
#include "JSCInlines.h"
#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)
#include "CodeProfiling.h"
#include <errno.h>
#include <unistd.h>
#include <wtf/MetaAllocator.h>
#include <wtf/PageReservation.h>
#include <wtf/VMTags.h>
#if OS(DARWIN)
#include <sys/mman.h>
#endif
#if OS(LINUX)
#include <stdio.h>
#endif
#if PLATFORM(MAC) && __MAC_OS_X_VERSION_MIN_REQUIRED < 1090
#define WTF_USE_MADV_FREE_FOR_JIT_MEMORY 1
#endif
using namespace WTF;
namespace JSC {
uintptr_t startOfFixedExecutableMemoryPool;
// MetaAllocator subclass that carves JIT code allocations out of a single
// fixed-size virtual-memory reservation made at construction time.
// The reservation address is published via startOfFixedExecutableMemoryPool.
class FixedVMPoolExecutableAllocator : public MetaAllocator {
WTF_MAKE_FAST_ALLOCATED;
public:
// Reserves (but does not commit) the entire executable pool up front,
// with guard pages, and hands the whole range to MetaAllocator as free space.
FixedVMPoolExecutableAllocator()
: MetaAllocator(jitAllocationGranule) {
m_reservation = PageReservation::reserveWithGuardPages(fixedExecutableMemoryPoolSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
// Reservation can fail (e.g. address-space exhaustion); isValid() reports
// this later by checking bytesReserved().
if (m_reservation) {
ASSERT(m_reservation.size() == fixedExecutableMemoryPoolSize);
addFreshFreeSpace(m_reservation.base(), m_reservation.size());
startOfFixedExecutableMemoryPool = reinterpret_cast<uintptr_t>(m_reservation.base());
}
}
virtual ~FixedVMPoolExecutableAllocator();
protected:
// The pool is fixed: there is never any new space beyond the initial
// reservation, so always report failure to MetaAllocator.
virtual void* allocateNewSpace(size_t&) override
{
return 0;
}
// Called when MetaAllocator starts using a page of the reservation.
virtual void notifyNeedPage(void* page) override
{
#if USE(MADV_FREE_FOR_JIT_MEMORY)
// With MADV_FREE the pages stay committed; the kernel reclaims them
// lazily, so there is nothing to do here.
UNUSED_PARAM(page);
#else
m_reservation.commit(page, pageSize());
#endif
}
// Called when MetaAllocator stops using a page of the reservation.
virtual void notifyPageIsFree(void* page) override
{
#if USE(MADV_FREE_FOR_JIT_MEMORY)
// Tell the kernel it may reclaim the page. madvise can transiently fail
// with EAGAIN, so retry; any other failure is fatal.
for (;;) {
int result = madvise(page, pageSize(), MADV_FREE);
if (!result)
return;
ASSERT(result == -1);
if (errno != EAGAIN) {
RELEASE_ASSERT_NOT_REACHED(); break; }
}
#else
m_reservation.decommit(page, pageSize());
#endif
}
private:
PageReservation m_reservation;
};
static FixedVMPoolExecutableAllocator* allocator;
void ExecutableAllocator::initializeAllocator()
{
ASSERT(!allocator);
allocator = new FixedVMPoolExecutableAllocator();
CodeProfiling::notifyAllocator(allocator);
}
// Per-VM handle onto the shared pool; initializeAllocator() must have run first.
ExecutableAllocator::ExecutableAllocator(VM&)
{
ASSERT(allocator);
}
// Nothing to tear down: the underlying pool is process-global and outlives
// individual ExecutableAllocator instances.
ExecutableAllocator::~ExecutableAllocator()
{
}
// Returns the entire reserved range to the OS.
FixedVMPoolExecutableAllocator::~FixedVMPoolExecutableAllocator()
{
m_reservation.deallocate();
}
// The allocator is usable only if the up-front VM reservation succeeded,
// which is observable as a non-zero reserved byte count.
bool ExecutableAllocator::isValid() const
{
    return allocator->bytesReserved() != 0;
}
bool ExecutableAllocator::underMemoryPressure()
{
MetaAllocator::Statistics statistics = allocator->currentStatistics();
return statistics.bytesAllocated > statistics.bytesReserved / 2;
}
// Returns a multiplier (>= 1.0) expressing how scarce pool space would be
// after allocating addedMemoryUsage more bytes: reserved / remaining-headroom.
// The scarcer the headroom, the larger the multiplier.
double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage)
{
    MetaAllocator::Statistics stats = allocator->currentStatistics();
    ASSERT(stats.bytesAllocated <= stats.bytesReserved);

    // Project the allocation level, clamped to the reservation size.
    size_t projected = stats.bytesAllocated + addedMemoryUsage;
    if (projected >= stats.bytesReserved)
        projected = stats.bytesReserved;

    size_t headroom = stats.bytesReserved - projected;
    if (!headroom)
        return 1.0; // Avoid division by zero when the pool would be exhausted.

    double multiplier = static_cast<double>(stats.bytesReserved) / headroom;
    return multiplier < 1.0 ? 1.0 : multiplier;
}
// Allocates sizeInBytes of executable memory from the pool. On exhaustion,
// behavior depends on effort: JITCompilationCanFail returns null; otherwise
// we release executable memory VM-wide and retry, crashing if that also fails.
PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(VM& vm, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
{
    RefPtr<ExecutableMemoryHandle> handle = allocator->allocate(sizeInBytes, ownerUID);
    if (handle)
        return handle.release();

    if (effort == JITCompilationCanFail)
        return handle; // Null: caller opted in to allocation failure.

    // Mandatory compilation: reclaim memory and try once more.
    releaseExecutableMemory(vm);
    handle = allocator->allocate(sizeInBytes, ownerUID);
    RELEASE_ASSERT(handle);
    return handle.release();
}
// Number of bytes of the reservation currently committed (backed by memory).
size_t ExecutableAllocator::committedByteCount()
{
    size_t committed = allocator->bytesCommitted();
    return committed;
}
#if ENABLE(META_ALLOCATOR_PROFILE)
// Forwards to MetaAllocator's profile dump; only compiled when
// ENABLE(META_ALLOCATOR_PROFILE) is set.
void ExecutableAllocator::dumpProfile()
{
allocator->dumpProfile();
}
#endif
}
#endif // ENABLE(EXECUTABLE_ALLOCATOR_FIXED)