#include "config.h"
#include "WasmMemory.h"
#if ENABLE(WEBASSEMBLY)
#include "VM.h"
#include "WasmThunks.h"
#include <atomic>
#include <wtf/MonotonicTime.h>
#include <wtf/NeverDestroyed.h>
#include <wtf/Platform.h>
#include <wtf/PrintStream.h>
#include <wtf/VMTags.h>
namespace JSC { namespace Wasm {
namespace {
// Compile-time switch for the dataLogLnIf diagnostics throughout this file.
constexpr bool verbose = false;
// Out-of-line, never-inlined crash stubs: one per failure mode so each failure
// produces its own distinct symbol in a crash backtrace.
// NOTE(review): the distinct-crash-signature rationale is inferred from the
// NEVER_INLINE annotation — confirm against project crash-reporting conventions.
NEVER_INLINE NO_RETURN_DUE_TO_CRASH void webAssemblyCouldntGetFastMemory() { CRASH(); }
NEVER_INLINE NO_RETURN_DUE_TO_CRASH void webAssemblyCouldntUnmapMemory() { CRASH(); }
NEVER_INLINE NO_RETURN_DUE_TO_CRASH void webAssemblyCouldntUnprotectMemory() { CRASH(); }
// Reserve |bytes| of address space with no access rights (PROT_NONE); pages are
// made readable/writable later via mprotect. Returns nullptr on failure.
void* mmapBytes(size_t bytes)
{
    void* result = mmap(nullptr, bytes, PROT_NONE, MAP_PRIVATE | MAP_ANON, VM_TAG_FOR_WEBASSEMBLY_MEMORY, 0);
    if (result == MAP_FAILED)
        return nullptr;
    return result;
}
// Unmap a region previously obtained from mmapBytes(). An unmap failure is not
// recoverable, so it crashes with a dedicated signature.
void munmapBytes(void* memory, size_t size)
{
    int failed = munmap(memory, size);
    if (UNLIKELY(failed))
        webAssemblyCouldntUnmapMemory();
}
// Scrub the first |bytes| of a region and revoke all access to it again
// (PROT_NONE), so a cached memory can be handed out later as if fresh.
// A no-op when |bytes| is zero. Crashes if mprotect fails.
void zeroAndUnprotectBytes(void* start, size_t bytes)
{
    if (!bytes)
        return;
    dataLogLnIf(verbose, "Zeroing and unprotecting ", bytes, " from ", RawPointer(start));
    memset(start, 0, bytes);
    if (UNLIKELY(mprotect(start, bytes, PROT_NONE)))
        webAssemblyCouldntUnprotectMemory();
}
// Maximum number of fast memories we will keep cached for re-use.
constexpr size_t fastMemoryCacheHardLimit { 16 };
// Soft cap on the number of fast memories live at once; also sizes the
// active-memory table below.
constexpr size_t fastMemoryAllocationSoftLimit { 32 }; static_assert(fastMemoryAllocationSoftLimit >= fastMemoryCacheHardLimit, "The cache shouldn't be bigger than the total number we'll ever allocate");
// Number of cache slots populated by Memory::initializePreallocations();
// written once at startup, read elsewhere.
size_t fastMemoryPreallocateCount { 0 };
// Re-usable fast memories; nullptr marks an empty slot.
std::atomic<void*> fastMemoryCache[fastMemoryCacheHardLimit] = { ATOMIC_VAR_INIT(nullptr) };
// Fast memories currently owned by a live Memory; nullptr marks an empty slot.
std::atomic<void*> currentlyActiveFastMemories[fastMemoryAllocationSoftLimit] = { ATOMIC_VAR_INIT(nullptr) };
// Count of fast memories allocated and not yet munmapped (cached ones included).
std::atomic<size_t> currentlyAllocatedFastMemories = ATOMIC_VAR_INIT(0);
// High-water mark of currentlyAllocatedFastMemories, reported by
// Memory::maxFastMemoryCount().
std::atomic<size_t> observedMaximumFastMemory = ATOMIC_VAR_INIT(0);
// Total bytes currently mapped for slow (BoundsChecking-mode) memories.
std::atomic<size_t> currentSlowMemoryCapacity = ATOMIC_VAR_INIT(0);
// Byte-denominated soft limit used to cap slow-memory capacity: the same
// budget the fast-memory soft limit would consume if fully allocated.
size_t fastMemoryAllocatedBytesSoftLimit()
{
    return Memory::fastMappedBytes() * fastMemoryAllocationSoftLimit;
}
// Atomically claim a cached fast memory, if any slot holds one.
// Returns nullptr when the cache is empty.
void* tryGetCachedFastMemory()
{
    unsigned slot = 0;
    while (slot < fastMemoryPreallocateCount) {
        void* cached = fastMemoryCache[slot].exchange(nullptr, std::memory_order_acq_rel);
        if (cached)
            return cached;
        ++slot;
    }
    return nullptr;
}
// Try to park a fast memory in an empty cache slot for later re-use.
// Returns false when every slot is occupied; the caller then frees the memory.
bool tryAddToCachedFastMemory(void* memory)
{
    for (unsigned slot = 0; slot < fastMemoryPreallocateCount; ++slot) {
        void* emptySlot = nullptr;
        bool claimed = fastMemoryCache[slot].compare_exchange_strong(emptySlot, memory, std::memory_order_acq_rel);
        if (!claimed)
            continue;
        dataLogLnIf(verbose, "Cached fast memory ", RawPointer(memory));
        return true;
    }
    return false;
}
// Record |memory| in the active-fast-memory table (consulted by the fault
// handler via Memory::addressIsInActiveFastMemory). Returns false when the
// table is full, i.e. the allocation soft limit has been reached.
bool tryAddToCurrentlyActiveFastMemories(void* memory)
{
    size_t slot = 0;
    while (slot < fastMemoryAllocationSoftLimit) {
        void* emptySlot = nullptr;
        if (currentlyActiveFastMemories[slot].compare_exchange_strong(emptySlot, memory, std::memory_order_acq_rel))
            return true;
        ++slot;
    }
    return false;
}
// Erase |memory| from the active-fast-memory table. The entry must be present:
// anything relinquished here was registered by tryAddToCurrentlyActiveFastMemories.
void removeFromCurrentlyActiveFastMemories(void* memory)
{
    size_t slot = 0;
    while (slot < fastMemoryAllocationSoftLimit) {
        void* candidate = memory;
        bool removed = currentlyActiveFastMemories[slot].compare_exchange_strong(candidate, nullptr, std::memory_order_acq_rel);
        if (removed)
            return;
        ++slot;
    }
    RELEASE_ASSERT_NOT_REACHED();
}
// Obtain a fast (Signaling-mode, guard-page-checked) memory. Strategy, in order:
// 1. take one from the cache;
// 2. if none cached but some are outstanding, force a full GC (which may
//    destruct dead Memory objects and refill the cache) and retry;
// 3. mmap a fresh one, unless the allocation soft limit has been reached.
// Returns nullptr on failure; optionally crashes in that case when
// Options::crashIfWebAssemblyCantFastMemory() is set.
void* tryGetFastMemory(VM& vm)
{
    void* memory = nullptr;
    if (LIKELY(Options::useWebAssemblyFastMemory())) {
        memory = tryGetCachedFastMemory();
        if (memory)
            dataLogLnIf(verbose, "tryGetFastMemory re-using ", RawPointer(memory));
        else if (currentlyAllocatedFastMemories.load(std::memory_order_acquire) >= 1) {
            // No cached memory, but allocations are outstanding: collecting may
            // return dead memories to the cache.
            dataLogLnIf(verbose, "tryGetFastMemory waiting on GC and retrying");
            vm.heap.collectNow(Sync, CollectionScope::Full);
            memory = tryGetCachedFastMemory();
            // FIX: was "unseccessfully" — typo in the diagnostic log message.
            dataLogLnIf(verbose, "tryGetFastMemory waited on GC and retried ", memory ? "successfully" : "unsuccessfully");
        }
        bool atAllocationSoftLimit = currentlyAllocatedFastMemories.load(std::memory_order_acquire) >= fastMemoryAllocationSoftLimit;
        dataLogLnIf(verbose && atAllocationSoftLimit, "tryGetFastMemory reached allocation soft limit of ", fastMemoryAllocationSoftLimit);
        if (!memory && !atAllocationSoftLimit) {
            memory = mmapBytes(Memory::fastMappedBytes());
            if (memory) {
                size_t currentlyAllocated = 1 + currentlyAllocatedFastMemories.fetch_add(1, std::memory_order_acq_rel);
                size_t currentlyObservedMaximum = observedMaximumFastMemory.load(std::memory_order_acquire);
                if (currentlyAllocated > currentlyObservedMaximum) {
                    // Publish a new high-water mark. Losing the CAS means another
                    // thread observed an equal-or-higher maximum concurrently.
                    size_t expected = currentlyObservedMaximum;
                    bool success = observedMaximumFastMemory.compare_exchange_strong(expected, currentlyAllocated, std::memory_order_acq_rel);
                    if (success)
                        dataLogLnIf(verbose, "tryGetFastMemory currently observed maximum is now ", currentlyAllocated);
                    else
                        ASSERT(expected >= currentlyAllocated);
                }
                dataLogLnIf(verbose, "tryGetFastMemory allocated ", RawPointer(memory), ", currently allocated is ", currentlyAllocated);
            }
        }
    }
    if (memory) {
        // Register the memory as active so the fault handler can recognize it.
        // Registration can fail if we raced past the soft limit; give the memory up.
        if (UNLIKELY(!tryAddToCurrentlyActiveFastMemories(memory))) {
            dataLogLnIf(verbose, "tryGetFastMemory found a fast memory but had to give it up");
            munmapBytes(memory, Memory::fastMappedBytes());
            currentlyAllocatedFastMemories.fetch_sub(1, std::memory_order_acq_rel);
            memory = nullptr;
        }
    }
    if (!memory) {
        dataLogLnIf(verbose, "tryGetFastMemory couldn't re-use or allocate a fast memory");
        if (UNLIKELY(Options::crashIfWebAssemblyCantFastMemory()))
            webAssemblyCouldntGetFastMemory();
    }
    return memory;
}
bool slowMemoryCapacitySoftMaximumExceeded()
{
size_t maximum = fastMemoryAllocatedBytesSoftLimit();
size_t currentCapacity = currentSlowMemoryCapacity.load(std::memory_order_acquire);
if (UNLIKELY(currentCapacity > maximum)) {
dataLogLnIf(verbose, "Slow memory capacity limit reached");
return true;
}
return false;
}
// Map |bytes| of slow (BoundsChecking-mode) memory and account its capacity.
// Refuses (returns nullptr) once the slow-capacity soft maximum is exceeded.
void* tryGetSlowMemory(size_t bytes)
{
    if (slowMemoryCapacitySoftMaximumExceeded())
        return nullptr;
    void* memory = mmapBytes(bytes);
    if (memory) {
        currentSlowMemoryCapacity.fetch_add(bytes, std::memory_order_acq_rel);
        dataLogLnIf(verbose, "Obtained slow memory ", RawPointer(memory), " with capacity ", bytes);
    } else
        dataLogLnIf(verbose, "Failed obtaining slow memory with capacity ", bytes);
    return memory;
}
// Give back a memory obtained from tryGetFastMemory / tryGetSlowMemory.
// Signaling (fast) memories are scrubbed and preferentially cached for re-use;
// BoundsChecking (slow) memories are simply unmapped and their capacity
// deducted from the global accounting.
void relinquishMemory(void* memory, size_t writableSize, size_t mappedCapacity, MemoryMode mode)
{
    switch (mode) {
    case MemoryMode::Signaling: {
        RELEASE_ASSERT(Options::useWebAssemblyFastMemory());
        RELEASE_ASSERT(mappedCapacity == Memory::fastMappedBytes());
        removeFromCurrentlyActiveFastMemories(memory);
        // Zero the writable portion and drop access so the memory can be
        // handed out again as if freshly mapped.
        zeroAndUnprotectBytes(memory, writableSize);
        if (tryAddToCachedFastMemory(memory))
            return;
        dataLogLnIf(verbose, "relinquishMemory unable to cache fast memory, freeing instead ", RawPointer(memory));
        munmapBytes(memory, Memory::fastMappedBytes());
        currentlyAllocatedFastMemories.fetch_sub(1, std::memory_order_acq_rel);
        return;
    }
    case MemoryMode::BoundsChecking:
        // FIX: log previously named "relinquishFastMemory"; this function is
        // relinquishMemory and this branch frees slow memory.
        dataLogLnIf(verbose, "relinquishMemory freeing slow memory ", RawPointer(memory));
        munmapBytes(memory, mappedCapacity);
        currentSlowMemoryCapacity.fetch_sub(mappedCapacity, std::memory_order_acq_rel);
        return;
    case MemoryMode::NumberOfMemoryModes:
        break;
    }
    RELEASE_ASSERT_NOT_REACHED();
}
// Make the first |initialBytes| of a freshly-obtained memory read+write.
// On mprotect failure the memory is relinquished (with zero writable bytes,
// since none were ever made writable) and false is returned.
bool makeNewMemoryReadWriteOrRelinquish(void* memory, size_t initialBytes, size_t mappedCapacityBytes, MemoryMode mode)
{
    ASSERT(memory && initialBytes <= mappedCapacityBytes);
    if (!initialBytes)
        return true;
    dataLogLnIf(verbose, "Marking WebAssembly memory's ", RawPointer(memory), "'s initial ", initialBytes, " bytes as read+write");
    if (!mprotect(memory, initialBytes, PROT_READ | PROT_WRITE))
        return true;
    const char* why = strerror(errno);
    dataLogLnIf(verbose, "Failed making memory ", RawPointer(memory), " readable and writable: ", why);
    relinquishMemory(memory, 0, mappedCapacityBytes, mode);
    return false;
}
}
// Human-readable name of a MemoryMode, for logging and Memory::dump.
// Crashes on NumberOfMemoryModes (not a real mode) or a corrupt value.
const char* makeString(MemoryMode mode)
{
    if (mode == MemoryMode::BoundsChecking)
        return "BoundsChecking";
    if (mode == MemoryMode::Signaling)
        return "Signaling";
    RELEASE_ASSERT_NOT_REACHED();
    return "";
}
// Pre-allocate up to Options::webAssemblyFastMemoryPreallocateCount() fast
// memories at startup and seed the cache with them. Tries to place them in as
// few contiguous mmap reservations as possible, shrinking the attempted run
// length on each failure. Runs before concurrent access, hence the relaxed
// atomics throughout.
void Memory::initializePreallocations()
{
    if (UNLIKELY(!Options::useWebAssemblyFastMemory()))
        return;
    MonotonicTime startTime;
    if (verbose)
        startTime = MonotonicTime::now();
    // Never preallocate more than the cache can hold.
    const size_t desiredFastMemories = std::min<size_t>(Options::webAssemblyFastMemoryPreallocateCount(), fastMemoryCacheHardLimit);
    // Map numContiguous fast memories in one reservation and drop each
    // fastMappedBytes()-sized sub-region into an empty cache slot.
    auto allocateContiguousFastMemories = [&] (size_t numContiguous) -> bool {
        if (void *memory = mmapBytes(Memory::fastMappedBytes() * numContiguous)) {
            for (size_t subMemory = 0; subMemory < numContiguous; ++subMemory) {
                void* startAddress = reinterpret_cast<char*>(memory) + Memory::fastMappedBytes() * subMemory;
                bool inserted = false;
                for (size_t cacheEntry = 0; cacheEntry < fastMemoryCacheHardLimit; ++cacheEntry) {
                    if (fastMemoryCache[cacheEntry].load(std::memory_order_relaxed) == nullptr) {
                        fastMemoryCache[cacheEntry].store(startAddress, std::memory_order_relaxed);
                        inserted = true;
                        break;
                    }
                }
                // Cannot fail: we never allocate more than fastMemoryCacheHardLimit.
                RELEASE_ASSERT(inserted);
            }
            return true;
        }
        return false;
    };
    size_t fragments = 0;
    size_t numFastMemories = 0;
    size_t contiguousMemoryAllocationAttempt = desiredFastMemories;
    // Greedy: try the largest remaining contiguous run; on failure, retry one
    // smaller. Each successful reservation is one "fragment".
    while (numFastMemories != desiredFastMemories && contiguousMemoryAllocationAttempt != 0) {
        if (allocateContiguousFastMemories(contiguousMemoryAllocationAttempt)) {
            numFastMemories += contiguousMemoryAllocationAttempt;
            contiguousMemoryAllocationAttempt = std::min(contiguousMemoryAllocationAttempt - 1, desiredFastMemories - numFastMemories);
        } else
            --contiguousMemoryAllocationAttempt;
        ++fragments;
    }
    fastMemoryPreallocateCount = numFastMemories;
    currentlyAllocatedFastMemories.store(fastMemoryPreallocateCount, std::memory_order_relaxed);
    observedMaximumFastMemory.store(fastMemoryPreallocateCount, std::memory_order_relaxed);
    if (verbose) {
        MonotonicTime endTime = MonotonicTime::now();
        for (size_t cacheEntry = 0; cacheEntry < fastMemoryPreallocateCount; ++cacheEntry) {
            void* startAddress = fastMemoryCache[cacheEntry].load(std::memory_order_relaxed);
            ASSERT(startAddress);
            dataLogLn("Pre-allocation of WebAssembly fast memory at ", RawPointer(startAddress));
        }
        dataLogLn("Pre-allocated ", fastMemoryPreallocateCount, " WebAssembly fast memories in ", fastMemoryPreallocateCount == 0 ? 0 : fragments, fragments == 1 ? " fragment, took " : " fragments, took ", endTime - startTime);
    }
}
// Construct a Memory with no backing mapping: used only when the initial size
// is zero bytes, so there is nothing to map yet. Stays in the default
// BoundsChecking mode.
Memory::Memory(PageCount initial, PageCount maximum)
    : m_initial(initial)
    , m_maximum(maximum)
{
    ASSERT(!initial.bytes());
    ASSERT(m_mode == MemoryMode::BoundsChecking);
    dataLogLnIf(verbose, "Memory::Memory allocating ", *this);
}
// Construct a Memory over an already-mapped region of |mappedCapacity| bytes,
// of which the first initial.bytes() are read+write. |mode| records whether
// bounds checks are explicit (BoundsChecking) or via guard pages (Signaling).
Memory::Memory(void* memory, PageCount initial, PageCount maximum, size_t mappedCapacity, MemoryMode mode)
    : m_memory(memory)
    , m_size(initial.bytes())
    , m_initial(initial)
    , m_maximum(maximum)
    , m_mappedCapacity(mappedCapacity)
    , m_mode(mode)
{
    dataLogLnIf(verbose, "Memory::Memory allocating ", *this);
}
// Create a Memory for the given initial/maximum page counts. Allocation
// cascade: fast (Signaling) memory first, then a slow mapping of the full
// maximum, then a slow mapping of just the initial size; a memory-less Memory
// is returned when nothing needs mapping. Returns nullptr on failure.
RefPtr<Memory> Memory::create(VM& vm, PageCount initial, PageCount maximum)
{
    ASSERT(initial);
    RELEASE_ASSERT(!maximum || maximum >= initial);
    const size_t initialBytes = initial.bytes();
    const size_t maximumBytes = maximum ? maximum.bytes() : 0;
    size_t mappedCapacityBytes = 0;
    // FIX: was left uninitialized; every success path below does assign it, but
    // initialize defensively so no path can ever read an indeterminate value.
    MemoryMode mode = MemoryMode::BoundsChecking;
    // We need to be sure we have a stub prior to running code.
    if (UNLIKELY(!Thunks::singleton().stub(throwExceptionFromWasmThunkGenerator)))
        return nullptr;
    if (maximum && !maximumBytes) {
        // Zero maximum: the Memory can never contain anything, so don't map it.
        RELEASE_ASSERT(!initialBytes);
        return adoptRef(new Memory(initial, maximum));
    }
    void* memory = nullptr;
    // 1. Prefer a fast (guard-page) memory.
    memory = tryGetFastMemory(vm);
    if (memory) {
        mappedCapacityBytes = Memory::fastMappedBytes();
        mode = MemoryMode::Signaling;
    }
    // 2. Fall back to a slow mapping of the declared maximum (grow can then
    //    proceed in-place).
    if (!memory && maximum) {
        memory = tryGetSlowMemory(maximumBytes);
        if (memory) {
            mappedCapacityBytes = maximumBytes;
            mode = MemoryMode::BoundsChecking;
        }
    }
    // 3. Last resort: map only the initial size (grow will have to remap).
    if (!memory) {
        if (!initialBytes)
            return adoptRef(new Memory(initial, maximum));
        memory = tryGetSlowMemory(initialBytes);
        if (memory) {
            mappedCapacityBytes = initialBytes;
            mode = MemoryMode::BoundsChecking;
        }
    }
    if (!memory)
        return nullptr;
    if (!makeNewMemoryReadWriteOrRelinquish(memory, initialBytes, mappedCapacityBytes, mode))
        return nullptr;
    return adoptRef(new Memory(memory, initial, maximum, mappedCapacityBytes, mode));
}
// Return the backing mapping (if any) to the fast-memory cache or the OS.
Memory::~Memory()
{
    if (!m_memory)
        return;
    dataLogLnIf(verbose, "Memory::~Memory ", *this);
    relinquishMemory(m_memory, m_size, m_mappedCapacity, m_mode);
}
// Size of the inaccessible redzone mapped past the 4GiB fast-memory span,
// in bytes (configured in pages via Options).
size_t Memory::fastMappedRedzoneBytes()
{
    return Options::webAssemblyFastMemoryRedzonePages() * static_cast<size_t>(PageCount::pageSize);
}
// Total bytes mapped for one fast memory: the full 32-bit addressable span
// plus the redzone, so any 32-bit offset lands inside the mapping.
size_t Memory::fastMappedBytes()
{
    static_assert(sizeof(uint64_t) == sizeof(size_t), "We rely on allowing the maximum size of Memory we map to be 2^32 + redzone which is larger than fits in a 32-bit integer that we'd pass to mprotect if this didn't hold.");
    const size_t addressableSpan = static_cast<size_t>(std::numeric_limits<uint32_t>::max());
    return addressableSpan + fastMappedRedzoneBytes();
}
// High-water mark of simultaneously allocated fast memories. Relaxed load:
// this is a telemetry read with no ordering requirements.
size_t Memory::maxFastMemoryCount()
{
    return observedMaximumFastMemory.load(std::memory_order_relaxed);
}
// Used by the fault handler: does |address| fall within any currently-active
// fast memory (including its redzone)?
bool Memory::addressIsInActiveFastMemory(void* address)
{
    for (size_t idx = 0; idx < fastMemoryAllocationSoftLimit; ++idx) {
        char* start = static_cast<char*>(currentlyActiveFastMemories[idx].load(std::memory_order_acquire));
        // FIX: skip empty slots. With start == nullptr, "start <= address" is
        // trivially true, "start + fastMappedBytes()" is pointer arithmetic on
        // null (UB), and any sufficiently low address would falsely match.
        if (!start)
            continue;
        if (start <= address && address <= start + fastMappedBytes())
            return true;
    }
    return false;
}
// Grow the memory to |newSize| pages. In-place when the desired size still
// fits in the existing mapping (flip the next pages to read+write); otherwise
// (BoundsChecking only) map a new region, copy, and release the old one.
// Returns false when the maximum would be exceeded or mapping/mprotect fails.
bool Memory::grow(PageCount newSize)
{
    RELEASE_ASSERT(newSize > PageCount::fromBytes(m_size));
    dataLogLnIf(verbose, "Memory::grow to ", newSize, " from ", *this);

    if (maximum() && newSize > maximum())
        return false;

    size_t desiredSize = newSize.bytes();

    switch (mode()) {
    case MemoryMode::BoundsChecking:
        RELEASE_ASSERT(maximum().bytes() != 0);
        break;
    case MemoryMode::Signaling:
        // Signaling memories map the full addressable span up-front, so
        // growth is always in-place.
        RELEASE_ASSERT(m_memory);
        break;
    case MemoryMode::NumberOfMemoryModes:
        RELEASE_ASSERT_NOT_REACHED();
    }

    if (m_memory && desiredSize <= m_mappedCapacity) {
        // In-place: make the bytes in [m_size, desiredSize) read+write.
        uint8_t* startAddress = static_cast<uint8_t*>(m_memory) + m_size;
        size_t extraBytes = desiredSize - m_size;
        RELEASE_ASSERT(extraBytes);
        dataLogLnIf(verbose, "Marking WebAssembly memory's ", RawPointer(m_memory), " as read+write in range [", RawPointer(startAddress), ", ", RawPointer(startAddress + extraBytes), ")");
        if (mprotect(startAddress, extraBytes, PROT_READ | PROT_WRITE)) {
            dataLogLnIf(verbose, "Memory::grow in-place failed ", *this);
            return false;
        }
        m_size = desiredSize;
        dataLogLnIf(verbose, "Memory::grow in-place ", *this);
        return true;
    }

    RELEASE_ASSERT(mode() != MemoryMode::Signaling);
    void* newMemory = tryGetSlowMemory(desiredSize);
    if (!newMemory)
        return false;
    // On failure this relinquishes newMemory for us.
    if (!makeNewMemoryReadWriteOrRelinquish(newMemory, desiredSize, desiredSize, mode()))
        return false;
    if (m_memory) {
        memcpy(newMemory, m_memory, m_size);
        // FIX: pass the old mapping's capacity (was m_size) — munmap and the
        // slow-capacity accounting must cover the whole original mapping.
        // (On this path m_size == m_mappedCapacity today, but only by invariant.)
        relinquishMemory(m_memory, m_size, m_mappedCapacity, m_mode);
    }
    m_memory = newMemory;
    m_mappedCapacity = desiredSize;
    m_size = desiredSize;
    dataLogLnIf(verbose, "Memory::grow ", *this);
    return true;
}
// Debug description of this Memory (pointer, sizes, limits, mode), used by
// the dataLogLnIf diagnostics above.
void Memory::dump(PrintStream& out) const
{
    out.print("Memory at ", RawPointer(m_memory), ", size ", m_size, "B capacity ", m_mappedCapacity, "B, initial ", m_initial, " maximum ", m_maximum, " mode ", makeString(m_mode));
}
}
}
#endif // ENABLE(WEBASSEMBLY)