#pragma once
#include "CellAttributes.h"
#include "DestructionMode.h"
#include "HeapCell.h"
#include "IterationStatus.h"
#include "WeakSet.h"
#include <wtf/Atomics.h>
#include <wtf/Bitmap.h>
#include <wtf/CountingLock.h>
#include <wtf/HashFunctions.h>
#include <wtf/StdLibExtras.h>
namespace JSC {
class AlignedMemoryAllocator;
class FreeList;
class Heap;
class JSCell;
class BlockDirectory;
class MarkedSpace;
class SlotVisitor;
class Subspace;
typedef uint32_t HeapVersion;
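// A marked block is a page-aligned container for heap-allocated objects.
// Objects are allocated within cells of the marked block. For a given
// marked block, all cells have the same size. Objects smaller than the
// cell size may be allocated in the marked block, in which case the
// mark bit is the only indication that the cell contains a live object.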
class MarkedBlock {
WTF_MAKE_NONCOPYABLE(MarkedBlock);
friend class LLIntOffsetsExtractor;
friend struct VerifyMarked;
public:
class Footer;
class Handle;
private:
friend class Footer;
friend class Handle;
public:
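// The atom is the smallest unit of allocation within a block. Every cell
// size is a multiple of atomSize, and the mark and newlyAllocated bitmaps
// below carry one bit per atom.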
static constexpr size_t atomSize = 16;
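// PowerPC commonly uses 64KB pages, hence the larger block size there.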
#if CPU(PPC64) || CPU(PPC64LE) || CPU(PPC) || CPU(UNKNOWN)
static constexpr size_t blockSize = 64 * KB;
#else
static constexpr size_t blockSize = 16 * KB;
#endif
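// Blocks are allocated on blockSize boundaries, so masking any interior
// pointer with blockMask recovers the address of its owning MarkedBlock
// (see blockFor() below).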
static constexpr size_t blockMask = ~(blockSize - 1);
static constexpr size_t atomsPerBlock = blockSize / atomSize;
static_assert(!(MarkedBlock::atomSize & (MarkedBlock::atomSize - 1)), "MarkedBlock::atomSize must be a power of two.");
static_assert(!(MarkedBlock::blockSize & (MarkedBlock::blockSize - 1)), "MarkedBlock::blockSize must be a power of two.");
struct VoidFunctor {
typedef void ReturnType;
void returnValue() { }
};
class CountFunctor {
public:
typedef size_t ReturnType;
CountFunctor() : m_count(0) { }
void count(size_t count) const { m_count += count; }
ReturnType returnValue() const { return m_count; }
private:
mutable ReturnType m_count;
};
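// A Handle is the out-of-line metadata for one MarkedBlock: its directory
// linkage, cell size and attributes, free-list state, and WeakSet. The block
// itself holds only the cell payload and the Footer, so nothing in the
// Handle competes with cells for space inside the block.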
class Handle {
WTF_MAKE_NONCOPYABLE(Handle);
WTF_MAKE_FAST_ALLOCATED;
friend class LLIntOffsetsExtractor;
friend class MarkedBlock;
friend struct VerifyMarked;
public:
~Handle();
MarkedBlock& block();
MarkedBlock::Footer& blockFooter();
void* cellAlign(void*);
bool isEmpty();
void lastChanceToFinalize();
BlockDirectory* directory() const;
Subspace* subspace() const;
AlignedMemoryAllocator* alignedMemoryAllocator() const;
Heap* heap() const;
inline MarkedSpace* space() const;
VM* vm() const;
WeakSet& weakSet();
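// SweepOnly frees dead cells; SweepToFreeList additionally threads them onto
// the given FreeList so the directory can allocate out of this block.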
enum SweepMode { SweepOnly, SweepToFreeList };
void sweep(FreeList*);
template<typename DestroyFunc>
void finishSweepKnowingHeapCellType(FreeList*, const DestroyFunc&);
void unsweepWithNoNewlyAllocated();
void zap(const FreeList&);
void shrink();
void visitWeakSet(SlotVisitor&);
void reapWeakSet();
void didConsumeFreeList();
void stopAllocating(const FreeList&);
void resumeAllocating(FreeList&);
size_t cellSize();
inline unsigned cellsPerBlock();
const CellAttributes& attributes() const;
DestructionMode destruction() const;
bool needsDestruction() const;
HeapCell::Kind cellKind() const;
size_t markCount();
size_t size();
bool isAllocated();
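// A cell is live if it is marked under the current marking version, or if
// its newlyAllocated bit is set under the current newlyAllocated version.
// Callers pass the versions in so staleness can be decided without
// re-reading global heap state; isMarking says whether a collection is in
// flight, which changes how stale marks are interpreted.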
bool isLive(HeapVersion markingVersion, HeapVersion newlyAllocatedVersion, bool isMarking, const HeapCell*);
inline bool isLiveCell(HeapVersion markingVersion, HeapVersion newlyAllocatedVersion, bool isMarking, const void*);
bool isLive(const HeapCell*);
bool isLiveCell(const void*);
bool isFreeListedCell(const void* target) const;
template <typename Functor> IterationStatus forEachCell(const Functor&);
template <typename Functor> inline IterationStatus forEachLiveCell(const Functor&);
template <typename Functor> inline IterationStatus forEachDeadCell(const Functor&);
template <typename Functor> inline IterationStatus forEachMarkedCell(const Functor&);
JS_EXPORT_PRIVATE bool areMarksStale();
bool areMarksStaleForSweep();
void assertMarksNotStale();
bool isFreeListed() const { return m_isFreeListed; }
size_t index() const { return m_index; }
void removeFromDirectory();
void didAddToDirectory(BlockDirectory*, size_t index);
void didRemoveFromDirectory();
void dumpState(PrintStream&);
private:
Handle(Heap&, AlignedMemoryAllocator*, void*);
enum SweepDestructionMode { BlockHasNoDestructors, BlockHasDestructors, BlockHasDestructorsAndCollectorIsRunning };
enum ScribbleMode { DontScribble, Scribble };
enum EmptyMode { IsEmpty, NotEmpty };
enum NewlyAllocatedMode { HasNewlyAllocated, DoesNotHaveNewlyAllocated };
enum MarksMode { MarksStale, MarksNotStale };
SweepDestructionMode sweepDestructionMode();
EmptyMode emptyMode();
ScribbleMode scribbleMode();
NewlyAllocatedMode newlyAllocatedMode();
MarksMode marksMode();
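// specializedSweep() is instantiated for each combination of the modes above
// (the leading bool selects the specialized path), so the per-cell sweep
// loop carries no branches on any of them.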
template<bool, EmptyMode, SweepMode, SweepDestructionMode, ScribbleMode, NewlyAllocatedMode, MarksMode, typename DestroyFunc>
void specializedSweep(FreeList*, EmptyMode, SweepMode, SweepDestructionMode, ScribbleMode, NewlyAllocatedMode, MarksMode, const DestroyFunc&);
void setIsFreeListed();
MarkedBlock::Handle* m_prev { nullptr };
MarkedBlock::Handle* m_next { nullptr };
size_t m_atomsPerCell { std::numeric_limits<size_t>::max() };
size_t m_endAtom { std::numeric_limits<size_t>::max() };
CellAttributes m_attributes;
bool m_isFreeListed { false };
AlignedMemoryAllocator* m_alignedMemoryAllocator { nullptr };
BlockDirectory* m_directory { nullptr };
size_t m_index { std::numeric_limits<size_t>::max() };
WeakSet m_weakSet;
MarkedBlock* m_block { nullptr };
};
private:
static constexpr size_t atomAlignmentMask = atomSize - 1;
typedef char Atom[atomSize];
public:
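// Per-block metadata stored inside the block itself, at its end: the cell
// payload occupies atoms [0, endAtom) and the Footer starts at
// offsetOfFooter. Keeping the metadata at the end means the first cell
// begins at the very start of the block, so atomNumber() needs no header
// offset.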
class Footer {
public:
Footer(VM&, Handle&);
~Footer();
private:
friend class LLIntOffsetsExtractor;
friend class MarkedBlock;
Handle& m_handle;
VM* m_vm;
Subspace* m_subspace;
CountingLock m_lock; // Guards racy per-block transitions, e.g. clearing stale marks in aboutToMarkSlow().
int16_t m_biasedMarkCount; // The mark count plus m_markCountBias; see noteMarked() below.
int16_t m_markCountBias; // Negative bias chosen so that m_biasedMarkCount crosses zero at the block's retirement threshold.
HeapVersion m_markingVersion; // If this lags the heap's current version, m_marks is stale and logically all clear.
HeapVersion m_newlyAllocatedVersion; // The same versioning scheme, applied to m_newlyAllocated.
Bitmap<atomsPerBlock> m_marks; // One bit per atom; only a cell's first atom carries its mark.
Bitmap<atomsPerBlock> m_newlyAllocated; // Cells allocated since the last version bump; these are live even when unmarked.
};
private:
Footer& footer();
const Footer& footer() const;
public:
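// The payload is the prefix of the block before the Footer. endAtom rounds
// down, so the footer region is at least sizeof(Footer); the static_asserts
// below check the arithmetic two ways.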
static constexpr size_t endAtom = (blockSize - sizeof(Footer)) / atomSize;
static constexpr size_t payloadSize = endAtom * atomSize;
static constexpr size_t footerSize = blockSize - payloadSize;
static_assert(payloadSize == ((blockSize - sizeof(MarkedBlock::Footer)) & ~(atomSize - 1)), "Payload size computed the alternate way should give the same result");
static_assert(footerSize >= minimumDistanceBetweenCellsFromDifferentOrigins, "Footer is not big enough to create the necessary distance between objects from different origins");
static MarkedBlock::Handle* tryCreate(Heap&, AlignedMemoryAllocator*);
Handle& handle();
const Handle& handle() const;
VM* vm() const;
inline Heap* heap() const;
inline MarkedSpace* space() const;
static bool isAtomAligned(const void*);
static MarkedBlock* blockFor(const void*);
size_t atomNumber(const void*);
size_t markCount();
bool isMarked(const void*);
bool isMarked(HeapVersion markingVersion, const void*);
bool isMarked(const void*, Dependency);
bool testAndSetMarked(const void*, Dependency);
bool isAtom(const void*);
void clearMarked(const void*);
bool isNewlyAllocated(const void*);
void setNewlyAllocated(const void*);
void clearNewlyAllocated(const void*);
const Bitmap<atomsPerBlock>& newlyAllocated() const;
HeapVersion newlyAllocatedVersion() const { return footer().m_newlyAllocatedVersion; }
inline bool isNewlyAllocatedStale() const;
inline bool hasAnyNewlyAllocated();
void resetAllocated();
size_t cellSize();
const CellAttributes& attributes() const;
bool hasAnyMarked() const;
void noteMarked();
#if ASSERT_DISABLED
void assertValidCell(VM&, HeapCell*) const { }
#else
void assertValidCell(VM&, HeapCell*) const;
#endif
WeakSet& weakSet();
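// Versioned mark bits: rather than clearing every block's marks at the start
// of each collection, the heap bumps a global HeapVersion. A block whose
// m_markingVersion lags behind is "stale" (its marks are logically all
// clear), and aboutToMark() lazily clears the bitmap, via aboutToMarkSlow(),
// the first time the block is marked in the new cycle.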
JS_EXPORT_PRIVATE bool areMarksStale();
bool areMarksStale(HeapVersion markingVersion);
Dependency aboutToMark(HeapVersion markingVersion);
#if ASSERT_DISABLED
void assertMarksNotStale() { }
#else
JS_EXPORT_PRIVATE void assertMarksNotStale();
#endif
void resetMarks();
bool isMarkedRaw(const void* p);
HeapVersion markingVersion() const { return footer().m_markingVersion; }
const Bitmap<atomsPerBlock>& marks() const;
CountingLock& lock() { return footer().m_lock; }
Subspace* subspace() const { return footer().m_subspace; }
static constexpr size_t offsetOfFooter = endAtom * atomSize;
private:
MarkedBlock(VM&, Handle&);
~MarkedBlock();
Atom* atoms();
JS_EXPORT_PRIVATE void aboutToMarkSlow(HeapVersion markingVersion);
void clearHasAnyMarked();
void noteMarkedSlow();
inline bool marksConveyLivenessDuringMarking(HeapVersion markingVersion);
inline bool marksConveyLivenessDuringMarking(HeapVersion myMarkingVersion, HeapVersion markingVersion);
};
inline MarkedBlock::Footer& MarkedBlock::footer()
{
return *bitwise_cast<MarkedBlock::Footer*>(atoms() + endAtom);
}
inline const MarkedBlock::Footer& MarkedBlock::footer() const
{
return const_cast<MarkedBlock*>(this)->footer();
}
inline MarkedBlock::Handle& MarkedBlock::handle()
{
return footer().m_handle;
}
inline const MarkedBlock::Handle& MarkedBlock::handle() const
{
return const_cast<MarkedBlock*>(this)->handle();
}
inline MarkedBlock& MarkedBlock::Handle::block()
{
return *m_block;
}
inline MarkedBlock::Footer& MarkedBlock::Handle::blockFooter()
{
return block().footer();
}
inline MarkedBlock::Atom* MarkedBlock::atoms()
{
return reinterpret_cast<Atom*>(this);
}
inline bool MarkedBlock::isAtomAligned(const void* p)
{
return !(reinterpret_cast<uintptr_t>(p) & atomAlignmentMask);
}
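// Rounds p down to the base of the cell containing it: take p's offset from
// the first atom, snap it to a multiple of cellSize(), and add the base back.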
inline void* MarkedBlock::Handle::cellAlign(void* p)
{
uintptr_t base = reinterpret_cast<uintptr_t>(block().atoms());
uintptr_t bits = reinterpret_cast<uintptr_t>(p);
bits -= base;
bits -= bits % cellSize();
bits += base;
return reinterpret_cast<void*>(bits);
}
inline MarkedBlock* MarkedBlock::blockFor(const void* p)
{
return reinterpret_cast<MarkedBlock*>(reinterpret_cast<uintptr_t>(p) & blockMask);
}
inline BlockDirectory* MarkedBlock::Handle::directory() const
{
return m_directory;
}
inline AlignedMemoryAllocator* MarkedBlock::Handle::alignedMemoryAllocator() const
{
return m_alignedMemoryAllocator;
}
inline Heap* MarkedBlock::Handle::heap() const
{
return m_weakSet.heap();
}
inline VM* MarkedBlock::Handle::vm() const
{
return m_weakSet.vm();
}
inline VM* MarkedBlock::vm() const
{
return footer().m_vm;
}
inline WeakSet& MarkedBlock::Handle::weakSet()
{
return m_weakSet;
}
inline WeakSet& MarkedBlock::weakSet()
{
return handle().weakSet();
}
inline void MarkedBlock::Handle::shrink()
{
m_weakSet.shrink();
}
inline void MarkedBlock::Handle::visitWeakSet(SlotVisitor& visitor)
{
return m_weakSet.visit(visitor);
}
inline void MarkedBlock::Handle::reapWeakSet()
{
m_weakSet.reap();
}
inline size_t MarkedBlock::Handle::cellSize()
{
return m_atomsPerCell * atomSize;
}
inline size_t MarkedBlock::cellSize()
{
return handle().cellSize();
}
inline const CellAttributes& MarkedBlock::Handle::attributes() const
{
return m_attributes;
}
inline const CellAttributes& MarkedBlock::attributes() const
{
return handle().attributes();
}
inline bool MarkedBlock::Handle::needsDestruction() const
{
return m_attributes.destruction == NeedsDestruction;
}
inline DestructionMode MarkedBlock::Handle::destruction() const
{
return m_attributes.destruction;
}
inline HeapCell::Kind MarkedBlock::Handle::cellKind() const
{
return m_attributes.cellKind;
}
inline size_t MarkedBlock::Handle::markCount()
{
return m_block->markCount();
}
inline size_t MarkedBlock::Handle::size()
{
return markCount() * cellSize();
}
inline size_t MarkedBlock::atomNumber(const void* p)
{
return (reinterpret_cast<uintptr_t>(p) - reinterpret_cast<uintptr_t>(this)) / atomSize;
}
inline bool MarkedBlock::areMarksStale(HeapVersion markingVersion)
{
return markingVersion != footer().m_markingVersion;
}
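// The returned Dependency is derived from the version load; threading it
// into subsequent bitmap reads orders them after that load on weakly-ordered
// CPUs without paying for a fence.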
inline Dependency MarkedBlock::aboutToMark(HeapVersion markingVersion)
{
HeapVersion version = footer().m_markingVersion;
if (UNLIKELY(version != markingVersion))
aboutToMarkSlow(markingVersion);
return Dependency::fence(version);
}
inline void MarkedBlock::Handle::assertMarksNotStale()
{
block().assertMarksNotStale();
}
inline bool MarkedBlock::isMarkedRaw(const void* p)
{
return footer().m_marks.get(atomNumber(p));
}
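// Stale marks are logically clear, so if the block has not been brought up
// to markingVersion the answer is false without consulting the bitmap.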
inline bool MarkedBlock::isMarked(HeapVersion markingVersion, const void* p)
{
HeapVersion version = footer().m_markingVersion;
if (UNLIKELY(version != markingVersion))
return false;
return footer().m_marks.get(atomNumber(p), Dependency::fence(version));
}
inline bool MarkedBlock::isMarked(const void* p, Dependency dependency)
{
assertMarksNotStale();
return footer().m_marks.get(atomNumber(p), dependency);
}
inline bool MarkedBlock::testAndSetMarked(const void* p, Dependency dependency)
{
assertMarksNotStale();
return footer().m_marks.concurrentTestAndSet(atomNumber(p), dependency);
}
inline const Bitmap<MarkedBlock::atomsPerBlock>& MarkedBlock::marks() const
{
return footer().m_marks;
}
inline bool MarkedBlock::isNewlyAllocated(const void* p)
{
return footer().m_newlyAllocated.get(atomNumber(p));
}
inline void MarkedBlock::setNewlyAllocated(const void* p)
{
footer().m_newlyAllocated.set(atomNumber(p));
}
inline void MarkedBlock::clearNewlyAllocated(const void* p)
{
footer().m_newlyAllocated.clear(atomNumber(p));
}
inline const Bitmap<MarkedBlock::atomsPerBlock>& MarkedBlock::newlyAllocated() const
{
return footer().m_newlyAllocated;
}
inline bool MarkedBlock::isAtom(const void* p)
{
ASSERT(MarkedBlock::isAtomAligned(p));
size_t atomNumber = this->atomNumber(p);
if (atomNumber % handle().m_atomsPerCell) // Filters pointers into the middles of cells.
    return false;
if (atomNumber >= handle().m_endAtom) // Filters pointers beyond the last valid cell.
    return false;
return true;
}
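// Visits every cell in the block, live or dead; use forEachLiveCell() or
// forEachMarkedCell() when liveness matters.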
template <typename Functor>
inline IterationStatus MarkedBlock::Handle::forEachCell(const Functor& functor)
{
HeapCell::Kind kind = m_attributes.cellKind;
for (size_t i = 0; i < m_endAtom; i += m_atomsPerCell) {
HeapCell* cell = reinterpret_cast_ptr<HeapCell*>(&m_block->atoms()[i]);
if (functor(cell, kind) == IterationStatus::Done)
return IterationStatus::Done;
}
return IterationStatus::Continue;
}
inline bool MarkedBlock::hasAnyMarked() const
{
return footer().m_biasedMarkCount != footer().m_markCountBias;
}
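// The bias trick: m_biasedMarkCount starts each cycle at m_markCountBias, a
// negative value chosen so that the counter crosses zero exactly when enough
// cells are marked that the block should be retired from allocation. That
// turns the retirement check into a cheap compare against zero, with
// noteMarkedSlow() handling the rare hit.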
inline void MarkedBlock::noteMarked()
{
int16_t biasedMarkCount = footer().m_biasedMarkCount;
++biasedMarkCount;
footer().m_biasedMarkCount = biasedMarkCount;
if (UNLIKELY(!biasedMarkCount))
noteMarkedSlow();
}
} // namespace JSC
namespace WTF {
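// MarkedBlock pointers are blockSize-aligned, so their low bits are always
// zero; dividing by blockSize discards those bits and spreads consecutive
// blocks across the hash table.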
struct MarkedBlockHash : PtrHash<JSC::MarkedBlock*> {
static unsigned hash(JSC::MarkedBlock* const& key)
{
return reinterpret_cast<uintptr_t>(key) / JSC::MarkedBlock::blockSize;
}
};
template<> struct DefaultHash<JSC::MarkedBlock*> {
typedef MarkedBlockHash Hash;
};
void printInternal(PrintStream& out, JSC::MarkedBlock::Handle::SweepMode);
} // namespace WTF