// DFGPreciseLocalClobberize.h
#ifndef DFGPreciseLocalClobberize_h
#define DFGPreciseLocalClobberize_h
#if ENABLE(DFG_JIT)
#include "DFGClobberize.h"
#include "DFGMayExit.h"
namespace JSC { namespace DFG {
// Adaptor passed to clobberize() that translates abstract-heap read/write/def
// events into per-VirtualRegister callbacks. Only Stack-related heaps are
// reported; everything else is ignored (read) or asserted away (write).
template<typename ReadFunctor, typename WriteFunctor, typename DefFunctor>
class PreciseLocalClobberizeAdaptor {
public:
    PreciseLocalClobberizeAdaptor(
        Graph& graph, Node* node,
        const ReadFunctor& read, const WriteFunctor& write, const DefFunctor& def)
        : m_graph(graph)
        , m_node(node)
        , m_read(read)
        , m_unconditionalWrite(write)
        , m_def(def)
    {
    }

    void read(AbstractHeap heap)
    {
        if (heap.kind() == Stack) {
            // A Stack read with a TOP payload means "some unknown stack slot":
            // conservatively report every slot that could matter.
            if (heap.payload().isTop()) {
                readTop();
                return;
            }

            // Precise slot: report just that one register (subject to the
            // bounds filtering in callIfAppropriate()).
            callIfAppropriate(m_read, VirtualRegister(heap.payload().value32()));
            return;
        }

        // Heaps that merely overlap Stack (e.g. a broader heap) are treated
        // like a TOP stack read.
        if (heap.overlaps(Stack)) {
            readTop();
            return;
        }
    }

    void write(AbstractHeap heap)
    {
        if (heap.kind() == Stack) {
            // Unlike reads, writes are required to name an exact slot.
            RELEASE_ASSERT(!heap.payload().isTop());
            callIfAppropriate(m_unconditionalWrite, VirtualRegister(heap.payload().value32()));
            return;
        }

        // A non-Stack write must not be able to clobber the stack.
        RELEASE_ASSERT(!heap.overlaps(Stack));
    }

    void def(PureValue)
    {
        // Pure values have no heap location, so they cannot define a stack slot.
    }

    void def(HeapLocation location, LazyNode node)
    {
        // Only stack-slot definitions are interesting to this adaptor.
        if (location.kind() != StackLoc)
            return;

        RELEASE_ASSERT(location.heap().kind() == Stack);

        m_def(VirtualRegister(location.heap().payload().value32()), node);
    }

private:
    // Invokes the functor only for operands that the root block actually
    // declares: locals beyond block(0)'s numberOfLocals and non-header
    // arguments beyond its numberOfArguments are silently dropped.
    template<typename Functor>
    void callIfAppropriate(const Functor& functor, VirtualRegister operand)
    {
        if (operand.isLocal() && static_cast<unsigned>(operand.toLocal()) >= m_graph.block(0)->variablesAtHead.numberOfLocals())
            return;

        if (operand.isArgument() && !operand.isHeader() && static_cast<unsigned>(operand.toArgument()) >= m_graph.block(0)->variablesAtHead.numberOfArguments())
            return;

        functor(operand);
    }

    // Conservative expansion of a TOP stack read into concrete register reads.
    void readTop()
    {
        switch (m_node->op()) {
        case GetMyArgumentByVal:
        case ForwardVarargs:
        case CallForwardVarargs:
        case ConstructForwardVarargs: {
            // Argument-forwarding nodes read the arguments of the frame the
            // node's child semantically originates from.
            InlineCallFrame* inlineCallFrame = m_node->child1()->origin.semantic.inlineCallFrame;
            if (!inlineCallFrame) {
                // Machine (non-inlined) frame: read every declared parameter.
                // Note the loop starts at index 1, skipping argument 0
                // (presumably `this` — see JSStack::ThisArgument below).
                for (unsigned i = m_graph.m_codeBlock->numParameters(); i-- > 1;)
                    m_read(virtualRegisterForArgument(i));
                m_read(VirtualRegister(JSStack::ArgumentCount));
                break;
            }

            // Inlined frame: read the frame's argument slots, offset by its
            // stack offset; argument 0 is again excluded.
            for (unsigned i = inlineCallFrame->arguments.size(); i-- > 1;)
                m_read(VirtualRegister(inlineCallFrame->stackOffset + virtualRegisterForArgument(i).offset()));
            // The argument count slot is only live for varargs frames.
            if (inlineCallFrame->isVarargs())
                m_read(VirtualRegister(inlineCallFrame->stackOffset + JSStack::ArgumentCount));
            break;
        }

        default: {
            // Generic conservative case: read all machine parameters (except
            // index 0), all call-frame header slots below ThisArgument, and,
            // for every inline call frame up the semantic origin chain, that
            // frame's arguments plus its callee/argument-count slots when
            // they are live (closure call / varargs respectively).
            for (unsigned i = m_graph.m_codeBlock->numParameters(); i-- > 1;)
                m_read(virtualRegisterForArgument(i));

            for (unsigned i = 0; i < JSStack::ThisArgument; ++i)
                m_read(VirtualRegister(i));

            for (InlineCallFrame* inlineCallFrame = m_node->origin.semantic.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->caller.inlineCallFrame) {
                for (unsigned i = inlineCallFrame->arguments.size(); i-- > 1;)
                    m_read(VirtualRegister(inlineCallFrame->stackOffset + virtualRegisterForArgument(i).offset()));
                if (inlineCallFrame->isClosureCall)
                    m_read(VirtualRegister(inlineCallFrame->stackOffset + JSStack::Callee));
                if (inlineCallFrame->isVarargs())
                    m_read(VirtualRegister(inlineCallFrame->stackOffset + JSStack::ArgumentCount));
            }
            break;
        } }
    }

    Graph& m_graph;
    Node* m_node;
    const ReadFunctor& m_read;
    const WriteFunctor& m_unconditionalWrite;
    const DefFunctor& m_def;
};
// Runs the generic clobberize() walk over `node`, reporting every stack slot
// it reads, writes, or defines as a VirtualRegister through the given functors.
template<typename ReadFunctor, typename WriteFunctor, typename DefFunctor>
void preciseLocalClobberize(
    Graph& graph, Node* node,
    const ReadFunctor& read, const WriteFunctor& write, const DefFunctor& def)
{
    typedef PreciseLocalClobberizeAdaptor<ReadFunctor, WriteFunctor, DefFunctor> Adaptor;
    Adaptor adaptor(graph, node, read, write, def);
    clobberize(graph, node, adaptor);
}
} }
#endif // ENABLE(DFG_JIT)
#endif // DFGPreciseLocalClobberize_h