// DFGArgumentsEliminationPhase.cpp
#include "config.h"
#include "DFGArgumentsEliminationPhase.h"
#if ENABLE(DFG_JIT)
#include "BytecodeLivenessAnalysisInlines.h"
#include "DFGArgumentsUtilities.h"
#include "DFGBasicBlockInlines.h"
#include "DFGBlockMapInlines.h"
#include "DFGClobberize.h"
#include "DFGCombinedLiveness.h"
#include "DFGForAllKills.h"
#include "DFGGraph.h"
#include "DFGInsertionSet.h"
#include "DFGLivenessAnalysisPhase.h"
#include "DFGOSRAvailabilityAnalysisPhase.h"
#include "DFGPhase.h"
#include "JSCInlines.h"
#include <wtf/HashMap.h>
#include <wtf/HashSet.h>
#include <wtf/ListDump.h>
namespace JSC { namespace DFG {
namespace {
// Debug switch: set to true to dump the graph and the candidate set after each
// stage of the phase. Only ever read (never written), so declare it const to
// make that contract explicit and let the compiler fold the dead branches.
static const bool verbose = false;
// Eliminates allocations of arguments objects (CreateDirectArguments /
// CreateClonedArguments) that provably do not escape and whose backing stack
// slots are not clobbered while the object is live. Surviving candidates are
// turned into Phantom allocations and every use is rewritten to read the
// argument values directly off the stack.
class ArgumentsEliminationPhase : public Phase {
public:
    ArgumentsEliminationPhase(Graph& graph)
        : Phase(graph, "arguments elimination")
    {
    }

    // Runs the three filtering analyses followed by the transformation.
    // Returns true if the graph was changed. Each analysis only shrinks
    // m_candidates, so we can bail out early as soon as the set is empty.
    bool run()
    {
        // The backward interference walk and forAllKillsInBlock below rely on
        // the graph being in SSA form.
        DFG_ASSERT(m_graph, nullptr, m_graph.m_form == SSA);

        if (verbose) {
            dataLog("Graph before arguments elimination:\n");
            m_graph.dump();
        }

        identifyCandidates();
        if (m_candidates.isEmpty())
            return false;

        eliminateCandidatesThatEscape();
        if (m_candidates.isEmpty())
            return false;

        eliminateCandidatesThatInterfere();
        if (m_candidates.isEmpty())
            return false;

        transform();
        return true;
    }

private:
    // Seeds m_candidates with every arguments allocation we might eliminate.
    // Later passes only ever remove entries from this set.
    void identifyCandidates()
    {
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (Node* node : *block) {
                switch (node->op()) {
                case CreateDirectArguments:
                case CreateClonedArguments:
                    m_candidates.add(node);
                    break;

                case CreateScopedArguments:
                    // Deliberately not a candidate: scoped arguments are tied
                    // to a scope object, so this phase does not handle them.
                    break;

                default:
                    break;
                }
            }
        }

        if (verbose)
            dataLog("Candidates: ", listDump(m_candidates), "\n");
    }

    // Removes from m_candidates any allocation that "escapes", i.e. flows into
    // a node that transform() does not know how to rewrite. Everything escapes
    // by default (see the default switch case); the explicit cases are the
    // whitelist of uses we can handle.
    void eliminateCandidatesThatEscape()
    {
        // Mark whatever this edge points at as escaped. Removing a non-member
        // is a harmless no-op, so we don't pre-filter.
        auto escape = [&] (Edge edge) {
            if (!edge)
                return;
            m_candidates.remove(edge.node());
        };

        // An array-flavored access keeps the candidate only if the array mode
        // matches the kind of arguments object that was allocated:
        // DirectArguments mode pairs with CreateDirectArguments, and the
        // ordinary indexed modes pair with CreateClonedArguments. Any other
        // mode (or a mismatch) escapes the object.
        auto escapeBasedOnArrayMode = [&] (ArrayMode mode, Edge edge) {
            switch (mode.type()) {
            case Array::DirectArguments:
                if (edge->op() != CreateDirectArguments)
                    escape(edge);
                break;

            case Array::Int32:
            case Array::Double:
            case Array::Contiguous:
                if (edge->op() != CreateClonedArguments)
                    escape(edge);
                break;

            default:
                escape(edge);
                break;
            }
        };

        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (Node* node : *block) {
                switch (node->op()) {
                case GetFromArguments:
                    // Handled by transform(); only valid on direct arguments.
                    DFG_ASSERT(m_graph, node, node->child1()->op() == CreateDirectArguments);
                    break;

                case GetByVal:
                    // The base (child1) survives if the array mode matches;
                    // the index and any extra child still escape whatever
                    // they reference.
                    escapeBasedOnArrayMode(node->arrayMode(), node->child1());
                    escape(node->child2());
                    escape(node->child3());
                    break;

                case GetArrayLength:
                    escapeBasedOnArrayMode(node->arrayMode(), node->child1());
                    escape(node->child2());
                    break;

                case LoadVarargs:
                    // Handled by transform(); does not escape its source.
                    break;

                case CallVarargs:
                case ConstructVarargs:
                    // child2 is the arguments object being forwarded and is
                    // handled by transform(); callee (child1) and child3
                    // escape normally.
                    escape(node->child1());
                    escape(node->child3());
                    break;

                case Check:
                    // A type check that can only test object-ness cannot
                    // observe the allocation; any stronger check escapes it.
                    m_graph.doToChildren(
                        node,
                        [&] (Edge edge) {
                            if (edge.willNotHaveCheck())
                                return;
                            if (alreadyChecked(edge.useKind(), SpecObject))
                                return;
                            escape(edge);
                        });
                    break;

                case MovHint:
                case PutHint:
                    // OSR hints don't count as escapes: the Phantom node left
                    // behind by transform() keeps them satisfiable.
                    break;

                case GetButterfly:
                    // Removed outright by transform() when the base is a
                    // candidate, so it is not an escape.
                    break;

                case CheckArray:
                    escapeBasedOnArrayMode(node->arrayMode(), node->child1());
                    break;

                default:
                    // Anything not explicitly whitelisted escapes all of its
                    // children.
                    m_graph.doToChildren(node, escape);
                    break;
                }
            }
        }

        if (verbose)
            dataLog("After escape analysis: ", listDump(m_candidates), "\n");
    }

    // Removes candidates whose source stack slots (the argument slots, and for
    // inline frames also the callee / argument-count slots) are overwritten at
    // a point where the candidate is still live. Such a candidate cannot be
    // rematerialized from the stack, so it must stay allocated.
    void eliminateCandidatesThatInterfere()
    {
        performLivenessAnalysis(m_graph);
        performOSRAvailabilityAnalysis(m_graph);
        m_graph.initializeNodeOwners();
        CombinedLiveness combinedLiveness(m_graph);

        // First pass: per block, record which stack operands any node in the
        // block writes, using the clobberize write-set callback.
        BlockMap<Operands<bool>> clobberedByBlock(m_graph);
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            Operands<bool>& clobberedByThisBlock = clobberedByBlock[block];
            clobberedByThisBlock = Operands<bool>(OperandsLike, m_graph.block(0)->variablesAtHead);
            for (Node* node : *block) {
                clobberize(
                    m_graph, node, NoOpClobberize(),
                    [&] (AbstractHeap heap) {
                        if (heap.kind() != Stack) {
                            ASSERT(!heap.overlaps(Stack));
                            return;
                        }
                        ASSERT(!heap.payload().isTop());
                        VirtualRegister reg(heap.payload().value32());
                        clobberedByThisBlock.operand(reg) = true;
                    },
                    NoOpClobberize());
            }
        }

        // Second pass: at every kill point of a candidate, check whether any
        // stack write between the candidate's creation and its death touches
        // the slots the candidate reads from.
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            // Everything below only removes candidates, so once the set is
            // empty there is nothing left to do.
            if (m_candidates.isEmpty())
                return;

            // Blocks that write no stack slot at all can be skipped entirely.
            bool writesToStack = false;
            for (unsigned i = clobberedByBlock[block].size(); i--;) {
                if (clobberedByBlock[block][i]) {
                    writesToStack = true;
                    break;
                }
            }
            if (!writesToStack)
                continue;

            forAllKillsInBlock(
                m_graph, combinedLiveness, block,
                [&] (unsigned nodeIndex, Node* candidate) {
                    if (!m_candidates.contains(candidate))
                        return;

                    // Cheap block-level filter first: does this block clobber
                    // any slot the candidate depends on?
                    bool isClobberedByBlock = false;
                    Operands<bool>& clobberedByThisBlock = clobberedByBlock[block];

                    if (InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame) {
                        // A varargs inline frame materializes its argument
                        // count in the frame, so writes to that slot matter.
                        if (inlineCallFrame->isVarargs()) {
                            isClobberedByBlock |= clobberedByThisBlock.operand(
                                inlineCallFrame->stackOffset + JSStack::ArgumentCount);
                        }

                        // The callee slot matters for closure calls (and is
                        // checked anyway unless already known clobbered).
                        if (!isClobberedByBlock || inlineCallFrame->isClosureCall) {
                            isClobberedByBlock |= clobberedByThisBlock.operand(
                                inlineCallFrame->stackOffset + JSStack::Callee);
                        }

                        if (!isClobberedByBlock) {
                            // Scan the inline frame's argument slots.
                            // NOTE(review): the "- 1" presumably skips the
                            // 'this' entry in arguments — confirm against
                            // InlineCallFrame's layout.
                            for (unsigned i = 0; i < inlineCallFrame->arguments.size() - 1; ++i) {
                                VirtualRegister reg =
                                    VirtualRegister(inlineCallFrame->stackOffset) +
                                    CallFrame::argumentOffset(i);
                                if (clobberedByThisBlock.operand(reg)) {
                                    isClobberedByBlock = true;
                                    break;
                                }
                            }
                        }
                    } else {
                        // Machine frame: scan the declared parameters,
                        // starting at 1 (argument 0 is 'this').
                        for (unsigned i = 1; i < static_cast<unsigned>(codeBlock()->numParameters()); ++i) {
                            if (clobberedByThisBlock.argument(i)) {
                                isClobberedByBlock = true;
                                break;
                            }
                        }
                    }

                    if (!isClobberedByBlock)
                        return;

                    // Killed at block end but created in some other block: the
                    // whole block lies inside the candidate's live range, so
                    // any clobber in it interferes.
                    if (nodeIndex == block->size() && candidate->owner != block) {
                        m_candidates.remove(candidate);
                        return;
                    }

                    // Otherwise walk backward from the kill point to the
                    // candidate's creation, looking for a node that writes one
                    // of the candidate's stack slots (or writes Stack
                    // imprecisely, which we must treat conservatively).
                    while (nodeIndex--) {
                        Node* node = block->at(nodeIndex);
                        if (node == candidate)
                            break;

                        bool found = false;
                        clobberize(
                            m_graph, node, NoOpClobberize(),
                            [&] (AbstractHeap heap) {
                                if (heap.kind() == Stack && !heap.payload().isTop()) {
                                    if (argumentsInvolveStackSlot(candidate, VirtualRegister(heap.payload().value32())))
                                        found = true;
                                    return;
                                }
                                if (heap.overlaps(Stack))
                                    found = true;
                            },
                            NoOpClobberize());

                        if (found) {
                            m_candidates.remove(candidate);
                            return;
                        }
                    }
                });
        }

        if (verbose)
            dataLog("After interference analysis: ", listDump(m_candidates), "\n");
    }

    // Rewrites the IR: each surviving candidate becomes a Phantom allocation
    // (kept alive for OSR exit), and every use of it is replaced by direct
    // stack accesses or by a forwarding variant of the node.
    void transform()
    {
        InsertionSet insertionSet(m_graph);

        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);

                // Emits, before the current node, code that computes the
                // length of the arguments array for 'candidate'.
                auto getArrayLength = [&] (Node* candidate) -> Node* {
                    return emitCodeToGetArgumentsArrayLength(
                        insertionSet, candidate, nodeIndex, node->origin);
                };

                switch (node->op()) {
                case CreateDirectArguments:
                    if (!m_candidates.contains(node))
                        break;

                    node->setOpAndDefaultFlags(PhantomDirectArguments);
                    break;

                case CreateClonedArguments:
                    if (!m_candidates.contains(node))
                        break;

                    node->setOpAndDefaultFlags(PhantomClonedArguments);
                    break;

                case GetFromArguments: {
                    Node* candidate = node->child1().node();
                    if (!m_candidates.contains(candidate))
                        break;

                    DFG_ASSERT(
                        m_graph, node,
                        node->child1()->op() == CreateDirectArguments
                        || node->child1()->op() == PhantomDirectArguments);
                    // Read the captured argument straight from its stack slot
                    // ("+ 1" skips 'this'; the semantic origin supplies the
                    // inline-frame stack offset).
                    VirtualRegister reg =
                        virtualRegisterForArgument(node->capturedArgumentsOffset().offset() + 1) +
                        node->origin.semantic.stackOffset();
                    StackAccessData* data = m_graph.m_stackAccessData.add(reg, FlushedJSValue);
                    node->convertToGetStack(data);
                    break;
                }

                case GetArrayLength: {
                    Node* candidate = node->child1().node();
                    if (!m_candidates.contains(candidate))
                        break;

                    node->convertToIdentityOn(getArrayLength(candidate));
                    break;
                }

                case GetByVal: {
                    Node* candidate = node->child1().node();
                    if (!m_candidates.contains(candidate))
                        break;

                    // For a constant in-range index we can load the slot
                    // directly; otherwise fall back to GetMyArgumentByVal.
                    Node* result = nullptr;
                    if (node->child2()->isInt32Constant()) {
                        unsigned index = node->child2()->asUInt32();
                        InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame;

                        // A stack slot only exists for declared/passed
                        // arguments ("- 1" excludes 'this').
                        bool safeToGetStack;
                        if (inlineCallFrame)
                            safeToGetStack = index < inlineCallFrame->arguments.size() - 1;
                        else {
                            safeToGetStack =
                                index < static_cast<unsigned>(codeBlock()->numParameters()) - 1;
                        }
                        if (safeToGetStack) {
                            StackAccessData* data;
                            VirtualRegister arg = virtualRegisterForArgument(index + 1);
                            if (inlineCallFrame)
                                arg += inlineCallFrame->stackOffset;
                            data = m_graph.m_stackAccessData.add(arg, FlushedJSValue);

                            // When the runtime argument count is not known
                            // statically (machine frame or varargs inline
                            // frame), guard the access with a bounds check.
                            if (!inlineCallFrame || inlineCallFrame->isVarargs()
                                || index >= inlineCallFrame->arguments.size() - 1) {
                                insertionSet.insertNode(
                                    nodeIndex, SpecNone, CheckInBounds, node->origin,
                                    node->child2(), Edge(getArrayLength(candidate), Int32Use));
                            }

                            result = insertionSet.insertNode(
                                nodeIndex, node->prediction(), GetStack, node->origin, OpInfo(data));
                        }
                    }

                    if (!result) {
                        result = insertionSet.insertNode(
                            nodeIndex, node->prediction(), GetMyArgumentByVal, node->origin,
                            node->child1(), node->child2());
                    }

                    node->convertToIdentityOn(result);
                    break;
                }

                case LoadVarargs: {
                    Node* candidate = node->child1().node();
                    if (!m_candidates.contains(candidate))
                        break;

                    // If the argument count is statically known (non-varargs
                    // inline frame) and fits under the limit, expand the load
                    // into explicit per-slot moves; otherwise just forward.
                    LoadVarargsData* varargsData = node->loadVarargsData();
                    InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame;
                    if (inlineCallFrame
                        && !inlineCallFrame->isVarargs()
                        && inlineCallFrame->arguments.size() - varargsData->offset <= varargsData->limit) {

                        // Store the (constant) argument count.
                        Node* argumentCount = insertionSet.insertConstant(
                            nodeIndex, node->origin,
                            jsNumber(inlineCallFrame->arguments.size() - varargsData->offset));
                        insertionSet.insertNode(
                            nodeIndex, SpecNone, MovHint, node->origin,
                            OpInfo(varargsData->count.offset()), Edge(argumentCount));
                        insertionSet.insertNode(
                            nodeIndex, SpecNone, PutStack, node->origin,
                            OpInfo(m_graph.m_stackAccessData.add(varargsData->count, FlushedInt32)),
                            Edge(argumentCount, Int32Use));

                        // Fill every target slot up to the limit: copy real
                        // arguments, pad the rest with undefined.
                        DFG_ASSERT(m_graph, node, varargsData->limit - 1 >= varargsData->mandatoryMinimum);
                        unsigned limit = varargsData->limit - 1;
                        Node* undefined = nullptr;
                        for (unsigned storeIndex = 0; storeIndex < limit; ++storeIndex) {
                            unsigned loadIndex = storeIndex + varargsData->offset;

                            Node* value;
                            if (loadIndex + 1 < inlineCallFrame->arguments.size()) {
                                VirtualRegister reg =
                                    virtualRegisterForArgument(loadIndex + 1) +
                                    inlineCallFrame->stackOffset;
                                StackAccessData* data = m_graph.m_stackAccessData.add(
                                    reg, FlushedJSValue);
                                value = insertionSet.insertNode(
                                    nodeIndex, SpecNone, GetStack, node->origin, OpInfo(data));
                            } else {
                                // Lazily create a single shared undefined
                                // constant for all padding stores.
                                if (!undefined) {
                                    undefined = insertionSet.insertConstant(
                                        nodeIndex, node->origin, jsUndefined());
                                }
                                value = undefined;
                            }

                            VirtualRegister reg = varargsData->start + storeIndex;
                            StackAccessData* data =
                                m_graph.m_stackAccessData.add(reg, FlushedJSValue);

                            insertionSet.insertNode(
                                nodeIndex, SpecNone, MovHint, node->origin, OpInfo(reg.offset()),
                                Edge(value));
                            insertionSet.insertNode(
                                nodeIndex, SpecNone, PutStack, node->origin, OpInfo(data),
                                Edge(value));
                        }

                        node->remove();
                        break;
                    }

                    node->setOpAndDefaultFlags(ForwardVarargs);
                    break;
                }

                case CallVarargs:
                case ConstructVarargs: {
                    // child2 is the arguments object being spread.
                    Node* candidate = node->child2().node();
                    if (!m_candidates.contains(candidate))
                        break;

                    CallVarargsData* varargsData = node->callVarargsData();
                    InlineCallFrame* inlineCallFrame = candidate->origin.semantic.inlineCallFrame;
                    if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
                        // Statically known arguments: load each one off the
                        // stack and turn the varargs call into a plain
                        // Call/Construct with explicit children.
                        Vector<Node*> arguments;
                        for (unsigned i = 1 + varargsData->firstVarArgOffset; i < inlineCallFrame->arguments.size(); ++i) {
                            StackAccessData* data = m_graph.m_stackAccessData.add(
                                virtualRegisterForArgument(i) + inlineCallFrame->stackOffset,
                                FlushedJSValue);

                            Node* value = insertionSet.insertNode(
                                nodeIndex, SpecNone, GetStack, node->origin, OpInfo(data));

                            arguments.append(value);
                        }

                        // Var-arg children: callee, then this (child3), then
                        // the loaded arguments.
                        unsigned firstChild = m_graph.m_varArgChildren.size();
                        m_graph.m_varArgChildren.append(node->child1());
                        m_graph.m_varArgChildren.append(node->child3());
                        for (Node* argument : arguments)
                            m_graph.m_varArgChildren.append(Edge(argument));
                        node->setOpAndDefaultFlags(
                            node->op() == CallVarargs ? Call : Construct);
                        node->children = AdjacencyList(
                            AdjacencyList::Variable,
                            firstChild, m_graph.m_varArgChildren.size() - firstChild);
                        break;
                    }

                    node->setOpAndDefaultFlags(
                        node->op() == CallVarargs ? CallForwardVarargs : ConstructForwardVarargs);
                    break;
                }

                case CheckArray:
                case GetButterfly: {
                    // These checks/loads were proven redundant for candidates
                    // during escape analysis, so they can simply be removed.
                    if (!m_candidates.contains(node->child1().node()))
                        break;
                    node->remove();
                    break;
                }

                default:
                    break;
                }
            }

            insertionSet.execute(block);
        }
    }

    // The set of arguments allocations still believed to be eliminable; only
    // ever shrinks after identifyCandidates().
    HashSet<Node*> m_candidates;
};
}
// Phase entry point: runs ArgumentsEliminationPhase over the graph and
// returns true if the graph was changed. The SamplingRegion scopes the
// profiler attribution to this phase for the duration of the call.
bool performArgumentsElimination(Graph& graph)
{
    SamplingRegion region("DFG Arguments Elimination Phase");
    return runPhase<ArgumentsEliminationPhase>(graph);
}
} }
#endif // ENABLE(DFG_JIT)