// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/IrAnalysis.h"

#include "Luau/DenseHash.h"
#include "Luau/IrData.h"
#include "Luau/IrUtils.h"

#include "lobject.h"

#include <bitset>

#include <stddef.h>

namespace Luau
{
namespace CodeGen
{
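
// Recompute the number of times each block and each instruction is referenced as an operand
// across the whole function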
void updateUseCounts(IrFunction& function)
{
    std::vector<IrBlock>& blocks = function.blocks;
    std::vector<IrInst>& instructions = function.instructions;

    for (IrBlock& block : blocks)
        block.useCount = 0;

    for (IrInst& inst : instructions)
        inst.useCount = 0;

    auto checkOp = [&](IrOp op) {
        if (op.kind == IrOpKind::Inst)
        {
            IrInst& target = instructions[op.index];
            LUAU_ASSERT(target.useCount < 0xffff);
            target.useCount++;
        }
        else if (op.kind == IrOpKind::Block)
        {
            IrBlock& target = blocks[op.index];
            LUAU_ASSERT(target.useCount < 0xffff);
            target.useCount++;
        }
    };

    for (IrInst& inst : instructions)
    {
        checkOp(inst.a);
        checkOp(inst.b);
        checkOp(inst.c);
        checkOp(inst.d);
        checkOp(inst.e);
        checkOp(inst.f);
    }
}
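
// Record, for each instruction, the index of the last instruction that uses its result
// (lastUse stays 0 for results that are never used)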
void updateLastUseLocations(IrFunction& function)
{
    std::vector<IrInst>& instructions = function.instructions;

    for (IrInst& inst : instructions)
        inst.lastUse = 0;

    for (size_t instIdx = 0; instIdx < instructions.size(); ++instIdx)
    {
        IrInst& inst = instructions[instIdx];

        auto checkOp = [&](IrOp op) {
            if (op.kind == IrOpKind::Inst)
                instructions[op.index].lastUse = uint32_t(instIdx);
        };

        checkOp(inst.a);
        checkOp(inst.b);
        checkOp(inst.c);
        checkOp(inst.d);
        checkOp(inst.e);
        checkOp(inst.f);
    }
}
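
// Count how many values flow into this block (live-ins) and out of it (live-outs): operands
// defined outside the block count as live-ins; each definition inside the block initially
// contributes its full use count to 'liveOuts', and every use from inside the block cancels
// one of those counts, so only the uses made by other blocks remain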
std::pair<uint32_t, uint32_t> getLiveInOutValueCount(IrFunction& function, IrBlock& block)
{
    uint32_t liveIns = 0;
    uint32_t liveOuts = 0;

    auto checkOp = [&](IrOp op) {
        if (op.kind == IrOpKind::Inst)
        {
            if (op.index >= block.start && op.index <= block.finish)
                liveOuts--;
            else
                liveIns++;
        }
    };

    for (uint32_t instIdx = block.start; instIdx <= block.finish; instIdx++)
    {
        IrInst& inst = function.instructions[instIdx];

        liveOuts += inst.useCount;

        checkOp(inst.a);
        checkOp(inst.b);
        checkOp(inst.c);
        checkOp(inst.d);
        checkOp(inst.e);
        checkOp(inst.f);
    }

    return std::make_pair(liveIns, liveOuts);
}

uint32_t getLiveInValueCount(IrFunction& function, IrBlock& block)
{
    return getLiveInOutValueCount(function, block).first;
}

uint32_t getLiveOutValueCount(IrFunction& function, IrBlock& block)
{
    return getLiveInOutValueCount(function, block).second;
}
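
// Merge a required variadic sequence starting at 'varargStart' into 'sourceRs', taking into
// account the registers and any variadic sequence that the block has already defined ('defRs')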
static void requireVariadicSequence(RegisterSet& sourceRs, const RegisterSet& defRs, uint8_t varargStart)
{
    if (!defRs.varargSeq)
    {
        // Peel away registers from variadic sequence that we define
        while (defRs.regs.test(varargStart))
            varargStart++;

        LUAU_ASSERT(!sourceRs.varargSeq || sourceRs.varargStart == varargStart);

        sourceRs.varargSeq = true;
        sourceRs.varargStart = varargStart;
    }
    else
    {
        // Variadic use sequence might include registers before def sequence
        for (int i = varargStart; i < defRs.varargStart; i++)
        {
            if (!defRs.regs.test(i))
                sourceRs.regs.set(i);
        }
    }
}
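
// Compute the set of registers that are live-in to 'block', accumulating the registers the
// block defines into 'defRs' and marking captured registers in 'capturedRegs'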
static RegisterSet computeBlockLiveInRegSet(IrFunction& function, const IrBlock& block, RegisterSet& defRs, std::bitset<256>& capturedRegs)
{
    RegisterSet inRs;
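
    // Helpers for recording register definitions and uses; the 'maybe' variants are for
    // operands that are not guaranteed to be VM registers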
    auto def = [&](IrOp op, int offset = 0) {
        LUAU_ASSERT(op.kind == IrOpKind::VmReg);
        defRs.regs.set(op.index + offset, true);
    };

    auto use = [&](IrOp op, int offset = 0) {
        LUAU_ASSERT(op.kind == IrOpKind::VmReg);
        if (!defRs.regs.test(op.index + offset))
            inRs.regs.set(op.index + offset, true);
    };

    auto maybeDef = [&](IrOp op) {
        if (op.kind == IrOpKind::VmReg)
            defRs.regs.set(op.index, true);
    };

    auto maybeUse = [&](IrOp op) {
        if (op.kind == IrOpKind::VmReg)
        {
            if (!defRs.regs.test(op.index))
                inRs.regs.set(op.index, true);
        }
    };

    auto defVarargs = [&](uint8_t varargStart) {
        defRs.varargSeq = true;
        defRs.varargStart = varargStart;
    };

    auto useVarargs = [&](uint8_t varargStart) {
        requireVariadicSequence(inRs, defRs, varargStart);

        // Variadic sequence has been consumed
        defRs.varargSeq = false;
        defRs.varargStart = 0;
    };

    auto defRange = [&](int start, int count) {
        if (count == -1)
        {
            defVarargs(start);
        }
        else
        {
            for (int i = start; i < start + count; i++)
                defRs.regs.set(i, true);
        }
    };

    auto useRange = [&](int start, int count) {
        if (count == -1)
        {
            useVarargs(start);
        }
        else
        {
            for (int i = start; i < start + count; i++)
            {
                if (!defRs.regs.test(i))
                    inRs.regs.set(i, true);
            }
        }
    };

    for (uint32_t instIdx = block.start; instIdx <= block.finish; instIdx++)
    {
        const IrInst& inst = function.instructions[instIdx];

        // For correct analysis, all instruction uses must be handled before handling the definitions
        switch (inst.cmd)
        {
        case IrCmd::LOAD_TAG:
        case IrCmd::LOAD_POINTER:
        case IrCmd::LOAD_DOUBLE:
        case IrCmd::LOAD_INT:
        case IrCmd::LOAD_TVALUE:
            maybeUse(inst.a); // Argument can also be a VmConst
            break;
        case IrCmd::STORE_TAG:
        case IrCmd::STORE_POINTER:
        case IrCmd::STORE_DOUBLE:
        case IrCmd::STORE_INT:
        case IrCmd::STORE_TVALUE:
            maybeDef(inst.a); // Argument can also be a pointer value
            break;
        case IrCmd::JUMP_IF_TRUTHY:
        case IrCmd::JUMP_IF_FALSY:
            use(inst.a);
            break;
        case IrCmd::JUMP_CMP_ANY:
            use(inst.a);
            use(inst.b);
            break;
        // A <- B, C
        case IrCmd::DO_ARITH:
        case IrCmd::GET_TABLE:
            use(inst.b);
            maybeUse(inst.c); // Argument can also be a VmConst

            def(inst.a);
            break;
        case IrCmd::SET_TABLE:
            use(inst.a);
            use(inst.b);
            maybeUse(inst.c); // Argument can also be a VmConst
            break;
        // A <- B
        case IrCmd::DO_LEN:
            use(inst.b);

            def(inst.a);
            break;
        case IrCmd::GET_IMPORT:
            def(inst.a);
            break;
        case IrCmd::CONCAT:
            useRange(inst.a.index, function.uintOp(inst.b));

            defRange(inst.a.index, function.uintOp(inst.b));
            break;
        case IrCmd::GET_UPVALUE:
            def(inst.a);
            break;
        case IrCmd::SET_UPVALUE:
            use(inst.b);
            break;
        case IrCmd::PREPARE_FORN:
            use(inst.a);
            use(inst.b);
            use(inst.c);

            def(inst.a);
            def(inst.b);
            def(inst.c);
            break;
        case IrCmd::INTERRUPT:
            break;
        case IrCmd::BARRIER_OBJ:
        case IrCmd::BARRIER_TABLE_FORWARD:
            use(inst.b);
            break;
        case IrCmd::CLOSE_UPVALS:
            // Closing an upvalue should be counted as a register use (it copies the fresh register value)
            // But we lack the required information about the specific set of registers that are affected
            // Because we don't plan to optimize captured registers at the moment, we skip full data-flow analysis for them
            break;
        case IrCmd::CAPTURE:
            maybeUse(inst.a);

            if (function.boolOp(inst.b))
                capturedRegs.set(inst.a.index, true);
            break;
        case IrCmd::SETLIST:
            use(inst.b);
            useRange(inst.c.index, function.intOp(inst.d));
            break;
        case IrCmd::CALL:
            use(inst.a);
            useRange(inst.a.index + 1, function.intOp(inst.b));

            defRange(inst.a.index, function.intOp(inst.c));
            break;
        case IrCmd::RETURN:
            useRange(inst.a.index, function.intOp(inst.b));
            break;
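        // Fast-call arguments start at register C (E is the argument count, -1 for a variadic sequence);
        // results are stored starting at register B (F is the result count)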
        case IrCmd::FASTCALL:
        case IrCmd::INVOKE_FASTCALL:
            if (int count = function.intOp(inst.e); count != -1)
            {
                if (count >= 3)
                {
                    LUAU_ASSERT(inst.d.kind == IrOpKind::VmReg && inst.d.index == inst.c.index + 1);

                    useRange(inst.c.index, count);
                }
                else
                {
                    if (count >= 1)
                        use(inst.c);

                    if (count >= 2)
                        maybeUse(inst.d); // Argument can also be a VmConst
                }
            }
            else
            {
                useVarargs(inst.c.index);
            }

            // Multiple return sequences (count == -1) are defined by ADJUST_STACK_TO_REG
            if (int count = function.intOp(inst.f); count != -1)
                defRange(inst.b.index, count);
            break;
        case IrCmd::FORGLOOP:
            // The first register is not used by the instruction; we check that it's still 'nil' with CHECK_TAG
            use(inst.a, 1);
            use(inst.a, 2);

            def(inst.a, 2);
            defRange(inst.a.index + 3, function.intOp(inst.b));
            break;
        case IrCmd::FORGLOOP_FALLBACK:
            useRange(inst.a.index, 3);

            def(inst.a, 2);
            defRange(inst.a.index + 3, uint8_t(function.intOp(inst.b))); // ignore most significant bit
            break;
        case IrCmd::FORGPREP_XNEXT_FALLBACK:
            use(inst.b);
            break;
        // A <- B, C
        case IrCmd::AND:
        case IrCmd::OR:
            use(inst.b);
            use(inst.c);

            def(inst.a);
            break;
        // A <- B
        case IrCmd::ANDK:
        case IrCmd::ORK:
            use(inst.b);

            def(inst.a);
            break;
        case IrCmd::FALLBACK_GETGLOBAL:
            def(inst.b);
            break;
        case IrCmd::FALLBACK_SETGLOBAL:
            use(inst.b);
            break;
        case IrCmd::FALLBACK_GETTABLEKS:
            use(inst.c);

            def(inst.b);
            break;
        case IrCmd::FALLBACK_SETTABLEKS:
            use(inst.b);
            use(inst.c);
            break;
        case IrCmd::FALLBACK_NAMECALL:
            use(inst.c);

            defRange(inst.b.index, 2);
            break;
        case IrCmd::FALLBACK_PREPVARARGS:
            // No effect on explicitly referenced registers
            break;
        case IrCmd::FALLBACK_GETVARARGS:
            defRange(inst.b.index, function.intOp(inst.c));
            break;
        case IrCmd::FALLBACK_NEWCLOSURE:
            def(inst.b);
            break;
        case IrCmd::FALLBACK_DUPCLOSURE:
            def(inst.b);
            break;
        case IrCmd::FALLBACK_FORGPREP:
            use(inst.b);

            defRange(inst.b.index, 3);
            break;
        case IrCmd::ADJUST_STACK_TO_REG:
            defRange(inst.a.index, -1);
            break;
        case IrCmd::ADJUST_STACK_TO_TOP:
            // While this can be considered a vararg consumer, it is already handled in the fastcall instructions
            break;

        default:
            // All instructions which reference registers have to be handled explicitly
            LUAU_ASSERT(inst.a.kind != IrOpKind::VmReg);
            LUAU_ASSERT(inst.b.kind != IrOpKind::VmReg);
            LUAU_ASSERT(inst.c.kind != IrOpKind::VmReg);
            LUAU_ASSERT(inst.d.kind != IrOpKind::VmReg);
            LUAU_ASSERT(inst.e.kind != IrOpKind::VmReg);
            LUAU_ASSERT(inst.f.kind != IrOpKind::VmReg);
            break;
        }
    }

    return inRs;
}
// The algorithm used here is commonly known as backwards data-flow analysis.
// For each block, we track 'upward-exposed' (live-in) register uses - uses of registers that haven't been defined in the block yet.
// We also track the set of registers that the block defines.
// Once the initial live-in set of each block is computed, those uses are propagated upwards through the predecessors.
// If a predecessor doesn't define the register, it has to be added to that predecessor's live-in set.
// Extending the live-in set of a block requires re-checking that block.
// Propagation runs iteratively, using a worklist of blocks to visit, until a fixed point is reached.
// This algorithm can be easily extended to cover phi instructions, but we don't use those yet.
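// As an illustration (hypothetical two-block CFG): if block B reads R1 before defining it, R1 is live-in to B.
// Propagation then adds R1 to the live-out set of B's predecessor A, and if A doesn't define R1,
// R1 becomes live-in to A as well, which re-queues A's own predecessors for another visit.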
static void computeCfgLiveInOutRegSets(IrFunction& function)
{
    CfgInfo& info = function.cfg;

    // Clear existing data
    // 'in' and 'captured' data is not cleared because it will be overwritten below
    info.def.clear();
    info.out.clear();

    // Try to compute Luau VM register use-def info
    info.in.resize(function.blocks.size());
    info.def.resize(function.blocks.size());
    info.out.resize(function.blocks.size());

    // Captured registers are tracked for the whole function
    // It should be possible to have a more precise analysis for them in the future
    std::bitset<256> capturedRegs;

    // First we compute the live-in set of each block
    for (size_t blockIdx = 0; blockIdx < function.blocks.size(); blockIdx++)
    {
        const IrBlock& block = function.blocks[blockIdx];

        if (block.kind == IrBlockKind::Dead)
            continue;

        info.in[blockIdx] = computeBlockLiveInRegSet(function, block, info.def[blockIdx], capturedRegs);
    }

    info.captured.regs = capturedRegs;

    // With live-in sets ready, we can arrive at a fixed point for both in/out registers by requesting required registers from predecessors
    std::vector<uint32_t> worklist;

    std::vector<uint8_t> inWorklist;
    inWorklist.resize(function.blocks.size(), false);

    // We will have to visit each block at least once, so we add all of them to the worklist immediately
    for (size_t blockIdx = 0; blockIdx < function.blocks.size(); blockIdx++)
    {
        const IrBlock& block = function.blocks[blockIdx];

        if (block.kind == IrBlockKind::Dead)
            continue;

        worklist.push_back(uint32_t(blockIdx));
        inWorklist[blockIdx] = true;
    }

    while (!worklist.empty())
    {
        uint32_t blockIdx = worklist.back();
        worklist.pop_back();
        inWorklist[blockIdx] = false;

        IrBlock& curr = function.blocks[blockIdx];
        RegisterSet& inRs = info.in[blockIdx];
        RegisterSet& defRs = info.def[blockIdx];
        RegisterSet& outRs = info.out[blockIdx];

        // Current block has to provide all registers in successor blocks
        for (uint32_t succIdx : successors(info, blockIdx))
        {
            IrBlock& succ = function.blocks[succIdx];

            // This is a step away from the usual definition of live range flow through the CFG
            // Exit from a regular block to a fallback block is not considered a block terminator
            // This is because fallback blocks define an alternative implementation of the same operations
            // This can cause the current block to define more registers than were actually available at fallback entry
            if (curr.kind != IrBlockKind::Fallback && succ.kind == IrBlockKind::Fallback)
                continue;

            const RegisterSet& succRs = info.in[succIdx];

            outRs.regs |= succRs.regs;

            if (succRs.varargSeq)
            {
                LUAU_ASSERT(!outRs.varargSeq || outRs.varargStart == succRs.varargStart);

                outRs.varargSeq = true;
                outRs.varargStart = succRs.varargStart;
            }
        }

        RegisterSet oldInRs = inRs;

        // If the current block didn't define a live-out, it has to be live-in
        inRs.regs |= outRs.regs & ~defRs.regs;

        if (outRs.varargSeq)
            requireVariadicSequence(inRs, defRs, outRs.varargStart);

        // If we have new live-ins, we have to notify all predecessors
        // We don't allow changes to the start of the variadic sequence, so we skip checking that member
        if (inRs.regs != oldInRs.regs || inRs.varargSeq != oldInRs.varargSeq)
        {
            for (uint32_t predIdx : predecessors(info, blockIdx))
            {
                if (!inWorklist[predIdx])
                {
                    worklist.push_back(predIdx);
                    inWorklist[predIdx] = true;
                }
            }
        }
    }

    // If Proto data is available, validate that entry block arguments match required registers
    if (function.proto)
    {
        RegisterSet& entryIn = info.in[0];

        LUAU_ASSERT(!entryIn.varargSeq);

        for (size_t i = 0; i < entryIn.regs.size(); i++)
            LUAU_ASSERT(!entryIn.regs.test(i) || i < function.proto->numparams);
    }
}
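
// Build the predecessor and successor edge lists for every block, stored as flat arrays
// indexed through per-block offsets; a block's predecessor count equals its use count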
static void computeCfgBlockEdges(IrFunction& function)
{
    CfgInfo& info = function.cfg;

    // Clear existing data
    info.predecessorsOffsets.clear();
    info.successorsOffsets.clear();

    // Compute predecessor block edges
    info.predecessorsOffsets.reserve(function.blocks.size());
    info.successorsOffsets.reserve(function.blocks.size());

    int edgeCount = 0;

    for (const IrBlock& block : function.blocks)
    {
        info.predecessorsOffsets.push_back(edgeCount);
        edgeCount += block.useCount;
    }

    info.predecessors.resize(edgeCount);
    info.successors.resize(edgeCount);

    edgeCount = 0;

    for (size_t blockIdx = 0; blockIdx < function.blocks.size(); blockIdx++)
    {
        const IrBlock& block = function.blocks[blockIdx];

        info.successorsOffsets.push_back(edgeCount);

        if (block.kind == IrBlockKind::Dead)
            continue;

        for (uint32_t instIdx = block.start; instIdx <= block.finish; instIdx++)
        {
            const IrInst& inst = function.instructions[instIdx];

            auto checkOp = [&](IrOp op) {
                if (op.kind == IrOpKind::Block)
                {
                    // We use a trick here: the starting offset of the predecessor list is used as the position where to write the next predecessor
                    // The values will be adjusted back in a separate loop later
                    info.predecessors[info.predecessorsOffsets[op.index]++] = uint32_t(blockIdx);

                    info.successors[edgeCount++] = op.index;
                }
            };

            checkOp(inst.a);
            checkOp(inst.b);
            checkOp(inst.c);
            checkOp(inst.d);
            checkOp(inst.e);
            checkOp(inst.f);
        }
    }

    // Offsets into the predecessor list were used as iterators in the previous loop
    // To adjust them back, the block use count is subtracted (the predecessor count is equal to the number of uses the block has)
    for (size_t blockIdx = 0; blockIdx < function.blocks.size(); blockIdx++)
    {
        const IrBlock& block = function.blocks[blockIdx];

        info.predecessorsOffsets[blockIdx] -= block.useCount;
    }
}
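
// Edges must be computed before the live-in/out sets, since liveness propagation walks the
// successor and predecessor lists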
void computeCfgInfo(IrFunction& function)
{
    computeCfgBlockEdges(function);
    computeCfgLiveInOutRegSets(function);
}
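
// Return the contiguous range of the flattened edge array that belongs to 'blockIdx'; the
// range ends where the next block's range begins (or at the end of the array for the last block)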
BlockIteratorWrapper predecessors(const CfgInfo& cfg, uint32_t blockIdx)
{
    LUAU_ASSERT(blockIdx < cfg.predecessorsOffsets.size());

    uint32_t start = cfg.predecessorsOffsets[blockIdx];
    uint32_t end = blockIdx + 1 < cfg.predecessorsOffsets.size() ? cfg.predecessorsOffsets[blockIdx + 1] : uint32_t(cfg.predecessors.size());

    return BlockIteratorWrapper{cfg.predecessors.data() + start, cfg.predecessors.data() + end};
}

BlockIteratorWrapper successors(const CfgInfo& cfg, uint32_t blockIdx)
{
    LUAU_ASSERT(blockIdx < cfg.successorsOffsets.size());

    uint32_t start = cfg.successorsOffsets[blockIdx];
    uint32_t end = blockIdx + 1 < cfg.successorsOffsets.size() ? cfg.successorsOffsets[blockIdx + 1] : uint32_t(cfg.successors.size());

    return BlockIteratorWrapper{cfg.successors.data() + start, cfg.successors.data() + end};
}

} // namespace CodeGen
} // namespace Luau