// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/OptimizeConstProp.h"

#include "Luau/DenseHash.h"
#include "Luau/IrData.h"
#include "Luau/IrBuilder.h"
#include "Luau/IrUtils.h"

#include "lua.h"

#include <array>
#include <utility>
#include <vector>

LUAU_FASTINTVARIABLE(LuauCodeGenMinLinearBlockPath, 3)
LUAU_FASTFLAGVARIABLE(DebugLuauAbortingChecks, false)

namespace Luau
{
namespace CodeGen
{

// Data we know about the register value
struct RegisterInfo
{
    uint8_t tag = 0xff;
    IrOp value;

    // Used to quickly invalidate links between SSA values and register memory
    // It's a bit imprecise in that value and tag always invalidate together
    uint32_t version = 0;

    bool knownNotReadonly = false;
    bool knownNoMetatable = false;
    int knownTableArraySize = -1;
};
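
// For example, after 'STORE_TAG R0, tnumber' and 'STORE_DOUBLE R0, 0.5' the state for R0
// records tag == LUA_TNUMBER and value == 0.5, so later LOAD_TAG/LOAD_DOUBLE of R0 can be
// folded into constants until the register is invalidated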

// Load instructions are linked to target register to carry knowledge about the target
// We track a register version at the point of the load so it's easy to break the link when the register is updated
struct RegisterLink
{
    uint8_t reg = 0;
    uint32_t version = 0;
};
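
// For example, '%5 = LOAD_TAG R2' creates a link from instruction %5 to R2 at its current
// version; any later store into R2 bumps the register version, so the stale link fails the
// version comparison in tryGetRegLink without requiring an explicit invalidation pass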

// Data we know about the current VM state
struct ConstPropState
{
    ConstPropState(IrFunction& function)
        : function(function)
        , valueMap({})
    {
    }

    uint8_t tryGetTag(IrOp op)
    {
        if (RegisterInfo* info = tryGetRegisterInfo(op))
            return info->tag;

        return 0xff;
    }

    void updateTag(IrOp op, uint8_t tag)
    {
        if (RegisterInfo* info = tryGetRegisterInfo(op))
            info->tag = tag;
    }

    void saveTag(IrOp op, uint8_t tag)
    {
        if (RegisterInfo* info = tryGetRegisterInfo(op))
        {
            if (info->tag != tag)
            {
                info->tag = tag;
                info->version++;
            }
        }
    }

    IrOp tryGetValue(IrOp op)
    {
        if (RegisterInfo* info = tryGetRegisterInfo(op))
            return info->value;

        return IrOp{IrOpKind::None, 0u};
    }

    void saveValue(IrOp op, IrOp value)
    {
        LUAU_ASSERT(value.kind == IrOpKind::Constant);

        if (RegisterInfo* info = tryGetRegisterInfo(op))
        {
            if (info->value != value)
            {
                info->value = value;
                info->knownNotReadonly = false;
                info->knownNoMetatable = false;
                info->knownTableArraySize = -1;
                info->version++;
            }
        }
    }

    void invalidate(RegisterInfo& reg, bool invalidateTag, bool invalidateValue)
    {
        if (invalidateTag)
        {
            reg.tag = 0xff;
        }

        if (invalidateValue)
        {
            reg.value = {};
            reg.knownNotReadonly = false;
            reg.knownNoMetatable = false;
            reg.knownTableArraySize = -1;
        }

        reg.version++;
    }

    void invalidateTag(IrOp regOp)
    {
        // TODO: use maxstacksize from Proto
        maxReg = vmRegOp(regOp) > maxReg ? vmRegOp(regOp) : maxReg;
        invalidate(regs[vmRegOp(regOp)], /* invalidateTag */ true, /* invalidateValue */ false);
    }

    void invalidateValue(IrOp regOp)
    {
        // TODO: use maxstacksize from Proto
        maxReg = vmRegOp(regOp) > maxReg ? vmRegOp(regOp) : maxReg;
        invalidate(regs[vmRegOp(regOp)], /* invalidateTag */ false, /* invalidateValue */ true);
    }

    void invalidate(IrOp regOp)
    {
        // TODO: use maxstacksize from Proto
        maxReg = vmRegOp(regOp) > maxReg ? vmRegOp(regOp) : maxReg;
        invalidate(regs[vmRegOp(regOp)], /* invalidateTag */ true, /* invalidateValue */ true);
    }

    void invalidateRegistersFrom(int firstReg)
    {
        for (int i = firstReg; i <= maxReg; ++i)
            invalidate(regs[i], /* invalidateTag */ true, /* invalidateValue */ true);
    }

    void invalidateRegisterRange(int firstReg, int count)
    {
        if (count == -1)
        {
            invalidateRegistersFrom(firstReg);
        }
        else
        {
            for (int i = firstReg; i < firstReg + count && i <= maxReg; ++i)
                invalidate(regs[i], /* invalidateTag */ true, /* invalidateValue */ true);
        }
    }

    void invalidateCapturedRegisters()
    {
        for (int i = 0; i <= maxReg; ++i)
        {
            if (function.cfg.captured.regs.test(i))
                invalidate(regs[i], /* invalidateTag */ true, /* invalidateValue */ true);
        }
    }

    void invalidateHeap()
    {
        for (int i = 0; i <= maxReg; ++i)
            invalidateHeap(regs[i]);
    }

    void invalidateHeap(RegisterInfo& reg)
    {
        reg.knownNotReadonly = false;
        reg.knownNoMetatable = false;
        reg.knownTableArraySize = -1;
    }

    void invalidateUserCall()
    {
        invalidateHeap();
        invalidateCapturedRegisters();
        inSafeEnv = false;
    }

    void createRegLink(uint32_t instIdx, IrOp regOp)
    {
        LUAU_ASSERT(!instLink.contains(instIdx));
        instLink[instIdx] = RegisterLink{uint8_t(vmRegOp(regOp)), regs[vmRegOp(regOp)].version};
    }

    RegisterInfo* tryGetRegisterInfo(IrOp op)
    {
        if (op.kind == IrOpKind::VmReg)
        {
            maxReg = vmRegOp(op) > maxReg ? vmRegOp(op) : maxReg;
            return &regs[vmRegOp(op)];
        }

        if (RegisterLink* link = tryGetRegLink(op))
        {
            maxReg = int(link->reg) > maxReg ? int(link->reg) : maxReg;
            return &regs[link->reg];
        }

        return nullptr;
    }

    RegisterLink* tryGetRegLink(IrOp instOp)
    {
        if (instOp.kind != IrOpKind::Inst)
            return nullptr;

        if (RegisterLink* link = instLink.find(instOp.index))
        {
            // Check that the target register hasn't changed the value
            if (link->version < regs[link->reg].version)
                return nullptr;

            return link;
        }

        return nullptr;
    }

    // Attach register version number to the register operand in a load instruction
    // This is used to allow instructions with register references to be compared for equality
    IrInst versionedVmRegLoad(IrCmd loadCmd, IrOp op)
    {
        LUAU_ASSERT(op.kind == IrOpKind::VmReg);
        uint32_t version = regs[vmRegOp(op)].version;
        LUAU_ASSERT(version <= 0xffffff);
        op.index = vmRegOp(op) | (version << 8);
        return IrInst{loadCmd, op};
    }
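
    // For example, with R1 at version 3, the key for 'LOAD_DOUBLE R1' is encoded as
    // index = 1 | (3 << 8): the 8-bit register number and the 24-bit version share the
    // 32-bit operand index, which is why the version is asserted to fit into 0xffffff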

    uint32_t* getPreviousInstIndex(const IrInst& inst)
    {
        LUAU_ASSERT(useValueNumbering);

        if (uint32_t* prevIdx = valueMap.find(inst))
        {
            // Previous load might have been removed as unused
            if (function.instructions[*prevIdx].useCount != 0)
                return prevIdx;
        }

        return nullptr;
    }

    uint32_t* getPreviousVersionedLoadIndex(IrCmd cmd, IrOp vmReg)
    {
        LUAU_ASSERT(vmReg.kind == IrOpKind::VmReg);
        return getPreviousInstIndex(versionedVmRegLoad(cmd, vmReg));
    }
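
    // Given a tag, find an earlier load of the matching payload representation:
    // booleans are loaded with LOAD_INT, numbers with LOAD_DOUBLE and GC objects with LOAD_POINTER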
    std::pair<IrCmd, uint32_t> getPreviousVersionedLoadForTag(uint8_t tag, IrOp vmReg)
    {
        if (useValueNumbering && !function.cfg.captured.regs.test(vmRegOp(vmReg)))
        {
            if (tag == LUA_TBOOLEAN)
            {
                if (uint32_t* prevIdx = getPreviousVersionedLoadIndex(IrCmd::LOAD_INT, vmReg))
                    return std::make_pair(IrCmd::LOAD_INT, *prevIdx);
            }
            else if (tag == LUA_TNUMBER)
            {
                if (uint32_t* prevIdx = getPreviousVersionedLoadIndex(IrCmd::LOAD_DOUBLE, vmReg))
                    return std::make_pair(IrCmd::LOAD_DOUBLE, *prevIdx);
            }
            else if (isGCO(tag))
            {
                if (uint32_t* prevIdx = getPreviousVersionedLoadIndex(IrCmd::LOAD_POINTER, vmReg))
                    return std::make_pair(IrCmd::LOAD_POINTER, *prevIdx);
            }
        }

        return std::make_pair(IrCmd::NOP, kInvalidInstIdx);
    }

    // Find an existing value of the instruction that is exactly the same, or record the current one for future lookups
    void substituteOrRecord(IrInst& inst, uint32_t instIdx)
    {
        if (!useValueNumbering)
            return;

        if (uint32_t* prevIdx = getPreviousInstIndex(inst))
        {
            substitute(function, inst, IrOp{IrOpKind::Inst, *prevIdx});
            return;
        }

        valueMap[inst] = instIdx;
    }
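
    // For example, when value numbering is enabled, recording '%4 = ADD_NUM %1, %2' lets a
    // later '%9 = ADD_NUM %1, %2' in the same block be substituted with a reference to %4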

    // VM register load can be replaced by a previous load of the same version of the register
    // If there is no previous load, we record the current one for future lookups
    void substituteOrRecordVmRegLoad(IrInst& loadInst)
    {
        LUAU_ASSERT(loadInst.a.kind == IrOpKind::VmReg);

        if (!useValueNumbering)
            return;

        // To avoid captured register invalidation tracking in lowering later, values from loads from captured registers are not propagated
        // This prevents the case where load value location is linked to memory in case of a spill and is then clobbered in a user call
        if (function.cfg.captured.regs.test(vmRegOp(loadInst.a)))
            return;

        IrInst versionedLoad = versionedVmRegLoad(loadInst.cmd, loadInst.a);

        // Check if there is a value that already has this version of the register
        if (uint32_t* prevIdx = getPreviousInstIndex(versionedLoad))
        {
            // Previous value might not be linked to a register yet
            // For example, it could be a NEW_TABLE stored into a register and we might need to track guards made with this value
            if (!instLink.contains(*prevIdx))
                createRegLink(*prevIdx, loadInst.a);

            // Substitute load instruction with the previous value
            substitute(function, loadInst, IrOp{IrOpKind::Inst, *prevIdx});
            return;
        }

        uint32_t instIdx = function.getInstIndex(loadInst);

        // Record load of this register version for future substitution
        valueMap[versionedLoad] = instIdx;

        createRegLink(instIdx, loadInst.a);
    }
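
    // For example, two 'LOAD_DOUBLE R1' instructions with no intervening store to R1 produce
    // the same versioned key, so the second load is substituted with a reference to the first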

    // VM register loads can use the value that was stored in the same VM register earlier
    void forwardVmRegStoreToLoad(const IrInst& storeInst, IrCmd loadCmd)
    {
        LUAU_ASSERT(storeInst.a.kind == IrOpKind::VmReg);
        LUAU_ASSERT(storeInst.b.kind == IrOpKind::Inst);

        if (!useValueNumbering)
            return;

        // To avoid captured register invalidation tracking in lowering later, values from stores into captured registers are not propagated
        // This prevents the case where store creates an alternative value location in case of a spill and is then clobbered in a user call
        if (function.cfg.captured.regs.test(vmRegOp(storeInst.a)))
            return;

        // Future loads of this register version can use the value we stored
        valueMap[versionedVmRegLoad(loadCmd, storeInst.a)] = storeInst.b.index;
    }
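
    // For example, after 'STORE_DOUBLE R3, %10' a later 'LOAD_DOUBLE R3' in the same block
    // resolves to %10 directly instead of reloading the value from stack memory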

    void clear()
    {
        for (int i = 0; i <= maxReg; ++i)
            regs[i] = RegisterInfo();

        maxReg = 0;

        inSafeEnv = false;
        checkedGc = false;

        instLink.clear();
        valueMap.clear();
    }

    IrFunction& function;

    bool useValueNumbering = false;

    std::array<RegisterInfo, 256> regs;

    // For range/full invalidations, we only want to visit the registers that we have recorded data for
    int maxReg = 0;

    bool inSafeEnv = false;
    bool checkedGc = false;

    DenseHashMap<uint32_t, RegisterLink> instLink{~0u};

    DenseHashMap<IrInst, uint32_t, IrInstHash, IrInstEq> valueMap;
};

static void handleBuiltinEffects(ConstPropState& state, LuauBuiltinFunction bfid, uint32_t firstReturnReg, int nresults)
{
    // A switch over all values is used to force handling of any newly added builtins
    switch (bfid)
    {
    case LBF_NONE:
    case LBF_ASSERT:
    case LBF_MATH_ABS:
    case LBF_MATH_ACOS:
    case LBF_MATH_ASIN:
    case LBF_MATH_ATAN2:
    case LBF_MATH_ATAN:
    case LBF_MATH_CEIL:
    case LBF_MATH_COSH:
    case LBF_MATH_COS:
    case LBF_MATH_DEG:
    case LBF_MATH_EXP:
    case LBF_MATH_FLOOR:
    case LBF_MATH_FMOD:
    case LBF_MATH_FREXP:
    case LBF_MATH_LDEXP:
    case LBF_MATH_LOG10:
    case LBF_MATH_LOG:
    case LBF_MATH_MAX:
    case LBF_MATH_MIN:
    case LBF_MATH_MODF:
    case LBF_MATH_POW:
    case LBF_MATH_RAD:
    case LBF_MATH_SINH:
    case LBF_MATH_SIN:
    case LBF_MATH_SQRT:
    case LBF_MATH_TANH:
    case LBF_MATH_TAN:
    case LBF_BIT32_ARSHIFT:
    case LBF_BIT32_BAND:
    case LBF_BIT32_BNOT:
    case LBF_BIT32_BOR:
    case LBF_BIT32_BXOR:
    case LBF_BIT32_BTEST:
    case LBF_BIT32_EXTRACT:
    case LBF_BIT32_LROTATE:
    case LBF_BIT32_LSHIFT:
    case LBF_BIT32_REPLACE:
    case LBF_BIT32_RROTATE:
    case LBF_BIT32_RSHIFT:
    case LBF_TYPE:
    case LBF_STRING_BYTE:
    case LBF_STRING_CHAR:
    case LBF_STRING_LEN:
    case LBF_TYPEOF:
    case LBF_STRING_SUB:
    case LBF_MATH_CLAMP:
    case LBF_MATH_SIGN:
    case LBF_MATH_ROUND:
    case LBF_RAWGET:
    case LBF_RAWEQUAL:
    case LBF_TABLE_UNPACK:
    case LBF_VECTOR:
    case LBF_BIT32_COUNTLZ:
    case LBF_BIT32_COUNTRZ:
    case LBF_SELECT_VARARG:
    case LBF_RAWLEN:
    case LBF_BIT32_EXTRACTK:
    case LBF_GETMETATABLE:
    case LBF_TONUMBER:
    case LBF_TOSTRING:
        break;
    case LBF_TABLE_INSERT:
        state.invalidateHeap();
        return; // table.insert does not modify result registers
    case LBF_RAWSET:
        state.invalidateHeap();
        break;
    case LBF_SETMETATABLE:
        state.invalidateHeap(); // TODO: only knownNoMetatable is affected and we might know which one
        break;
    }

    // TODO: classify further using the switch above; some fastcalls only modify the value, not the tag
    // TODO: fastcalls are different from calls and it might be possible to not invalidate all registers starting from the return position
    state.invalidateRegistersFrom(firstReturnReg);
}
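
// For example, a fastcall to math.abs (LBF_MATH_ABS) takes the default path above and only
// invalidates the registers starting at the first return register, while table.insert
// (LBF_TABLE_INSERT) clears heap knowledge instead and leaves the result registers untouched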

static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction& function, IrBlock& block, IrInst& inst, uint32_t index)
{
    switch (inst.cmd)
    {
    case IrCmd::LOAD_TAG:
        if (uint8_t tag = state.tryGetTag(inst.a); tag != 0xff)
            substitute(function, inst, build.constTag(tag));
        else if (inst.a.kind == IrOpKind::VmReg)
            state.createRegLink(index, inst.a);
        break;
    case IrCmd::LOAD_POINTER:
        if (inst.a.kind == IrOpKind::VmReg)
            state.substituteOrRecordVmRegLoad(inst);
        break;
    case IrCmd::LOAD_DOUBLE:
    {
        IrOp value = state.tryGetValue(inst.a);

        if (function.asDoubleOp(value))
            substitute(function, inst, value);
        else if (inst.a.kind == IrOpKind::VmReg)
            state.substituteOrRecordVmRegLoad(inst);
        break;
    }
    case IrCmd::LOAD_INT:
    {
        IrOp value = state.tryGetValue(inst.a);

        if (function.asIntOp(value))
            substitute(function, inst, value);
        else if (inst.a.kind == IrOpKind::VmReg)
            state.substituteOrRecordVmRegLoad(inst);
        break;
    }
    case IrCmd::LOAD_TVALUE:
        if (inst.a.kind == IrOpKind::VmReg)
            state.substituteOrRecordVmRegLoad(inst);
        break;
    case IrCmd::STORE_TAG:
        if (inst.a.kind == IrOpKind::VmReg)
        {
            const IrOp source = inst.a;

            IrCmd activeLoadCmd = IrCmd::NOP;
            uint32_t activeLoadValue = kInvalidInstIdx;

            if (inst.b.kind == IrOpKind::Constant)
            {
                uint8_t value = function.tagOp(inst.b);

                // STORE_TAG usually follows a store of the value, but it also bumps the version of the whole register
                // To be able to propagate STORE_*** into LOAD_***, we find the active LOAD_*** value and recreate it with an updated version
                // Register in this optimization cannot be captured to avoid complications in lowering (IrValueLocationTracking doesn't model it)
                std::tie(activeLoadCmd, activeLoadValue) = state.getPreviousVersionedLoadForTag(value, source);

                if (state.tryGetTag(source) == value)
                {
                    if (FFlag::DebugLuauAbortingChecks)
                        replace(function, block, index, {IrCmd::CHECK_TAG, inst.a, inst.b, build.undef()});
                    else
                        kill(function, inst);
                }
                else
                {
                    state.saveTag(source, value);
                }
            }
            else
            {
                state.invalidateTag(source);
            }

            // Future LOAD_*** instructions can re-use the previous register version load
            if (activeLoadValue != kInvalidInstIdx)
                state.valueMap[state.versionedVmRegLoad(activeLoadCmd, source)] = activeLoadValue;
        }
        break;
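
    // For STORE_POINTER of a table created by NEW_TABLE in this block, we additionally learn
    // that the value is not readonly, has no metatable, and what its array size is, which
    // feeds the CHECK_READONLY/CHECK_NO_METATABLE/CHECK_ARRAY_SIZE elisions below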
    case IrCmd::STORE_POINTER:
        if (inst.a.kind == IrOpKind::VmReg)
        {
            state.invalidateValue(inst.a);
            state.forwardVmRegStoreToLoad(inst, IrCmd::LOAD_POINTER);

            if (IrInst* instOp = function.asInstOp(inst.b); instOp && instOp->cmd == IrCmd::NEW_TABLE)
            {
                if (RegisterInfo* info = state.tryGetRegisterInfo(inst.a))
                {
                    info->knownNotReadonly = true;
                    info->knownNoMetatable = true;
                    info->knownTableArraySize = function.uintOp(instOp->a);
                }
            }
        }
        break;
    case IrCmd::STORE_DOUBLE:
        if (inst.a.kind == IrOpKind::VmReg)
        {
            if (inst.b.kind == IrOpKind::Constant)
            {
                if (state.tryGetValue(inst.a) == inst.b)
                    kill(function, inst);
                else
                    state.saveValue(inst.a, inst.b);
            }
            else
            {
                state.invalidateValue(inst.a);
                state.forwardVmRegStoreToLoad(inst, IrCmd::LOAD_DOUBLE);
            }
        }
        break;
    case IrCmd::STORE_INT:
        if (inst.a.kind == IrOpKind::VmReg)
        {
            if (inst.b.kind == IrOpKind::Constant)
            {
                if (state.tryGetValue(inst.a) == inst.b)
                    kill(function, inst);
                else
                    state.saveValue(inst.a, inst.b);
            }
            else
            {
                state.invalidateValue(inst.a);
                state.forwardVmRegStoreToLoad(inst, IrCmd::LOAD_INT);
            }
        }
        break;
    case IrCmd::STORE_VECTOR:
        state.invalidateValue(inst.a);
        break;
    case IrCmd::STORE_TVALUE:
        if (inst.a.kind == IrOpKind::VmReg || inst.a.kind == IrOpKind::Inst)
        {
            if (inst.a.kind == IrOpKind::VmReg)
                state.invalidate(inst.a);

            uint8_t tag = state.tryGetTag(inst.b);
            IrOp value = state.tryGetValue(inst.b);

            if (inst.a.kind == IrOpKind::VmReg)
            {
                if (tag != 0xff)
                    state.saveTag(inst.a, tag);

                if (value.kind != IrOpKind::None)
                    state.saveValue(inst.a, value);
            }

            IrCmd activeLoadCmd = IrCmd::NOP;
            uint32_t activeLoadValue = kInvalidInstIdx;

            if (tag != 0xff)
            {
                // If we know the tag, try to extract the value from a register used by LOAD_TVALUE
                if (IrInst* arg = function.asInstOp(inst.b); arg && arg->cmd == IrCmd::LOAD_TVALUE && arg->a.kind == IrOpKind::VmReg)
                {
                    std::tie(activeLoadCmd, activeLoadValue) = state.getPreviousVersionedLoadForTag(tag, arg->a);

                    if (activeLoadValue != kInvalidInstIdx)
                        value = IrOp{IrOpKind::Inst, activeLoadValue};
                }
            }

            // If we have constant tag and value, replace TValue store with tag/value pair store
            if (tag != 0xff && value.kind != IrOpKind::None && (tag == LUA_TBOOLEAN || tag == LUA_TNUMBER || isGCO(tag)))
            {
                replace(function, block, index, {IrCmd::STORE_SPLIT_TVALUE, inst.a, build.constTag(tag), value, inst.c});

                // Value can be propagated to future loads of the same register
                if (inst.a.kind == IrOpKind::VmReg && activeLoadValue != kInvalidInstIdx)
                    state.valueMap[state.versionedVmRegLoad(activeLoadCmd, inst.a)] = activeLoadValue;
            }
            else if (inst.a.kind == IrOpKind::VmReg)
            {
                state.forwardVmRegStoreToLoad(inst, IrCmd::LOAD_TVALUE);
            }
        }
        break;
    case IrCmd::STORE_SPLIT_TVALUE:
        if (inst.a.kind == IrOpKind::VmReg)
        {
            state.invalidate(inst.a);

            state.saveTag(inst.a, function.tagOp(inst.b));

            if (inst.c.kind == IrOpKind::Constant)
                state.saveValue(inst.a, inst.c);
        }
        break;
    case IrCmd::JUMP_IF_TRUTHY:
        if (uint8_t tag = state.tryGetTag(inst.a); tag != 0xff)
        {
            if (tag == LUA_TNIL)
                replace(function, block, index, {IrCmd::JUMP, inst.c});
            else if (tag != LUA_TBOOLEAN)
                replace(function, block, index, {IrCmd::JUMP, inst.b});
        }
        break;
    case IrCmd::JUMP_IF_FALSY:
        if (uint8_t tag = state.tryGetTag(inst.a); tag != 0xff)
        {
            if (tag == LUA_TNIL)
                replace(function, block, index, {IrCmd::JUMP, inst.b});
            else if (tag != LUA_TBOOLEAN)
                replace(function, block, index, {IrCmd::JUMP, inst.c});
        }
        break;
    case IrCmd::JUMP_EQ_TAG:
    {
        uint8_t tagA = inst.a.kind == IrOpKind::Constant ? function.tagOp(inst.a) : state.tryGetTag(inst.a);
        uint8_t tagB = inst.b.kind == IrOpKind::Constant ? function.tagOp(inst.b) : state.tryGetTag(inst.b);

        if (tagA != 0xff && tagB != 0xff)
        {
            if (tagA == tagB)
                replace(function, block, index, {IrCmd::JUMP, inst.c});
            else
                replace(function, block, index, {IrCmd::JUMP, inst.d});
        }
        break;
    }
    case IrCmd::JUMP_EQ_INT:
    {
        std::optional<int> valueA = function.asIntOp(inst.a.kind == IrOpKind::Constant ? inst.a : state.tryGetValue(inst.a));
        std::optional<int> valueB = function.asIntOp(inst.b.kind == IrOpKind::Constant ? inst.b : state.tryGetValue(inst.b));

        if (valueA && valueB)
        {
            if (*valueA == *valueB)
                replace(function, block, index, {IrCmd::JUMP, inst.c});
            else
                replace(function, block, index, {IrCmd::JUMP, inst.d});
        }
        break;
    }
    case IrCmd::JUMP_LT_INT:
    {
        std::optional<int> valueA = function.asIntOp(inst.a.kind == IrOpKind::Constant ? inst.a : state.tryGetValue(inst.a));
        std::optional<int> valueB = function.asIntOp(inst.b.kind == IrOpKind::Constant ? inst.b : state.tryGetValue(inst.b));

        if (valueA && valueB)
        {
            if (*valueA < *valueB)
                replace(function, block, index, {IrCmd::JUMP, inst.c});
            else
                replace(function, block, index, {IrCmd::JUMP, inst.d});
        }
        break;
    }
    case IrCmd::JUMP_GE_UINT:
    {
        std::optional<unsigned> valueA = function.asUintOp(inst.a.kind == IrOpKind::Constant ? inst.a : state.tryGetValue(inst.a));
        std::optional<unsigned> valueB = function.asUintOp(inst.b.kind == IrOpKind::Constant ? inst.b : state.tryGetValue(inst.b));

        if (valueA && valueB)
        {
            if (*valueA >= *valueB)
                replace(function, block, index, {IrCmd::JUMP, inst.c});
            else
                replace(function, block, index, {IrCmd::JUMP, inst.d});
        }
        break;
    }
    case IrCmd::JUMP_CMP_NUM:
    {
        std::optional<double> valueA = function.asDoubleOp(inst.a.kind == IrOpKind::Constant ? inst.a : state.tryGetValue(inst.a));
        std::optional<double> valueB = function.asDoubleOp(inst.b.kind == IrOpKind::Constant ? inst.b : state.tryGetValue(inst.b));

        if (valueA && valueB)
        {
            if (compare(*valueA, *valueB, conditionOp(inst.c)))
                replace(function, block, index, {IrCmd::JUMP, inst.d});
            else
                replace(function, block, index, {IrCmd::JUMP, inst.e});
        }
        break;
    }
    case IrCmd::GET_UPVALUE:
        state.invalidate(inst.a);
        break;
    case IrCmd::SET_UPVALUE:
        if (inst.b.kind == IrOpKind::VmReg)
        {
            if (uint8_t tag = state.tryGetTag(inst.b); tag != 0xff)
            {
                replace(function, inst.c, build.constTag(tag));
            }
        }
        break;
    case IrCmd::CHECK_TAG:
    {
        uint8_t b = function.tagOp(inst.b);

        if (uint8_t tag = state.tryGetTag(inst.a); tag != 0xff)
        {
            if (tag == b)
            {
                if (FFlag::DebugLuauAbortingChecks)
                    replace(function, inst.c, build.undef());
                else
                    kill(function, inst);
            }
            else
            {
                replace(function, block, index, {IrCmd::JUMP, inst.c}); // Shows a conflict in assumptions on this path
            }
        }
        else
        {
            state.updateTag(inst.a, b); // We can assume the tag value going forward
        }
        break;
    }
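
    // For example, once 'CHECK_TAG R0, tnumber' passes, updateTag above records LUA_TNUMBER
    // for R0, so an identical check later in the same block is removed as redundant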
    case IrCmd::CHECK_TRUTHY:
        // It is possible to check if the current tag in the state is truthy or not, but this case almost never comes up
        break;
    case IrCmd::CHECK_READONLY:
        if (RegisterInfo* info = state.tryGetRegisterInfo(inst.a))
        {
            if (info->knownNotReadonly)
            {
                if (FFlag::DebugLuauAbortingChecks)
                    replace(function, inst.b, build.undef());
                else
                    kill(function, inst);
            }
            else
            {
                info->knownNotReadonly = true;
            }
        }
        break;
    case IrCmd::CHECK_NO_METATABLE:
        if (RegisterInfo* info = state.tryGetRegisterInfo(inst.a))
        {
            if (info->knownNoMetatable)
            {
                if (FFlag::DebugLuauAbortingChecks)
                    replace(function, inst.b, build.undef());
                else
                    kill(function, inst);
            }
            else
            {
                info->knownNoMetatable = true;
            }
        }
        break;
    case IrCmd::CHECK_SAFE_ENV:
        if (state.inSafeEnv)
        {
            if (FFlag::DebugLuauAbortingChecks)
                replace(function, inst.a, build.undef());
            else
                kill(function, inst);
        }
        else
        {
            state.inSafeEnv = true;
        }
        break;
    case IrCmd::CHECK_GC:
        // It is enough to perform a GC check once in a block
        if (state.checkedGc)
            kill(function, inst);
        else
            state.checkedGc = true;
        break;
    case IrCmd::BARRIER_OBJ:
    case IrCmd::BARRIER_TABLE_FORWARD:
        if (inst.b.kind == IrOpKind::VmReg)
        {
            if (uint8_t tag = state.tryGetTag(inst.b); tag != 0xff)
            {
                // If the written object is not collectable, the barrier is not required
                if (!isGCO(tag))
                    kill(function, inst);
                else
                    replace(function, inst.c, build.constTag(tag));
            }
        }
        break;

    // TODO: FASTCALL is more restrictive than INVOKE_FASTCALL; we should either determine the exact semantics, or rework it
    case IrCmd::FASTCALL:
    case IrCmd::INVOKE_FASTCALL:
        handleBuiltinEffects(state, LuauBuiltinFunction(function.uintOp(inst.a)), vmRegOp(inst.b), function.intOp(inst.f));
        break;

    // These instructions don't have an effect on register/memory state we are tracking
    case IrCmd::NOP:
    case IrCmd::LOAD_ENV:
    case IrCmd::GET_ARR_ADDR:
    case IrCmd::GET_SLOT_NODE_ADDR:
    case IrCmd::GET_HASH_NODE_ADDR:
    case IrCmd::GET_CLOSURE_UPVAL_ADDR:
        break;
    case IrCmd::ADD_INT:
    case IrCmd::SUB_INT:
    case IrCmd::ADD_NUM:
    case IrCmd::SUB_NUM:
    case IrCmd::MUL_NUM:
    case IrCmd::DIV_NUM:
    case IrCmd::MOD_NUM:
    case IrCmd::MIN_NUM:
    case IrCmd::MAX_NUM:
    case IrCmd::UNM_NUM:
    case IrCmd::FLOOR_NUM:
    case IrCmd::CEIL_NUM:
    case IrCmd::ROUND_NUM:
    case IrCmd::SQRT_NUM:
    case IrCmd::ABS_NUM:
    case IrCmd::NOT_ANY:
        state.substituteOrRecord(inst, index);
        break;
    case IrCmd::CMP_ANY:
        state.invalidateUserCall();
        break;
    case IrCmd::JUMP:
    case IrCmd::JUMP_EQ_POINTER:
    case IrCmd::JUMP_SLOT_MATCH:
    case IrCmd::TABLE_LEN:
    case IrCmd::STRING_LEN:
    case IrCmd::NEW_TABLE:
    case IrCmd::DUP_TABLE:
    case IrCmd::TRY_NUM_TO_INDEX:
    case IrCmd::TRY_CALL_FASTGETTM:
        break;
    case IrCmd::INT_TO_NUM:
    case IrCmd::UINT_TO_NUM:
        state.substituteOrRecord(inst, index);
        break;
    case IrCmd::NUM_TO_INT:
        if (IrInst* src = function.asInstOp(inst.a); src && src->cmd == IrCmd::INT_TO_NUM)
            substitute(function, inst, src->a);
        else
            state.substituteOrRecord(inst, index);
        break;
    case IrCmd::NUM_TO_UINT:
        if (IrInst* src = function.asInstOp(inst.a); src && src->cmd == IrCmd::UINT_TO_NUM)
            substitute(function, inst, src->a);
        else
            state.substituteOrRecord(inst, index);
        break;
    case IrCmd::CHECK_ARRAY_SIZE:
    {
        std::optional<int> arrayIndex = function.asIntOp(inst.b.kind == IrOpKind::Constant ? inst.b : state.tryGetValue(inst.b));

        if (RegisterInfo* info = state.tryGetRegisterInfo(inst.a); info && arrayIndex)
        {
            if (info->knownTableArraySize >= 0)
            {
                if (unsigned(*arrayIndex) < unsigned(info->knownTableArraySize))
                {
                    if (FFlag::DebugLuauAbortingChecks)
                        replace(function, inst.c, build.undef());
                    else
                        kill(function, inst);
                }
                else
                {
                    replace(function, block, index, {IrCmd::JUMP, inst.c});
                }
            }
        }
        break;
    }
    case IrCmd::CHECK_SLOT_MATCH:
    case IrCmd::CHECK_NODE_NO_NEXT:
    case IrCmd::BARRIER_TABLE_BACK:
    case IrCmd::RETURN:
    case IrCmd::COVERAGE:
    case IrCmd::SET_SAVEDPC:  // TODO: we may be able to remove some updates to PC
    case IrCmd::CLOSE_UPVALS: // Doesn't change memory that we track
    case IrCmd::CAPTURE:
    case IrCmd::SUBSTITUTE:
    case IrCmd::ADJUST_STACK_TO_REG: // Changes stack top, but not the values
    case IrCmd::ADJUST_STACK_TO_TOP: // Changes stack top, but not the values
    case IrCmd::CHECK_FASTCALL_RES:  // Changes stack top, but not the values
    case IrCmd::BITAND_UINT:
    case IrCmd::BITXOR_UINT:
    case IrCmd::BITOR_UINT:
    case IrCmd::BITNOT_UINT:
        break;
    case IrCmd::BITLSHIFT_UINT:
    case IrCmd::BITRSHIFT_UINT:
    case IrCmd::BITARSHIFT_UINT:
    case IrCmd::BITRROTATE_UINT:
    case IrCmd::BITLROTATE_UINT:
    case IrCmd::BITCOUNTLZ_UINT:
    case IrCmd::BITCOUNTRZ_UINT:
    case IrCmd::INVOKE_LIBM:
    case IrCmd::GET_TYPE:
    case IrCmd::GET_TYPEOF:
    case IrCmd::FINDUPVAL:
        break;

    case IrCmd::DO_ARITH:
        state.invalidate(inst.a);
        state.invalidateUserCall();
        break;
    case IrCmd::DO_LEN:
        state.invalidate(inst.a);
        state.invalidateUserCall(); // TODO: if argument is a string, there will be no user call

        state.saveTag(inst.a, LUA_TNUMBER);
        break;
    case IrCmd::GET_TABLE:
        state.invalidate(inst.a);
        state.invalidateUserCall();
        break;
    case IrCmd::SET_TABLE:
        state.invalidateUserCall();
        break;
    case IrCmd::GET_IMPORT:
        state.invalidate(inst.a);
        state.invalidateUserCall();
        break;
    case IrCmd::CONCAT:
        state.invalidateRegisterRange(vmRegOp(inst.a), function.uintOp(inst.b));
        state.invalidateUserCall(); // TODO: if only strings and numbers are concatenated, there will be no user calls
        break;
    case IrCmd::PREPARE_FORN:
        state.invalidateValue(inst.a);
        state.saveTag(inst.a, LUA_TNUMBER);
        state.invalidateValue(inst.b);
        state.saveTag(inst.b, LUA_TNUMBER);
        state.invalidateValue(inst.c);
        state.saveTag(inst.c, LUA_TNUMBER);
        break;
    case IrCmd::INTERRUPT:
        state.invalidateUserCall();
        break;
    case IrCmd::SETLIST:
        state.valueMap.clear(); // TODO: this can be relaxed when x64 emitInstSetList becomes aware of the register allocator
        break;
    case IrCmd::CALL:
        state.invalidateRegistersFrom(vmRegOp(inst.a));
        state.invalidateUserCall();

        // We cannot guarantee right now that all live values can be rematerialized from non-stack memory locations
        // To prevent earlier values from being propagated to after the call, we have to clear the map
        // TODO: remove only the values that don't have a guaranteed restore location
        state.valueMap.clear();
        break;
    case IrCmd::FORGLOOP:
        state.invalidateRegistersFrom(vmRegOp(inst.a) + 2); // Rn and Rn+1 are not modified
        state.valueMap.clear(); // TODO: this can be relaxed when x64 emitInstForGLoop becomes aware of the register allocator
        break;
    case IrCmd::FORGLOOP_FALLBACK:
        state.invalidateRegistersFrom(vmRegOp(inst.a) + 2); // Rn and Rn+1 are not modified
        state.invalidateUserCall();
        break;
    case IrCmd::FORGPREP_XNEXT_FALLBACK:
        // This fallback only conditionally throws an exception
        break;

    // Full fallback instructions
    case IrCmd::FALLBACK_GETGLOBAL:
        state.invalidate(inst.b);
        state.invalidateUserCall();
        break;
    case IrCmd::FALLBACK_SETGLOBAL:
        state.invalidateUserCall();
        break;
    case IrCmd::FALLBACK_GETTABLEKS:
        state.invalidate(inst.b);
        state.invalidateUserCall();
        break;
    case IrCmd::FALLBACK_SETTABLEKS:
        state.invalidateUserCall();
        break;
    case IrCmd::FALLBACK_NAMECALL:
        state.invalidate(IrOp{inst.b.kind, vmRegOp(inst.b) + 0u});
        state.invalidate(IrOp{inst.b.kind, vmRegOp(inst.b) + 1u});
        state.invalidateUserCall();
        break;
    case IrCmd::FALLBACK_PREPVARARGS:
        break;
    case IrCmd::FALLBACK_GETVARARGS:
        state.invalidateRegisterRange(vmRegOp(inst.b), function.intOp(inst.c));
        break;
    case IrCmd::NEWCLOSURE:
        break;
    case IrCmd::FALLBACK_DUPCLOSURE:
        state.invalidate(inst.b);
        break;
    case IrCmd::FALLBACK_FORGPREP:
        state.invalidate(IrOp{inst.b.kind, vmRegOp(inst.b) + 0u});
        state.invalidate(IrOp{inst.b.kind, vmRegOp(inst.b) + 1u});
        state.invalidate(IrOp{inst.b.kind, vmRegOp(inst.b) + 2u});
        break;
    }
}

static void constPropInBlock(IrBuilder& build, IrBlock& block, ConstPropState& state)
{
    IrFunction& function = build.function;

    for (uint32_t index = block.start; index <= block.finish; index++)
    {
        LUAU_ASSERT(index < function.instructions.size());
        IrInst& inst = function.instructions[index];

        applySubstitutions(function, inst);

        foldConstants(build, function, block, index);

        constPropInInst(state, build, function, block, inst, index);
    }

    // Value numbering and load/store propagation is not performed between blocks
    state.valueMap.clear();
}

static void constPropInBlockChain(IrBuilder& build, std::vector<uint8_t>& visited, IrBlock* block, ConstPropState& state)
{
    IrFunction& function = build.function;

    state.clear();

    while (block)
    {
        uint32_t blockIdx = function.getBlockIndex(*block);
        LUAU_ASSERT(!visited[blockIdx]);
        visited[blockIdx] = true;

        constPropInBlock(build, *block, state);

        IrInst& termInst = function.instructions[block->finish];

        IrBlock* nextBlock = nullptr;

        // Unconditional jump into a block with a single user (current block) allows us to continue optimization
        // with the information we have gathered so far (unless we have already visited that block earlier)
        if (termInst.cmd == IrCmd::JUMP && termInst.a.kind != IrOpKind::VmExit)
        {
            IrBlock& target = function.blockOp(termInst.a);
            uint32_t targetIdx = function.getBlockIndex(target);

            if (target.useCount == 1 && !visited[targetIdx] && target.kind != IrBlockKind::Fallback)
                nextBlock = &target;
        }

        block = nextBlock;
    }
}

// Note that blocks in the collected path are marked as visited
static std::vector<uint32_t> collectDirectBlockJumpPath(IrFunction& function, std::vector<uint8_t>& visited, IrBlock* block)
{
    // Path shouldn't be built starting with a block that has 'live out' values.
    // One theoretical way to get it is if we had 'block' jumping unconditionally into a successor that uses values from 'block'
    // * if the successor has only one use, the starting conditions of 'tryCreateLinearBlock' would prevent this
    // * if the successor has multiple uses, it can't have such 'live in' values without phi nodes that we don't have yet
    // Another possibility is to have two paths from 'block' into the target through two intermediate blocks
    // Usually that would mean that we would have a conditional jump at the end of 'block'
    // But using check guards and fallback blocks it becomes a possible setup
    // We avoid this by making sure fallbacks rejoin the other immediate successor of 'block'
    LUAU_ASSERT(getLiveOutValueCount(function, *block) == 0);

    std::vector<uint32_t> path;

    while (block)
    {
        IrInst& termInst = function.instructions[block->finish];
        IrBlock* nextBlock = nullptr;

        // A chain is made from internal blocks that were not a part of the bytecode CFG
        if (termInst.cmd == IrCmd::JUMP && termInst.a.kind != IrOpKind::VmExit)
        {
            IrBlock& target = function.blockOp(termInst.a);
            uint32_t targetIdx = function.getBlockIndex(target);

            if (!visited[targetIdx] && target.kind == IrBlockKind::Internal)
            {
                // Additional restriction is that to join a block, it cannot produce values that are used in other blocks
                // And it also can't use values produced in other blocks
                auto [liveIns, liveOuts] = getLiveInOutValueCount(function, target);

                if (liveIns == 0 && liveOuts == 0)
                {
                    visited[targetIdx] = true;
                    path.push_back(targetIdx);

                    nextBlock = &target;
                }
            }
        }

        block = nextBlock;
    }

    return path;
}

static void tryCreateLinearBlock(IrBuilder& build, std::vector<uint8_t>& visited, IrBlock& startingBlock, ConstPropState& state)
{
    IrFunction& function = build.function;

    uint32_t blockIdx = function.getBlockIndex(startingBlock);
    LUAU_ASSERT(!visited[blockIdx]);
    visited[blockIdx] = true;

    IrInst& termInst = function.instructions[startingBlock.finish];

    // Block has to end with an unconditional jump
    if (termInst.cmd != IrCmd::JUMP)
        return;

    // And it can't be a jump to a VM exit
    if (termInst.a.kind == IrOpKind::VmExit)
        return;

    // And it has to jump to a block with more than one user
    // If there's only one use, it should already be optimized by constPropInBlockChain
    if (function.blockOp(termInst.a).useCount == 1)
        return;

    uint32_t targetBlockIdx = termInst.a.index;

    // Check if this path is worth it (and it will also mark path blocks as visited)
    std::vector<uint32_t> path = collectDirectBlockJumpPath(function, visited, &startingBlock);

    // If the path is too short, we are not going to linearize it
    if (int(path.size()) < FInt::LuauCodeGenMinLinearBlockPath)
        return;

    // Initialize state with the knowledge of our current block
    state.clear();

    // TODO: using values from the first block can cause 'live out' of the linear block predecessor to not have all required registers
    constPropInBlock(build, startingBlock, state);

    // Verify that the target hasn't changed
    LUAU_ASSERT(function.instructions[startingBlock.finish].a.index == targetBlockIdx);

    // Note: using startingBlock after this line is unsafe as the reference may be reallocated by build.block() below
    uint32_t startingInsn = startingBlock.start;

    // Create a new linearized block into which we are going to redirect the starting block jump
    IrOp newBlock = build.block(IrBlockKind::Linearized);
    visited.push_back(false);

    build.beginBlock(newBlock);

    // By default, blocks are ordered according to the start instruction; we alter the sort order to make sure the linearized block is placed
    // right after the starting block
    function.blocks[newBlock.index].sortkey = startingInsn + 1;

    replace(function, termInst.a, newBlock);

    // Clone the collected path into our fresh block
    for (uint32_t pathBlockIdx : path)
        build.clone(function.blocks[pathBlockIdx], /* removeCurrentTerminator */ true);

    // If all live in/out data is defined aside from the new block, generate it
    // Note that liveness information is not strictly correct after optimization passes and may need to be recomputed before the next passes
    // The information generated here is consistent with the current state that could be outdated, but is still useful in IR inspection
    if (function.cfg.in.size() == newBlock.index)
    {
        LUAU_ASSERT(function.cfg.in.size() == function.cfg.out.size());
        LUAU_ASSERT(function.cfg.in.size() == function.cfg.def.size());

        // Live in is the same as the input of the original first block
        function.cfg.in.push_back(function.cfg.in[path.front()]);

        // Live out is the same as the result of the original last block
        function.cfg.out.push_back(function.cfg.out[path.back()]);

        // Defs are tricky: registers are joined together, but variadic sequences can be consumed inside the block
        function.cfg.def.push_back({});
        RegisterSet& def = function.cfg.def.back();

        for (uint32_t pathBlockIdx : path)
        {
            const RegisterSet& pathDef = function.cfg.def[pathBlockIdx];

            def.regs |= pathDef.regs;

            // Taking only the last defined variadic sequence if it's not consumed before the end
            if (pathDef.varargSeq && function.cfg.out.back().varargSeq)
            {
                def.varargSeq = true;
                def.varargStart = pathDef.varargStart;
            }
        }
    }

    // Optimize our linear block
    IrBlock& linearBlock = function.blockOp(newBlock);
    constPropInBlock(build, linearBlock, state);
}

void constPropInBlockChains(IrBuilder& build, bool useValueNumbering)
{
    IrFunction& function = build.function;

    ConstPropState state{function};
    state.useValueNumbering = useValueNumbering;

    std::vector<uint8_t> visited(function.blocks.size(), false);

    for (IrBlock& block : function.blocks)
    {
        if (block.kind == IrBlockKind::Fallback || block.kind == IrBlockKind::Dead)
            continue;

        if (visited[function.getBlockIndex(block)])
            continue;

        constPropInBlockChain(build, visited, &block, state);
    }
}

void createLinearBlocks(IrBuilder& build, bool useValueNumbering)
{
    // Go through internal block chains and outline them into a single new block.
    // Outlining will be able to linearize the execution even if there was a jump to a block with multiple users;
    // the new 'block' will only be reachable from a single one and all gathered information can be preserved.
    IrFunction& function = build.function;

    ConstPropState state{function};
    state.useValueNumbering = useValueNumbering;

    std::vector<uint8_t> visited(function.blocks.size(), false);

    // This loop can create new 'linear' blocks, so an index-based loop has to be used (and it intentionally won't reach those new blocks)
    size_t originalBlockCount = function.blocks.size();
    for (size_t i = 0; i < originalBlockCount; i++)
    {
        IrBlock& block = function.blocks[i];

        if (block.kind == IrBlockKind::Fallback || block.kind == IrBlockKind::Dead)
            continue;

        if (visited[function.getBlockIndex(block)])
            continue;

        tryCreateLinearBlock(build, visited, block, state);
    }
}

} // namespace CodeGen
} // namespace Luau