mirror of
https://github.com/luau-lang/luau.git
synced 2024-12-14 06:00:39 +00:00
0b2755f964
* Progress toward a diffing algorithm for types. We hope that this will be useful for writing clearer error messages. * Add a missing recursion limiter in `Unifier::tryUnifyTables`. This was causing a crash in certain situations. * Luau heap graph enumeration improvements: * Weak references are not reported * Added tag as a fallback name of non-string table links * Included top Luau function information in thread name to understand where thread might be suspended * Constant folding for `math.pi` and `math.huge` at -O2 * Optimize `string.format` and `%*` * This change makes string interpolation 1.5x-2x faster depending on the number and type of formatted components, assuming a few are using primitive types, and reduces associated GC pressure. New type checker: * Initial work toward tracking the upper and lower bounds of types accurately. Native code generation (JIT): * Add IrCmd::CHECK_TRUTHY for improved assert fast-calls * Do not compute type map for modules without types * Capture metatable+readonly state for NEW_TABLE IR instructions * Replace JUMP_CMP_ANY with CMP_ANY and existing JUMP_EQ_INT * Add support for exits to VM with reentry lock in VmExit --------- Co-authored-by: Arseny Kapoulkine <arseny.kapoulkine@gmail.com> Co-authored-by: Vyacheslav Egorov <vegorov@roblox.com>
128 lines
3.9 KiB
C++
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
|
|
#include "Luau/OptimizeFinalX64.h"
|
|
|
|
#include "Luau/IrUtils.h"
|
|
|
|
#include <utility>
|
|
|
|
namespace Luau
|
|
{
|
|
namespace CodeGen
|
|
{
|
|
|
|
// x64 assembly allows memory operands, but IR separates loads from uses
|
|
// To improve final x64 lowering, we try to 'inline' single-use register/constant loads into some of our instructions
|
|
// This pass might not be useful on different architectures
|
|
static void optimizeMemoryOperandsX64(IrFunction& function, IrBlock& block)
|
|
{
|
|
LUAU_ASSERT(block.kind != IrBlockKind::Dead);
|
|
|
|
for (uint32_t index = block.start; index <= block.finish; index++)
|
|
{
|
|
LUAU_ASSERT(index < function.instructions.size());
|
|
IrInst& inst = function.instructions[index];
|
|
|
|
switch (inst.cmd)
|
|
{
|
|
case IrCmd::CHECK_TAG:
|
|
{
|
|
if (inst.a.kind == IrOpKind::Inst)
|
|
{
|
|
IrInst& tag = function.instOp(inst.a);
|
|
|
|
if (tag.useCount == 1 && tag.cmd == IrCmd::LOAD_TAG && (tag.a.kind == IrOpKind::VmReg || tag.a.kind == IrOpKind::VmConst))
|
|
replace(function, inst.a, tag.a);
|
|
}
|
|
break;
|
|
}
|
|
case IrCmd::CHECK_TRUTHY:
|
|
{
|
|
if (inst.a.kind == IrOpKind::Inst)
|
|
{
|
|
IrInst& tag = function.instOp(inst.a);
|
|
|
|
if (tag.useCount == 1 && tag.cmd == IrCmd::LOAD_TAG && (tag.a.kind == IrOpKind::VmReg || tag.a.kind == IrOpKind::VmConst))
|
|
replace(function, inst.a, tag.a);
|
|
}
|
|
|
|
if (inst.b.kind == IrOpKind::Inst)
|
|
{
|
|
IrInst& value = function.instOp(inst.b);
|
|
|
|
if (value.useCount == 1 && value.cmd == IrCmd::LOAD_INT)
|
|
replace(function, inst.b, value.a);
|
|
}
|
|
break;
|
|
}
|
|
case IrCmd::ADD_NUM:
|
|
case IrCmd::SUB_NUM:
|
|
case IrCmd::MUL_NUM:
|
|
case IrCmd::DIV_NUM:
|
|
case IrCmd::MOD_NUM:
|
|
case IrCmd::MIN_NUM:
|
|
case IrCmd::MAX_NUM:
|
|
{
|
|
if (inst.b.kind == IrOpKind::Inst)
|
|
{
|
|
IrInst& rhs = function.instOp(inst.b);
|
|
|
|
if (rhs.useCount == 1 && rhs.cmd == IrCmd::LOAD_DOUBLE && (rhs.a.kind == IrOpKind::VmReg || rhs.a.kind == IrOpKind::VmConst))
|
|
replace(function, inst.b, rhs.a);
|
|
}
|
|
break;
|
|
}
|
|
case IrCmd::JUMP_EQ_TAG:
|
|
{
|
|
if (inst.a.kind == IrOpKind::Inst)
|
|
{
|
|
IrInst& tagA = function.instOp(inst.a);
|
|
|
|
if (tagA.useCount == 1 && tagA.cmd == IrCmd::LOAD_TAG && (tagA.a.kind == IrOpKind::VmReg || tagA.a.kind == IrOpKind::VmConst))
|
|
{
|
|
replace(function, inst.a, tagA.a);
|
|
break;
|
|
}
|
|
}
|
|
|
|
if (inst.b.kind == IrOpKind::Inst)
|
|
{
|
|
IrInst& tagB = function.instOp(inst.b);
|
|
|
|
if (tagB.useCount == 1 && tagB.cmd == IrCmd::LOAD_TAG && (tagB.a.kind == IrOpKind::VmReg || tagB.a.kind == IrOpKind::VmConst))
|
|
{
|
|
std::swap(inst.a, inst.b);
|
|
replace(function, inst.a, tagB.a);
|
|
}
|
|
}
|
|
break;
|
|
}
|
|
case IrCmd::JUMP_CMP_NUM:
|
|
{
|
|
if (inst.a.kind == IrOpKind::Inst)
|
|
{
|
|
IrInst& num = function.instOp(inst.a);
|
|
|
|
if (num.useCount == 1 && num.cmd == IrCmd::LOAD_DOUBLE)
|
|
replace(function, inst.a, num.a);
|
|
}
|
|
break;
|
|
}
|
|
default:
|
|
break;
|
|
}
|
|
}
|
|
}
|
|
|
|
// Entry point: applies the x64 memory-operand folding pass to every live block in the function.
void optimizeMemoryOperandsX64(IrFunction& function)
{
    for (IrBlock& block : function.blocks)
    {
        // Dead blocks carry no lowerable instructions and are skipped
        if (block.kind != IrBlockKind::Dead)
            optimizeMemoryOperandsX64(function, block);
    }
}
|
|
|
|
} // namespace CodeGen
|
|
} // namespace Luau
|