mirror of
https://github.com/luau-lang/luau.git
synced 2025-04-05 11:20:54 +01:00
* Added a limit on how many instructions the Compiler can safely produce (reported by @TheGreatSageEqualToHeaven) C++ API Changes: * With work started on read-only and write-only properties, `Property::type` member variable has been replaced with `TypeId type()` and `setType(TypeId)` functions. * New `LazyType` unwrap callback now has a `void` return type, all that's required from the callback is to write into `unwrapped` field. In our work on the new type solver, the following issues were fixed: * Work has started to support https://github.com/Roblox/luau/pull/77 and https://github.com/Roblox/luau/pull/79 * Refinements are no longer applied on l-values, removing some false-positive errors * Improved overload resolution against expected result type * `Frontend::prepareModuleScope` now works in the new solver * Cofinite strings are now comparable And these are the changes in native code generation (JIT): * Fixed MIN_NUM and MAX_NUM constant fold when one of the arguments is NaN * Added constant folding for number conversions and bit operations * Value spilling and rematerialization is now supported on arm64 * Improved FASTCALL2K IR generation to support second argument constant * Added value numbering and load/store propagation optimizations * Added STORE_VECTOR on arm64, completing the IR lowering on this target
109 lines
3.3 KiB
C++
109 lines
3.3 KiB
C++
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
|
|
#include "Luau/OptimizeFinalX64.h"
|
|
|
|
#include "Luau/IrUtils.h"
|
|
|
|
#include <utility>
|
|
|
|
namespace Luau
|
|
{
|
|
namespace CodeGen
|
|
{
|
|
|
|
// x64 assembly allows memory operands, but IR separates loads from uses
|
|
// To improve final x64 lowering, we try to 'inline' single-use register/constant loads into some of our instructions
|
|
// This pass might not be useful on different architectures
|
|
static void optimizeMemoryOperandsX64(IrFunction& function, IrBlock& block)
|
|
{
|
|
LUAU_ASSERT(block.kind != IrBlockKind::Dead);
|
|
|
|
for (uint32_t index = block.start; index <= block.finish; index++)
|
|
{
|
|
LUAU_ASSERT(index < function.instructions.size());
|
|
IrInst& inst = function.instructions[index];
|
|
|
|
switch (inst.cmd)
|
|
{
|
|
case IrCmd::CHECK_TAG:
|
|
{
|
|
if (inst.a.kind == IrOpKind::Inst)
|
|
{
|
|
IrInst& tag = function.instOp(inst.a);
|
|
|
|
if (tag.useCount == 1 && tag.cmd == IrCmd::LOAD_TAG && (tag.a.kind == IrOpKind::VmReg || tag.a.kind == IrOpKind::VmConst))
|
|
replace(function, inst.a, tag.a);
|
|
}
|
|
break;
|
|
}
|
|
case IrCmd::ADD_NUM:
|
|
case IrCmd::SUB_NUM:
|
|
case IrCmd::MUL_NUM:
|
|
case IrCmd::DIV_NUM:
|
|
case IrCmd::MOD_NUM:
|
|
case IrCmd::MIN_NUM:
|
|
case IrCmd::MAX_NUM:
|
|
{
|
|
if (inst.b.kind == IrOpKind::Inst)
|
|
{
|
|
IrInst& rhs = function.instOp(inst.b);
|
|
|
|
if (rhs.useCount == 1 && rhs.cmd == IrCmd::LOAD_DOUBLE && (rhs.a.kind == IrOpKind::VmReg || rhs.a.kind == IrOpKind::VmConst))
|
|
replace(function, inst.b, rhs.a);
|
|
}
|
|
break;
|
|
}
|
|
case IrCmd::JUMP_EQ_TAG:
|
|
{
|
|
if (inst.a.kind == IrOpKind::Inst)
|
|
{
|
|
IrInst& tagA = function.instOp(inst.a);
|
|
|
|
if (tagA.useCount == 1 && tagA.cmd == IrCmd::LOAD_TAG && (tagA.a.kind == IrOpKind::VmReg || tagA.a.kind == IrOpKind::VmConst))
|
|
{
|
|
replace(function, inst.a, tagA.a);
|
|
break;
|
|
}
|
|
}
|
|
|
|
if (inst.b.kind == IrOpKind::Inst)
|
|
{
|
|
IrInst& tagB = function.instOp(inst.b);
|
|
|
|
if (tagB.useCount == 1 && tagB.cmd == IrCmd::LOAD_TAG && (tagB.a.kind == IrOpKind::VmReg || tagB.a.kind == IrOpKind::VmConst))
|
|
{
|
|
std::swap(inst.a, inst.b);
|
|
replace(function, inst.a, tagB.a);
|
|
}
|
|
}
|
|
break;
|
|
}
|
|
case IrCmd::JUMP_CMP_NUM:
|
|
{
|
|
if (inst.a.kind == IrOpKind::Inst)
|
|
{
|
|
IrInst& num = function.instOp(inst.a);
|
|
|
|
if (num.useCount == 1 && num.cmd == IrCmd::LOAD_DOUBLE)
|
|
replace(function, inst.a, num.a);
|
|
}
|
|
break;
|
|
}
|
|
default:
|
|
break;
|
|
}
|
|
}
|
|
}
|
|
|
|
// Runs the per-block memory-operand fusion pass over every live block of the function
void optimizeMemoryOperandsX64(IrFunction& function)
{
    for (IrBlock& block : function.blocks)
    {
        // Dead blocks have no valid instruction range to process
        if (block.kind != IrBlockKind::Dead)
            optimizeMemoryOperandsX64(function, block);
    }
}
|
|
|
|
} // namespace CodeGen
|
|
} // namespace Luau
|