// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "IrLoweringX64.h"

#include "Luau/DenseHash.h"
#include "Luau/IrData.h"
#include "Luau/IrUtils.h"

#include "Luau/IrCallWrapperX64.h"

#include "EmitBuiltinsX64.h"
#include "EmitCommonX64.h"
#include "EmitInstructionX64.h"
#include "NativeState.h"

#include "lstate.h"
#include "lgc.h"

LUAU_FASTFLAG(LuauCodeGenFixByteLower)

namespace Luau
{
namespace CodeGen
{
namespace X64
{
IrLoweringX64::IrLoweringX64(AssemblyBuilderX64& build, ModuleHelpers& helpers, IrFunction& function, LoweringStats* stats)
    : build(build)
    , helpers(helpers)
    , function(function)
    , stats(stats)
    , regs(build, function, stats)
    , valueTracker(function)
    , exitHandlerMap(~0u)
{
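    // When the location tracker needs the result of an instruction to be available
    // again, ask the register allocator to restore it from its spill location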
    valueTracker.setRestoreCallack(&regs, [](void* context, IrInst& inst) {
        ((IrRegAllocX64*)context)->restore(inst, false);
    });

    build.align(kFunctionAlignment, X64::AlignmentDataX64::Ud2);
}

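// Helper that stores a double-precision operand into a 32-bit float destination;
// used by STORE_VECTOR below, since vector components are stored as floats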
void IrLoweringX64::storeDoubleAsFloat(OperandX64 dst, IrOp src)
{
    ScopedRegX64 tmp{regs, SizeX64::xmmword};

    if (src.kind == IrOpKind::Constant)
    {
        build.vmovss(tmp.reg, build.f32(float(doubleOp(src))));
    }
    else if (src.kind == IrOpKind::Inst)
    {
        build.vcvtsd2ss(tmp.reg, regOp(src), regOp(src));
    }
    else
    {
        LUAU_ASSERT(!"Unsupported instruction form");
    }

    build.vmovss(dst, tmp.reg);
}

void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
{
    regs.currInstIdx = index;

    valueTracker.beforeInstLowering(inst);

    switch (inst.cmd)
    {
    case IrCmd::LOAD_TAG:
        inst.regX64 = regs.allocReg(SizeX64::dword, index);

        if (inst.a.kind == IrOpKind::VmReg)
            build.mov(inst.regX64, luauRegTag(vmRegOp(inst.a)));
        else if (inst.a.kind == IrOpKind::VmConst)
            build.mov(inst.regX64, luauConstantTag(vmConstOp(inst.a)));
        // If we have a register, we assume it's a pointer to TValue
        // We might introduce explicit operand types in the future to make this more robust
        else if (inst.a.kind == IrOpKind::Inst)
            build.mov(inst.regX64, dword[regOp(inst.a) + offsetof(TValue, tt)]);
        else
            LUAU_ASSERT(!"Unsupported instruction form");
        break;
    case IrCmd::LOAD_POINTER:
        inst.regX64 = regs.allocReg(SizeX64::qword, index);

        if (inst.a.kind == IrOpKind::VmReg)
            build.mov(inst.regX64, luauRegValue(vmRegOp(inst.a)));
        else if (inst.a.kind == IrOpKind::VmConst)
            build.mov(inst.regX64, luauConstantValue(vmConstOp(inst.a)));
        // If we have a register, we assume it's a pointer to TValue
        // We might introduce explicit operand types in the future to make this more robust
        else if (inst.a.kind == IrOpKind::Inst)
            build.mov(inst.regX64, qword[regOp(inst.a) + offsetof(TValue, value)]);
        else
            LUAU_ASSERT(!"Unsupported instruction form");
        break;
    case IrCmd::LOAD_DOUBLE:
        inst.regX64 = regs.allocReg(SizeX64::xmmword, index);

        if (inst.a.kind == IrOpKind::VmReg)
            build.vmovsd(inst.regX64, luauRegValue(vmRegOp(inst.a)));
        else if (inst.a.kind == IrOpKind::VmConst)
            build.vmovsd(inst.regX64, luauConstantValue(vmConstOp(inst.a)));
        else
            LUAU_ASSERT(!"Unsupported instruction form");
        break;
    case IrCmd::LOAD_INT:
        inst.regX64 = regs.allocReg(SizeX64::dword, index);

        build.mov(inst.regX64, luauRegValueInt(vmRegOp(inst.a)));
        break;
    case IrCmd::LOAD_TVALUE:
    {
        inst.regX64 = regs.allocReg(SizeX64::xmmword, index);

        int addrOffset = inst.b.kind != IrOpKind::None ? intOp(inst.b) : 0;

        if (inst.a.kind == IrOpKind::VmReg)
            build.vmovups(inst.regX64, luauReg(vmRegOp(inst.a)));
        else if (inst.a.kind == IrOpKind::VmConst)
            build.vmovups(inst.regX64, luauConstant(vmConstOp(inst.a)));
        else if (inst.a.kind == IrOpKind::Inst)
            build.vmovups(inst.regX64, xmmword[regOp(inst.a) + addrOffset]);
        else
            LUAU_ASSERT(!"Unsupported instruction form");
        break;
    }
    case IrCmd::LOAD_ENV:
        inst.regX64 = regs.allocReg(SizeX64::qword, index);

        build.mov(inst.regX64, sClosure);
        build.mov(inst.regX64, qword[inst.regX64 + offsetof(Closure, env)]);
        break;
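    // GET_ARR_ADDR computes &t->array[index]: the array base plus index * sizeof(TValue);
    // since sizeof(TValue) is a power of two, the variable-index path scales with a
    // shift by kTValueSizeLog2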
    case IrCmd::GET_ARR_ADDR:
        if (inst.b.kind == IrOpKind::Inst)
        {
            inst.regX64 = regs.allocRegOrReuse(SizeX64::qword, index, {inst.b});

            if (dwordReg(inst.regX64) != regOp(inst.b))
                build.mov(dwordReg(inst.regX64), regOp(inst.b));

            build.shl(dwordReg(inst.regX64), kTValueSizeLog2);
            build.add(inst.regX64, qword[regOp(inst.a) + offsetof(Table, array)]);
        }
        else if (inst.b.kind == IrOpKind::Constant)
        {
            inst.regX64 = regs.allocRegOrReuse(SizeX64::qword, index, {inst.a});

            build.mov(inst.regX64, qword[regOp(inst.a) + offsetof(Table, array)]);

            if (intOp(inst.b) != 0)
                build.lea(inst.regX64, addr[inst.regX64 + intOp(inst.b) * sizeof(TValue)]);
        }
        else
        {
            LUAU_ASSERT(!"Unsupported instruction form");
        }
        break;
    case IrCmd::GET_SLOT_NODE_ADDR:
    {
        inst.regX64 = regs.allocReg(SizeX64::qword, index);

        ScopedRegX64 tmp{regs, SizeX64::qword};

        getTableNodeAtCachedSlot(build, tmp.reg, inst.regX64, regOp(inst.a), uintOp(inst.b));
        break;
    }
    case IrCmd::GET_HASH_NODE_ADDR:
    {
        // Custom bit shift value can only be placed in cl
        ScopedRegX64 shiftTmp{regs, regs.takeReg(rcx, kInvalidInstIdx)};

        inst.regX64 = regs.allocReg(SizeX64::qword, index);

        ScopedRegX64 tmp{regs, SizeX64::qword};

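        // The node address is node + (h & ((1 << lsizenode) - 1)) * sizeof(LuaNode),
        // where h is the precomputed hash in inst.b and (1 << lsizenode) - 1 masks it
        // down to a valid position in the hash part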
        build.mov(inst.regX64, qword[regOp(inst.a) + offsetof(Table, node)]);
        build.mov(dwordReg(tmp.reg), 1);
        build.mov(byteReg(shiftTmp.reg), byte[regOp(inst.a) + offsetof(Table, lsizenode)]);
        build.shl(dwordReg(tmp.reg), byteReg(shiftTmp.reg));
        build.dec(dwordReg(tmp.reg));
        build.and_(dwordReg(tmp.reg), uintOp(inst.b));
        build.shl(tmp.reg, kLuaNodeSizeLog2);
        build.add(inst.regX64, tmp.reg);
        break;
    }
    case IrCmd::GET_CLOSURE_UPVAL_ADDR:
    {
        inst.regX64 = regs.allocRegOrReuse(SizeX64::qword, index, {inst.a});

        if (inst.a.kind == IrOpKind::Undef)
        {
            build.mov(inst.regX64, sClosure);
        }
        else
        {
            RegisterX64 cl = regOp(inst.a);
            if (inst.regX64 != cl)
                build.mov(inst.regX64, cl);
        }

        build.add(inst.regX64, offsetof(Closure, l.uprefs) + sizeof(TValue) * vmUpvalueOp(inst.b));
        break;
    }
    case IrCmd::STORE_TAG:
        if (inst.b.kind == IrOpKind::Constant)
        {
            if (inst.a.kind == IrOpKind::Inst)
                build.mov(dword[regOp(inst.a) + offsetof(TValue, tt)], tagOp(inst.b));
            else
                build.mov(luauRegTag(vmRegOp(inst.a)), tagOp(inst.b));
        }
        else
            LUAU_ASSERT(!"Unsupported instruction form");
        break;
    case IrCmd::STORE_POINTER:
    {
        OperandX64 valueLhs = inst.a.kind == IrOpKind::Inst ? qword[regOp(inst.a) + offsetof(TValue, value)] : luauRegValue(vmRegOp(inst.a));

        if (inst.b.kind == IrOpKind::Constant)
        {
            LUAU_ASSERT(intOp(inst.b) == 0);
            build.mov(valueLhs, 0);
        }
        else if (inst.b.kind == IrOpKind::Inst)
        {
            build.mov(valueLhs, regOp(inst.b));
        }
        else
        {
            LUAU_ASSERT(!"Unsupported instruction form");
        }
        break;
    }
    case IrCmd::STORE_DOUBLE:
    {
        OperandX64 valueLhs = inst.a.kind == IrOpKind::Inst ? qword[regOp(inst.a) + offsetof(TValue, value)] : luauRegValue(vmRegOp(inst.a));

        if (inst.b.kind == IrOpKind::Constant)
        {
            ScopedRegX64 tmp{regs, SizeX64::xmmword};

            build.vmovsd(tmp.reg, build.f64(doubleOp(inst.b)));
            build.vmovsd(valueLhs, tmp.reg);
        }
        else if (inst.b.kind == IrOpKind::Inst)
        {
            build.vmovsd(valueLhs, regOp(inst.b));
        }
        else
        {
            LUAU_ASSERT(!"Unsupported instruction form");
        }
        break;
    }
    case IrCmd::STORE_INT:
        if (inst.b.kind == IrOpKind::Constant)
            build.mov(luauRegValueInt(vmRegOp(inst.a)), intOp(inst.b));
        else if (inst.b.kind == IrOpKind::Inst)
            build.mov(luauRegValueInt(vmRegOp(inst.a)), regOp(inst.b));
        else
            LUAU_ASSERT(!"Unsupported instruction form");
        break;
    case IrCmd::STORE_VECTOR:
        storeDoubleAsFloat(luauRegValueVector(vmRegOp(inst.a), 0), inst.b);
        storeDoubleAsFloat(luauRegValueVector(vmRegOp(inst.a), 1), inst.c);
        storeDoubleAsFloat(luauRegValueVector(vmRegOp(inst.a), 2), inst.d);
        break;
    case IrCmd::STORE_TVALUE:
    {
        int addrOffset = inst.c.kind != IrOpKind::None ? intOp(inst.c) : 0;

        if (inst.a.kind == IrOpKind::VmReg)
            build.vmovups(luauReg(vmRegOp(inst.a)), regOp(inst.b));
        else if (inst.a.kind == IrOpKind::Inst)
            build.vmovups(xmmword[regOp(inst.a) + addrOffset], regOp(inst.b));
        else
            LUAU_ASSERT(!"Unsupported instruction form");
        break;
    }
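    // STORE_SPLIT_TVALUE writes the tag and the value of a TValue separately,
    // avoiding the need to materialize a full 16-byte TValue in a vector register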
    case IrCmd::STORE_SPLIT_TVALUE:
    {
        int addrOffset = inst.d.kind != IrOpKind::None ? intOp(inst.d) : 0;

        OperandX64 tagLhs = inst.a.kind == IrOpKind::Inst ? dword[regOp(inst.a) + offsetof(TValue, tt) + addrOffset] : luauRegTag(vmRegOp(inst.a));
        build.mov(tagLhs, tagOp(inst.b));

        if (tagOp(inst.b) == LUA_TBOOLEAN)
        {
            OperandX64 valueLhs =
                inst.a.kind == IrOpKind::Inst ? dword[regOp(inst.a) + offsetof(TValue, value) + addrOffset] : luauRegValueInt(vmRegOp(inst.a));
            build.mov(valueLhs, inst.c.kind == IrOpKind::Constant ? OperandX64(intOp(inst.c)) : regOp(inst.c));
        }
        else if (tagOp(inst.b) == LUA_TNUMBER)
        {
            OperandX64 valueLhs =
                inst.a.kind == IrOpKind::Inst ? qword[regOp(inst.a) + offsetof(TValue, value) + addrOffset] : luauRegValue(vmRegOp(inst.a));

            if (inst.c.kind == IrOpKind::Constant)
            {
                ScopedRegX64 tmp{regs, SizeX64::xmmword};

                build.vmovsd(tmp.reg, build.f64(doubleOp(inst.c)));
                build.vmovsd(valueLhs, tmp.reg);
            }
            else
            {
                build.vmovsd(valueLhs, regOp(inst.c));
            }
        }
        else if (isGCO(tagOp(inst.b)))
        {
            OperandX64 valueLhs =
                inst.a.kind == IrOpKind::Inst ? qword[regOp(inst.a) + offsetof(TValue, value) + addrOffset] : luauRegValue(vmRegOp(inst.a));
            build.mov(valueLhs, regOp(inst.c));
        }
        else
        {
            LUAU_ASSERT(!"Unsupported instruction form");
        }
        break;
    }
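    // Integer addition prefers 'lea' when the destination differs from the source:
    // it performs the add without clobbering the source register or touching flags,
    // while 'inc' covers the common in-place +1 case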
    case IrCmd::ADD_INT:
    {
        inst.regX64 = regs.allocRegOrReuse(SizeX64::dword, index, {inst.a});

        if (inst.a.kind == IrOpKind::Constant)
        {
            build.lea(inst.regX64, addr[regOp(inst.b) + intOp(inst.a)]);
        }
        else if (inst.a.kind == IrOpKind::Inst)
        {
            if (inst.regX64 == regOp(inst.a))
            {
                if (inst.b.kind == IrOpKind::Inst)
                    build.add(inst.regX64, regOp(inst.b));
                else if (intOp(inst.b) == 1)
                    build.inc(inst.regX64);
                else
                    build.add(inst.regX64, intOp(inst.b));
            }
            else
            {
                if (inst.b.kind == IrOpKind::Inst)
                    build.lea(inst.regX64, addr[regOp(inst.a) + regOp(inst.b)]);
                else
                    build.lea(inst.regX64, addr[regOp(inst.a) + intOp(inst.b)]);
            }
        }
        else
        {
            LUAU_ASSERT(!"Unsupported instruction form");
        }
        break;
    }
    case IrCmd::SUB_INT:
        inst.regX64 = regs.allocRegOrReuse(SizeX64::dword, index, {inst.a});

        if (inst.regX64 == regOp(inst.a) && intOp(inst.b) == 1)
            build.dec(inst.regX64);
        else if (inst.regX64 == regOp(inst.a))
            build.sub(inst.regX64, intOp(inst.b));
        else
            build.lea(inst.regX64, addr[regOp(inst.a) - intOp(inst.b)]);
        break;
    case IrCmd::ADD_NUM:
        inst.regX64 = regs.allocRegOrReuse(SizeX64::xmmword, index, {inst.a, inst.b});

        if (inst.a.kind == IrOpKind::Constant)
        {
            ScopedRegX64 tmp{regs, SizeX64::xmmword};

            build.vmovsd(tmp.reg, memRegDoubleOp(inst.a));
            build.vaddsd(inst.regX64, tmp.reg, memRegDoubleOp(inst.b));
        }
        else
        {
            build.vaddsd(inst.regX64, regOp(inst.a), memRegDoubleOp(inst.b));
        }
        break;
    case IrCmd::SUB_NUM:
        inst.regX64 = regs.allocRegOrReuse(SizeX64::xmmword, index, {inst.a, inst.b});

        if (inst.a.kind == IrOpKind::Constant)
        {
            ScopedRegX64 tmp{regs, SizeX64::xmmword};

            build.vmovsd(tmp.reg, memRegDoubleOp(inst.a));
            build.vsubsd(inst.regX64, tmp.reg, memRegDoubleOp(inst.b));
        }
        else
        {
            build.vsubsd(inst.regX64, regOp(inst.a), memRegDoubleOp(inst.b));
        }
        break;
    case IrCmd::MUL_NUM:
        inst.regX64 = regs.allocRegOrReuse(SizeX64::xmmword, index, {inst.a, inst.b});

        if (inst.a.kind == IrOpKind::Constant)
        {
            ScopedRegX64 tmp{regs, SizeX64::xmmword};

            build.vmovsd(tmp.reg, memRegDoubleOp(inst.a));
            build.vmulsd(inst.regX64, tmp.reg, memRegDoubleOp(inst.b));
        }
        else
        {
            build.vmulsd(inst.regX64, regOp(inst.a), memRegDoubleOp(inst.b));
        }
        break;
    case IrCmd::DIV_NUM:
        inst.regX64 = regs.allocRegOrReuse(SizeX64::xmmword, index, {inst.a, inst.b});

        if (inst.a.kind == IrOpKind::Constant)
        {
            ScopedRegX64 tmp{regs, SizeX64::xmmword};

            build.vmovsd(tmp.reg, memRegDoubleOp(inst.a));
            build.vdivsd(inst.regX64, tmp.reg, memRegDoubleOp(inst.b));
        }
        else
        {
            build.vdivsd(inst.regX64, regOp(inst.a), memRegDoubleOp(inst.b));
        }
        break;
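    // Floor division is lowered as a regular division followed by rounding the
    // quotient toward negative infinity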
    case IrCmd::IDIV_NUM:
        inst.regX64 = regs.allocRegOrReuse(SizeX64::xmmword, index, {inst.a, inst.b});

        if (inst.a.kind == IrOpKind::Constant)
        {
            ScopedRegX64 tmp{regs, SizeX64::xmmword};

            build.vmovsd(tmp.reg, memRegDoubleOp(inst.a));
            build.vdivsd(inst.regX64, tmp.reg, memRegDoubleOp(inst.b));
        }
        else
        {
            build.vdivsd(inst.regX64, regOp(inst.a), memRegDoubleOp(inst.b));
        }

        build.vroundsd(inst.regX64, inst.regX64, inst.regX64, RoundingModeX64::RoundToNegativeInfinity);
        break;
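    // Lua-style modulo: a - floor(a / b) * b, matching luai_nummod in the interpreter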
    case IrCmd::MOD_NUM:
    {
        inst.regX64 = regs.allocRegOrReuse(SizeX64::xmmword, index, {inst.a, inst.b});

        ScopedRegX64 optLhsTmp{regs};
        RegisterX64 lhs;

        if (inst.a.kind == IrOpKind::Constant)
        {
            optLhsTmp.alloc(SizeX64::xmmword);

            build.vmovsd(optLhsTmp.reg, memRegDoubleOp(inst.a));
            lhs = optLhsTmp.reg;
        }
        else
        {
            lhs = regOp(inst.a);
        }

        if (inst.b.kind == IrOpKind::Inst)
        {
            ScopedRegX64 tmp{regs, SizeX64::xmmword};

            build.vdivsd(tmp.reg, lhs, memRegDoubleOp(inst.b));
            build.vroundsd(tmp.reg, tmp.reg, tmp.reg, RoundingModeX64::RoundToNegativeInfinity);
            build.vmulsd(tmp.reg, tmp.reg, memRegDoubleOp(inst.b));
            build.vsubsd(inst.regX64, lhs, tmp.reg);
        }
        else
        {
            ScopedRegX64 tmp1{regs, SizeX64::xmmword};
            ScopedRegX64 tmp2{regs, SizeX64::xmmword};

            build.vmovsd(tmp1.reg, memRegDoubleOp(inst.b));
            build.vdivsd(tmp2.reg, lhs, tmp1.reg);
            build.vroundsd(tmp2.reg, tmp2.reg, tmp2.reg, RoundingModeX64::RoundToNegativeInfinity);
            build.vmulsd(tmp1.reg, tmp2.reg, tmp1.reg);
            build.vsubsd(inst.regX64, lhs, tmp1.reg);
        }
        break;
    }
    case IrCmd::MIN_NUM:
        inst.regX64 = regs.allocRegOrReuse(SizeX64::xmmword, index, {inst.a, inst.b});

        if (inst.a.kind == IrOpKind::Constant)
        {
            ScopedRegX64 tmp{regs, SizeX64::xmmword};

            build.vmovsd(tmp.reg, memRegDoubleOp(inst.a));
            build.vminsd(inst.regX64, tmp.reg, memRegDoubleOp(inst.b));
        }
        else
        {
            build.vminsd(inst.regX64, regOp(inst.a), memRegDoubleOp(inst.b));
        }
        break;
    case IrCmd::MAX_NUM:
        inst.regX64 = regs.allocRegOrReuse(SizeX64::xmmword, index, {inst.a, inst.b});

        if (inst.a.kind == IrOpKind::Constant)
        {
            ScopedRegX64 tmp{regs, SizeX64::xmmword};

            build.vmovsd(tmp.reg, memRegDoubleOp(inst.a));
            build.vmaxsd(inst.regX64, tmp.reg, memRegDoubleOp(inst.b));
        }
        else
        {
            build.vmaxsd(inst.regX64, regOp(inst.a), memRegDoubleOp(inst.b));
        }
        break;
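    // Negation flips the sign bit by XORing with -0.0 (a mask with only the sign bit set)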
    case IrCmd::UNM_NUM:
    {
        inst.regX64 = regs.allocRegOrReuse(SizeX64::xmmword, index, {inst.a});

        RegisterX64 src = regOp(inst.a);

        if (inst.regX64 == src)
        {
            build.vxorpd(inst.regX64, inst.regX64, build.f64(-0.0));
        }
        else
        {
            build.vmovsd(inst.regX64, src, src);
            build.vxorpd(inst.regX64, inst.regX64, build.f64(-0.0));
        }

        break;
    }
    case IrCmd::FLOOR_NUM:
        inst.regX64 = regs.allocRegOrReuse(SizeX64::xmmword, index, {inst.a});

        build.vroundsd(inst.regX64, inst.regX64, memRegDoubleOp(inst.a), RoundingModeX64::RoundToNegativeInfinity);
        break;
    case IrCmd::CEIL_NUM:
        inst.regX64 = regs.allocRegOrReuse(SizeX64::xmmword, index, {inst.a});

        build.vroundsd(inst.regX64, inst.regX64, memRegDoubleOp(inst.a), RoundingModeX64::RoundToPositiveInfinity);
        break;
    case IrCmd::ROUND_NUM:
    {
        inst.regX64 = regs.allocRegOrReuse(SizeX64::xmmword, index, {inst.a});

        ScopedRegX64 tmp1{regs, SizeX64::xmmword};
        ScopedRegX64 tmp2{regs, SizeX64::xmmword};

        if (inst.a.kind != IrOpKind::Inst)
            build.vmovsd(inst.regX64, memRegDoubleOp(inst.a));
        else if (regOp(inst.a) != inst.regX64)
            build.vmovsd(inst.regX64, inst.regX64, regOp(inst.a));

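        // math.round rounds half away from zero: copy the input's sign onto the
        // largest double smaller than 0.5, add it to the input, then truncate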
        build.vandpd(tmp1.reg, inst.regX64, build.f64x2(-0.0, -0.0));
        build.vmovsd(tmp2.reg, build.i64(0x3fdfffffffffffff)); // 0.49999999999999994
        build.vorpd(tmp1.reg, tmp1.reg, tmp2.reg);
        build.vaddsd(inst.regX64, inst.regX64, tmp1.reg);
        build.vroundsd(inst.regX64, inst.regX64, inst.regX64, RoundingModeX64::RoundToZero);
        break;
    }
    case IrCmd::SQRT_NUM:
        inst.regX64 = regs.allocRegOrReuse(SizeX64::xmmword, index, {inst.a});

        build.vsqrtsd(inst.regX64, inst.regX64, memRegDoubleOp(inst.a));
        break;
    case IrCmd::ABS_NUM:
        inst.regX64 = regs.allocRegOrReuse(SizeX64::xmmword, index, {inst.a});

        if (inst.a.kind != IrOpKind::Inst)
            build.vmovsd(inst.regX64, memRegDoubleOp(inst.a));
        else if (regOp(inst.a) != inst.regX64)
            build.vmovsd(inst.regX64, inst.regX64, regOp(inst.a));

        build.vandpd(inst.regX64, inst.regX64, build.i64(~(1LL << 63)));
        break;
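    // NOT_ANY produces 1 exactly when the operand is falsy, and in Luau only
    // nil and false are falsy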
    case IrCmd::NOT_ANY:
    {
        // TODO: if we have a single user which is a STORE_INT, we are missing the opportunity to write directly to target
        inst.regX64 = regs.allocRegOrReuse(SizeX64::dword, index, {inst.a, inst.b});

        Label saveone, savezero, exit;

        if (inst.a.kind == IrOpKind::Constant)
        {
            // Other cases should've been constant folded
            LUAU_ASSERT(tagOp(inst.a) == LUA_TBOOLEAN);
        }
        else
        {
            build.cmp(regOp(inst.a), LUA_TNIL);
            build.jcc(ConditionX64::Equal, saveone);

            build.cmp(regOp(inst.a), LUA_TBOOLEAN);
            build.jcc(ConditionX64::NotEqual, savezero);
        }

        if (inst.b.kind == IrOpKind::Constant)
        {
            // If value is 1, we fallthrough to storing 0
            if (intOp(inst.b) == 0)
                build.jmp(saveone);
        }
        else
        {
            build.cmp(regOp(inst.b), 0);
            build.jcc(ConditionX64::Equal, saveone);
        }

        build.setLabel(savezero);
        build.mov(inst.regX64, 0);
        build.jmp(exit);

        build.setLabel(saveone);
        build.mov(inst.regX64, 1);

        build.setLabel(exit);
        break;
    }
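    // Ordering/equality on arbitrary values goes through the VM helpers, which can
    // call metamethods and reallocate the stack, so base is refreshed afterwards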
    case IrCmd::CMP_ANY:
    {
        IrCondition cond = conditionOp(inst.c);

        IrCallWrapperX64 callWrap(regs, build);
        callWrap.addArgument(SizeX64::qword, rState);
        callWrap.addArgument(SizeX64::qword, luauRegAddress(vmRegOp(inst.a)));
        callWrap.addArgument(SizeX64::qword, luauRegAddress(vmRegOp(inst.b)));

        if (cond == IrCondition::LessEqual)
            callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaV_lessequal)]);
        else if (cond == IrCondition::Less)
            callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaV_lessthan)]);
        else if (cond == IrCondition::Equal)
            callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaV_equalval)]);
        else
            LUAU_ASSERT(!"Unsupported condition");

        emitUpdateBase(build);

        inst.regX64 = regs.takeReg(eax, index);
        break;
    }
    case IrCmd::JUMP:
        jumpOrAbortOnUndef(inst.a, next);
        break;
    case IrCmd::JUMP_IF_TRUTHY:
        jumpIfTruthy(build, vmRegOp(inst.a), labelOp(inst.b), labelOp(inst.c));
        jumpOrFallthrough(blockOp(inst.c), next);
        break;
    case IrCmd::JUMP_IF_FALSY:
        jumpIfFalsy(build, vmRegOp(inst.a), labelOp(inst.b), labelOp(inst.c));
        jumpOrFallthrough(blockOp(inst.c), next);
        break;
    case IrCmd::JUMP_EQ_TAG:
    {
        LUAU_ASSERT(inst.b.kind == IrOpKind::Inst || inst.b.kind == IrOpKind::Constant);
        OperandX64 opb = inst.b.kind == IrOpKind::Inst ? regOp(inst.b) : OperandX64(tagOp(inst.b));

        if (inst.a.kind == IrOpKind::Constant)
            build.cmp(opb, tagOp(inst.a));
        else
            build.cmp(memRegTagOp(inst.a), opb);

        if (isFallthroughBlock(blockOp(inst.d), next))
        {
            build.jcc(ConditionX64::Equal, labelOp(inst.c));
            jumpOrFallthrough(blockOp(inst.d), next);
        }
        else
        {
            build.jcc(ConditionX64::NotEqual, labelOp(inst.d));
            jumpOrFallthrough(blockOp(inst.c), next);
        }
        break;
    }
    case IrCmd::JUMP_CMP_INT:
    {
        IrCondition cond = conditionOp(inst.c);

        if ((cond == IrCondition::Equal || cond == IrCondition::NotEqual) && intOp(inst.b) == 0)
        {
            bool invert = cond == IrCondition::NotEqual;

            build.test(regOp(inst.a), regOp(inst.a));

            if (isFallthroughBlock(blockOp(inst.d), next))
            {
                build.jcc(invert ? ConditionX64::Zero : ConditionX64::NotZero, labelOp(inst.e));
                jumpOrFallthrough(blockOp(inst.d), next);
            }
            else
            {
                build.jcc(invert ? ConditionX64::NotZero : ConditionX64::Zero, labelOp(inst.d));
                jumpOrFallthrough(blockOp(inst.e), next);
            }
        }
        else
        {
            build.cmp(regOp(inst.a), intOp(inst.b));

            build.jcc(getConditionInt(cond), labelOp(inst.d));
            jumpOrFallthrough(blockOp(inst.e), next);
        }
        break;
    }
    case IrCmd::JUMP_EQ_POINTER:
        build.cmp(regOp(inst.a), regOp(inst.b));

        build.jcc(ConditionX64::Equal, labelOp(inst.c));
        jumpOrFallthrough(blockOp(inst.d), next);
        break;
    case IrCmd::JUMP_CMP_NUM:
    {
        IrCondition cond = conditionOp(inst.c);

        ScopedRegX64 tmp{regs, SizeX64::xmmword};

        jumpOnNumberCmp(build, tmp.reg, memRegDoubleOp(inst.a), memRegDoubleOp(inst.b), cond, labelOp(inst.d));
        jumpOrFallthrough(blockOp(inst.e), next);
        break;
    }
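    // Numeric for-loop condition: the loop continues while index <= limit when
    // step > 0, and while limit <= index otherwise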
    case IrCmd::JUMP_FORN_LOOP_COND:
    {
        ScopedRegX64 tmp1{regs, SizeX64::xmmword};
        ScopedRegX64 tmp2{regs, SizeX64::xmmword};
        ScopedRegX64 tmp3{regs, SizeX64::xmmword};

        RegisterX64 index = inst.a.kind == IrOpKind::Inst ? regOp(inst.a) : tmp1.reg;
        RegisterX64 limit = inst.b.kind == IrOpKind::Inst ? regOp(inst.b) : tmp2.reg;

        if (inst.a.kind != IrOpKind::Inst)
            build.vmovsd(tmp1.reg, memRegDoubleOp(inst.a));

        if (inst.b.kind != IrOpKind::Inst)
            build.vmovsd(tmp2.reg, memRegDoubleOp(inst.b));

        Label direct;

        // step > 0
        jumpOnNumberCmp(build, tmp3.reg, memRegDoubleOp(inst.c), build.f64(0.0), IrCondition::Greater, direct);

        // !(limit <= index)
        jumpOnNumberCmp(build, noreg, limit, index, IrCondition::NotLessEqual, labelOp(inst.e));
        build.jmp(labelOp(inst.d));

        // !(index <= limit)
        build.setLabel(direct);
        jumpOnNumberCmp(build, noreg, index, limit, IrCondition::NotLessEqual, labelOp(inst.e));
        jumpOrFallthrough(blockOp(inst.d), next);
        break;
    }
    case IrCmd::TABLE_LEN:
    {
        IrCallWrapperX64 callWrap(regs, build, index);
        callWrap.addArgument(SizeX64::qword, regOp(inst.a), inst.a);
        callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaH_getn)]);
        inst.regX64 = regs.takeReg(eax, index);
        break;
    }
    case IrCmd::TABLE_SETNUM:
    {
        IrCallWrapperX64 callWrap(regs, build, index);
        callWrap.addArgument(SizeX64::qword, rState);
        callWrap.addArgument(SizeX64::qword, regOp(inst.a), inst.a);
        callWrap.addArgument(SizeX64::dword, regOp(inst.b), inst.b);
        callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaH_setnum)]);
        inst.regX64 = regs.takeReg(rax, index);
        break;
    }
    case IrCmd::STRING_LEN:
    {
        RegisterX64 ptr = regOp(inst.a);
        inst.regX64 = regs.allocReg(SizeX64::dword, index);
        build.mov(inst.regX64, dword[ptr + offsetof(TString, len)]);
        break;
    }
    case IrCmd::NEW_TABLE:
    {
        IrCallWrapperX64 callWrap(regs, build, index);
        callWrap.addArgument(SizeX64::qword, rState);
        callWrap.addArgument(SizeX64::dword, int32_t(uintOp(inst.a)));
        callWrap.addArgument(SizeX64::dword, int32_t(uintOp(inst.b)));
        callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaH_new)]);
        inst.regX64 = regs.takeReg(rax, index);
        break;
    }
    case IrCmd::DUP_TABLE:
    {
        IrCallWrapperX64 callWrap(regs, build, index);
        callWrap.addArgument(SizeX64::qword, rState);
        callWrap.addArgument(SizeX64::qword, regOp(inst.a), inst.a);
        callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaH_clone)]);
        inst.regX64 = regs.takeReg(rax, index);
        break;
    }
    case IrCmd::TRY_NUM_TO_INDEX:
    {
        inst.regX64 = regs.allocReg(SizeX64::dword, index);

        ScopedRegX64 tmp{regs, SizeX64::xmmword};

        convertNumberToIndexOrJump(build, tmp.reg, regOp(inst.a), inst.regX64, labelOp(inst.b));
        break;
    }
    case IrCmd::TRY_CALL_FASTGETTM:
    {
        ScopedRegX64 tmp{regs, SizeX64::qword};

        build.mov(tmp.reg, qword[regOp(inst.a) + offsetof(Table, metatable)]);
        regs.freeLastUseReg(function.instOp(inst.a), index); // Release before the call if it's the last use

        build.test(tmp.reg, tmp.reg);
        build.jcc(ConditionX64::Zero, labelOp(inst.c)); // No metatable

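        // A set bit in tmcache records that this metamethod is known to be absent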
        build.test(byte[tmp.reg + offsetof(Table, tmcache)], 1 << intOp(inst.b));
        build.jcc(ConditionX64::NotZero, labelOp(inst.c)); // No tag method

        ScopedRegX64 tmp2{regs, SizeX64::qword};
        build.mov(tmp2.reg, qword[rState + offsetof(lua_State, global)]);

        {
            ScopedSpills spillGuard(regs);

            IrCallWrapperX64 callWrap(regs, build, index);
            callWrap.addArgument(SizeX64::qword, tmp);
            callWrap.addArgument(SizeX64::qword, intOp(inst.b));
            callWrap.addArgument(SizeX64::qword, qword[tmp2.release() + offsetof(global_State, tmname) + intOp(inst.b) * sizeof(TString*)]);
            callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaT_gettm)]);
        }

        build.test(rax, rax);
        build.jcc(ConditionX64::Zero, labelOp(inst.c)); // No tag method

        inst.regX64 = regs.takeReg(rax, index);
        break;
    }
    case IrCmd::INT_TO_NUM:
        inst.regX64 = regs.allocReg(SizeX64::xmmword, index);

        build.vcvtsi2sd(inst.regX64, inst.regX64, regOp(inst.a));
        break;
    case IrCmd::UINT_TO_NUM:
        inst.regX64 = regs.allocReg(SizeX64::xmmword, index);

        // AVX has no uint->double conversion; the source must come from UINT op and they all should clear top 32 bits so we can usually
        // use 64-bit reg; the one exception is NUM_TO_UINT which doesn't clear top bits
        if (IrCmd source = function.instOp(inst.a).cmd; source == IrCmd::NUM_TO_UINT)
        {
            ScopedRegX64 tmp{regs, SizeX64::dword};
            build.mov(tmp.reg, regOp(inst.a));
            build.vcvtsi2sd(inst.regX64, inst.regX64, qwordReg(tmp.reg));
        }
        else
        {
            LUAU_ASSERT(source != IrCmd::SUBSTITUTE); // we don't process substitutions
            build.vcvtsi2sd(inst.regX64, inst.regX64, qwordReg(regOp(inst.a)));
        }
        break;
    case IrCmd::NUM_TO_INT:
        inst.regX64 = regs.allocReg(SizeX64::dword, index);

        build.vcvttsd2si(inst.regX64, memRegDoubleOp(inst.a));
        break;
    case IrCmd::NUM_TO_UINT:
        inst.regX64 = regs.allocReg(SizeX64::dword, index);

        build.vcvttsd2si(qwordReg(inst.regX64), memRegDoubleOp(inst.a));
        break;
    case IrCmd::ADJUST_STACK_TO_REG:
    {
        ScopedRegX64 tmp{regs, SizeX64::qword};

        if (inst.b.kind == IrOpKind::Constant)
        {
            build.lea(tmp.reg, addr[rBase + (vmRegOp(inst.a) + intOp(inst.b)) * sizeof(TValue)]);
            build.mov(qword[rState + offsetof(lua_State, top)], tmp.reg);
        }
        else if (inst.b.kind == IrOpKind::Inst)
        {
            build.mov(dwordReg(tmp.reg), regOp(inst.b));
            build.shl(tmp.reg, kTValueSizeLog2);
            build.lea(tmp.reg, addr[rBase + tmp.reg + vmRegOp(inst.a) * sizeof(TValue)]);
            build.mov(qword[rState + offsetof(lua_State, top)], tmp.reg);
        }
        else
        {
            LUAU_ASSERT(!"Unsupported instruction form");
        }
        break;
    }
    case IrCmd::ADJUST_STACK_TO_TOP:
    {
        ScopedRegX64 tmp{regs, SizeX64::qword};
        build.mov(tmp.reg, qword[rState + offsetof(lua_State, ci)]);
        build.mov(tmp.reg, qword[tmp.reg + offsetof(CallInfo, top)]);
        build.mov(qword[rState + offsetof(lua_State, top)], tmp.reg);
        break;
    }

    case IrCmd::FASTCALL:
    {
        OperandX64 arg2 = inst.d.kind != IrOpKind::Undef ? memRegDoubleOp(inst.d) : OperandX64{0};

        emitBuiltin(regs, build, uintOp(inst.a), vmRegOp(inst.b), vmRegOp(inst.c), arg2, intOp(inst.e), intOp(inst.f));
        break;
    }
    case IrCmd::INVOKE_FASTCALL:
    {
        unsigned bfid = uintOp(inst.a);

        OperandX64 args = 0;

        if (inst.d.kind == IrOpKind::VmReg)
            args = luauRegAddress(vmRegOp(inst.d));
        else if (inst.d.kind == IrOpKind::VmConst)
            args = luauConstantAddress(vmConstOp(inst.d));
        else
            LUAU_ASSERT(inst.d.kind == IrOpKind::Undef);

        int ra = vmRegOp(inst.b);
        int arg = vmRegOp(inst.c);
        int nparams = intOp(inst.e);
        int nresults = intOp(inst.f);

        IrCallWrapperX64 callWrap(regs, build, index);
        callWrap.addArgument(SizeX64::qword, rState);
        callWrap.addArgument(SizeX64::qword, luauRegAddress(ra));
        callWrap.addArgument(SizeX64::qword, luauRegAddress(arg));
        callWrap.addArgument(SizeX64::dword, nresults);
        callWrap.addArgument(SizeX64::qword, args);

        if (nparams == LUA_MULTRET)
        {
            RegisterX64 reg = callWrap.suggestNextArgumentRegister(SizeX64::qword);
            ScopedRegX64 tmp{regs, SizeX64::qword};

            // L->top - (ra + 1)
            build.mov(reg, qword[rState + offsetof(lua_State, top)]);
            build.lea(tmp.reg, addr[rBase + (ra + 1) * sizeof(TValue)]);
            build.sub(reg, tmp.reg);
            build.shr(reg, kTValueSizeLog2);

            callWrap.addArgument(SizeX64::dword, dwordReg(reg));
        }
        else
        {
            callWrap.addArgument(SizeX64::dword, nparams);
        }

        ScopedRegX64 func{regs, SizeX64::qword};
        build.mov(func.reg, qword[rNativeContext + offsetof(NativeContext, luauF_table) + bfid * sizeof(luau_FastFunction)]);

        callWrap.call(func.release());
        inst.regX64 = regs.takeReg(eax, index); // Result of a builtin call is returned in eax
        break;
    }
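    // A negative result from a fast-call means the builtin could not handle the
    // call and the fallback path has to run instead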
    case IrCmd::CHECK_FASTCALL_RES:
    {
        RegisterX64 res = regOp(inst.a);

        build.test(res, res);                           // test here will set SF=1 for a negative number and it always sets OF to 0
        build.jcc(ConditionX64::Less, labelOp(inst.b)); // jl jumps if SF != OF
        break;
    }
    case IrCmd::DO_ARITH:
    {
        OperandX64 opb = inst.b.kind == IrOpKind::VmReg ? luauRegAddress(vmRegOp(inst.b)) : luauConstantAddress(vmConstOp(inst.b));
        OperandX64 opc = inst.c.kind == IrOpKind::VmReg ? luauRegAddress(vmRegOp(inst.c)) : luauConstantAddress(vmConstOp(inst.c));

        callArithHelper(regs, build, vmRegOp(inst.a), opb, opc, TMS(intOp(inst.d)));
        break;
    }
    case IrCmd::DO_LEN:
        callLengthHelper(regs, build, vmRegOp(inst.a), vmRegOp(inst.b));
        break;
    case IrCmd::GET_TABLE:
        if (inst.c.kind == IrOpKind::VmReg)
        {
            callGetTable(regs, build, vmRegOp(inst.b), luauRegAddress(vmRegOp(inst.c)), vmRegOp(inst.a));
        }
        else if (inst.c.kind == IrOpKind::Constant)
        {
            TValue n = {};
            setnvalue(&n, uintOp(inst.c));
            callGetTable(regs, build, vmRegOp(inst.b), build.bytes(&n, sizeof(n)), vmRegOp(inst.a));
        }
        else
        {
            LUAU_ASSERT(!"Unsupported instruction form");
        }
        break;
    case IrCmd::SET_TABLE:
        if (inst.c.kind == IrOpKind::VmReg)
        {
            callSetTable(regs, build, vmRegOp(inst.b), luauRegAddress(vmRegOp(inst.c)), vmRegOp(inst.a));
        }
        else if (inst.c.kind == IrOpKind::Constant)
        {
            TValue n = {};
            setnvalue(&n, uintOp(inst.c));
            callSetTable(regs, build, vmRegOp(inst.b), build.bytes(&n, sizeof(n)), vmRegOp(inst.a));
        }
        else
        {
            LUAU_ASSERT(!"Unsupported instruction form");
        }
        break;
    case IrCmd::GET_IMPORT:
    {
        ScopedRegX64 tmp1{regs, SizeX64::qword};

        build.mov(tmp1.reg, sClosure);

        IrCallWrapperX64 callWrap(regs, build, index);
        callWrap.addArgument(SizeX64::qword, rState);
        callWrap.addArgument(SizeX64::qword, qword[tmp1.release() + offsetof(Closure, env)]);
        callWrap.addArgument(SizeX64::qword, rConstants);
        callWrap.addArgument(SizeX64::qword, luauRegAddress(vmRegOp(inst.a)));
        callWrap.addArgument(SizeX64::dword, uintOp(inst.b));
        callWrap.addArgument(SizeX64::dword, 0);
        callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaV_getimport)]);

        emitUpdateBase(build);
        break;
    }
    case IrCmd::CONCAT:
    {
        IrCallWrapperX64 callWrap(regs, build, index);
        callWrap.addArgument(SizeX64::qword, rState);
        callWrap.addArgument(SizeX64::dword, int32_t(uintOp(inst.b)));
        callWrap.addArgument(SizeX64::dword, int32_t(vmRegOp(inst.a) + uintOp(inst.b) - 1));
        callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaV_concat)]);

        emitUpdateBase(build);
        break;
    }
    case IrCmd::GET_UPVALUE:
    {
        ScopedRegX64 tmp1{regs, SizeX64::qword};
        ScopedRegX64 tmp2{regs, SizeX64::xmmword};

        build.mov(tmp1.reg, sClosure);
        build.add(tmp1.reg, offsetof(Closure, l.uprefs) + sizeof(TValue) * vmUpvalueOp(inst.b));

        // uprefs[] is either an actual value, or it points to UpVal object which has a pointer to value
        Label skip;
        build.cmp(dword[tmp1.reg + offsetof(TValue, tt)], LUA_TUPVAL);
        build.jcc(ConditionX64::NotEqual, skip);

        // UpVal.v points to the value (either on stack, or on heap inside each UpVal, but we can deref it unconditionally)
        build.mov(tmp1.reg, qword[tmp1.reg + offsetof(TValue, value.gc)]);
        build.mov(tmp1.reg, qword[tmp1.reg + offsetof(UpVal, v)]);

        build.setLabel(skip);

        build.vmovups(tmp2.reg, xmmword[tmp1.reg]);
        build.vmovups(luauReg(vmRegOp(inst.a)), tmp2.reg);
        break;
    }
    case IrCmd::SET_UPVALUE:
    {
        ScopedRegX64 tmp1{regs, SizeX64::qword};
        ScopedRegX64 tmp2{regs, SizeX64::qword};

        build.mov(tmp1.reg, sClosure);
        build.mov(tmp2.reg, qword[tmp1.reg + offsetof(Closure, l.uprefs) + sizeof(TValue) * vmUpvalueOp(inst.a) + offsetof(TValue, value.gc)]);

        build.mov(tmp1.reg, qword[tmp2.reg + offsetof(UpVal, v)]);

        {
            ScopedRegX64 tmp3{regs, SizeX64::xmmword};
            build.vmovups(tmp3.reg, luauReg(vmRegOp(inst.b)));
            build.vmovups(xmmword[tmp1.reg], tmp3.reg);
        }

        tmp1.free();

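        // A GC write barrier is required if a collectable value may have been stored:
        // either the stored tag is unknown here (Undef) or it is a GC object tag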
if (inst.c.kind == IrOpKind::Undef || isGCO(tagOp(inst.c)))
|
2023-09-01 18:58:27 +01:00
|
|
|
callBarrierObject(regs, build, tmp2.release(), {}, inst.b, inst.c.kind == IrOpKind::Undef ? -1 : tagOp(inst.c));
|
2023-01-27 22:28:31 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case IrCmd::CHECK_TAG:
|
2023-04-14 19:06:22 +01:00
|
|
|
build.cmp(memRegTagOp(inst.a), tagOp(inst.b));
|
2023-08-11 15:42:37 +01:00
|
|
|
jumpOrAbortOnUndef(ConditionX64::NotEqual, inst.c, next);
|
2023-01-27 22:28:31 +00:00
|
|
|
break;
|
2023-08-04 20:18:54 +01:00
|
|
|
case IrCmd::CHECK_TRUTHY:
|
|
|
|
{
|
|
|
|
// Constant tags which don't require boolean value check should've been removed in constant folding
|
|
|
|
LUAU_ASSERT(inst.a.kind != IrOpKind::Constant || tagOp(inst.a) == LUA_TBOOLEAN);
|
|
|
|
|
|
|
|
Label skip;
|
|
|
|
|
|
|
|
if (inst.a.kind != IrOpKind::Constant)
|
|
|
|
{
|
|
|
|
// Fail to fallback on 'nil' (falsy)
|
|
|
|
build.cmp(memRegTagOp(inst.a), LUA_TNIL);
|
2023-08-11 15:42:37 +01:00
|
|
|
jumpOrAbortOnUndef(ConditionX64::Equal, inst.c, next);
|
2023-08-04 20:18:54 +01:00
|
|
|
|
|
|
|
// Skip value test if it's not a boolean (truthy)
|
|
|
|
build.cmp(memRegTagOp(inst.a), LUA_TBOOLEAN);
|
|
|
|
build.jcc(ConditionX64::NotEqual, skip);
|
|
|
|
}
|
|
|
|
|
|
|
|
// fail to fallback on 'false' boolean value (falsy)
|
|
|
|
build.cmp(memRegUintOp(inst.b), 0);
|
2023-08-11 15:42:37 +01:00
|
|
|
jumpOrAbortOnUndef(ConditionX64::Equal, inst.c, next);
|
2023-08-04 20:18:54 +01:00
|
|
|
|
|
|
|
if (inst.a.kind != IrOpKind::Constant)
|
|
|
|
build.setLabel(skip);
|
|
|
|
break;
|
|
|
|
}
|
2023-01-27 22:28:31 +00:00
|
|
|
case IrCmd::CHECK_READONLY:
|
2023-04-14 19:06:22 +01:00
|
|
|
build.cmp(byte[regOp(inst.a) + offsetof(Table, readonly)], 0);
|
2023-08-11 15:42:37 +01:00
|
|
|
jumpOrAbortOnUndef(ConditionX64::NotEqual, inst.b, next);
|
2023-01-27 22:28:31 +00:00
|
|
|
break;
|
|
|
|
case IrCmd::CHECK_NO_METATABLE:
|
2023-04-14 19:06:22 +01:00
|
|
|
build.cmp(qword[regOp(inst.a) + offsetof(Table, metatable)], 0);
|
2023-08-11 15:42:37 +01:00
|
|
|
jumpOrAbortOnUndef(ConditionX64::NotEqual, inst.b, next);
|
2023-01-27 22:28:31 +00:00
|
|
|
break;

    case IrCmd::CHECK_SAFE_ENV:
    {
        ScopedRegX64 tmp{regs, SizeX64::qword};

        build.mov(tmp.reg, sClosure);
        build.mov(tmp.reg, qword[tmp.reg + offsetof(Closure, env)]);
        build.cmp(byte[tmp.reg + offsetof(Table, safeenv)], 0);

        jumpOrAbortOnUndef(ConditionX64::Equal, inst.a, next);
        break;
    }

    case IrCmd::CHECK_ARRAY_SIZE:
        if (inst.b.kind == IrOpKind::Inst)
            build.cmp(dword[regOp(inst.a) + offsetof(Table, sizearray)], regOp(inst.b));
        else if (inst.b.kind == IrOpKind::Constant)
            build.cmp(dword[regOp(inst.a) + offsetof(Table, sizearray)], intOp(inst.b));
        else
            LUAU_ASSERT(!"Unsupported instruction form");

        jumpOrAbortOnUndef(ConditionX64::BelowEqual, inst.c, next);
        break;

    case IrCmd::JUMP_SLOT_MATCH:
    case IrCmd::CHECK_SLOT_MATCH:
    {
        Label abort; // Used when guard aborts execution
        const IrOp& mismatchOp = inst.cmd == IrCmd::JUMP_SLOT_MATCH ? inst.d : inst.c;
        Label& mismatch = mismatchOp.kind == IrOpKind::Undef ? abort : labelOp(mismatchOp);

        ScopedRegX64 tmp{regs, SizeX64::qword};

        // Check if node key tag is a string
        build.mov(dwordReg(tmp.reg), luauNodeKeyTag(regOp(inst.a)));
        build.and_(dwordReg(tmp.reg), kTKeyTagMask);
        build.cmp(dwordReg(tmp.reg), LUA_TSTRING);
        build.jcc(ConditionX64::NotEqual, mismatch);

        // Check that node key value matches the expected one
        build.mov(tmp.reg, luauConstantValue(vmConstOp(inst.b)));
        build.cmp(tmp.reg, luauNodeKeyValue(regOp(inst.a)));
        build.jcc(ConditionX64::NotEqual, mismatch);

        // Check that node value is not nil
        build.cmp(dword[regOp(inst.a) + offsetof(LuaNode, val) + offsetof(TValue, tt)], LUA_TNIL);
        build.jcc(ConditionX64::Equal, mismatch);

        if (inst.cmd == IrCmd::JUMP_SLOT_MATCH)
        {
            jumpOrFallthrough(blockOp(inst.c), next);
        }
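        // With no block to jump to on mismatch, route the mismatch label to an
        // out-of-line ud2 trap and have the success path jump around it.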
        else if (mismatchOp.kind == IrOpKind::Undef)
        {
            Label skip;

            build.jmp(skip);
            build.setLabel(abort);
            build.ud2();
            build.setLabel(skip);
        }
        break;
    }

    case IrCmd::CHECK_NODE_NO_NEXT:
    {
        ScopedRegX64 tmp{regs, SizeX64::dword};

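        // TKey packs the tag and the 'next' offset into a single field; shifting the
        // tag bits out leaves only 'next', so a non-zero result means the node chain
        // continues.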
        build.mov(tmp.reg, dword[regOp(inst.a) + offsetof(LuaNode, key) + kOffsetOfTKeyTagNext]);
        build.shr(tmp.reg, kTKeyTagBits);
        jumpOrAbortOnUndef(ConditionX64::NotZero, inst.b, next);
        break;
    }

    case IrCmd::CHECK_NODE_VALUE:
    {
        build.cmp(dword[regOp(inst.a) + offsetof(LuaNode, val) + offsetof(TValue, tt)], LUA_TNIL);
        jumpOrAbortOnUndef(ConditionX64::Equal, inst.b, next);
        break;
    }

    case IrCmd::CHECK_BUFFER_LEN:
    {
        int accessSize = intOp(inst.c);
        LUAU_ASSERT(accessSize > 0);

        if (inst.b.kind == IrOpKind::Inst)
        {
            if (accessSize == 1)
            {
                // Simpler check for a single byte access
                build.cmp(dword[regOp(inst.a) + offsetof(Buffer, len)], regOp(inst.b));
                jumpOrAbortOnUndef(ConditionX64::BelowEqual, inst.d, next);
            }
            else
            {
                ScopedRegX64 tmp1{regs, SizeX64::qword};
                ScopedRegX64 tmp2{regs, SizeX64::dword};

                // To perform the bounds check with a single branch, we take an index that is limited to a 32-bit int
                // The access size is then added using a 64-bit addition
                // This makes sure the addition does not wrap around for values like 0xffffffff
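                // For example, an index of 0xffffffff with accessSize 4 produces
                // 0x100000003, which can never pass a comparison against a 32-bit length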
                if (IrCmd source = function.instOp(inst.b).cmd; source == IrCmd::NUM_TO_INT)
                {
                    // When the previous operation is a conversion to an integer (common case), it is guaranteed to have high register bits cleared
                    build.lea(tmp1.reg, addr[qwordReg(regOp(inst.b)) + accessSize]);
                }
                else
                {
                    // When the source of the index is unknown, it could contain garbage in the high bits, so we zero-extend it explicitly
                    build.mov(dwordReg(tmp1.reg), regOp(inst.b));
                    build.add(tmp1.reg, accessSize);
                }

                build.mov(tmp2.reg, dword[regOp(inst.a) + offsetof(Buffer, len)]);
                build.cmp(qwordReg(tmp2.reg), tmp1.reg);

                jumpOrAbortOnUndef(ConditionX64::Below, inst.d, next);
            }
        }
        else if (inst.b.kind == IrOpKind::Constant)
        {
            int offset = intOp(inst.b);

            // Constant folding can take care of it, but for safety we avoid overflow/underflow cases here
            if (offset < 0 || unsigned(offset) + unsigned(accessSize) >= unsigned(INT_MAX))
                jumpOrAbortOnUndef(inst.d, next);
            else
                build.cmp(dword[regOp(inst.a) + offsetof(Buffer, len)], offset + accessSize);

            jumpOrAbortOnUndef(ConditionX64::Below, inst.d, next);
        }
        else
        {
            LUAU_ASSERT(!"Unsupported instruction form");
        }
        break;
    }

    case IrCmd::INTERRUPT:
    {
        unsigned pcpos = uintOp(inst.a);

        // We unconditionally spill values here because that allows us to ignore register state when we synthesize the interrupt handler
        // This can be changed in the future if we can somehow record interrupt handler code separately
        // Since interrupts are on loop edges or call/ret, we don't have a significant opportunity for register reuse here anyway
        regs.preserveAndFreeInstValues();

        ScopedRegX64 tmp{regs, SizeX64::qword};

        Label self;

        build.mov(tmp.reg, qword[rState + offsetof(lua_State, global)]);
        build.cmp(qword[tmp.reg + offsetof(global_State, cb.interrupt)], 0);
        build.jcc(ConditionX64::NotEqual, self);

        Label next = build.setLabel();

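        // The handler body is emitted out of line in finishFunction(); here we only
        // record the entry label, the interrupted bytecode position and the resume point.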
        interruptHandlers.push_back({self, pcpos, next});
        break;
    }

    case IrCmd::CHECK_GC:
        callStepGc(regs, build);
        break;

    case IrCmd::BARRIER_OBJ:
        callBarrierObject(regs, build, regOp(inst.a), inst.a, inst.b, inst.c.kind == IrOpKind::Undef ? -1 : tagOp(inst.c));
        break;

    case IrCmd::BARRIER_TABLE_BACK:
        callBarrierTableFast(regs, build, regOp(inst.a), inst.a);
        break;

    case IrCmd::BARRIER_TABLE_FORWARD:
    {
        Label skip;

        ScopedRegX64 tmp{regs, SizeX64::qword};

        checkObjectBarrierConditions(build, tmp.reg, regOp(inst.a), inst.b, inst.c.kind == IrOpKind::Undef ? -1 : tagOp(inst.c), skip);

        {
            ScopedSpills spillGuard(regs);

            IrCallWrapperX64 callWrap(regs, build, index);
            callWrap.addArgument(SizeX64::qword, rState);
            callWrap.addArgument(SizeX64::qword, regOp(inst.a), inst.a);
            callWrap.addArgument(SizeX64::qword, tmp);
            callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaC_barriertable)]);
        }

        build.setLabel(skip);
        break;
    }

    case IrCmd::SET_SAVEDPC:
    {
        ScopedRegX64 tmp1{regs, SizeX64::qword};
        ScopedRegX64 tmp2{regs, SizeX64::qword};

        build.mov(tmp2.reg, sCode);
        build.add(tmp2.reg, uintOp(inst.a) * sizeof(Instruction));
        build.mov(tmp1.reg, qword[rState + offsetof(lua_State, ci)]);
        build.mov(qword[tmp1.reg + offsetof(CallInfo, savedpc)], tmp2.reg);
        break;
    }

    case IrCmd::CLOSE_UPVALS:
    {
        Label next;
        ScopedRegX64 tmp1{regs, SizeX64::qword};
        ScopedRegX64 tmp2{regs, SizeX64::qword};

        // L->openupval != 0
        build.mov(tmp1.reg, qword[rState + offsetof(lua_State, openupval)]);
        build.test(tmp1.reg, tmp1.reg);
        build.jcc(ConditionX64::Zero, next);

        // ra <= L->openupval->v
        build.lea(tmp2.reg, addr[rBase + vmRegOp(inst.a) * sizeof(TValue)]);
        build.cmp(tmp2.reg, qword[tmp1.reg + offsetof(UpVal, v)]);
        build.jcc(ConditionX64::Above, next);

        tmp1.free();

        {
            ScopedSpills spillGuard(regs);

            IrCallWrapperX64 callWrap(regs, build, index);
            callWrap.addArgument(SizeX64::qword, rState);
            callWrap.addArgument(SizeX64::qword, tmp2);
            callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaF_close)]);
        }

        build.setLabel(next);
        break;
    }

    case IrCmd::CAPTURE:
        // No-op right now
        break;

    // Fallbacks to non-IR instruction implementations
    case IrCmd::SETLIST:
        regs.assertAllFree();
        emitInstSetList(
            regs, build, vmRegOp(inst.b), vmRegOp(inst.c), intOp(inst.d), uintOp(inst.e), inst.f.kind == IrOpKind::Undef ? -1 : int(uintOp(inst.f)));
        break;

    case IrCmd::CALL:
        regs.assertAllFree();
        regs.assertNoSpills();
        emitInstCall(build, helpers, vmRegOp(inst.a), intOp(inst.b), intOp(inst.c));
        break;

    case IrCmd::RETURN:
        regs.assertAllFree();
        regs.assertNoSpills();
        emitInstReturn(build, helpers, vmRegOp(inst.a), intOp(inst.b), function.variadic);
        break;

    case IrCmd::FORGLOOP:
        regs.assertAllFree();
        emitInstForGLoop(build, vmRegOp(inst.a), intOp(inst.b), labelOp(inst.c));
        jumpOrFallthrough(blockOp(inst.d), next);
        break;

    case IrCmd::FORGLOOP_FALLBACK:
    {
        IrCallWrapperX64 callWrap(regs, build, index);
        callWrap.addArgument(SizeX64::qword, rState);
        callWrap.addArgument(SizeX64::dword, vmRegOp(inst.a));
        callWrap.addArgument(SizeX64::dword, intOp(inst.b));
        callWrap.call(qword[rNativeContext + offsetof(NativeContext, forgLoopNonTableFallback)]);

        emitUpdateBase(build);

        build.test(al, al);
        build.jcc(ConditionX64::NotZero, labelOp(inst.c));
        jumpOrFallthrough(blockOp(inst.d), next);
        break;
    }

    case IrCmd::FORGPREP_XNEXT_FALLBACK:
    {
        IrCallWrapperX64 callWrap(regs, build, index);
        callWrap.addArgument(SizeX64::qword, rState);
        callWrap.addArgument(SizeX64::qword, luauRegAddress(vmRegOp(inst.b)));
        callWrap.addArgument(SizeX64::dword, uintOp(inst.a) + 1);
        callWrap.call(qword[rNativeContext + offsetof(NativeContext, forgPrepXnextFallback)]);
        jumpOrFallthrough(blockOp(inst.c), next);
        break;
    }

    case IrCmd::COVERAGE:
    {
        ScopedRegX64 tmp1{regs, SizeX64::qword};
        ScopedRegX64 tmp2{regs, SizeX64::dword};
        ScopedRegX64 tmp3{regs, SizeX64::dword};

        build.mov(tmp1.reg, sCode);
        build.add(tmp1.reg, uintOp(inst.a) * sizeof(Instruction));

        // hits = LUAU_INSN_E(*pc)
        build.mov(tmp2.reg, dword[tmp1.reg]);
        build.sar(tmp2.reg, 8);

        // hits = (hits < (1 << 23) - 1) ? hits + 1 : hits;
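        // setcc materializes the comparison as 0 or 1, so the increment is branchless
        // and the counter saturates at (1 << 23) - 1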
        build.xor_(tmp3.reg, tmp3.reg);
        build.cmp(tmp2.reg, (1 << 23) - 1);
        build.setcc(ConditionX64::NotEqual, byteReg(tmp3.reg));
        build.add(tmp2.reg, tmp3.reg);

        // VM_PATCH_E(pc, hits);
        build.sal(tmp2.reg, 8);
        build.movzx(tmp3.reg, byte[tmp1.reg]);
        build.or_(tmp3.reg, tmp2.reg);
        build.mov(dword[tmp1.reg], tmp3.reg);
        break;
    }

    // Full instruction fallbacks
    case IrCmd::FALLBACK_GETGLOBAL:
        LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
        LUAU_ASSERT(inst.c.kind == IrOpKind::VmConst);

        emitFallback(regs, build, offsetof(NativeContext, executeGETGLOBAL), uintOp(inst.a));
        break;

    case IrCmd::FALLBACK_SETGLOBAL:
        LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
        LUAU_ASSERT(inst.c.kind == IrOpKind::VmConst);

        emitFallback(regs, build, offsetof(NativeContext, executeSETGLOBAL), uintOp(inst.a));
        break;

    case IrCmd::FALLBACK_GETTABLEKS:
        LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
        LUAU_ASSERT(inst.c.kind == IrOpKind::VmReg);
        LUAU_ASSERT(inst.d.kind == IrOpKind::VmConst);

        emitFallback(regs, build, offsetof(NativeContext, executeGETTABLEKS), uintOp(inst.a));
        break;

    case IrCmd::FALLBACK_SETTABLEKS:
        LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
        LUAU_ASSERT(inst.c.kind == IrOpKind::VmReg);
        LUAU_ASSERT(inst.d.kind == IrOpKind::VmConst);

        emitFallback(regs, build, offsetof(NativeContext, executeSETTABLEKS), uintOp(inst.a));
        break;

    case IrCmd::FALLBACK_NAMECALL:
        LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
        LUAU_ASSERT(inst.c.kind == IrOpKind::VmReg);
        LUAU_ASSERT(inst.d.kind == IrOpKind::VmConst);

        emitFallback(regs, build, offsetof(NativeContext, executeNAMECALL), uintOp(inst.a));
        break;

    case IrCmd::FALLBACK_PREPVARARGS:
        LUAU_ASSERT(inst.b.kind == IrOpKind::Constant);

        emitFallback(regs, build, offsetof(NativeContext, executePREPVARARGS), uintOp(inst.a));
        break;

    case IrCmd::FALLBACK_GETVARARGS:
        LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
        LUAU_ASSERT(inst.c.kind == IrOpKind::Constant);

        if (intOp(inst.c) == LUA_MULTRET)
        {
            IrCallWrapperX64 callWrap(regs, build);
            callWrap.addArgument(SizeX64::qword, rState);

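            // Computing the instruction pointer directly into the register the wrapper
            // expects for the next argument avoids an extra move during call setup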
            RegisterX64 reg = callWrap.suggestNextArgumentRegister(SizeX64::qword);
            build.mov(reg, sCode);
            callWrap.addArgument(SizeX64::qword, addr[reg + uintOp(inst.a) * sizeof(Instruction)]);

            callWrap.addArgument(SizeX64::qword, rBase);
            callWrap.addArgument(SizeX64::dword, vmRegOp(inst.b));
            callWrap.call(qword[rNativeContext + offsetof(NativeContext, executeGETVARARGSMultRet)]);

            emitUpdateBase(build);
        }
        else
        {
            IrCallWrapperX64 callWrap(regs, build);
            callWrap.addArgument(SizeX64::qword, rState);
            callWrap.addArgument(SizeX64::qword, rBase);
            callWrap.addArgument(SizeX64::dword, vmRegOp(inst.b));
            callWrap.addArgument(SizeX64::dword, intOp(inst.c));
            callWrap.call(qword[rNativeContext + offsetof(NativeContext, executeGETVARARGSConst)]);
        }
        break;

    case IrCmd::NEWCLOSURE:
    {
        ScopedRegX64 tmp2{regs, SizeX64::qword};
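
        // Walk cl->l.p->p[uintOp(inst.c)] to load the child Proto of the currently
        // executing closure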
        build.mov(tmp2.reg, sClosure);
        build.mov(tmp2.reg, qword[tmp2.reg + offsetof(Closure, l.p)]);
        build.mov(tmp2.reg, qword[tmp2.reg + offsetof(Proto, p)]);
        build.mov(tmp2.reg, qword[tmp2.reg + sizeof(Proto*) * uintOp(inst.c)]);

        IrCallWrapperX64 callWrap(regs, build, index);
        callWrap.addArgument(SizeX64::qword, rState);
        callWrap.addArgument(SizeX64::dword, uintOp(inst.a), inst.a);
        callWrap.addArgument(SizeX64::qword, regOp(inst.b), inst.b);
        callWrap.addArgument(SizeX64::qword, tmp2);

        callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaF_newLclosure)]);

        inst.regX64 = regs.takeReg(rax, index);
        break;
    }

    case IrCmd::FALLBACK_DUPCLOSURE:
        LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
        LUAU_ASSERT(inst.c.kind == IrOpKind::VmConst);

        emitFallback(regs, build, offsetof(NativeContext, executeDUPCLOSURE), uintOp(inst.a));
        break;

    case IrCmd::FALLBACK_FORGPREP:
        emitFallback(regs, build, offsetof(NativeContext, executeFORGPREP), uintOp(inst.a));
        jumpOrFallthrough(blockOp(inst.c), next);
        break;

    case IrCmd::BITAND_UINT:
        inst.regX64 = regs.allocRegOrReuse(SizeX64::dword, index, {inst.a});

        if (inst.a.kind != IrOpKind::Inst || inst.regX64 != regOp(inst.a))
            build.mov(inst.regX64, memRegUintOp(inst.a));

        build.and_(inst.regX64, memRegUintOp(inst.b));
        break;

    case IrCmd::BITXOR_UINT:
        inst.regX64 = regs.allocRegOrReuse(SizeX64::dword, index, {inst.a});

        if (inst.a.kind != IrOpKind::Inst || inst.regX64 != regOp(inst.a))
            build.mov(inst.regX64, memRegUintOp(inst.a));

        build.xor_(inst.regX64, memRegUintOp(inst.b));
        break;

    case IrCmd::BITOR_UINT:
        inst.regX64 = regs.allocRegOrReuse(SizeX64::dword, index, {inst.a});

        if (inst.a.kind != IrOpKind::Inst || inst.regX64 != regOp(inst.a))
            build.mov(inst.regX64, memRegUintOp(inst.a));

        build.or_(inst.regX64, memRegUintOp(inst.b));
        break;

    case IrCmd::BITNOT_UINT:
        inst.regX64 = regs.allocRegOrReuse(SizeX64::dword, index, {inst.a});

        if (inst.a.kind != IrOpKind::Inst || inst.regX64 != regOp(inst.a))
            build.mov(inst.regX64, memRegUintOp(inst.a));

        build.not_(inst.regX64);
        break;

    case IrCmd::BITLSHIFT_UINT:
    {
        ScopedRegX64 shiftTmp{regs};

        // A variable shift amount can only be provided in cl,
        // so we reserve it unless the shift amount is a constant stored in b
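        // Reserving ecx up front also keeps the register allocator from handing cl
        // out while the shifted value is being set up below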
        if (inst.b.kind != IrOpKind::Constant)
            shiftTmp.take(ecx);

        inst.regX64 = regs.allocRegOrReuse(SizeX64::dword, index, {inst.a});

        if (inst.a.kind != IrOpKind::Inst || inst.regX64 != regOp(inst.a))
            build.mov(inst.regX64, memRegUintOp(inst.a));

        if (inst.b.kind == IrOpKind::Constant)
        {
            // If the shift amount is a constant, we extract the byte-sized value
            int8_t shift = int8_t(unsigned(intOp(inst.b)));
            build.shl(inst.regX64, shift);
        }
        else
        {
            build.mov(shiftTmp.reg, memRegUintOp(inst.b));
            build.shl(inst.regX64, byteReg(shiftTmp.reg));
        }

        break;
    }

    case IrCmd::BITRSHIFT_UINT:
    {
        ScopedRegX64 shiftTmp{regs};

        // A variable shift amount can only be provided in cl,
        // so we reserve it unless the shift amount is a constant stored in b
        if (inst.b.kind != IrOpKind::Constant)
            shiftTmp.take(ecx);

        inst.regX64 = regs.allocRegOrReuse(SizeX64::dword, index, {inst.a});

        if (inst.a.kind != IrOpKind::Inst || inst.regX64 != regOp(inst.a))
            build.mov(inst.regX64, memRegUintOp(inst.a));

        if (inst.b.kind == IrOpKind::Constant)
        {
            // If the shift amount is a constant, we extract the byte-sized value
            int8_t shift = int8_t(unsigned(intOp(inst.b)));
            build.shr(inst.regX64, shift);
        }
        else
        {
            build.mov(shiftTmp.reg, memRegUintOp(inst.b));
            build.shr(inst.regX64, byteReg(shiftTmp.reg));
        }

        break;
    }

    case IrCmd::BITARSHIFT_UINT:
    {
        ScopedRegX64 shiftTmp{regs};

        // A variable shift amount can only be provided in cl,
        // so we reserve it unless the shift amount is a constant stored in b
        if (inst.b.kind != IrOpKind::Constant)
            shiftTmp.take(ecx);

        inst.regX64 = regs.allocRegOrReuse(SizeX64::dword, index, {inst.a});

        if (inst.a.kind != IrOpKind::Inst || inst.regX64 != regOp(inst.a))
            build.mov(inst.regX64, memRegUintOp(inst.a));

        if (inst.b.kind == IrOpKind::Constant)
        {
            // If the shift amount is a constant, we extract the byte-sized value
            int8_t shift = int8_t(unsigned(intOp(inst.b)));
            build.sar(inst.regX64, shift);
        }
        else
        {
            build.mov(shiftTmp.reg, memRegUintOp(inst.b));
            build.sar(inst.regX64, byteReg(shiftTmp.reg));
        }

        break;
    }

    case IrCmd::BITLROTATE_UINT:
    {
        ScopedRegX64 shiftTmp{regs};

        // A variable shift amount can only be provided in cl,
        // so we reserve it unless the shift amount is a constant stored in b
        if (inst.b.kind != IrOpKind::Constant)
            shiftTmp.take(ecx);

        inst.regX64 = regs.allocRegOrReuse(SizeX64::dword, index, {inst.a});

        if (inst.a.kind != IrOpKind::Inst || inst.regX64 != regOp(inst.a))
            build.mov(inst.regX64, memRegUintOp(inst.a));

        if (inst.b.kind == IrOpKind::Constant)
        {
            // If the shift amount is a constant, we extract the byte-sized value
            int8_t shift = int8_t(unsigned(intOp(inst.b)));
            build.rol(inst.regX64, shift);
        }
        else
        {
            build.mov(shiftTmp.reg, memRegUintOp(inst.b));
            build.rol(inst.regX64, byteReg(shiftTmp.reg));
        }

        break;
    }

    case IrCmd::BITRROTATE_UINT:
    {
        ScopedRegX64 shiftTmp{regs};

        // A variable shift amount can only be provided in cl,
        // so we reserve it unless the shift amount is a constant stored in b
        if (inst.b.kind != IrOpKind::Constant)
            shiftTmp.take(ecx);

        inst.regX64 = regs.allocRegOrReuse(SizeX64::dword, index, {inst.a});

        if (inst.a.kind != IrOpKind::Inst || inst.regX64 != regOp(inst.a))
            build.mov(inst.regX64, memRegUintOp(inst.a));

        if (inst.b.kind == IrOpKind::Constant)
        {
            // If the shift amount is a constant, we extract the byte-sized value
            int8_t shift = int8_t(unsigned(intOp(inst.b)));
            build.ror(inst.regX64, shift);
        }
        else
        {
            build.mov(shiftTmp.reg, memRegUintOp(inst.b));
            build.ror(inst.regX64, byteReg(shiftTmp.reg));
        }

        break;
    }

    case IrCmd::BITCOUNTLZ_UINT:
    {
        inst.regX64 = regs.allocRegOrReuse(SizeX64::dword, index, {inst.a});

        Label zero, exit;

        build.test(regOp(inst.a), regOp(inst.a));
        build.jcc(ConditionX64::Equal, zero);

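        // bsr finds the index of the highest set bit; for a non-zero 32-bit value,
        // XOR with 31 converts that index into the leading-zero count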
        build.bsr(inst.regX64, regOp(inst.a));
        build.xor_(inst.regX64, 0x1f);
        build.jmp(exit);

        build.setLabel(zero);
        build.mov(inst.regX64, 32);

        build.setLabel(exit);
        break;
    }

    case IrCmd::BITCOUNTRZ_UINT:
    {
        inst.regX64 = regs.allocRegOrReuse(SizeX64::dword, index, {inst.a});

        Label zero, exit;

        build.test(regOp(inst.a), regOp(inst.a));
        build.jcc(ConditionX64::Equal, zero);

        build.bsf(inst.regX64, regOp(inst.a));
        build.jmp(exit);

        build.setLabel(zero);
        build.mov(inst.regX64, 32);

        build.setLabel(exit);
        break;
    }

    case IrCmd::BYTESWAP_UINT:
    {
        inst.regX64 = regs.allocRegOrReuse(SizeX64::dword, index, {inst.a});

        if (inst.a.kind != IrOpKind::Inst || inst.regX64 != regOp(inst.a))
            build.mov(inst.regX64, memRegUintOp(inst.a));

        build.bswap(inst.regX64);
        break;
    }

    case IrCmd::INVOKE_LIBM:
    {
        IrCallWrapperX64 callWrap(regs, build, index);
        callWrap.addArgument(SizeX64::xmmword, memRegDoubleOp(inst.b), inst.b);

        if (inst.c.kind != IrOpKind::None)
        {
            bool isInt = (inst.c.kind == IrOpKind::Constant) ? constOp(inst.c).kind == IrConstKind::Int
                                                             : getCmdValueKind(function.instOp(inst.c).cmd) == IrValueKind::Int;

            if (isInt)
                callWrap.addArgument(SizeX64::dword, memRegUintOp(inst.c), inst.c);
            else
                callWrap.addArgument(SizeX64::xmmword, memRegDoubleOp(inst.c), inst.c);
        }

        callWrap.call(qword[rNativeContext + getNativeContextOffset(uintOp(inst.a))]);
        inst.regX64 = regs.takeReg(xmm0, index);
        break;
    }

    case IrCmd::GET_TYPE:
    {
        inst.regX64 = regs.allocReg(SizeX64::qword, index);

        build.mov(inst.regX64, qword[rState + offsetof(lua_State, global)]);

        if (inst.a.kind == IrOpKind::Inst)
            build.mov(inst.regX64, qword[inst.regX64 + qwordReg(regOp(inst.a)) * sizeof(TString*) + offsetof(global_State, ttname)]);
        else if (inst.a.kind == IrOpKind::Constant)
            build.mov(inst.regX64, qword[inst.regX64 + tagOp(inst.a) * sizeof(TString*) + offsetof(global_State, ttname)]);
        else
            LUAU_ASSERT(!"Unsupported instruction form");
        break;
    }

    case IrCmd::GET_TYPEOF:
    {
        IrCallWrapperX64 callWrap(regs, build);
        callWrap.addArgument(SizeX64::qword, rState);
        callWrap.addArgument(SizeX64::qword, luauRegAddress(vmRegOp(inst.a)));
        callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaT_objtypenamestr)]);

        inst.regX64 = regs.takeReg(rax, index);
        break;
    }

    case IrCmd::FINDUPVAL:
    {
        IrCallWrapperX64 callWrap(regs, build);
        callWrap.addArgument(SizeX64::qword, rState);
        callWrap.addArgument(SizeX64::qword, luauRegAddress(vmRegOp(inst.a)));
        callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaF_findupval)]);

        inst.regX64 = regs.takeReg(rax, index);
        break;
    }

    case IrCmd::BUFFER_READI8:
        inst.regX64 = regs.allocRegOrReuse(SizeX64::dword, index, {inst.a, inst.b});

        build.movsx(inst.regX64, byte[bufferAddrOp(inst.a, inst.b)]);
        break;

    case IrCmd::BUFFER_READU8:
        inst.regX64 = regs.allocRegOrReuse(SizeX64::dword, index, {inst.a, inst.b});

        build.movzx(inst.regX64, byte[bufferAddrOp(inst.a, inst.b)]);
        break;

    case IrCmd::BUFFER_WRITEI8:
    {
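        // Under LuauCodeGenFixByteLower, a constant operand is explicitly truncated to
        // the store width (int8_t here, int16_t for 16-bit writes), presumably so that
        // out-of-range constants still encode as a valid byte/word immediate; the old
        // path passed the full int through unchanged.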
        if (FFlag::LuauCodeGenFixByteLower)
        {
            OperandX64 value = inst.c.kind == IrOpKind::Inst ? byteReg(regOp(inst.c)) : OperandX64(int8_t(intOp(inst.c)));

            build.mov(byte[bufferAddrOp(inst.a, inst.b)], value);
        }
        else
        {
            OperandX64 value = inst.c.kind == IrOpKind::Inst ? byteReg(regOp(inst.c)) : OperandX64(intOp(inst.c));

            build.mov(byte[bufferAddrOp(inst.a, inst.b)], value);
        }
        break;
    }

    case IrCmd::BUFFER_READI16:
        inst.regX64 = regs.allocRegOrReuse(SizeX64::dword, index, {inst.a, inst.b});

        build.movsx(inst.regX64, word[bufferAddrOp(inst.a, inst.b)]);
        break;

    case IrCmd::BUFFER_READU16:
        inst.regX64 = regs.allocRegOrReuse(SizeX64::dword, index, {inst.a, inst.b});

        build.movzx(inst.regX64, word[bufferAddrOp(inst.a, inst.b)]);
        break;

    case IrCmd::BUFFER_WRITEI16:
    {
        if (FFlag::LuauCodeGenFixByteLower)
        {
            OperandX64 value = inst.c.kind == IrOpKind::Inst ? wordReg(regOp(inst.c)) : OperandX64(int16_t(intOp(inst.c)));

            build.mov(word[bufferAddrOp(inst.a, inst.b)], value);
        }
        else
        {
            OperandX64 value = inst.c.kind == IrOpKind::Inst ? wordReg(regOp(inst.c)) : OperandX64(intOp(inst.c));

            build.mov(word[bufferAddrOp(inst.a, inst.b)], value);
        }
        break;
    }

    case IrCmd::BUFFER_READI32:
        inst.regX64 = regs.allocRegOrReuse(SizeX64::dword, index, {inst.a, inst.b});

        build.mov(inst.regX64, dword[bufferAddrOp(inst.a, inst.b)]);
        break;

    case IrCmd::BUFFER_WRITEI32:
    {
        OperandX64 value = inst.c.kind == IrOpKind::Inst ? regOp(inst.c) : OperandX64(intOp(inst.c));

        build.mov(dword[bufferAddrOp(inst.a, inst.b)], value);
        break;
    }

    case IrCmd::BUFFER_READF32:
        inst.regX64 = regs.allocReg(SizeX64::xmmword, index);

        build.vcvtss2sd(inst.regX64, inst.regX64, dword[bufferAddrOp(inst.a, inst.b)]);
        break;

    case IrCmd::BUFFER_WRITEF32:
        storeDoubleAsFloat(dword[bufferAddrOp(inst.a, inst.b)], inst.c);
        break;

    case IrCmd::BUFFER_READF64:
        inst.regX64 = regs.allocReg(SizeX64::xmmword, index);

        build.vmovsd(inst.regX64, qword[bufferAddrOp(inst.a, inst.b)]);
        break;

    case IrCmd::BUFFER_WRITEF64:
        if (inst.c.kind == IrOpKind::Constant)
        {
            ScopedRegX64 tmp{regs, SizeX64::xmmword};
            build.vmovsd(tmp.reg, build.f64(doubleOp(inst.c)));
            build.vmovsd(qword[bufferAddrOp(inst.a, inst.b)], tmp.reg);
        }
        else if (inst.c.kind == IrOpKind::Inst)
        {
            build.vmovsd(qword[bufferAddrOp(inst.a, inst.b)], regOp(inst.c));
        }
        else
        {
            LUAU_ASSERT(!"Unsupported instruction form");
        }
        break;

    // Pseudo instructions
    case IrCmd::NOP:
    case IrCmd::SUBSTITUTE:
        LUAU_ASSERT(!"Pseudo instructions should not be lowered");
        break;
    }

    valueTracker.afterInstLowering(inst, index);

    regs.freeLastUseRegs(inst, index);
}

void IrLoweringX64::finishBlock(const IrBlock& curr, const IrBlock& next)
{
    if (!regs.spills.empty())
    {
        // If we have spills remaining, we have to immediately lower the successor block
        for (uint32_t predIdx : predecessors(function.cfg, function.getBlockIndex(next)))
            LUAU_ASSERT(predIdx == function.getBlockIndex(curr) || function.blocks[predIdx].kind == IrBlockKind::Dead);

        // And the next block cannot be a join block in the CFG
        LUAU_ASSERT(next.useCount == 1);
    }
}

void IrLoweringX64::finishFunction()
{
    if (build.logText)
        build.logAppend("; interrupt handlers\n");

    for (InterruptHandler& handler : interruptHandlers)
    {
        build.setLabel(handler.self);
        build.mov(eax, handler.pcpos + 1);
        build.lea(rbx, handler.next);
        build.jmp(helpers.interrupt);
    }

    if (build.logText)
        build.logAppend("; exit handlers\n");

    for (ExitHandler& handler : exitHandlers)
    {
        LUAU_ASSERT(handler.pcpos != kVmExitEntryGuardPc);

        build.setLabel(handler.self);

        build.mov(edx, handler.pcpos * sizeof(Instruction));
        build.jmp(helpers.updatePcAndContinueInVm);
    }

    if (stats)
    {
        if (regs.maxUsedSlot > kSpillSlots)
            stats->regAllocErrors++;

        if (regs.maxUsedSlot > stats->maxSpillSlotsUsed)
            stats->maxSpillSlotsUsed = regs.maxUsedSlot;
    }
}

bool IrLoweringX64::hasError() const
{
    // If the register allocator had to use more stack slots than are available, this function can't run natively
    if (regs.maxUsedSlot > kSpillSlots)
        return true;

    return false;
}

bool IrLoweringX64::isFallthroughBlock(const IrBlock& target, const IrBlock& next)
{
    return target.start == next.start;
}

Label& IrLoweringX64::getTargetLabel(IrOp op, Label& fresh)
{
    if (op.kind == IrOpKind::Undef)
        return fresh;

    if (op.kind == IrOpKind::VmExit)
    {
        // Special exit case that doesn't have to update pcpos
        if (vmExitOp(op) == kVmExitEntryGuardPc)
            return helpers.exitContinueVmClearNativeFlag;

        if (uint32_t* index = exitHandlerMap.find(vmExitOp(op)))
            return exitHandlers[*index].self;

        return fresh;
    }

    return labelOp(op);
}
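
// Callers pass a stack-allocated 'fresh' label into getTargetLabel; if that label ends
// up being used (a VmExit target seen for the first time), finalizeTargetLabel registers
// it as a shared exit handler so later exits with the same pcpos reuse a single thunk.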
void IrLoweringX64::finalizeTargetLabel(IrOp op, Label& fresh)
{
    if (op.kind == IrOpKind::VmExit && fresh.id != 0 && fresh.id != helpers.exitContinueVmClearNativeFlag.id)
    {
        exitHandlerMap[vmExitOp(op)] = uint32_t(exitHandlers.size());
        exitHandlers.push_back({fresh, vmExitOp(op)});
    }
}

void IrLoweringX64::jumpOrFallthrough(IrBlock& target, const IrBlock& next)
{
    if (!isFallthroughBlock(target, next))
        build.jmp(target.label);
}

void IrLoweringX64::jumpOrAbortOnUndef(ConditionX64 cond, IrOp target, const IrBlock& next)
{
    Label fresh;
    Label& label = getTargetLabel(target, fresh);

    if (target.kind == IrOpKind::Undef)
    {
        if (cond == ConditionX64::Count)
        {
            build.ud2(); // Unconditional jump to abort is just an abort
        }
        else
        {
            build.jcc(getReverseCondition(cond), label);
            build.ud2();
            build.setLabel(label);
        }
    }
    else if (cond == ConditionX64::Count)
    {
        // Unconditional jump can be skipped if it's a fallthrough
        if (target.kind == IrOpKind::VmExit || !isFallthroughBlock(blockOp(target), next))
            build.jmp(label);
    }
    else
    {
        build.jcc(cond, label);
    }

    finalizeTargetLabel(target, fresh);
}

void IrLoweringX64::jumpOrAbortOnUndef(IrOp target, const IrBlock& next)
{
    jumpOrAbortOnUndef(ConditionX64::Count, target, next);
}

OperandX64 IrLoweringX64::memRegDoubleOp(IrOp op)
{
    switch (op.kind)
    {
    case IrOpKind::Inst:
        return regOp(op);
    case IrOpKind::Constant:
        return build.f64(doubleOp(op));
    case IrOpKind::VmReg:
        return luauRegValue(vmRegOp(op));
    case IrOpKind::VmConst:
        return luauConstantValue(vmConstOp(op));
    default:
        LUAU_ASSERT(!"Unsupported operand kind");
    }

    return noreg;
}

OperandX64 IrLoweringX64::memRegUintOp(IrOp op)
{
    switch (op.kind)
    {
    case IrOpKind::Inst:
        return regOp(op);
    case IrOpKind::Constant:
        return OperandX64(unsigned(intOp(op)));
    case IrOpKind::VmReg:
        return luauRegValueInt(vmRegOp(op));
    default:
        LUAU_ASSERT(!"Unsupported operand kind");
    }

    return noreg;
}

OperandX64 IrLoweringX64::memRegTagOp(IrOp op)
{
    switch (op.kind)
    {
    case IrOpKind::Inst:
        return regOp(op);
    case IrOpKind::VmReg:
        return luauRegTag(vmRegOp(op));
    case IrOpKind::VmConst:
        return luauConstantTag(vmConstOp(op));
    default:
        LUAU_ASSERT(!"Unsupported operand kind");
    }

    return noreg;
}

RegisterX64 IrLoweringX64::regOp(IrOp op)
{
    IrInst& inst = function.instOp(op);

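    // The value may have been spilled to a stack slot or may need a reload from VM
    // memory; restore it into a register before handing the register out.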
    if (inst.spilled || inst.needsReload)
        regs.restore(inst, false);

    LUAU_ASSERT(inst.regX64 != noreg);
    return inst.regX64;
}

OperandX64 IrLoweringX64::bufferAddrOp(IrOp bufferOp, IrOp indexOp)
{
    if (indexOp.kind == IrOpKind::Inst)
        return regOp(bufferOp) + qwordReg(regOp(indexOp)) + offsetof(Buffer, data);
    else if (indexOp.kind == IrOpKind::Constant)
        return regOp(bufferOp) + intOp(indexOp) + offsetof(Buffer, data);

    LUAU_ASSERT(!"Unsupported instruction form");
    return noreg;
}

IrConst IrLoweringX64::constOp(IrOp op) const
{
    return function.constOp(op);
}

uint8_t IrLoweringX64::tagOp(IrOp op) const
{
    return function.tagOp(op);
}

int IrLoweringX64::intOp(IrOp op) const
{
    return function.intOp(op);
}

unsigned IrLoweringX64::uintOp(IrOp op) const
{
    return function.uintOp(op);
}

double IrLoweringX64::doubleOp(IrOp op) const
{
    return function.doubleOp(op);
}

IrBlock& IrLoweringX64::blockOp(IrOp op) const
{
    return function.blockOp(op);
}

Label& IrLoweringX64::labelOp(IrOp op) const
{
    return blockOp(op).label;
}

} // namespace X64

} // namespace CodeGen

} // namespace Luau