// luau/CodeGen/src/IrLoweringX64.cpp

// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "IrLoweringX64.h"
#include "Luau/CodeGen.h"
#include "Luau/DenseHash.h"
#include "Luau/IrAnalysis.h"
#include "Luau/IrDump.h"
#include "Luau/IrUtils.h"
#include "EmitBuiltinsX64.h"
#include "EmitCommonX64.h"
#include "EmitInstructionX64.h"
#include "NativeState.h"
#include "lstate.h"
namespace Luau
{
namespace CodeGen
{
namespace X64
{
IrLoweringX64::IrLoweringX64(AssemblyBuilderX64& build, ModuleHelpers& helpers, NativeState& data, IrFunction& function)
: build(build)
, helpers(helpers)
, data(data)
, function(function)
, regs(function)
{
// In order to allocate registers during lowering, we need to know where instruction results are last used
updateLastUseLocations(function);
}
void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
{
switch (inst.cmd)
{
case IrCmd::LOAD_TAG:
inst.regX64 = regs.allocGprReg(SizeX64::dword);
if (inst.a.kind == IrOpKind::VmReg)
build.mov(inst.regX64, luauRegTag(vmRegOp(inst.a)));
else if (inst.a.kind == IrOpKind::VmConst)
build.mov(inst.regX64, luauConstantTag(vmConstOp(inst.a)));
// If we have a register, we assume it's a pointer to TValue
// We might introduce explicit operand types in the future to make this more robust
else if (inst.a.kind == IrOpKind::Inst)
build.mov(inst.regX64, dword[regOp(inst.a) + offsetof(TValue, tt)]);
else
LUAU_ASSERT(!"Unsupported instruction form");
break;
case IrCmd::LOAD_POINTER:
inst.regX64 = regs.allocGprReg(SizeX64::qword);
if (inst.a.kind == IrOpKind::VmReg)
build.mov(inst.regX64, luauRegValue(vmRegOp(inst.a)));
else if (inst.a.kind == IrOpKind::VmConst)
build.mov(inst.regX64, luauConstantValue(vmConstOp(inst.a)));
// If we have a register, we assume it's a pointer to TValue
// We might introduce explicit operand types in the future to make this more robust
else if (inst.a.kind == IrOpKind::Inst)
build.mov(inst.regX64, qword[regOp(inst.a) + offsetof(TValue, value)]);
else
LUAU_ASSERT(!"Unsupported instruction form");
break;
case IrCmd::LOAD_DOUBLE:
inst.regX64 = regs.allocXmmReg();
if (inst.a.kind == IrOpKind::VmReg)
build.vmovsd(inst.regX64, luauRegValue(vmRegOp(inst.a)));
else if (inst.a.kind == IrOpKind::VmConst)
build.vmovsd(inst.regX64, luauConstantValue(vmConstOp(inst.a)));
else
LUAU_ASSERT(!"Unsupported instruction form");
break;
case IrCmd::LOAD_INT:
inst.regX64 = regs.allocGprReg(SizeX64::dword);
build.mov(inst.regX64, luauRegValueInt(vmRegOp(inst.a)));
break;
case IrCmd::LOAD_TVALUE:
inst.regX64 = regs.allocXmmReg();
if (inst.a.kind == IrOpKind::VmReg)
build.vmovups(inst.regX64, luauReg(vmRegOp(inst.a)));
else if (inst.a.kind == IrOpKind::VmConst)
build.vmovups(inst.regX64, luauConstant(vmConstOp(inst.a)));
else if (inst.a.kind == IrOpKind::Inst)
build.vmovups(inst.regX64, xmmword[regOp(inst.a)]);
else
LUAU_ASSERT(!"Unsupported instruction form");
break;
case IrCmd::LOAD_NODE_VALUE_TV:
inst.regX64 = regs.allocXmmReg();
build.vmovups(inst.regX64, luauNodeValue(regOp(inst.a)));
break;
case IrCmd::LOAD_ENV:
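// Loads cl->env: the current closure pointer is kept in the sClosure stack slot,
// and its env field holds the environment table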
inst.regX64 = regs.allocGprReg(SizeX64::qword);
build.mov(inst.regX64, sClosure);
build.mov(inst.regX64, qword[inst.regX64 + offsetof(Closure, env)]);
break;
case IrCmd::GET_ARR_ADDR:
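// Computes &t->array[index]: the index is scaled to bytes (shift by kTValueSizeLog2,
// i.e. multiplied by sizeof(TValue)) and added to the array base pointer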
if (inst.b.kind == IrOpKind::Inst)
{
inst.regX64 = regs.allocGprRegOrReuse(SizeX64::qword, index, {inst.b});
if (dwordReg(inst.regX64) != regOp(inst.b))
build.mov(dwordReg(inst.regX64), regOp(inst.b));
build.shl(dwordReg(inst.regX64), kTValueSizeLog2);
build.add(inst.regX64, qword[regOp(inst.a) + offsetof(Table, array)]);
}
else if (inst.b.kind == IrOpKind::Constant)
{
inst.regX64 = regs.allocGprRegOrReuse(SizeX64::qword, index, {inst.a});
build.mov(inst.regX64, qword[regOp(inst.a) + offsetof(Table, array)]);
if (intOp(inst.b) != 0)
build.lea(inst.regX64, addr[inst.regX64 + intOp(inst.b) * sizeof(TValue)]);
}
else
{
LUAU_ASSERT(!"Unsupported instruction form");
}
break;
case IrCmd::GET_SLOT_NODE_ADDR:
{
inst.regX64 = regs.allocGprReg(SizeX64::qword);
ScopedRegX64 tmp{regs, SizeX64::qword};
getTableNodeAtCachedSlot(build, tmp.reg, inst.regX64, regOp(inst.a), uintOp(inst.b));
break;
}
case IrCmd::GET_HASH_NODE_ADDR:
{
inst.regX64 = regs.allocGprReg(SizeX64::qword);
// A variable shift count has to be in cl, so rcx is reserved for it
ScopedRegX64 shiftTmp{regs, regs.takeGprReg(rcx)};
ScopedRegX64 tmp{regs, SizeX64::qword};
build.mov(inst.regX64, qword[regOp(inst.a) + offsetof(Table, node)]);
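// Node index is hash & ((1 << lsizenode) - 1); the masked index is then scaled by
// sizeof(LuaNode) (shift by kLuaNodeSizeLog2) and added to the node array base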
build.mov(dwordReg(tmp.reg), 1);
build.mov(byteReg(shiftTmp.reg), byte[regOp(inst.a) + offsetof(Table, lsizenode)]);
build.shl(dwordReg(tmp.reg), byteReg(shiftTmp.reg));
build.dec(dwordReg(tmp.reg));
build.and_(dwordReg(tmp.reg), uintOp(inst.b));
build.shl(tmp.reg, kLuaNodeSizeLog2);
build.add(inst.regX64, tmp.reg);
break;
}
case IrCmd::STORE_TAG:
if (inst.b.kind == IrOpKind::Constant)
build.mov(luauRegTag(vmRegOp(inst.a)), tagOp(inst.b));
else
LUAU_ASSERT(!"Unsupported instruction form");
break;
case IrCmd::STORE_POINTER:
build.mov(luauRegValue(vmRegOp(inst.a)), regOp(inst.b));
break;
case IrCmd::STORE_DOUBLE:
if (inst.b.kind == IrOpKind::Constant)
{
ScopedRegX64 tmp{regs, SizeX64::xmmword};
build.vmovsd(tmp.reg, build.f64(doubleOp(inst.b)));
build.vmovsd(luauRegValue(vmRegOp(inst.a)), tmp.reg);
}
else if (inst.b.kind == IrOpKind::Inst)
{
build.vmovsd(luauRegValue(vmRegOp(inst.a)), regOp(inst.b));
}
else
{
LUAU_ASSERT(!"Unsupported instruction form");
}
break;
case IrCmd::STORE_INT:
{
if (inst.b.kind == IrOpKind::Constant)
build.mov(luauRegValueInt(vmRegOp(inst.a)), intOp(inst.b));
else if (inst.b.kind == IrOpKind::Inst)
build.mov(luauRegValueInt(vmRegOp(inst.a)), regOp(inst.b));
else
LUAU_ASSERT(!"Unsupported instruction form");
break;
}
case IrCmd::STORE_TVALUE:
if (inst.a.kind == IrOpKind::VmReg)
build.vmovups(luauReg(vmRegOp(inst.a)), regOp(inst.b));
else if (inst.a.kind == IrOpKind::Inst)
build.vmovups(xmmword[regOp(inst.a)], regOp(inst.b));
else
LUAU_ASSERT(!"Unsupported instruction form");
break;
case IrCmd::STORE_NODE_VALUE_TV:
build.vmovups(luauNodeValue(regOp(inst.a)), regOp(inst.b));
break;
case IrCmd::ADD_INT:
inst.regX64 = regs.allocGprRegOrReuse(SizeX64::dword, index, {inst.a});
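// When the result reuses the source register, inc/add update it in place; otherwise
// lea computes source + constant into the result without clobbering the source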
if (inst.regX64 == regOp(inst.a) && intOp(inst.b) == 1)
build.inc(inst.regX64);
else if (inst.regX64 == regOp(inst.a))
build.add(inst.regX64, intOp(inst.b));
else
build.lea(inst.regX64, addr[regOp(inst.a) + intOp(inst.b)]);
break;
case IrCmd::SUB_INT:
inst.regX64 = regs.allocGprRegOrReuse(SizeX64::dword, index, {inst.a});
if (inst.regX64 == regOp(inst.a) && intOp(inst.b) == 1)
build.dec(inst.regX64);
else if (inst.regX64 == regOp(inst.a))
build.sub(inst.regX64, intOp(inst.b));
else
build.lea(inst.regX64, addr[regOp(inst.a) - intOp(inst.b)]);
break;
case IrCmd::ADD_NUM:
inst.regX64 = regs.allocXmmRegOrReuse(index, {inst.a, inst.b});
if (inst.a.kind == IrOpKind::Constant)
{
ScopedRegX64 tmp{regs, SizeX64::xmmword};
build.vmovsd(tmp.reg, memRegDoubleOp(inst.a));
build.vaddsd(inst.regX64, tmp.reg, memRegDoubleOp(inst.b));
}
else
{
build.vaddsd(inst.regX64, regOp(inst.a), memRegDoubleOp(inst.b));
}
break;
case IrCmd::SUB_NUM:
inst.regX64 = regs.allocXmmRegOrReuse(index, {inst.a, inst.b});
if (inst.a.kind == IrOpKind::Constant)
{
ScopedRegX64 tmp{regs, SizeX64::xmmword};
build.vmovsd(tmp.reg, memRegDoubleOp(inst.a));
build.vsubsd(inst.regX64, tmp.reg, memRegDoubleOp(inst.b));
}
else
{
build.vsubsd(inst.regX64, regOp(inst.a), memRegDoubleOp(inst.b));
}
break;
case IrCmd::MUL_NUM:
inst.regX64 = regs.allocXmmRegOrReuse(index, {inst.a, inst.b});
if (inst.a.kind == IrOpKind::Constant)
{
ScopedRegX64 tmp{regs, SizeX64::xmmword};
build.vmovsd(tmp.reg, memRegDoubleOp(inst.a));
build.vmulsd(inst.regX64, tmp.reg, memRegDoubleOp(inst.b));
}
else
{
build.vmulsd(inst.regX64, regOp(inst.a), memRegDoubleOp(inst.b));
}
break;
case IrCmd::DIV_NUM:
inst.regX64 = regs.allocXmmRegOrReuse(index, {inst.a, inst.b});
if (inst.a.kind == IrOpKind::Constant)
{
ScopedRegX64 tmp{regs, SizeX64::xmmword};
build.vmovsd(tmp.reg, memRegDoubleOp(inst.a));
build.vdivsd(inst.regX64, tmp.reg, memRegDoubleOp(inst.b));
}
else
{
build.vdivsd(inst.regX64, regOp(inst.a), memRegDoubleOp(inst.b));
}
break;
case IrCmd::MOD_NUM:
{
inst.regX64 = regs.allocXmmRegOrReuse(index, {inst.a, inst.b});
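// Lua's % is floored modulo: a - floor(a / b) * b, lowered as divide,
// round toward negative infinity, multiply and subtract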
ScopedRegX64 optLhsTmp{regs};
RegisterX64 lhs;
if (inst.a.kind == IrOpKind::Constant)
{
optLhsTmp.alloc(SizeX64::xmmword);
build.vmovsd(optLhsTmp.reg, memRegDoubleOp(inst.a));
lhs = optLhsTmp.reg;
}
else
{
lhs = regOp(inst.a);
}
if (inst.b.kind == IrOpKind::Inst)
{
ScopedRegX64 tmp{regs, SizeX64::xmmword};
2023-01-27 13:28:45 -08:00
build.vdivsd(tmp.reg, lhs, memRegDoubleOp(inst.b));
build.vroundsd(tmp.reg, tmp.reg, tmp.reg, RoundingModeX64::RoundToNegativeInfinity);
build.vmulsd(tmp.reg, tmp.reg, memRegDoubleOp(inst.b));
build.vsubsd(inst.regX64, lhs, tmp.reg);
}
else
{
ScopedRegX64 tmp1{regs, SizeX64::xmmword};
ScopedRegX64 tmp2{regs, SizeX64::xmmword};
2023-01-27 13:28:45 -08:00
build.vmovsd(tmp1.reg, memRegDoubleOp(inst.b));
build.vdivsd(tmp2.reg, lhs, tmp1.reg);
build.vroundsd(tmp2.reg, tmp2.reg, tmp2.reg, RoundingModeX64::RoundToNegativeInfinity);
build.vmulsd(tmp1.reg, tmp2.reg, tmp1.reg);
build.vsubsd(inst.regX64, lhs, tmp1.reg);
}
break;
}
case IrCmd::POW_NUM:
{
inst.regX64 = regs.allocXmmRegOrReuse(index, {inst.a, inst.b});
ScopedRegX64 optLhsTmp{regs};
RegisterX64 lhs;
if (inst.a.kind == IrOpKind::Constant)
{
optLhsTmp.alloc(SizeX64::xmmword);
build.vmovsd(optLhsTmp.reg, memRegDoubleOp(inst.a));
lhs = optLhsTmp.reg;
}
else
{
lhs = regOp(inst.a);
}
if (inst.b.kind == IrOpKind::Inst)
{
// TODO: this doesn't happen with current local-only register allocation, but has to be handled in the future
LUAU_ASSERT(regOp(inst.b) != xmm0);
if (lhs != xmm0)
build.vmovsd(xmm0, lhs, lhs);
if (regOp(inst.b) != xmm1)
build.vmovsd(xmm1, regOp(inst.b), regOp(inst.b));
build.call(qword[rNativeContext + offsetof(NativeContext, libm_pow)]);
if (inst.regX64 != xmm0)
build.vmovsd(inst.regX64, xmm0, xmm0);
}
else if (inst.b.kind == IrOpKind::Constant)
{
double rhs = doubleOp(inst.b);
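// Specialize common constant exponents: x^2 as x*x, x^0.5 as sqrt(x) and x^3 as
// x*x*x, avoiding the libm_pow call entirely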
if (rhs == 2.0)
{
build.vmulsd(inst.regX64, lhs, lhs);
}
else if (rhs == 0.5)
{
build.vsqrtsd(inst.regX64, lhs, lhs);
}
else if (rhs == 3.0)
{
ScopedRegX64 tmp{regs, SizeX64::xmmword};
build.vmulsd(tmp.reg, lhs, lhs);
build.vmulsd(inst.regX64, lhs, tmp.reg);
}
else
{
if (lhs != xmm0)
build.vmovsd(xmm0, xmm0, lhs);
build.vmovsd(xmm1, build.f64(rhs));
build.call(qword[rNativeContext + offsetof(NativeContext, libm_pow)]);
if (inst.regX64 != xmm0)
build.vmovsd(inst.regX64, xmm0, xmm0);
}
}
else
{
if (lhs != xmm0)
build.vmovsd(xmm0, lhs, lhs);
build.vmovsd(xmm1, memRegDoubleOp(inst.b));
build.call(qword[rNativeContext + offsetof(NativeContext, libm_pow)]);
if (inst.regX64 != xmm0)
build.vmovsd(inst.regX64, xmm0, xmm0);
}
break;
}
case IrCmd::MIN_NUM:
inst.regX64 = regs.allocXmmRegOrReuse(index, {inst.a, inst.b});
if (inst.a.kind == IrOpKind::Constant)
{
ScopedRegX64 tmp{regs, SizeX64::xmmword};
build.vmovsd(tmp.reg, memRegDoubleOp(inst.a));
build.vminsd(inst.regX64, tmp.reg, memRegDoubleOp(inst.b));
}
else
{
build.vminsd(inst.regX64, regOp(inst.a), memRegDoubleOp(inst.b));
}
break;
case IrCmd::MAX_NUM:
inst.regX64 = regs.allocXmmRegOrReuse(index, {inst.a, inst.b});
if (inst.a.kind == IrOpKind::Constant)
{
ScopedRegX64 tmp{regs, SizeX64::xmmword};
build.vmovsd(tmp.reg, memRegDoubleOp(inst.a));
build.vmaxsd(inst.regX64, tmp.reg, memRegDoubleOp(inst.b));
}
else
{
build.vmaxsd(inst.regX64, regOp(inst.a), memRegDoubleOp(inst.b));
}
break;
case IrCmd::UNM_NUM:
{
inst.regX64 = regs.allocXmmRegOrReuse(index, {inst.a});
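// Negating a double only flips the sign bit, so it's a single xor with -0.0 (sign bit set)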
RegisterX64 src = regOp(inst.a);
if (inst.regX64 == src)
{
build.vxorpd(inst.regX64, inst.regX64, build.f64(-0.0));
}
else
{
build.vmovsd(inst.regX64, src, src);
build.vxorpd(inst.regX64, inst.regX64, build.f64(-0.0));
}
break;
}
case IrCmd::NOT_ANY:
{
// TODO: if we have a single user which is a STORE_INT, we are missing the opportunity to write directly to target
inst.regX64 = regs.allocGprRegOrReuse(SizeX64::dword, index, {inst.a, inst.b});
Label saveone, savezero, exit;
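// 'not x' is true only for falsy values: nil, or a boolean whose payload (inst.b)
// is false; every other tag is truthy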
if (inst.a.kind == IrOpKind::Constant)
{
// Other cases should've been constant folded
LUAU_ASSERT(tagOp(inst.a) == LUA_TBOOLEAN);
}
else
{
build.cmp(regOp(inst.a), LUA_TNIL);
build.jcc(ConditionX64::Equal, saveone);
build.cmp(regOp(inst.a), LUA_TBOOLEAN);
build.jcc(ConditionX64::NotEqual, savezero);
}
build.cmp(regOp(inst.b), 0);
build.jcc(ConditionX64::Equal, saveone);
build.setLabel(savezero);
build.mov(inst.regX64, 0);
build.jmp(exit);
build.setLabel(saveone);
build.mov(inst.regX64, 1);
build.setLabel(exit);
break;
}
case IrCmd::JUMP:
jumpOrFallthrough(blockOp(inst.a), next);
break;
case IrCmd::JUMP_IF_TRUTHY:
jumpIfTruthy(build, vmRegOp(inst.a), labelOp(inst.b), labelOp(inst.c));
jumpOrFallthrough(blockOp(inst.c), next);
break;
case IrCmd::JUMP_IF_FALSY:
jumpIfFalsy(build, vmRegOp(inst.a), labelOp(inst.b), labelOp(inst.c));
jumpOrFallthrough(blockOp(inst.c), next);
break;
case IrCmd::JUMP_EQ_TAG:
{
LUAU_ASSERT(inst.b.kind == IrOpKind::Inst || inst.b.kind == IrOpKind::Constant);
OperandX64 opb = inst.b.kind == IrOpKind::Inst ? regOp(inst.b) : OperandX64(tagOp(inst.b));
build.cmp(memRegTagOp(inst.a), opb);
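// Pick the branch polarity so that whichever successor is the next block can
// fall through without an extra jmp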
if (isFallthroughBlock(blockOp(inst.d), next))
{
build.jcc(ConditionX64::Equal, labelOp(inst.c));
jumpOrFallthrough(blockOp(inst.d), next);
}
else
{
build.jcc(ConditionX64::NotEqual, labelOp(inst.d));
jumpOrFallthrough(blockOp(inst.c), next);
}
break;
}
case IrCmd::JUMP_EQ_INT:
build.cmp(regOp(inst.a), intOp(inst.b));
build.jcc(ConditionX64::Equal, labelOp(inst.c));
jumpOrFallthrough(blockOp(inst.d), next);
break;
case IrCmd::JUMP_EQ_POINTER:
build.cmp(regOp(inst.a), regOp(inst.b));
build.jcc(ConditionX64::Equal, labelOp(inst.c));
jumpOrFallthrough(blockOp(inst.d), next);
break;
case IrCmd::JUMP_CMP_NUM:
{
IrCondition cond = conditionOp(inst.c);
ScopedRegX64 tmp{regs, SizeX64::xmmword};
// TODO: jumpOnNumberCmp should work on IrCondition directly
jumpOnNumberCmp(build, tmp.reg, memRegDoubleOp(inst.a), memRegDoubleOp(inst.b), cond, labelOp(inst.d));
jumpOrFallthrough(blockOp(inst.e), next);
break;
}
case IrCmd::JUMP_CMP_ANY:
jumpOnAnyCmpFallback(build, vmRegOp(inst.a), vmRegOp(inst.b), conditionOp(inst.c), labelOp(inst.d));
jumpOrFallthrough(blockOp(inst.e), next);
break;
case IrCmd::JUMP_SLOT_MATCH:
{
ScopedRegX64 tmp{regs, SizeX64::qword};
jumpIfNodeKeyNotInExpectedSlot(build, tmp.reg, regOp(inst.a), luauConstantValue(vmConstOp(inst.b)), labelOp(inst.d));
jumpOrFallthrough(blockOp(inst.c), next);
break;
}
case IrCmd::TABLE_LEN:
inst.regX64 = regs.allocXmmReg();
build.mov(rArg1, regOp(inst.a));
build.call(qword[rNativeContext + offsetof(NativeContext, luaH_getn)]);
build.vcvtsi2sd(inst.regX64, inst.regX64, eax);
break;
case IrCmd::NEW_TABLE:
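// luaH_new(L, narray, nhash); as with the other C calls below, the result arrives in rax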
inst.regX64 = regs.allocGprReg(SizeX64::qword);
build.mov(rArg1, rState);
build.mov(dwordReg(rArg2), uintOp(inst.a));
build.mov(dwordReg(rArg3), uintOp(inst.b));
build.call(qword[rNativeContext + offsetof(NativeContext, luaH_new)]);
if (inst.regX64 != rax)
build.mov(inst.regX64, rax);
break;
case IrCmd::DUP_TABLE:
inst.regX64 = regs.allocGprReg(SizeX64::qword);
// Re-ordered to avoid a register conflict: regOp(inst.a) may alias rArg1, so it is
// read into rArg2 before rArg1 is set to rState
build.mov(rArg2, regOp(inst.a));
build.mov(rArg1, rState);
build.call(qword[rNativeContext + offsetof(NativeContext, luaH_clone)]);
if (inst.regX64 != rax)
build.mov(inst.regX64, rax);
break;
case IrCmd::TRY_NUM_TO_INDEX:
{
inst.regX64 = regs.allocGprReg(SizeX64::dword);
ScopedRegX64 tmp{regs, SizeX64::xmmword};
convertNumberToIndexOrJump(build, tmp.reg, regOp(inst.a), inst.regX64, labelOp(inst.b));
break;
}
case IrCmd::TRY_CALL_FASTGETTM:
{
inst.regX64 = regs.allocGprReg(SizeX64::qword);
callGetFastTmOrFallback(build, regOp(inst.a), TMS(intOp(inst.b)), labelOp(inst.c));
if (inst.regX64 != rax)
build.mov(inst.regX64, rax);
break;
}
case IrCmd::INT_TO_NUM:
inst.regX64 = regs.allocXmmReg();
build.vcvtsi2sd(inst.regX64, inst.regX64, regOp(inst.a));
break;
case IrCmd::ADJUST_STACK_TO_REG:
{
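// Sets L->top = base + ra + count, where the count is either a constant or the
// result of a previous instruction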
if (inst.b.kind == IrOpKind::Constant)
{
ScopedRegX64 tmp{regs, SizeX64::qword};
build.lea(tmp.reg, addr[rBase + (vmRegOp(inst.a) + intOp(inst.b)) * sizeof(TValue)]);
build.mov(qword[rState + offsetof(lua_State, top)], tmp.reg);
}
else if (inst.b.kind == IrOpKind::Inst)
{
ScopedRegX64 tmp(regs, regs.allocGprRegOrReuse(SizeX64::dword, index, {inst.b}));
build.shl(qwordReg(tmp.reg), kTValueSizeLog2);
build.lea(qwordReg(tmp.reg), addr[rBase + qwordReg(tmp.reg) + vmRegOp(inst.a) * sizeof(TValue)]);
build.mov(qword[rState + offsetof(lua_State, top)], qwordReg(tmp.reg));
}
else
{
LUAU_ASSERT(!"Unsupported instruction form");
}
break;
}
case IrCmd::ADJUST_STACK_TO_TOP:
{
ScopedRegX64 tmp{regs, SizeX64::qword};
build.mov(tmp.reg, qword[rState + offsetof(lua_State, ci)]);
build.mov(tmp.reg, qword[tmp.reg + offsetof(CallInfo, top)]);
build.mov(qword[rState + offsetof(lua_State, top)], tmp.reg);
break;
}
case IrCmd::FASTCALL:
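// Specialized lowering for builtins with a known fast path (see EmitBuiltinsX64)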
emitBuiltin(regs, build, uintOp(inst.a), vmRegOp(inst.b), vmRegOp(inst.c), inst.d, intOp(inst.e), intOp(inst.f));
break;
case IrCmd::INVOKE_FASTCALL:
{
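// Generic builtin invocation through the luauF_table entry:
// f(L, ra, arg, nresults, args, nparams); a negative result means the builtin
// bailed out, which is checked by CHECK_FASTCALL_RES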
unsigned bfid = uintOp(inst.a);
OperandX64 args = 0;
if (inst.d.kind == IrOpKind::VmReg)
args = luauRegAddress(vmRegOp(inst.d));
else if (inst.d.kind == IrOpKind::VmConst)
args = luauConstantAddress(vmConstOp(inst.d));
else
LUAU_ASSERT(boolOp(inst.d) == false);
int ra = vmRegOp(inst.b);
int arg = vmRegOp(inst.c);
int nparams = intOp(inst.e);
int nresults = intOp(inst.f);
regs.assertAllFree();
build.mov(rax, qword[rNativeContext + offsetof(NativeContext, luauF_table) + bfid * sizeof(luau_FastFunction)]);
// 5th parameter (args) is left unset for LOP_FASTCALL1
if (args.cat == CategoryX64::mem)
{
if (build.abi == ABIX64::Windows)
{
build.lea(rcx, args);
build.mov(sArg5, rcx);
}
else
{
build.lea(rArg5, args);
}
}
if (nparams == LUA_MULTRET)
{
// nparams = L->top - (ra + 1), computed as a byte difference and converted to a
// TValue count by the shift below
RegisterX64 reg = (build.abi == ABIX64::Windows) ? rcx : rArg6;
build.mov(reg, qword[rState + offsetof(lua_State, top)]);
build.lea(rdx, addr[rBase + (ra + 1) * sizeof(TValue)]);
build.sub(reg, rdx);
build.shr(reg, kTValueSizeLog2);
if (build.abi == ABIX64::Windows)
build.mov(sArg6, reg);
}
else
{
if (build.abi == ABIX64::Windows)
build.mov(sArg6, nparams);
else
build.mov(rArg6, nparams);
}
build.mov(rArg1, rState);
build.lea(rArg2, luauRegAddress(ra));
build.lea(rArg3, luauRegAddress(arg));
build.mov(dwordReg(rArg4), nresults);
build.call(rax);
inst.regX64 = regs.takeGprReg(eax); // Result of a builtin call is returned in eax
break;
}
case IrCmd::CHECK_FASTCALL_RES:
{
RegisterX64 res = regOp(inst.a);
build.test(res, res); // test here will set SF=1 for a negative number and it always sets OF to 0
build.jcc(ConditionX64::Less, labelOp(inst.b)); // jl jumps if SF != OF
break;
}
case IrCmd::DO_ARITH:
if (inst.c.kind == IrOpKind::VmReg)
callArithHelper(build, vmRegOp(inst.a), vmRegOp(inst.b), luauRegAddress(vmRegOp(inst.c)), TMS(intOp(inst.d)));
else
callArithHelper(build, vmRegOp(inst.a), vmRegOp(inst.b), luauConstantAddress(vmConstOp(inst.c)), TMS(intOp(inst.d)));
break;
case IrCmd::DO_LEN:
callLengthHelper(build, vmRegOp(inst.a), vmRegOp(inst.b));
break;
case IrCmd::GET_TABLE:
if (inst.c.kind == IrOpKind::VmReg)
{
callGetTable(build, vmRegOp(inst.b), luauRegAddress(vmRegOp(inst.c)), vmRegOp(inst.a));
}
else if (inst.c.kind == IrOpKind::Constant)
{
TValue n;
setnvalue(&n, uintOp(inst.c));
callGetTable(build, vmRegOp(inst.b), build.bytes(&n, sizeof(n)), vmRegOp(inst.a));
}
else
{
LUAU_ASSERT(!"Unsupported instruction form");
}
break;
case IrCmd::SET_TABLE:
if (inst.c.kind == IrOpKind::VmReg)
{
callSetTable(build, vmRegOp(inst.b), luauRegAddress(vmRegOp(inst.c)), vmRegOp(inst.a));
}
else if (inst.c.kind == IrOpKind::Constant)
{
TValue n;
setnvalue(&n, uintOp(inst.c));
callSetTable(build, vmRegOp(inst.b), build.bytes(&n, sizeof(n)), vmRegOp(inst.a));
}
else
{
LUAU_ASSERT(!"Unsupported instruction form");
}
break;
case IrCmd::GET_IMPORT:
emitInstGetImportFallback(build, vmRegOp(inst.a), uintOp(inst.b));
break;
case IrCmd::CONCAT:
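// luaV_concat(L, count, last), where 'last' is the register index of the final
// operand: ra + count - 1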
build.mov(rArg1, rState);
build.mov(dwordReg(rArg2), uintOp(inst.b));
build.mov(dwordReg(rArg3), vmRegOp(inst.a) + uintOp(inst.b) - 1);
build.call(qword[rNativeContext + offsetof(NativeContext, luaV_concat)]);
emitUpdateBase(build);
break;
case IrCmd::GET_UPVALUE:
{
ScopedRegX64 tmp1{regs, SizeX64::qword};
ScopedRegX64 tmp2{regs, SizeX64::xmmword};
build.mov(tmp1.reg, sClosure);
build.add(tmp1.reg, offsetof(Closure, l.uprefs) + sizeof(TValue) * vmUpvalueOp(inst.b));
// uprefs[] is either an actual value, or it points to UpVal object which has a pointer to value
Label skip;
// TODO: jumpIfTagIsNot can be generalized to take OperandX64 and then we can use it here; let's wait until we see this more though
build.cmp(dword[tmp1.reg + offsetof(TValue, tt)], LUA_TUPVAL);
build.jcc(ConditionX64::NotEqual, skip);
// UpVal.v points to the value (either on stack, or on heap inside each UpVal, but we can deref it unconditionally)
build.mov(tmp1.reg, qword[tmp1.reg + offsetof(TValue, value.gc)]);
build.mov(tmp1.reg, qword[tmp1.reg + offsetof(UpVal, v)]);
build.setLabel(skip);
build.vmovups(tmp2.reg, xmmword[tmp1.reg]);
build.vmovups(luauReg(vmRegOp(inst.a)), tmp2.reg);
break;
}
case IrCmd::SET_UPVALUE:
{
Label next;
ScopedRegX64 tmp1{regs, SizeX64::qword};
ScopedRegX64 tmp2{regs, SizeX64::qword};
ScopedRegX64 tmp3{regs, SizeX64::xmmword};
build.mov(tmp1.reg, sClosure);
build.mov(tmp2.reg, qword[tmp1.reg + offsetof(Closure, l.uprefs) + sizeof(TValue) * vmUpvalueOp(inst.a) + offsetof(TValue, value.gc)]);
build.mov(tmp1.reg, qword[tmp2.reg + offsetof(UpVal, v)]);
build.vmovups(tmp3.reg, luauReg(vmRegOp(inst.b)));
build.vmovups(xmmword[tmp1.reg], tmp3.reg);
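// Storing into a heap upvalue can break the GC invariant (a black object
// referencing a white value), hence the barrier check below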
callBarrierObject(build, tmp1.reg, tmp2.reg, vmRegOp(inst.b), next);
build.setLabel(next);
break;
}
case IrCmd::PREPARE_FORN:
callPrepareForN(build, vmRegOp(inst.a), vmRegOp(inst.b), vmRegOp(inst.c));
break;
case IrCmd::CHECK_TAG:
if (inst.a.kind == IrOpKind::Inst)
{
build.cmp(regOp(inst.a), tagOp(inst.b));
build.jcc(ConditionX64::NotEqual, labelOp(inst.c));
}
else if (inst.a.kind == IrOpKind::VmReg)
{
jumpIfTagIsNot(build, vmRegOp(inst.a), lua_Type(tagOp(inst.b)), labelOp(inst.c));
}
else if (inst.a.kind == IrOpKind::VmConst)
{
build.cmp(luauConstantTag(vmConstOp(inst.a)), tagOp(inst.b));
build.jcc(ConditionX64::NotEqual, labelOp(inst.c));
}
else
{
LUAU_ASSERT(!"Unsupported instruction form");
}
break;
case IrCmd::CHECK_READONLY:
jumpIfTableIsReadOnly(build, regOp(inst.a), labelOp(inst.b));
break;
case IrCmd::CHECK_NO_METATABLE:
jumpIfMetatablePresent(build, regOp(inst.a), labelOp(inst.b));
break;
case IrCmd::CHECK_SAFE_ENV:
{
ScopedRegX64 tmp{regs, SizeX64::qword};
jumpIfUnsafeEnv(build, tmp.reg, labelOp(inst.a));
break;
}
case IrCmd::CHECK_ARRAY_SIZE:
if (inst.b.kind == IrOpKind::Inst)
build.cmp(dword[regOp(inst.a) + offsetof(Table, sizearray)], regOp(inst.b));
else if (inst.b.kind == IrOpKind::Constant)
build.cmp(dword[regOp(inst.a) + offsetof(Table, sizearray)], intOp(inst.b));
else
LUAU_ASSERT(!"Unsupported instruction form");
build.jcc(ConditionX64::BelowEqual, labelOp(inst.c));
break;
case IrCmd::CHECK_SLOT_MATCH:
{
ScopedRegX64 tmp{regs, SizeX64::qword};
jumpIfNodeKeyNotInExpectedSlot(build, tmp.reg, regOp(inst.a), luauConstantValue(vmConstOp(inst.b)), labelOp(inst.c));
break;
}
case IrCmd::CHECK_NODE_NO_NEXT:
jumpIfNodeHasNext(build, regOp(inst.a), labelOp(inst.b));
break;
case IrCmd::INTERRUPT:
emitInterrupt(build, uintOp(inst.a));
break;
case IrCmd::CHECK_GC:
{
Label skip;
callCheckGc(build, -1, false, skip);
build.setLabel(skip);
break;
}
case IrCmd::BARRIER_OBJ:
{
Label skip;
ScopedRegX64 tmp{regs, SizeX64::qword};
callBarrierObject(build, tmp.reg, regOp(inst.a), vmRegOp(inst.b), skip);
build.setLabel(skip);
break;
}
case IrCmd::BARRIER_TABLE_BACK:
{
Label skip;
callBarrierTableFast(build, regOp(inst.a), skip);
build.setLabel(skip);
break;
}
case IrCmd::BARRIER_TABLE_FORWARD:
{
Label skip;
ScopedRegX64 tmp{regs, SizeX64::qword};
callBarrierTable(build, tmp.reg, regOp(inst.a), vmRegOp(inst.b), skip);
build.setLabel(skip);
break;
}
case IrCmd::SET_SAVEDPC:
{
// This is like emitSetSavedPc, but using register allocation instead of relying on rax/rdx
ScopedRegX64 tmp1{regs, SizeX64::qword};
ScopedRegX64 tmp2{regs, SizeX64::qword};
build.mov(tmp2.reg, sCode);
build.add(tmp2.reg, uintOp(inst.a) * sizeof(Instruction));
build.mov(tmp1.reg, qword[rState + offsetof(lua_State, ci)]);
build.mov(qword[tmp1.reg + offsetof(CallInfo, savedpc)], tmp2.reg);
break;
}
case IrCmd::CLOSE_UPVALS:
{
Label next;
ScopedRegX64 tmp1{regs, SizeX64::qword};
ScopedRegX64 tmp2{regs, SizeX64::qword};
// L->openupval != 0
build.mov(tmp1.reg, qword[rState + offsetof(lua_State, openupval)]);
build.test(tmp1.reg, tmp1.reg);
build.jcc(ConditionX64::Zero, next);
// ra <= L->openupval->v
build.lea(tmp2.reg, addr[rBase + vmRegOp(inst.a) * sizeof(TValue)]);
build.cmp(tmp2.reg, qword[tmp1.reg + offsetof(UpVal, v)]);
build.jcc(ConditionX64::Above, next);
if (rArg2 != tmp2.reg)
build.mov(rArg2, tmp2.reg);
build.mov(rArg1, rState);
build.call(qword[rNativeContext + offsetof(NativeContext, luaF_close)]);
build.setLabel(next);
break;
}
case IrCmd::CAPTURE:
// No-op right now
break;
// Fallbacks to non-IR instruction implementations
case IrCmd::LOP_SETLIST:
{
Label next;
emitInstSetList(build, next, vmRegOp(inst.b), vmRegOp(inst.c), intOp(inst.d), uintOp(inst.e));
build.setLabel(next);
break;
}
case IrCmd::LOP_CALL:
emitInstCall(build, helpers, vmRegOp(inst.a), intOp(inst.b), intOp(inst.c));
break;
case IrCmd::LOP_RETURN:
emitInstReturn(build, helpers, vmRegOp(inst.a), intOp(inst.b));
break;
case IrCmd::LOP_FORGLOOP:
emitinstForGLoop(build, vmRegOp(inst.a), intOp(inst.b), labelOp(inst.c), labelOp(inst.d));
break;
case IrCmd::LOP_FORGLOOP_FALLBACK:
emitinstForGLoopFallback(build, uintOp(inst.a), vmRegOp(inst.b), intOp(inst.c), labelOp(inst.d));
build.jmp(labelOp(inst.e));
break;
case IrCmd::LOP_FORGPREP_XNEXT_FALLBACK:
emitInstForGPrepXnextFallback(build, uintOp(inst.a), vmRegOp(inst.b), labelOp(inst.c));
break;
case IrCmd::LOP_AND:
emitInstAnd(build, vmRegOp(inst.a), vmRegOp(inst.b), vmRegOp(inst.c));
break;
case IrCmd::LOP_ANDK:
emitInstAndK(build, vmRegOp(inst.a), vmRegOp(inst.b), vmConstOp(inst.c));
break;
case IrCmd::LOP_OR:
emitInstOr(build, vmRegOp(inst.a), vmRegOp(inst.b), vmRegOp(inst.c));
break;
case IrCmd::LOP_ORK:
emitInstOrK(build, vmRegOp(inst.a), vmRegOp(inst.b), vmConstOp(inst.c));
break;
case IrCmd::LOP_COVERAGE:
emitInstCoverage(build, uintOp(inst.a));
break;
// Full instruction fallbacks
case IrCmd::FALLBACK_GETGLOBAL:
LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
LUAU_ASSERT(inst.c.kind == IrOpKind::VmConst);
emitFallback(build, data, LOP_GETGLOBAL, uintOp(inst.a));
break;
case IrCmd::FALLBACK_SETGLOBAL:
LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
LUAU_ASSERT(inst.c.kind == IrOpKind::VmConst);
emitFallback(build, data, LOP_SETGLOBAL, uintOp(inst.a));
break;
case IrCmd::FALLBACK_GETTABLEKS:
LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
LUAU_ASSERT(inst.c.kind == IrOpKind::VmReg);
LUAU_ASSERT(inst.d.kind == IrOpKind::VmConst);
emitFallback(build, data, LOP_GETTABLEKS, uintOp(inst.a));
break;
case IrCmd::FALLBACK_SETTABLEKS:
LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
LUAU_ASSERT(inst.c.kind == IrOpKind::VmReg);
LUAU_ASSERT(inst.d.kind == IrOpKind::VmConst);
emitFallback(build, data, LOP_SETTABLEKS, uintOp(inst.a));
break;
case IrCmd::FALLBACK_NAMECALL:
LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
LUAU_ASSERT(inst.c.kind == IrOpKind::VmReg);
LUAU_ASSERT(inst.d.kind == IrOpKind::VmConst);
emitFallback(build, data, LOP_NAMECALL, uintOp(inst.a));
break;
case IrCmd::FALLBACK_PREPVARARGS:
LUAU_ASSERT(inst.b.kind == IrOpKind::Constant);
emitFallback(build, data, LOP_PREPVARARGS, uintOp(inst.a));
break;
case IrCmd::FALLBACK_GETVARARGS:
LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
LUAU_ASSERT(inst.c.kind == IrOpKind::Constant);
emitFallback(build, data, LOP_GETVARARGS, uintOp(inst.a));
break;
case IrCmd::FALLBACK_NEWCLOSURE:
LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
LUAU_ASSERT(inst.c.kind == IrOpKind::Constant);
emitFallback(build, data, LOP_NEWCLOSURE, uintOp(inst.a));
break;
case IrCmd::FALLBACK_DUPCLOSURE:
LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
LUAU_ASSERT(inst.c.kind == IrOpKind::VmConst);
emitFallback(build, data, LOP_DUPCLOSURE, uintOp(inst.a));
break;
case IrCmd::FALLBACK_FORGPREP:
emitFallback(build, data, LOP_FORGPREP, uintOp(inst.a));
break;
default:
LUAU_ASSERT(!"Not supported yet");
break;
}
regs.freeLastUseRegs(inst, index);
}
bool IrLoweringX64::isFallthroughBlock(IrBlock target, IrBlock next)
{
return target.start == next.start;
}
void IrLoweringX64::jumpOrFallthrough(IrBlock& target, IrBlock& next)
{
if (!isFallthroughBlock(target, next))
build.jmp(target.label);
}
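// Returns an operand usable as a scalar double source: the xmm register holding an
// instruction result, an embedded f64 constant, or the address of a VM register/constant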
OperandX64 IrLoweringX64::memRegDoubleOp(IrOp op) const
{
switch (op.kind)
{
case IrOpKind::Inst:
return regOp(op);
case IrOpKind::Constant:
return build.f64(doubleOp(op));
case IrOpKind::VmReg:
return luauRegValue(vmRegOp(op));
case IrOpKind::VmConst:
return luauConstantValue(vmConstOp(op));
default:
LUAU_ASSERT(!"Unsupported operand kind");
}
return noreg;
}
OperandX64 IrLoweringX64::memRegTagOp(IrOp op) const
{
switch (op.kind)
{
case IrOpKind::Inst:
return regOp(op);
case IrOpKind::VmReg:
return luauRegTag(vmRegOp(op));
case IrOpKind::VmConst:
return luauConstantTag(vmConstOp(op));
default:
LUAU_ASSERT(!"Unsupported operand kind");
}
return noreg;
}
RegisterX64 IrLoweringX64::regOp(IrOp op) const
{
IrInst& inst = function.instOp(op);
LUAU_ASSERT(inst.regX64 != noreg);
return inst.regX64;
}
IrConst IrLoweringX64::constOp(IrOp op) const
{
return function.constOp(op);
}
uint8_t IrLoweringX64::tagOp(IrOp op) const
{
return function.tagOp(op);
}
bool IrLoweringX64::boolOp(IrOp op) const
{
return function.boolOp(op);
}
int IrLoweringX64::intOp(IrOp op) const
{
return function.intOp(op);
}
unsigned IrLoweringX64::uintOp(IrOp op) const
{
return function.uintOp(op);
}
double IrLoweringX64::doubleOp(IrOp op) const
{
return function.doubleOp(op);
}
IrBlock& IrLoweringX64::blockOp(IrOp op) const
{
return function.blockOp(op);
}
Label& IrLoweringX64::labelOp(IrOp op) const
{
return blockOp(op).label;
}
} // namespace X64
} // namespace CodeGen
} // namespace Luau