// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/IrBuilder.h"

#include "Luau/Bytecode.h"
#include "Luau/BytecodeAnalysis.h"
#include "Luau/BytecodeUtils.h"
#include "Luau/IrData.h"
#include "Luau/IrUtils.h"

#include "IrTranslation.h"

#include "lapi.h"

#include <string.h>

LUAU_FASTFLAG(LuauLoadUserdataInfo)
LUAU_FASTFLAG(LuauCodegenInstG)
LUAU_FASTFLAG(LuauCodegenFastcall3)

namespace Luau
{
namespace CodeGen
{

constexpr unsigned kNoAssociatedBlockIndex = ~0u;

IrBuilder::IrBuilder(const HostIrHooks& hostHooks)
    : hostHooks(hostHooks)
    , constantMap({IrConstKind::Tag, ~0ull})
{
}
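
// Note: 'constantMap' is a DenseHashMap, so the value passed to its constructor
// above is the empty-key sentinel; a (Tag, ~0ull) pair cannot collide with a
// real interned constant because tag values only occupy the low 8 bits.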

static bool hasTypedParameters(const BytecodeTypeInfo& typeInfo)
{
    for (auto el : typeInfo.argumentTypes)
    {
        if (el != LBC_TYPE_ANY)
            return true;
    }

    return false;
}
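
// Emits entry-block guards for typed function arguments: each argument's tag is
// loaded and checked against the annotated type, exiting to the VM at the entry
// guard pc on a mismatch. Optional types ('T?') first branch past the check
// when the argument is nil.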
static void buildArgumentTypeChecks(IrBuilder& build)
{
    const BytecodeTypeInfo& typeInfo = build.function.bcTypeInfo;
    CODEGEN_ASSERT(hasTypedParameters(typeInfo));

    for (size_t i = 0; i < typeInfo.argumentTypes.size(); i++)
    {
        uint8_t et = typeInfo.argumentTypes[i];

        uint8_t tag = et & ~LBC_TYPE_OPTIONAL_BIT;
        uint8_t optional = et & LBC_TYPE_OPTIONAL_BIT;

        if (tag == LBC_TYPE_ANY)
            continue;

        IrOp load = build.inst(IrCmd::LOAD_TAG, build.vmReg(uint8_t(i)));

        IrOp nextCheck;
        if (optional)
        {
            nextCheck = build.block(IrBlockKind::Internal);
            IrOp fallbackCheck = build.block(IrBlockKind::Internal);

            build.inst(IrCmd::JUMP_EQ_TAG, load, build.constTag(LUA_TNIL), nextCheck, fallbackCheck);

            build.beginBlock(fallbackCheck);
        }

        switch (tag)
        {
        case LBC_TYPE_NIL:
            build.inst(IrCmd::CHECK_TAG, load, build.constTag(LUA_TNIL), build.vmExit(kVmExitEntryGuardPc));
            break;
        case LBC_TYPE_BOOLEAN:
            build.inst(IrCmd::CHECK_TAG, load, build.constTag(LUA_TBOOLEAN), build.vmExit(kVmExitEntryGuardPc));
            break;
        case LBC_TYPE_NUMBER:
            build.inst(IrCmd::CHECK_TAG, load, build.constTag(LUA_TNUMBER), build.vmExit(kVmExitEntryGuardPc));
            break;
        case LBC_TYPE_STRING:
            build.inst(IrCmd::CHECK_TAG, load, build.constTag(LUA_TSTRING), build.vmExit(kVmExitEntryGuardPc));
            break;
        case LBC_TYPE_TABLE:
            build.inst(IrCmd::CHECK_TAG, load, build.constTag(LUA_TTABLE), build.vmExit(kVmExitEntryGuardPc));
            break;
        case LBC_TYPE_FUNCTION:
            build.inst(IrCmd::CHECK_TAG, load, build.constTag(LUA_TFUNCTION), build.vmExit(kVmExitEntryGuardPc));
            break;
        case LBC_TYPE_THREAD:
            build.inst(IrCmd::CHECK_TAG, load, build.constTag(LUA_TTHREAD), build.vmExit(kVmExitEntryGuardPc));
            break;
        case LBC_TYPE_USERDATA:
            build.inst(IrCmd::CHECK_TAG, load, build.constTag(LUA_TUSERDATA), build.vmExit(kVmExitEntryGuardPc));
            break;
        case LBC_TYPE_VECTOR:
            build.inst(IrCmd::CHECK_TAG, load, build.constTag(LUA_TVECTOR), build.vmExit(kVmExitEntryGuardPc));
            break;
        case LBC_TYPE_BUFFER:
            build.inst(IrCmd::CHECK_TAG, load, build.constTag(LUA_TBUFFER), build.vmExit(kVmExitEntryGuardPc));
            break;
        default:
            if (FFlag::LuauLoadUserdataInfo)
            {
                if (tag >= LBC_TYPE_TAGGED_USERDATA_BASE && tag < LBC_TYPE_TAGGED_USERDATA_END)
                {
                    build.inst(IrCmd::CHECK_TAG, load, build.constTag(LUA_TUSERDATA), build.vmExit(kVmExitEntryGuardPc));
                }
                else
                {
                    CODEGEN_ASSERT(!"unknown argument type tag");
                }
            }
            break;
        }

        if (optional)
        {
            build.inst(IrCmd::JUMP, nextCheck);
            build.beginBlock(nextCheck);
        }
    }

    // If the last argument is optional, we can skip creating a new internal block since one will already have been created.
    if (!(typeInfo.argumentTypes.back() & LBC_TYPE_OPTIONAL_BIT))
    {
        IrOp next = build.block(IrBlockKind::Internal);
        build.inst(IrCmd::JUMP, next);

        build.beginBlock(next);
    }
}
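
// For illustration, the rough IR shape produced for a single argument R0
// annotated 'number?' is (a sketch of the block structure, not verbatim
// builder output):
//
//   %t = LOAD_TAG R0
//   JUMP_EQ_TAG %t, tnil, bb_next, bb_check
// bb_check:
//   CHECK_TAG %t, tnumber, exit(entry guard pc)
//   JUMP bb_next
// bb_next:
//   JUMP bb_bytecode_0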

void IrBuilder::buildFunctionIr(Proto* proto)
{
    function.proto = proto;
    function.variadic = proto->is_vararg != 0;

    loadBytecodeTypeInfo(function);

    // Reserve entry block
    bool generateTypeChecks = hasTypedParameters(function.bcTypeInfo);
    IrOp entry = generateTypeChecks ? block(IrBlockKind::Internal) : IrOp{};

    // Rebuild original control flow blocks
    rebuildBytecodeBasicBlocks(proto);

    // Infer register tags in bytecode
    analyzeBytecodeTypes(function, hostHooks);

    function.bcMapping.resize(proto->sizecode, {~0u, ~0u});

    if (generateTypeChecks)
    {
        beginBlock(entry);

        buildArgumentTypeChecks(*this);

        inst(IrCmd::JUMP, blockAtInst(0));
    }
    else
    {
        entry = blockAtInst(0);
    }

    function.entryBlock = entry.index;

    // Translate all instructions to IR inside blocks
    for (int i = 0; i < proto->sizecode;)
    {
        const Instruction* pc = &proto->code[i];
        LuauOpcode op = LuauOpcode(LUAU_INSN_OP(*pc));

        int nexti = i + getOpLength(op);
        CODEGEN_ASSERT(nexti <= proto->sizecode);

        function.bcMapping[i] = {uint32_t(function.instructions.size()), ~0u};

        // Begin new block at this instruction if it was in the bytecode or requested during translation
        if (instIndexToBlock[i] != kNoAssociatedBlockIndex)
            beginBlock(blockAtInst(i));

        // Numeric for loops require additional processing to maintain loop stack
        // Notably, this must be performed even when the block is dead so that we maintain the pairing FORNPREP-FORNLOOP
        if (op == LOP_FORNPREP)
            beforeInstForNPrep(*this, pc, i);

        // We skip dead bytecode instructions when they appear after block was already terminated
        if (!inTerminatedBlock)
        {
            if (interruptRequested)
            {
                interruptRequested = false;
                inst(IrCmd::INTERRUPT, constUint(i));
            }

            translateInst(op, pc, i);

            if (cmdSkipTarget != -1)
            {
                nexti = cmdSkipTarget;
                cmdSkipTarget = -1;
            }
        }

        // See above for FORNPREP..FORNLOOP processing
        if (op == LOP_FORNLOOP)
            afterInstForNLoop(*this, pc);

        i = nexti;
        CODEGEN_ASSERT(i <= proto->sizecode);

        // If we are going into a new block at the next instruction and it's a fallthrough, jump has to be placed to mark block termination
        if (i < int(instIndexToBlock.size()) && instIndexToBlock[i] != kNoAssociatedBlockIndex)
        {
            if (!isBlockTerminator(function.instructions.back().cmd))
                inst(IrCmd::JUMP, blockAtInst(i));
        }
    }

    // Now that all has been generated, compute use counts
    updateUseCounts(function);
}
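
// Scans the bytecode once to find every jump target, then creates a
// bytecode-level IR block for each target (and for instruction 0, the function
// entry) so that translation can map any branch target to its IR block via
// instIndexToBlock.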
void IrBuilder::rebuildBytecodeBasicBlocks(Proto* proto)
{
    instIndexToBlock.resize(proto->sizecode, kNoAssociatedBlockIndex);

    // Mark jump targets
    std::vector<uint8_t> jumpTargets(proto->sizecode, 0);

    for (int i = 0; i < proto->sizecode;)
    {
        const Instruction* pc = &proto->code[i];
        LuauOpcode op = LuauOpcode(LUAU_INSN_OP(*pc));

        int target = getJumpTarget(*pc, uint32_t(i));

        if (target >= 0 && !isFastCall(op))
            jumpTargets[target] = true;

        i += getOpLength(op);
        CODEGEN_ASSERT(i <= proto->sizecode);
    }

    // Bytecode blocks are created at bytecode jump targets and the start of a function
    jumpTargets[0] = true;

    for (int i = 0; i < proto->sizecode; i++)
    {
        if (jumpTargets[i])
        {
            IrOp b = block(IrBlockKind::Bytecode);
            instIndexToBlock[i] = b.index;
        }
    }

    buildBytecodeBlocks(function, jumpTargets);
}
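
// Translates one bytecode instruction into IR. Most opcodes forward to a
// translateInst* helper from IrTranslation.h; calls, returns, and fastcalls
// are handled inline here because they interact with block layout.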
void IrBuilder::translateInst(LuauOpcode op, const Instruction* pc, int i)
{
    switch (op)
    {
    case LOP_NOP:
        break;
    case LOP_LOADNIL:
        translateInstLoadNil(*this, pc);
        break;
    case LOP_LOADB:
        translateInstLoadB(*this, pc, i);
        break;
    case LOP_LOADN:
        translateInstLoadN(*this, pc);
        break;
    case LOP_LOADK:
        translateInstLoadK(*this, pc);
        break;
    case LOP_LOADKX:
        translateInstLoadKX(*this, pc);
        break;
    case LOP_MOVE:
        translateInstMove(*this, pc);
        break;
    case LOP_GETGLOBAL:
        translateInstGetGlobal(*this, pc, i);
        break;
    case LOP_SETGLOBAL:
        translateInstSetGlobal(*this, pc, i);
        break;
    case LOP_CALL:
        inst(IrCmd::INTERRUPT, constUint(i));
        inst(IrCmd::SET_SAVEDPC, constUint(i + 1));

        inst(IrCmd::CALL, vmReg(LUAU_INSN_A(*pc)), constInt(LUAU_INSN_B(*pc) - 1), constInt(LUAU_INSN_C(*pc) - 1));

        if (activeFastcallFallback)
        {
            inst(IrCmd::JUMP, fastcallFallbackReturn);

            beginBlock(fastcallFallbackReturn);

            activeFastcallFallback = false;
        }
        break;
    case LOP_RETURN:
        inst(IrCmd::INTERRUPT, constUint(i));

        inst(IrCmd::RETURN, vmReg(LUAU_INSN_A(*pc)), constInt(LUAU_INSN_B(*pc) - 1));
        break;
    case LOP_GETTABLE:
        translateInstGetTable(*this, pc, i);
        break;
    case LOP_SETTABLE:
        translateInstSetTable(*this, pc, i);
        break;
    case LOP_GETTABLEKS:
        translateInstGetTableKS(*this, pc, i);
        break;
    case LOP_SETTABLEKS:
        translateInstSetTableKS(*this, pc, i);
        break;
    case LOP_GETTABLEN:
        translateInstGetTableN(*this, pc, i);
        break;
    case LOP_SETTABLEN:
        translateInstSetTableN(*this, pc, i);
        break;
    case LOP_JUMP:
        translateInstJump(*this, pc, i);
        break;
    case LOP_JUMPBACK:
        translateInstJumpBack(*this, pc, i);
        break;
    case LOP_JUMPIF:
        translateInstJumpIf(*this, pc, i, /* not_ */ false);
        break;
    case LOP_JUMPIFNOT:
        translateInstJumpIf(*this, pc, i, /* not_ */ true);
        break;
    case LOP_JUMPIFEQ:
        translateInstJumpIfEq(*this, pc, i, /* not_ */ false);
        break;
    case LOP_JUMPIFLE:
        translateInstJumpIfCond(*this, pc, i, IrCondition::LessEqual);
        break;
    case LOP_JUMPIFLT:
        translateInstJumpIfCond(*this, pc, i, IrCondition::Less);
        break;
    case LOP_JUMPIFNOTEQ:
        translateInstJumpIfEq(*this, pc, i, /* not_ */ true);
        break;
    case LOP_JUMPIFNOTLE:
        translateInstJumpIfCond(*this, pc, i, IrCondition::NotLessEqual);
        break;
    case LOP_JUMPIFNOTLT:
        translateInstJumpIfCond(*this, pc, i, IrCondition::NotLess);
        break;
    case LOP_JUMPX:
        translateInstJumpX(*this, pc, i);
        break;
    case LOP_JUMPXEQKNIL:
        translateInstJumpxEqNil(*this, pc, i);
        break;
    case LOP_JUMPXEQKB:
        translateInstJumpxEqB(*this, pc, i);
        break;
    case LOP_JUMPXEQKN:
        translateInstJumpxEqN(*this, pc, i);
        break;
    case LOP_JUMPXEQKS:
        translateInstJumpxEqS(*this, pc, i);
        break;
    case LOP_ADD:
        translateInstBinary(*this, pc, i, TM_ADD);
        break;
    case LOP_SUB:
        translateInstBinary(*this, pc, i, TM_SUB);
        break;
    case LOP_MUL:
        translateInstBinary(*this, pc, i, TM_MUL);
        break;
    case LOP_DIV:
        translateInstBinary(*this, pc, i, TM_DIV);
        break;
    case LOP_IDIV:
        translateInstBinary(*this, pc, i, TM_IDIV);
        break;
    case LOP_MOD:
        translateInstBinary(*this, pc, i, TM_MOD);
        break;
    case LOP_POW:
        translateInstBinary(*this, pc, i, TM_POW);
        break;
    case LOP_ADDK:
        translateInstBinaryK(*this, pc, i, TM_ADD);
        break;
    case LOP_SUBK:
        translateInstBinaryK(*this, pc, i, TM_SUB);
        break;
    case LOP_MULK:
        translateInstBinaryK(*this, pc, i, TM_MUL);
        break;
    case LOP_DIVK:
        translateInstBinaryK(*this, pc, i, TM_DIV);
        break;
    case LOP_IDIVK:
        translateInstBinaryK(*this, pc, i, TM_IDIV);
        break;
    case LOP_MODK:
        translateInstBinaryK(*this, pc, i, TM_MOD);
        break;
    case LOP_POWK:
        translateInstBinaryK(*this, pc, i, TM_POW);
        break;
    // SUBRK and DIVRK lower the constant-on-the-left forms of '-' and '/' (K op R),
    // which the compiler cannot reassociate into the R op K shape that the ADDK-style
    // opcodes require (bytecode v5, #1115).
    case LOP_SUBRK:
        translateInstBinaryRK(*this, pc, i, TM_SUB);
        break;
    case LOP_DIVRK:
        translateInstBinaryRK(*this, pc, i, TM_DIV);
        break;
    case LOP_NOT:
        translateInstNot(*this, pc);
        break;
    case LOP_MINUS:
        translateInstMinus(*this, pc, i);
        break;
    case LOP_LENGTH:
        translateInstLength(*this, pc, i);
        break;
    case LOP_NEWTABLE:
        translateInstNewTable(*this, pc, i);
        break;
    case LOP_DUPTABLE:
        translateInstDupTable(*this, pc, i);
        break;
    case LOP_SETLIST:
        inst(IrCmd::SETLIST, constUint(i), vmReg(LUAU_INSN_A(*pc)), vmReg(LUAU_INSN_B(*pc)), constInt(LUAU_INSN_C(*pc) - 1), constUint(pc[1]), undef());
        break;
    case LOP_GETUPVAL:
        translateInstGetUpval(*this, pc, i);
        break;
    case LOP_SETUPVAL:
        translateInstSetUpval(*this, pc, i);
        break;
    case LOP_CLOSEUPVALS:
        translateInstCloseUpvals(*this, pc);
        break;
    case LOP_FASTCALL:
        handleFastcallFallback(translateFastCallN(*this, pc, i, false, 0, {}, {}), pc, i);
        break;
    case LOP_FASTCALL1:
        handleFastcallFallback(translateFastCallN(*this, pc, i, true, 1, undef(), undef()), pc, i);
        break;
    case LOP_FASTCALL2:
        handleFastcallFallback(translateFastCallN(*this, pc, i, true, 2, vmReg(pc[1]), undef()), pc, i);
        break;
    case LOP_FASTCALL2K:
        handleFastcallFallback(translateFastCallN(*this, pc, i, true, 2, vmConst(pc[1]), undef()), pc, i);
        break;
    case LOP_FASTCALL3:
        CODEGEN_ASSERT(FFlag::LuauCodegenFastcall3);

        handleFastcallFallback(translateFastCallN(*this, pc, i, true, 3, vmReg(pc[1] & 0xff), vmReg((pc[1] >> 8) & 0xff)), pc, i);
        break;
    case LOP_FORNPREP:
        translateInstForNPrep(*this, pc, i);
        break;
    case LOP_FORNLOOP:
        translateInstForNLoop(*this, pc, i);
        break;
    case LOP_FORGLOOP:
    {
        int aux = int(pc[1]);

        // We have a translation for ipairs-style traversal, general loop iteration is still too complex
        if (aux < 0)
        {
            translateInstForGLoopIpairs(*this, pc, i);
        }
        else
        {
            int ra = LUAU_INSN_A(*pc);

            IrOp loopRepeat = blockAtInst(i + 1 + LUAU_INSN_D(*pc));
            IrOp loopExit = blockAtInst(i + getOpLength(LOP_FORGLOOP));
            IrOp fallback = block(IrBlockKind::Fallback);

            inst(IrCmd::INTERRUPT, constUint(i));
            loadAndCheckTag(vmReg(ra), LUA_TNIL, fallback);

            inst(IrCmd::FORGLOOP, vmReg(ra), constInt(aux), loopRepeat, loopExit);

            beginBlock(fallback);
            inst(IrCmd::SET_SAVEDPC, constUint(i + 1));
            inst(IrCmd::FORGLOOP_FALLBACK, vmReg(ra), constInt(aux), loopRepeat, loopExit);

            beginBlock(loopExit);
        }
        break;
    }
    case LOP_FORGPREP_NEXT:
        translateInstForGPrepNext(*this, pc, i);
        break;
    case LOP_FORGPREP_INEXT:
        translateInstForGPrepInext(*this, pc, i);
        break;
    case LOP_AND:
        translateInstAndX(*this, pc, i, vmReg(LUAU_INSN_C(*pc)));
        break;
    case LOP_ANDK:
        translateInstAndX(*this, pc, i, vmConst(LUAU_INSN_C(*pc)));
        break;
    case LOP_OR:
        translateInstOrX(*this, pc, i, vmReg(LUAU_INSN_C(*pc)));
        break;
    case LOP_ORK:
        translateInstOrX(*this, pc, i, vmConst(LUAU_INSN_C(*pc)));
        break;
    case LOP_COVERAGE:
        inst(IrCmd::COVERAGE, constUint(i));
        break;
    case LOP_GETIMPORT:
        translateInstGetImport(*this, pc, i);
        break;
    case LOP_CONCAT:
        translateInstConcat(*this, pc, i);
        break;
    case LOP_CAPTURE:
        translateInstCapture(*this, pc, i);
        break;
    case LOP_NAMECALL:
        if (translateInstNamecall(*this, pc, i))
            cmdSkipTarget = i + 3;
        break;
    case LOP_PREPVARARGS:
        inst(IrCmd::FALLBACK_PREPVARARGS, constUint(i), constInt(LUAU_INSN_A(*pc)));
        break;
    case LOP_GETVARARGS:
        inst(IrCmd::FALLBACK_GETVARARGS, constUint(i), vmReg(LUAU_INSN_A(*pc)), constInt(LUAU_INSN_B(*pc) - 1));
        break;
    case LOP_NEWCLOSURE:
        translateInstNewClosure(*this, pc, i);
        break;
    case LOP_DUPCLOSURE:
        inst(IrCmd::FALLBACK_DUPCLOSURE, constUint(i), vmReg(LUAU_INSN_A(*pc)), vmConst(LUAU_INSN_D(*pc)));
        break;
    case LOP_FORGPREP:
    {
        IrOp loopStart = blockAtInst(i + 1 + LUAU_INSN_D(*pc));

        inst(IrCmd::FALLBACK_FORGPREP, constUint(i), vmReg(LUAU_INSN_A(*pc)), loopStart);
        break;
    }
    default:
        CODEGEN_ASSERT(!"Unknown instruction");
    }
}
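
// Called after translating a FASTCALL opcode. If the builtin produced a
// fallback block, translation continues there so the paired slow-path LOP_CALL
// gets emitted, and the CALL handler later jumps back to
// 'fastcallFallbackReturn'. Otherwise the fallback call sequence is skipped
// entirely by steering the outer translation loop to i + C + 2.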
void IrBuilder::handleFastcallFallback(IrOp fallbackOrUndef, const Instruction* pc, int i)
{
    int skip = LUAU_INSN_C(*pc);

    if (fallbackOrUndef.kind != IrOpKind::Undef)
    {
        IrOp next = blockAtInst(i + skip + 2);
        inst(IrCmd::JUMP, next);
        beginBlock(fallbackOrUndef);

        activeFastcallFallback = true;
        fastcallFallbackReturn = next;
    }
    else
    {
        cmdSkipTarget = i + skip + 2;
    }
}

bool IrBuilder::isInternalBlock(IrOp block)
{
    IrBlock& target = function.blocks[block.index];

    return target.kind == IrBlockKind::Internal;
}

void IrBuilder::beginBlock(IrOp block)
{
    IrBlock& target = function.blocks[block.index];
    activeBlockIdx = block.index;

    CODEGEN_ASSERT(target.start == ~0u || target.start == uint32_t(function.instructions.size()));

    target.start = uint32_t(function.instructions.size());
    target.sortkey = target.start;

    inTerminatedBlock = false;
}

void IrBuilder::loadAndCheckTag(IrOp loc, uint8_t tag, IrOp fallback)
{
    inst(IrCmd::CHECK_TAG, inst(IrCmd::LOAD_TAG, loc), constTag(tag), fallback);
}
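
// Appends a copy of 'source' block's instructions to the active block. Cloned
// instructions may only reference values defined inside 'source'; such
// operands are redirected to the freshly cloned definitions via 'instRedir'.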
void IrBuilder::clone(const IrBlock& source, bool removeCurrentTerminator)
{
    DenseHashMap<uint32_t, uint32_t> instRedir{~0u};

    auto redirect = [&instRedir](IrOp& op) {
        if (op.kind == IrOpKind::Inst)
        {
            if (const uint32_t* newIndex = instRedir.find(op.index))
                op.index = *newIndex;
            else
                CODEGEN_ASSERT(!"Values can only be used if they are defined in the same block");
        }
    };

    if (removeCurrentTerminator && inTerminatedBlock)
    {
        IrBlock& active = function.blocks[activeBlockIdx];
        IrInst& term = function.instructions[active.finish];

        kill(function, term);
        inTerminatedBlock = false;
    }

    for (uint32_t index = source.start; index <= source.finish; index++)
    {
        CODEGEN_ASSERT(index < function.instructions.size());
        IrInst clone = function.instructions[index];

        // Skip pseudo instructions to make clone more compact, but validate that they have no users
        if (isPseudo(clone.cmd))
        {
            CODEGEN_ASSERT(clone.useCount == 0);
            continue;
        }

        redirect(clone.a);
        redirect(clone.b);
        redirect(clone.c);
        redirect(clone.d);
        redirect(clone.e);
        redirect(clone.f);

        if (FFlag::LuauCodegenInstG)
            redirect(clone.g);

        addUse(function, clone.a);
        addUse(function, clone.b);
        addUse(function, clone.c);
        addUse(function, clone.d);
        addUse(function, clone.e);
        addUse(function, clone.f);

        if (FFlag::LuauCodegenInstG)
            addUse(function, clone.g);

        // Instructions that referenced the original will have to be adjusted to use the clone
        instRedir[index] = uint32_t(function.instructions.size());

        // Reconstruct the fresh clone
        if (FFlag::LuauCodegenInstG)
            inst(clone.cmd, clone.a, clone.b, clone.c, clone.d, clone.e, clone.f, clone.g);
        else
            inst(clone.cmd, clone.a, clone.b, clone.c, clone.d, clone.e, clone.f);
    }
}
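
// Constant operands are interned: each const* helper below builds an IrConst
// and funnels it through constAny, which deduplicates on a (kind, 64-bit key)
// pair so repeated uses of the same value share one constant-table slot.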
IrOp IrBuilder::undef()
{
    return {IrOpKind::Undef, 0};
}

IrOp IrBuilder::constInt(int value)
{
    IrConst constant;
    constant.kind = IrConstKind::Int;
    constant.valueInt = value;
    return constAny(constant, uint64_t(value));
}

IrOp IrBuilder::constUint(unsigned value)
{
    IrConst constant;
    constant.kind = IrConstKind::Uint;
    constant.valueUint = value;
    return constAny(constant, uint64_t(value));
}

IrOp IrBuilder::constDouble(double value)
{
    IrConst constant;
    constant.kind = IrConstKind::Double;
    constant.valueDouble = value;

    uint64_t asCommonKey;
    static_assert(sizeof(asCommonKey) == sizeof(value), "Expecting double to be 64-bit");
    memcpy(&asCommonKey, &value, sizeof(value));

    return constAny(constant, asCommonKey);
}

IrOp IrBuilder::constTag(uint8_t value)
{
    IrConst constant;
    constant.kind = IrConstKind::Tag;
    constant.valueTag = value;
    return constAny(constant, uint64_t(value));
}

IrOp IrBuilder::constAny(IrConst constant, uint64_t asCommonKey)
{
    ConstantKey key{constant.kind, asCommonKey};

    if (uint32_t* cache = constantMap.find(key))
        return {IrOpKind::Constant, *cache};

    uint32_t index = uint32_t(function.constants.size());
    function.constants.push_back(constant);

    constantMap[key] = index;

    return {IrOpKind::Constant, index};
}

IrOp IrBuilder::cond(IrCondition cond)
{
    return {IrOpKind::Condition, uint32_t(cond)};
}

IrOp IrBuilder::inst(IrCmd cmd)
{
    return inst(cmd, {}, {}, {}, {}, {}, {});
}

IrOp IrBuilder::inst(IrCmd cmd, IrOp a)
{
    return inst(cmd, a, {}, {}, {}, {}, {});
}

IrOp IrBuilder::inst(IrCmd cmd, IrOp a, IrOp b)
{
    return inst(cmd, a, b, {}, {}, {}, {});
}

IrOp IrBuilder::inst(IrCmd cmd, IrOp a, IrOp b, IrOp c)
{
    return inst(cmd, a, b, c, {}, {}, {});
}

IrOp IrBuilder::inst(IrCmd cmd, IrOp a, IrOp b, IrOp c, IrOp d)
{
    return inst(cmd, a, b, c, d, {}, {});
}

IrOp IrBuilder::inst(IrCmd cmd, IrOp a, IrOp b, IrOp c, IrOp d, IrOp e)
{
    return inst(cmd, a, b, c, d, e, {});
}

IrOp IrBuilder::inst(IrCmd cmd, IrOp a, IrOp b, IrOp c, IrOp d, IrOp e, IrOp f)
{
    if (FFlag::LuauCodegenInstG)
    {
        return inst(cmd, a, b, c, d, e, f, {});
    }
    else
    {
        uint32_t index = uint32_t(function.instructions.size());
        function.instructions.push_back({cmd, a, b, c, d, e, f});

        CODEGEN_ASSERT(!inTerminatedBlock);

        if (isBlockTerminator(cmd))
        {
            function.blocks[activeBlockIdx].finish = index;
            inTerminatedBlock = true;
        }

        return {IrOpKind::Inst, index};
    }
}

IrOp IrBuilder::inst(IrCmd cmd, IrOp a, IrOp b, IrOp c, IrOp d, IrOp e, IrOp f, IrOp g)
{
    CODEGEN_ASSERT(FFlag::LuauCodegenInstG);

    uint32_t index = uint32_t(function.instructions.size());
    function.instructions.push_back({cmd, a, b, c, d, e, f, g});

    CODEGEN_ASSERT(!inTerminatedBlock);

    if (isBlockTerminator(cmd))
    {
        function.blocks[activeBlockIdx].finish = index;
        inTerminatedBlock = true;
    }

    return {IrOpKind::Inst, index};
}
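
// Creating a block while a fastcall fallback is active downgrades Internal
// blocks to Fallback, so that later passes can keep slow-path code out of the
// main block order.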
IrOp IrBuilder::block(IrBlockKind kind)
{
    if (kind == IrBlockKind::Internal && activeFastcallFallback)
        kind = IrBlockKind::Fallback;

    uint32_t index = uint32_t(function.blocks.size());
    function.blocks.push_back(IrBlock{kind});
    return IrOp{IrOpKind::Block, index};
}

IrOp IrBuilder::blockAtInst(uint32_t index)
{
    uint32_t blockIndex = instIndexToBlock[index];

    if (blockIndex != kNoAssociatedBlockIndex)
        return IrOp{IrOpKind::Block, blockIndex};

    return block(IrBlockKind::Internal);
}

IrOp IrBuilder::vmReg(uint8_t index)
{
    return {IrOpKind::VmReg, index};
}

IrOp IrBuilder::vmConst(uint32_t index)
{
    return {IrOpKind::VmConst, index};
}

IrOp IrBuilder::vmUpvalue(uint8_t index)
{
    return {IrOpKind::VmUpvalue, index};
}

IrOp IrBuilder::vmExit(uint32_t pcpos)
{
    return {IrOpKind::VmExit, pcpos};
}

} // namespace CodeGen
} // namespace Luau