// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once

#include "Luau/AssemblyBuilderA64.h"
#include "Luau/AssemblyBuilderX64.h"
#include "Luau/CodeGen.h"
#include "Luau/IrBuilder.h"
#include "Luau/IrDump.h"
#include "Luau/IrUtils.h"
#include "Luau/OptimizeConstProp.h"
#include "Luau/OptimizeDeadStore.h"
#include "Luau/OptimizeFinalX64.h"

#include "EmitCommon.h"
#include "IrLoweringA64.h"
#include "IrLoweringX64.h"

#include "lobject.h"
#include "lstate.h"

#include <algorithm>
#include <vector>

LUAU_FASTFLAG(DebugCodegenNoOpt)
LUAU_FASTFLAG(DebugCodegenOptSize)
LUAU_FASTFLAG(DebugCodegenSkipNumbering)
LUAU_FASTINT(CodegenHeuristicsInstructionLimit)
LUAU_FASTINT(CodegenHeuristicsBlockLimit)
LUAU_FASTINT(CodegenHeuristicsBlockInstructionLimit)
LUAU_FASTFLAG(LuauNativeAttribute)

namespace Luau
{
namespace CodeGen
{

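// Gathers protos to compile into 'results', indexed by bytecodeid, by traversing 'proto' and its children (path without native attribute support)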
inline void gatherFunctions_DEPRECATED(std::vector<Proto*>& results, Proto* proto, unsigned int flags)
{
    if (results.size() <= size_t(proto->bytecodeid))
        results.resize(proto->bytecodeid + 1);

    // Skip protos that we've already compiled in this run: this happens because at -O2, inlined functions get their protos reused
    if (results[proto->bytecodeid])
        return;

    // Only compile cold functions if requested
    if ((proto->flags & LPF_NATIVE_COLD) == 0 || (flags & CodeGen_ColdFunctions) != 0)
        results[proto->bytecodeid] = proto;

    // Recursively traverse child protos even if we aren't compiling this one
    for (int i = 0; i < proto->sizep; i++)
        gatherFunctions_DEPRECATED(results, proto->p[i], flags);
}

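// Shared traversal used by gatherFunctions: selects which protos to compile based on whether the module relies on per-function native attributes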
inline void gatherFunctionsHelper(
    std::vector<Proto*>& results, Proto* proto, const unsigned int flags, const bool hasNativeFunctions, const bool root)
{
    if (results.size() <= size_t(proto->bytecodeid))
        results.resize(proto->bytecodeid + 1);

    // Skip protos that we've already compiled in this run: this happens because at -O2, inlined functions get their protos reused
    if (results[proto->bytecodeid])
        return;

    // If the module has native functions, compile only non-root functions carrying the native attribute
    // Otherwise, compile every function, skipping cold functions unless they were requested
    bool shouldGather = hasNativeFunctions ? (!root && (proto->flags & LPF_NATIVE_FUNCTION) != 0)
                                           : ((proto->flags & LPF_NATIVE_COLD) == 0 || (flags & CodeGen_ColdFunctions) != 0);

    if (shouldGather)
        results[proto->bytecodeid] = proto;

    // Recursively traverse child protos even if we aren't compiling this one
    for (int i = 0; i < proto->sizep; i++)
        gatherFunctionsHelper(results, proto->p[i], flags, hasNativeFunctions, false);
}

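// Entry point used when LuauNativeAttribute is enabled; 'hasNativeFunctions' selects attribute-based gathering for modules that mark individual functions as native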
inline void gatherFunctions(std::vector<Proto*>& results, Proto* root, const unsigned int flags, const bool hasNativeFunctions = false)
{
    LUAU_ASSERT(FFlag::LuauNativeAttribute);
    gatherFunctionsHelper(results, root, flags, hasNativeFunctions, true);
}

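// Counts IR instructions matching 'cmd'; used below for optimization statistics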
inline unsigned getInstructionCount(const std::vector<IrInst>& instructions, IrCmd cmd)
{
    return unsigned(std::count_if(instructions.begin(), instructions.end(), [&cmd](const IrInst& inst) {
        return inst.cmd == cmd;
    }));
}

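// Lowers the IR of a single function to machine code through the target-specific 'lowering' backend,
// emitting optional IR/assembly text output along the way; returns false if lowering reports an error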
template<typename AssemblyBuilder, typename IrLowering>
inline bool lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction& function, const std::vector<uint32_t>& sortedBlocks, int bytecodeid,
    AssemblyOptions options)
{
    // For each IR instruction that begins a bytecode instruction, which bytecode instruction is it?
    std::vector<uint32_t> bcLocations(function.instructions.size() + 1, ~0u);

    for (size_t i = 0; i < function.bcMapping.size(); ++i)
    {
        uint32_t irLocation = function.bcMapping[i].irLocation;

        if (irLocation != ~0u)
            bcLocations[irLocation] = uint32_t(i);
    }

    bool outputEnabled = options.includeAssembly || options.includeIr;

    IrToStringContext ctx{build.text, function.blocks, function.constants, function.cfg};

    // We use this to skip outlined fallback blocks from IR/asm text output
    size_t textSize = build.text.length();
    uint32_t codeSize = build.getCodeSize();
    bool seenFallback = false;

    IrBlock dummy;
    dummy.start = ~0u;

    // Make sure entry block is first
    CODEGEN_ASSERT(sortedBlocks[0] == 0);

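    // Lower each live block in sorted order: place its label, log IR if requested, then lower every instruction it contains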
    for (size_t i = 0; i < sortedBlocks.size(); ++i)
    {
        uint32_t blockIndex = sortedBlocks[i];
        IrBlock& block = function.blocks[blockIndex];

        if (block.kind == IrBlockKind::Dead)
            continue;

        CODEGEN_ASSERT(block.start != ~0u);
        CODEGEN_ASSERT(block.finish != ~0u);

        // If we want to skip fallback code IR/asm, we'll record when those blocks start once we see them
        if (block.kind == IrBlockKind::Fallback && !seenFallback)
        {
            textSize = build.text.length();
            codeSize = build.getCodeSize();
            seenFallback = true;
        }

        if (options.includeIr)
        {
            if (options.includeIrPrefix == IncludeIrPrefix::Yes)
                build.logAppend("# ");

            toStringDetailed(ctx, block, blockIndex, options.includeUseInfo, options.includeCfgInfo, options.includeRegFlowInfo);
        }

        // Values can only reference restore operands in the current block chain
        function.validRestoreOpBlocks.push_back(blockIndex);

        build.setLabel(block.label);

        if (blockIndex == function.entryBlock)
        {
            function.entryLocation = build.getLabelOffset(block.label);
        }

        IrBlock& nextBlock = getNextBlock(function, sortedBlocks, dummy, i);

        // Optimizations often propagate information between blocks
        // To make sure the register and spill state is correct when blocks are lowered, we check that sorted block order matches the expected one
        if (block.expectedNextBlock != ~0u)
            CODEGEN_ASSERT(function.getBlockIndex(nextBlock) == block.expectedNextBlock);

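        // Lower the instructions of this block one by one, recording asm locations for bytecode jump targets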
        for (uint32_t index = block.start; index <= block.finish; index++)
        {
            CODEGEN_ASSERT(index < function.instructions.size());

            uint32_t bcLocation = bcLocations[index];

            // If IR instruction is the first one for the original bytecode, we can annotate it with source code text
            if (outputEnabled && options.annotator && bcLocation != ~0u)
            {
                options.annotator(options.annotatorContext, build.text, bytecodeid, bcLocation);

                // If available, report inferred register tags
                BytecodeTypes bcTypes = function.getBytecodeTypesAt(bcLocation);

                if (bcTypes.result != LBC_TYPE_ANY || bcTypes.a != LBC_TYPE_ANY || bcTypes.b != LBC_TYPE_ANY || bcTypes.c != LBC_TYPE_ANY)
                {
                    toString(ctx.result, bcTypes, options.compilationOptions.userdataTypes);
                    build.logAppend("\n");
                }
            }

            // If bytecode needs the location of this instruction for jumps, record it
            if (bcLocation != ~0u)
            {
                Label label = (index == block.start) ? block.label : build.setLabel();
                function.bcMapping[bcLocation].asmLocation = build.getLabelOffset(label);
            }

            IrInst& inst = function.instructions[index];

            // Skip pseudo instructions, but make sure they are not used at this stage
            // This also prevents them from getting into text output when that's enabled
            if (isPseudo(inst.cmd))
            {
                CODEGEN_ASSERT(inst.useCount == 0);
                continue;
            }

            // Either instruction result value is not referenced or the use count is not zero
            CODEGEN_ASSERT(inst.lastUse == 0 || inst.useCount != 0);

            if (options.includeIr)
            {
                if (options.includeIrPrefix == IncludeIrPrefix::Yes)
                    build.logAppend("# ");

                toStringDetailed(ctx, block, blockIndex, inst, index, options.includeUseInfo);
            }

            lowering.lowerInst(inst, index, nextBlock);

            if (lowering.hasError())
            {
                // Place labels for all blocks that we're skipping
                // This is needed to avoid AssemblyBuilder assertions about jumps in earlier blocks with unplaced labels
                for (size_t j = i + 1; j < sortedBlocks.size(); ++j)
                {
                    IrBlock& abandoned = function.blocks[sortedBlocks[j]];

                    build.setLabel(abandoned.label);
                }

                lowering.finishFunction();

                return false;
            }
        }

        lowering.finishBlock(block, nextBlock);

        if (options.includeIr && options.includeIrPrefix == IncludeIrPrefix::Yes)
            build.logAppend("#\n");

        if (block.expectedNextBlock == ~0u)
            function.validRestoreOpBlocks.clear();
    }

    if (!seenFallback)
    {
        textSize = build.text.length();
        codeSize = build.getCodeSize();
    }

    lowering.finishFunction();

    if (outputEnabled && !options.includeOutlinedCode && textSize < build.text.size())
    {
        build.text.resize(textSize);

        if (options.includeAssembly)
            build.logAppend("; skipping %u bytes of outlined code\n", unsigned((build.getCodeSize() - codeSize) * sizeof(build.code[0])));
    }

    return true;
}

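// Target-specific lowering entry points: construct the per-target IrLowering and run the shared lowerImpl
// (the x64 path first runs optimizeMemoryOperandsX64 on the IR)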
inline bool lowerIr(X64::AssemblyBuilderX64& build, IrBuilder& ir, const std::vector<uint32_t>& sortedBlocks, ModuleHelpers& helpers, Proto* proto,
    AssemblyOptions options, LoweringStats* stats)
{
    optimizeMemoryOperandsX64(ir.function);

    X64::IrLoweringX64 lowering(build, helpers, ir.function, stats);

    return lowerImpl(build, lowering, ir.function, sortedBlocks, proto->bytecodeid, options);
}

inline bool lowerIr(A64::AssemblyBuilderA64& build, IrBuilder& ir, const std::vector<uint32_t>& sortedBlocks, ModuleHelpers& helpers, Proto* proto,
    AssemblyOptions options, LoweringStats* stats)
{
    A64::IrLoweringA64 lowering(build, helpers, ir.function, stats);

    return lowerImpl(build, lowering, ir.function, sortedBlocks, proto->bytecodeid, options);
}

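// Runs the full IR pipeline for a single function: dead block removal, heuristic limit checks, optimization passes, and final lowering
// On failure, 'codeGenCompilationResult' is set to the specific reason and false is returned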
template<typename AssemblyBuilder>
inline bool lowerFunction(IrBuilder& ir, AssemblyBuilder& build, ModuleHelpers& helpers, Proto* proto, AssemblyOptions options, LoweringStats* stats,
    CodeGenCompilationResult& codeGenCompilationResult)
{
    killUnusedBlocks(ir.function);

    unsigned preOptBlockCount = 0;
    unsigned maxBlockInstructions = 0;

    for (const IrBlock& block : ir.function.blocks)
    {
        preOptBlockCount += (block.kind != IrBlockKind::Dead);
        unsigned blockInstructions = block.finish - block.start;
        maxBlockInstructions = std::max(maxBlockInstructions, blockInstructions);
    }

    // we update stats before checking the heuristic so that even if we bail out
    // our stats include information about the limit that was exceeded.
    if (stats)
    {
        stats->blocksPreOpt += preOptBlockCount;
        stats->maxBlockInstructions = maxBlockInstructions;
    }

    if (preOptBlockCount >= unsigned(FInt::CodegenHeuristicsBlockLimit.value))
    {
        codeGenCompilationResult = CodeGenCompilationResult::CodeGenOverflowBlockLimit;
        return false;
    }

    if (maxBlockInstructions >= unsigned(FInt::CodegenHeuristicsBlockInstructionLimit.value))
    {
        codeGenCompilationResult = CodeGenCompilationResult::CodeGenOverflowBlockInstructionLimit;
        return false;
    }

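    // Compute control flow graph information (predecessors, successors, live-in registers) used by the optimizations and lowering below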
    computeCfgInfo(ir.function);

    if (!FFlag::DebugCodegenNoOpt)
    {
        bool useValueNumbering = !FFlag::DebugCodegenSkipNumbering;

        constPropInBlockChains(ir, useValueNumbering);

        if (!FFlag::DebugCodegenOptSize)
        {
            double startTime = 0.0;
            unsigned constPropInstructionCount = 0;

            if (stats)
            {
                constPropInstructionCount = getInstructionCount(ir.function.instructions, IrCmd::SUBSTITUTE);
                startTime = lua_clock();
            }

            createLinearBlocks(ir, useValueNumbering);

            if (stats)
            {
                stats->blockLinearizationStats.timeSeconds += lua_clock() - startTime;
                constPropInstructionCount = getInstructionCount(ir.function.instructions, IrCmd::SUBSTITUTE) - constPropInstructionCount;
                stats->blockLinearizationStats.constPropInstructionCount += constPropInstructionCount;
            }
        }

        markDeadStoresInBlockChains(ir);
    }

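    // Blocks are lowered in this sorted order; lowerImpl expects the entry block first and fallback blocks grouped at the end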
    std::vector<uint32_t> sortedBlocks = getSortedBlockOrder(ir.function);

    // In order to allocate registers during lowering, we need to know where instruction results are last used
    updateLastUseLocations(ir.function, sortedBlocks);

    if (stats)
    {
        for (const IrBlock& block : ir.function.blocks)
        {
            if (block.kind != IrBlockKind::Dead)
                ++stats->blocksPostOpt;
        }
    }

    bool result = lowerIr(build, ir, sortedBlocks, helpers, proto, options, stats);

    if (!result)
        codeGenCompilationResult = CodeGenCompilationResult::CodeGenLoweringFailure;

    return result;
}

} // namespace CodeGen
} // namespace Luau