// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/CodeGen.h"

#include "Luau/Common.h"
#include "Luau/CodeAllocator.h"
#include "Luau/CodeBlockUnwind.h"
#include "Luau/IrAnalysis.h"
|
|
|
|
#include "Luau/IrBuilder.h"
|
2023-03-24 18:03:04 +00:00
|
|
|
#include "Luau/IrDump.h"
|
|
|
|
#include "Luau/IrUtils.h"
|
2023-02-24 21:49:38 +00:00
|
|
|
#include "Luau/OptimizeConstProp.h"
|
2023-02-10 19:40:38 +00:00
|
|
|
#include "Luau/OptimizeFinalX64.h"
|
2023-03-17 19:20:37 +00:00
|
|
|
|
2022-10-14 20:48:41 +01:00
|
|
|
#include "Luau/UnwindBuilder.h"
|
|
|
|
#include "Luau/UnwindBuilderDwarf2.h"
|
|
|
|
#include "Luau/UnwindBuilderWin.h"
|
|
|
|
|
2023-03-17 19:20:37 +00:00
|
|
|
#include "Luau/AssemblyBuilderA64.h"
|
2023-03-24 18:03:04 +00:00
|
|
|
#include "Luau/AssemblyBuilderX64.h"
|
2023-03-17 19:20:37 +00:00
|
|
|
|
2022-10-14 20:48:41 +01:00
|
|
|
#include "CustomExecUtils.h"
|
2023-03-24 18:03:04 +00:00
|
|
|
#include "NativeState.h"
|
|
|
|
|
2023-03-17 19:20:37 +00:00
|
|
|
#include "CodeGenA64.h"
|
2023-03-24 18:03:04 +00:00
|
|
|
#include "EmitCommonA64.h"
|
|
|
|
#include "IrLoweringA64.h"
|
|
|
|
|
|
|
|
#include "CodeGenX64.h"
|
2022-10-14 20:48:41 +01:00
|
|
|
#include "EmitCommonX64.h"
|
|
|
|
#include "EmitInstructionX64.h"
|
2023-01-27 22:28:31 +00:00
|
|
|
#include "IrLoweringX64.h"
|
2022-10-14 20:48:41 +01:00
|
|
|
|
|
|
|
#include "lapi.h"
|
|
|
|
|
2023-03-24 18:03:04 +00:00
|
|
|
#include <algorithm>
|
2022-10-14 20:48:41 +01:00
|
|
|
#include <memory>
|
|
|
|
|
|
|
|
#if defined(__x86_64__) || defined(_M_X64)
|
|
|
|
#ifdef _MSC_VER
|
|
|
|
#include <intrin.h> // __cpuid
|
|
|
|
#else
|
|
|
|
#include <cpuid.h> // __cpuid
|
|
|
|
#endif
|
|
|
|
#endif
|
|
|
|
|
2023-03-31 19:42:49 +01:00
|
|
|
#if defined(__aarch64__)
|
|
|
|
#ifdef __APPLE__
|
|
|
|
#include <sys/sysctl.h>
|
|
|
|
#endif
|
|
|
|
#endif
|
|
|
|
|
2023-02-24 21:49:38 +00:00
|
|
|
LUAU_FASTFLAGVARIABLE(DebugCodegenNoOpt, false)
|
2023-04-21 23:14:26 +01:00
|
|
|
LUAU_FASTFLAGVARIABLE(DebugCodegenOptSize, false)
|
2023-04-28 20:55:13 +01:00
|
|
|
LUAU_FASTFLAGVARIABLE(DebugCodegenSkipNumbering, false)
|
2023-01-27 22:28:31 +00:00
|
|
|
|
2022-10-14 20:48:41 +01:00
|
|
|
namespace Luau
|
|
|
|
{
|
|
|
|
namespace CodeGen
|
|
|
|
{
|
|
|
|
|
2023-05-12 18:50:47 +01:00
|
|
|
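// Perf event logging hook (and its context) registered via setPerfLog; used to report generated code ranges (e.g. to a profiler)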
static void* gPerfLogContext = nullptr;
static PerfLogFn gPerfLogFn = nullptr;

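// Allocates a NativeProto together with its instruction offset table; the uint32_t offsets are stored directly
// before the NativeProto object, so they are written (and later read) through non-positive indices off instOffsets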
static NativeProto* createNativeProto(Proto* proto, const IrBuilder& ir)
{
    int sizecode = proto->sizecode;
    int sizecodeAlloc = (sizecode + 1) & ~1; // align uint32_t array to 8 bytes so that NativeProto is aligned to 8 bytes

    void* memory = ::operator new(sizeof(NativeProto) + sizecodeAlloc * sizeof(uint32_t));
    NativeProto* result = new (static_cast<char*>(memory) + sizecodeAlloc * sizeof(uint32_t)) NativeProto;
    result->proto = proto;

    uint32_t* instOffsets = result->instOffsets;

    for (int i = 0; i < sizecode; i++)
    {
        // instOffsets uses negative indexing for optimal codegen for RETURN opcode
        instOffsets[-i] = ir.function.bcMapping[i].asmLocation;
    }

    return result;
}

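// Frees a NativeProto allocated by createNativeProto by recovering the start of the combined allocation
// (the instruction offset table precedes the object in memory)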
static void destroyNativeProto(NativeProto* nativeProto)
{
    int sizecode = nativeProto->proto->sizecode;
    int sizecodeAlloc = (sizecode + 1) & ~1; // align uint32_t array to 8 bytes so that NativeProto is aligned to 8 bytes
    void* memory = reinterpret_cast<char*>(nativeProto) - sizecodeAlloc * sizeof(uint32_t);

    ::operator delete(memory);
}

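// Reports one compiled function to the registered perf log callback, named "<luau> source:line functionname"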
static void logPerfFunction(Proto* p, uintptr_t addr, unsigned size)
{
    LUAU_ASSERT(p->source);

    const char* source = getstr(p->source);
    source = (source[0] == '=' || source[0] == '@') ? source + 1 : "[string]";

    char name[256];
    snprintf(name, sizeof(name), "<luau> %s:%d %s", source, p->linedefined, p->debugname ? getstr(p->debugname) : "");

    if (gPerfLogFn)
        gPerfLogFn(gPerfLogContext, addr, size, name);
}

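// Shared lowering driver for x64/arm64: orders IR blocks (outlined fallback blocks go last), lowers every
// instruction through the target-specific IrLowering, records bytecode-to-assembly locations for jumps, and
// optionally emits annotated IR/assembly text; returns false if the lowering backend reports an error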
template<typename AssemblyBuilder, typename IrLowering>
static bool lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction& function, int bytecodeid, AssemblyOptions options)
{
    // While we will need a better block ordering in the future, right now we want to mostly preserve build order with fallbacks outlined
    std::vector<uint32_t> sortedBlocks;
    sortedBlocks.reserve(function.blocks.size());
    for (uint32_t i = 0; i < function.blocks.size(); i++)
        sortedBlocks.push_back(i);

    std::sort(sortedBlocks.begin(), sortedBlocks.end(), [&](uint32_t idxA, uint32_t idxB) {
        const IrBlock& a = function.blocks[idxA];
        const IrBlock& b = function.blocks[idxB];

        // Place fallback blocks at the end
        if ((a.kind == IrBlockKind::Fallback) != (b.kind == IrBlockKind::Fallback))
            return (a.kind == IrBlockKind::Fallback) < (b.kind == IrBlockKind::Fallback);

        // Try to order by instruction order
        return a.start < b.start;
    });

    // For each IR instruction that begins a bytecode instruction, which bytecode instruction is it?
    std::vector<uint32_t> bcLocations(function.instructions.size() + 1, ~0u);

    for (size_t i = 0; i < function.bcMapping.size(); ++i)
    {
        uint32_t irLocation = function.bcMapping[i].irLocation;

        if (irLocation != ~0u)
            bcLocations[irLocation] = uint32_t(i);
    }

    bool outputEnabled = options.includeAssembly || options.includeIr;

    IrToStringContext ctx{build.text, function.blocks, function.constants, function.cfg};

    // We use this to skip outlined fallback blocks from IR/asm text output
    size_t textSize = build.text.length();
    uint32_t codeSize = build.getCodeSize();
    bool seenFallback = false;

    IrBlock dummy;
    dummy.start = ~0u;

    for (size_t i = 0; i < sortedBlocks.size(); ++i)
    {
        uint32_t blockIndex = sortedBlocks[i];
        IrBlock& block = function.blocks[blockIndex];

        if (block.kind == IrBlockKind::Dead)
            continue;

        LUAU_ASSERT(block.start != ~0u);
        LUAU_ASSERT(block.finish != ~0u);

        // If we want to skip fallback code IR/asm, we'll record when those blocks start once we see them
        if (block.kind == IrBlockKind::Fallback && !seenFallback)
        {
            textSize = build.text.length();
            codeSize = build.getCodeSize();
            seenFallback = true;
        }

        if (options.includeIr)
        {
            build.logAppend("# ");
            toStringDetailed(ctx, block, blockIndex, /* includeUseInfo */ true);
        }

        // Values can only reference restore operands in the current block
        function.validRestoreOpBlockIdx = blockIndex;

        build.setLabel(block.label);

        for (uint32_t index = block.start; index <= block.finish; index++)
        {
            LUAU_ASSERT(index < function.instructions.size());

            uint32_t bcLocation = bcLocations[index];

            // If IR instruction is the first one for the original bytecode, we can annotate it with source code text
            if (outputEnabled && options.annotator && bcLocation != ~0u)
            {
                options.annotator(options.annotatorContext, build.text, bytecodeid, bcLocation);
            }

            // If bytecode needs the location of this instruction for jumps, record it
            if (bcLocation != ~0u)
            {
                Label label = (index == block.start) ? block.label : build.setLabel();
                function.bcMapping[bcLocation].asmLocation = build.getLabelOffset(label);
            }

            IrInst& inst = function.instructions[index];

            // Skip pseudo instructions, but make sure they are not used at this stage
            // This also prevents them from getting into text output when that's enabled
            if (isPseudo(inst.cmd))
            {
                LUAU_ASSERT(inst.useCount == 0);
                continue;
            }

            // Either instruction result value is not referenced or the use count is not zero
            LUAU_ASSERT(inst.lastUse == 0 || inst.useCount != 0);

            if (options.includeIr)
            {
                build.logAppend("# ");
                toStringDetailed(ctx, block, blockIndex, inst, index, /* includeUseInfo */ true);
            }

            IrBlock& next = i + 1 < sortedBlocks.size() ? function.blocks[sortedBlocks[i + 1]] : dummy;

            lowering.lowerInst(inst, index, next);

            if (lowering.hasError())
            {
                // Place labels for all blocks that we're skipping
                // This is needed to avoid AssemblyBuilder assertions about jumps in earlier blocks with unplaced labels
                for (size_t j = i + 1; j < sortedBlocks.size(); ++j)
                {
                    IrBlock& abandoned = function.blocks[sortedBlocks[j]];

                    build.setLabel(abandoned.label);
                }

                return false;
            }
        }

        lowering.finishBlock();

        if (options.includeIr)
            build.logAppend("#\n");
    }

    if (outputEnabled && !options.includeOutlinedCode && seenFallback)
    {
        build.text.resize(textSize);

        if (options.includeAssembly)
            build.logAppend("; skipping %u bytes of outlined code\n", unsigned((build.getCodeSize() - codeSize) * sizeof(build.code[0])));
    }

    return true;
}

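// Target-specific wrappers that construct the x64/arm64 IrLowering and run the shared lowerImpl driver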
[[maybe_unused]] static bool lowerIr(
    X64::AssemblyBuilderX64& build, IrBuilder& ir, NativeState& data, ModuleHelpers& helpers, Proto* proto, AssemblyOptions options)
{
    optimizeMemoryOperandsX64(ir.function);

    X64::IrLoweringX64 lowering(build, helpers, data, ir.function);

    return lowerImpl(build, lowering, ir.function, proto->bytecodeid, options);
}

[[maybe_unused]] static bool lowerIr(
    A64::AssemblyBuilderA64& build, IrBuilder& ir, NativeState& data, ModuleHelpers& helpers, Proto* proto, AssemblyOptions options)
{
    A64::IrLoweringA64 lowering(build, helpers, data, proto, ir.function);

    return lowerImpl(build, lowering, ir.function, proto->bytecodeid, options);
}

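// Builds IR for a single Proto, runs the optimization passes controlled by the debug fast flags, lowers the result
// to machine code and returns the NativeProto bookkeeping data (or nullptr if lowering failed)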
template<typename AssemblyBuilder>
static NativeProto* assembleFunction(AssemblyBuilder& build, NativeState& data, ModuleHelpers& helpers, Proto* proto, AssemblyOptions options)
{
    if (options.includeAssembly || options.includeIr)
    {
        if (proto->debugname)
            build.logAppend("; function %s(", getstr(proto->debugname));
        else
            build.logAppend("; function(");

        for (int i = 0; i < proto->numparams; i++)
        {
            LocVar* var = proto->locvars ? &proto->locvars[proto->sizelocvars - proto->numparams + i] : nullptr;

            if (var && var->varname)
                build.logAppend("%s%s", i == 0 ? "" : ", ", getstr(var->varname));
            else
                build.logAppend("%s$arg%d", i == 0 ? "" : ", ", i);
        }

        if (proto->numparams != 0 && proto->is_vararg)
            build.logAppend(", ...)");
        else
            build.logAppend(")");

        if (proto->linedefined >= 0)
            build.logAppend(" line %d\n", proto->linedefined);
        else
            build.logAppend("\n");
    }

    IrBuilder ir;
    ir.buildFunctionIr(proto);

    computeCfgInfo(ir.function);

    if (!FFlag::DebugCodegenNoOpt)
    {
        bool useValueNumbering = !FFlag::DebugCodegenSkipNumbering;

        constPropInBlockChains(ir, useValueNumbering);

        if (!FFlag::DebugCodegenOptSize)
            createLinearBlocks(ir, useValueNumbering);
    }

    if (!lowerIr(build, ir, data, helpers, proto, options))
    {
        if (build.logText)
            build.logAppend("; skipping (can't lower)\n\n");

        return nullptr;
    }

    if (build.logText)
        build.logAppend("\n");

    return createNativeProto(proto, ir);
}

static void onCloseState(lua_State* L)
{
    destroyNativeState(L);
}

static void onDestroyFunction(lua_State* L, Proto* proto)
{
    NativeProto* nativeProto = getProtoExecData(proto);
    LUAU_ASSERT(nativeProto->proto == proto);

    setProtoExecData(proto, nullptr);
    destroyNativeProto(nativeProto);
}

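// VM callback used to enter native execution: translates the saved program counter into an address in the
// generated code and transfers control through the gate function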
static int onEnter(lua_State* L, Proto* proto)
{
    NativeState* data = getNativeState(L);
    NativeProto* nativeProto = getProtoExecData(proto);

    LUAU_ASSERT(nativeProto);
    LUAU_ASSERT(L->ci->savedpc);

    // instOffsets uses negative indexing for optimal codegen for RETURN opcode
    uintptr_t target = nativeProto->instBase + nativeProto->instOffsets[proto->code - L->ci->savedpc];

    // Returns 1 to finish the function in the VM
    return GateFn(data->context.gateEntry)(L, proto, target, &data->context);
}

static void onSetBreakpoint(lua_State* L, Proto* proto, int instruction)
{
    if (!getProtoExecData(proto))
        return;

    LUAU_ASSERT(!"native breakpoints are not implemented");
}

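// Queries optional AArch64 CPU features that codegen can take advantage of; currently only FEAT_JSCVT is detected,
// and only on Apple platforms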
#if defined(__aarch64__)
static unsigned int getCpuFeaturesA64()
{
    unsigned int result = 0;

#ifdef __APPLE__
    int jscvt = 0;
    size_t jscvtLen = sizeof(jscvt);
    if (sysctlbyname("hw.optional.arm.FEAT_JSCVT", &jscvt, &jscvtLen, nullptr, 0) == 0 && jscvt == 1)
        result |= A64::Feature_JSCVT;
#endif

    return result;
}
#endif

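// Reports whether the current build configuration and host CPU support native code generation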
bool isSupported()
{
    if (!LUA_CUSTOM_EXECUTION)
        return false;

    if (LUA_EXTRA_SIZE != 1)
        return false;

    if (sizeof(TValue) != 16)
        return false;

    if (sizeof(LuaNode) != 32)
        return false;

    // Windows CRT uses stack unwinding in longjmp so we have to use unwind data; on other platforms, it's only necessary for C++ EH.
#if defined(_WIN32)
    if (!isUnwindSupported())
        return false;
#else
    if (!LUA_USE_LONGJMP && !isUnwindSupported())
        return false;
#endif

#if defined(__x86_64__) || defined(_M_X64)
    int cpuinfo[4] = {};
#ifdef _MSC_VER
    __cpuid(cpuinfo, 1);
#else
    __cpuid(1, cpuinfo[0], cpuinfo[1], cpuinfo[2], cpuinfo[3]);
#endif

    // We require AVX1 support for VEX encoded XMM operations
    // We also require SSE4.1 support for ROUNDSD but the AVX check below covers it
    // https://en.wikipedia.org/wiki/CPUID#EAX=1:_Processor_Info_and_Feature_Bits
    if ((cpuinfo[2] & (1 << 28)) == 0)
        return false;

    return true;
#elif defined(__aarch64__)
    return true;
#else
    return false;
#endif
}

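// Creates the per-VM native execution state (unwind info, helper functions, gate entry) and installs the
// execution callbacks that route calls into generated code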
void create(lua_State* L)
{
    LUAU_ASSERT(isSupported());

    NativeState& data = *createNativeState(L);

#if defined(_WIN32)
    data.unwindBuilder = std::make_unique<UnwindBuilderWin>();
#else
    data.unwindBuilder = std::make_unique<UnwindBuilderDwarf2>();
#endif

    data.codeAllocator.context = data.unwindBuilder.get();
    data.codeAllocator.createBlockUnwindInfo = createBlockUnwindInfo;
    data.codeAllocator.destroyBlockUnwindInfo = destroyBlockUnwindInfo;

    initFallbackTable(data);
    initHelperFunctions(data);

#if defined(__x86_64__) || defined(_M_X64)
    if (!X64::initHeaderFunctions(data))
    {
        destroyNativeState(L);
        return;
    }
#elif defined(__aarch64__)
    if (!A64::initHeaderFunctions(data))
    {
        destroyNativeState(L);
        return;
    }
#endif

    if (gPerfLogFn)
        gPerfLogFn(gPerfLogContext, uintptr_t(data.context.gateEntry), 4096, "<luau gate>");

    lua_ExecutionCallbacks* ecb = getExecutionCallbacks(L);

    ecb->close = onCloseState;
    ecb->destroy = onDestroyFunction;
    ecb->enter = onEnter;
    ecb->setbreakpoint = onSetBreakpoint;
}

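// Collects a proto and all of its nested protos into 'results', indexed by bytecode id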
static void gatherFunctions(std::vector<Proto*>& results, Proto* proto)
{
    if (results.size() <= size_t(proto->bytecodeid))
        results.resize(proto->bytecodeid + 1);

    // Skip protos that we've already compiled in this run: this happens because at -O2, inlined functions get their protos reused
    if (results[proto->bytecodeid])
        return;

    results[proto->bytecodeid] = proto;

    for (int i = 0; i < proto->sizep; i++)
        gatherFunctions(results, proto->p[i]);
}

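// Compiles the Lua function at the given stack index (and every function nested inside it) to native code and
// attaches the resulting NativeProto to each Proto so the VM can enter it through onEnter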
void compile(lua_State* L, int idx)
{
    LUAU_ASSERT(lua_isLfunction(L, idx));
    const TValue* func = luaA_toobject(L, idx);

    // If initialization has failed, do not compile any functions
    if (!getNativeState(L))
        return;

#if defined(__aarch64__)
    A64::AssemblyBuilderA64 build(/* logText= */ false, getCpuFeaturesA64());
#else
    X64::AssemblyBuilderX64 build(/* logText= */ false);
#endif

    NativeState* data = getNativeState(L);

    std::vector<Proto*> protos;
    gatherFunctions(protos, clvalue(func)->l.p);

    ModuleHelpers helpers;
#if defined(__aarch64__)
    A64::assembleHelpers(build, helpers);
#else
    X64::assembleHelpers(build, helpers);
#endif

    std::vector<NativeProto*> results;
    results.reserve(protos.size());

    // Skip protos that have been compiled during previous invocations of CodeGen::compile
    for (Proto* p : protos)
        if (p && getProtoExecData(p) == nullptr)
            if (NativeProto* np = assembleFunction(build, *data, helpers, p, {}))
                results.push_back(np);

    // Very large modules might result in overflowing a jump offset; in this case we currently abandon the entire module
    if (!build.finalize())
    {
        for (NativeProto* result : results)
            destroyNativeProto(result);

        return;
    }

    // If no functions were assembled, we don't need to allocate/copy executable pages for helpers
    if (results.empty())
        return;

    uint8_t* nativeData = nullptr;
    size_t sizeNativeData = 0;
    uint8_t* codeStart = nullptr;
    if (!data->codeAllocator.allocate(build.data.data(), int(build.data.size()), reinterpret_cast<const uint8_t*>(build.code.data()),
            int(build.code.size() * sizeof(build.code[0])), nativeData, sizeNativeData, codeStart))
    {
        for (NativeProto* result : results)
            destroyNativeProto(result);

        return;
    }

    if (gPerfLogFn && results.size() > 0)
    {
        gPerfLogFn(gPerfLogContext, uintptr_t(codeStart), results[0]->instOffsets[0], "<luau helpers>");

        for (size_t i = 0; i < results.size(); ++i)
        {
            uint32_t begin = results[i]->instOffsets[0];
            uint32_t end = i + 1 < results.size() ? results[i + 1]->instOffsets[0] : uint32_t(build.code.size() * sizeof(build.code[0]));
            LUAU_ASSERT(begin < end);

            logPerfFunction(results[i]->proto, uintptr_t(codeStart) + begin, end - begin);
        }
    }

    // Record instruction base address; at runtime, instOffsets[] will be used as offsets from instBase
    for (NativeProto* result : results)
    {
        result->instBase = uintptr_t(codeStart);
        result->entryTarget = uintptr_t(codeStart) + result->instOffsets[0];
    }

    // Link native proto objects to Proto; the memory is now managed by VM and will be freed via onDestroyFunction
    for (NativeProto* result : results)
        setProtoExecData(result->proto, result);
}

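// Assembles the function at the given stack index (and nested functions) without installing the result into the
// VM; depending on options, returns annotated IR/assembly text or the raw code and data bytes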
std::string getAssembly(lua_State* L, int idx, AssemblyOptions options)
{
    LUAU_ASSERT(lua_isLfunction(L, idx));
    const TValue* func = luaA_toobject(L, idx);

#if defined(__aarch64__)
    A64::AssemblyBuilderA64 build(/* logText= */ options.includeAssembly, getCpuFeaturesA64());
#else
    X64::AssemblyBuilderX64 build(/* logText= */ options.includeAssembly);
#endif

    NativeState data;
    initFallbackTable(data);

    std::vector<Proto*> protos;
    gatherFunctions(protos, clvalue(func)->l.p);

    ModuleHelpers helpers;
#if defined(__aarch64__)
    A64::assembleHelpers(build, helpers);
#else
    X64::assembleHelpers(build, helpers);
#endif

    for (Proto* p : protos)
        if (p)
            if (NativeProto* np = assembleFunction(build, data, helpers, p, options))
                destroyNativeProto(np);

    if (!build.finalize())
        return std::string();

    if (options.outputBinary)
        return std::string(reinterpret_cast<const char*>(build.code.data()), reinterpret_cast<const char*>(build.code.data() + build.code.size())) +
               std::string(build.data.begin(), build.data.end());
    else
        return build.text;
}

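// Registers the callback that receives generated code regions (see logPerfFunction and the gate/helper reports above)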
void setPerfLog(void* context, PerfLogFn logFn)
{
    gPerfLogContext = context;
    gPerfLogFn = logFn;
}

} // namespace CodeGen
} // namespace Luau