// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "CodeGenContext.h"

#include "CodeGenA64.h"
#include "CodeGenLower.h"
#include "CodeGenX64.h"

#include "Luau/CodeBlockUnwind.h"
#include "Luau/UnwindBuilder.h"
#include "Luau/UnwindBuilderDwarf2.h"
#include "Luau/UnwindBuilderWin.h"

#include "lapi.h"

LUAU_FASTFLAGVARIABLE(LuauCodegenCheckNullContext, false)

LUAU_FASTINTVARIABLE(LuauCodeGenBlockSize, 4 * 1024 * 1024)
LUAU_FASTINTVARIABLE(LuauCodeGenMaxTotalSize, 256 * 1024 * 1024)

LUAU_FASTFLAG(LuauNativeAttribute)

namespace Luau
{
namespace CodeGen
{

static const Instruction kCodeEntryInsn = LOP_NATIVECALL;

// From CodeGen.cpp
static void* gPerfLogContext = nullptr;
static PerfLogFn gPerfLogFn = nullptr;

unsigned int getCpuFeaturesA64();

void setPerfLog(void* context, PerfLogFn logFn)
{
    gPerfLogContext = context;
    gPerfLogFn = logFn;
}
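
// Formats a "<luau> source:line name" label for one compiled function and
// forwards its address/size pair to the registered perf log callback.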
static void logPerfFunction(Proto* p, uintptr_t addr, unsigned size)
{
    CODEGEN_ASSERT(p->source);

    const char* source = getstr(p->source);
    source = (source[0] == '=' || source[0] == '@') ? source + 1 : "[string]";

    char name[256];
    snprintf(name, sizeof(name), "<luau> %s:%d %s", source, p->linedefined, p->debugname ? getstr(p->debugname) : "");

    if (gPerfLogFn)
        gPerfLogFn(gPerfLogContext, addr, size, name);
}
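
// Logs the shared helper block and then each compiled function in the module;
// moduleProtos and nativeProtos are assumed to be sorted in matching bytecode id order.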
static void logPerfFunctions(
    const std::vector<Proto*>& moduleProtos, const uint8_t* nativeModuleBaseAddress, const std::vector<NativeProtoExecDataPtr>& nativeProtos)
{
    if (gPerfLogFn == nullptr)
        return;

    if (nativeProtos.size() > 0)
        gPerfLogFn(gPerfLogContext, uintptr_t(nativeModuleBaseAddress),
            unsigned(getNativeProtoExecDataHeader(nativeProtos[0].get()).entryOffsetOrAddress - nativeModuleBaseAddress), "<luau helpers>");

    auto protoIt = moduleProtos.begin();

    for (const NativeProtoExecDataPtr& nativeProto : nativeProtos)
    {
        const NativeProtoExecDataHeader& header = getNativeProtoExecDataHeader(nativeProto.get());

        while (protoIt != moduleProtos.end() && uint32_t((**protoIt).bytecodeid) != header.bytecodeId)
        {
            ++protoIt;
        }

        CODEGEN_ASSERT(protoIt != moduleProtos.end());

        logPerfFunction(*protoIt, uintptr_t(header.entryOffsetOrAddress), uint32_t(header.nativeCodeSize));
    }
}

// If Release is true, the native proto will be removed from the vector and
// ownership will be assigned to the Proto object (for use with the
// StandaloneCodeGenContext). If Release is false, the native proto will not be
// removed from the vector (for use with the SharedCodeGenContext).
template<bool Release, typename NativeProtosVector>
[[nodiscard]] static uint32_t bindNativeProtos(const std::vector<Proto*>& moduleProtos, NativeProtosVector& nativeProtos)
{
    uint32_t protosBound = 0;

    auto protoIt = moduleProtos.begin();

    for (auto& nativeProto : nativeProtos)
    {
        const NativeProtoExecDataHeader& header = getNativeProtoExecDataHeader(nativeProto.get());

        while (protoIt != moduleProtos.end() && uint32_t((**protoIt).bytecodeid) != header.bytecodeId)
        {
            ++protoIt;
        }

        CODEGEN_ASSERT(protoIt != moduleProtos.end());

        // The NativeProtoExecData is now owned by the VM and will be destroyed
        // via onDestroyFunction.
        Proto* proto = *protoIt;

        if constexpr (Release)
        {
            proto->execdata = nativeProto.release();
        }
        else
        {
            proto->execdata = nativeProto.get();
        }

        proto->exectarget = reinterpret_cast<uintptr_t>(header.entryOffsetOrAddress);
        proto->codeentry = &kCodeEntryInsn;

        ++protosBound;
    }

    return protosBound;
}

BaseCodeGenContext::BaseCodeGenContext(size_t blockSize, size_t maxTotalSize, AllocationCallback* allocationCallback, void* allocationCallbackContext)
    : codeAllocator{blockSize, maxTotalSize, allocationCallback, allocationCallbackContext}
{
    CODEGEN_ASSERT(isSupported());

#if defined(_WIN32)
    unwindBuilder = std::make_unique<UnwindBuilderWin>();
#else
    unwindBuilder = std::make_unique<UnwindBuilderDwarf2>();
#endif

    codeAllocator.context = unwindBuilder.get();
    codeAllocator.createBlockUnwindInfo = createBlockUnwindInfo;
    codeAllocator.destroyBlockUnwindInfo = destroyBlockUnwindInfo;

    initFunctions(context);
}

[[nodiscard]] bool BaseCodeGenContext::initHeaderFunctions()
{
#if defined(CODEGEN_TARGET_X64)
    if (!X64::initHeaderFunctions(*this))
        return false;
#elif defined(CODEGEN_TARGET_A64)
    if (!A64::initHeaderFunctions(*this))
        return false;
#endif

    if (gPerfLogFn)
        gPerfLogFn(gPerfLogContext, uintptr_t(context.gateEntry), 4096, "<luau gate>");

    return true;
}

StandaloneCodeGenContext::StandaloneCodeGenContext(
    size_t blockSize, size_t maxTotalSize, AllocationCallback* allocationCallback, void* allocationCallbackContext)
    : BaseCodeGenContext{blockSize, maxTotalSize, allocationCallback, allocationCallbackContext}
{
}

[[nodiscard]] std::optional<ModuleBindResult> StandaloneCodeGenContext::tryBindExistingModule(const ModuleId&, const std::vector<Proto*>&)
{
    // The StandaloneCodeGenContext does not support sharing of native code
    return {};
}

[[nodiscard]] ModuleBindResult StandaloneCodeGenContext::bindModule(const std::optional<ModuleId>&, const std::vector<Proto*>& moduleProtos,
    std::vector<NativeProtoExecDataPtr> nativeProtos, const uint8_t* data, size_t dataSize, const uint8_t* code, size_t codeSize)
{
    uint8_t* nativeData = nullptr;
    size_t sizeNativeData = 0;
    uint8_t* codeStart = nullptr;
    if (!codeAllocator.allocate(data, int(dataSize), code, int(codeSize), nativeData, sizeNativeData, codeStart))
    {
        return {CodeGenCompilationResult::AllocationFailed};
    }

    // Relocate the entry offsets to their final executable addresses:
    for (const NativeProtoExecDataPtr& nativeProto : nativeProtos)
    {
        NativeProtoExecDataHeader& header = getNativeProtoExecDataHeader(nativeProto.get());

        header.entryOffsetOrAddress = codeStart + reinterpret_cast<uintptr_t>(header.entryOffsetOrAddress);
    }

    logPerfFunctions(moduleProtos, codeStart, nativeProtos);

    const uint32_t protosBound = bindNativeProtos<true>(moduleProtos, nativeProtos);

    return {CodeGenCompilationResult::Success, protosBound};
}

void StandaloneCodeGenContext::onCloseState() noexcept
{
    // A StandaloneCodeGenContext is owned by exactly one VM, so when that VM
    // is destroyed, we destroy this context along with it:
    delete this;
}

void StandaloneCodeGenContext::onDestroyFunction(void* execdata) noexcept
{
    destroyNativeProtoExecData(static_cast<uint32_t*>(execdata));
}

SharedCodeGenContext::SharedCodeGenContext(
    size_t blockSize, size_t maxTotalSize, AllocationCallback* allocationCallback, void* allocationCallbackContext)
    : BaseCodeGenContext{blockSize, maxTotalSize, allocationCallback, allocationCallbackContext}
    , sharedAllocator{&codeAllocator}
{
}

[[nodiscard]] std::optional<ModuleBindResult> SharedCodeGenContext::tryBindExistingModule(
    const ModuleId& moduleId, const std::vector<Proto*>& moduleProtos)
{
    NativeModuleRef nativeModule = sharedAllocator.tryGetNativeModule(moduleId);
    if (nativeModule.empty())
    {
        return {};
    }

    // Bind the native protos and acquire an owning reference for each:
    const uint32_t protosBound = bindNativeProtos<false>(moduleProtos, nativeModule->getNativeProtos());
    nativeModule->addRefs(protosBound);

    return {{CodeGenCompilationResult::Success, protosBound}};
}

[[nodiscard]] ModuleBindResult SharedCodeGenContext::bindModule(const std::optional<ModuleId>& moduleId, const std::vector<Proto*>& moduleProtos,
    std::vector<NativeProtoExecDataPtr> nativeProtos, const uint8_t* data, size_t dataSize, const uint8_t* code, size_t codeSize)
{
    const std::pair<NativeModuleRef, bool> insertionResult = [&]() -> std::pair<NativeModuleRef, bool> {
        if (moduleId.has_value())
        {
            return sharedAllocator.getOrInsertNativeModule(*moduleId, std::move(nativeProtos), data, dataSize, code, codeSize);
        }
        else
        {
            return {sharedAllocator.insertAnonymousNativeModule(std::move(nativeProtos), data, dataSize, code, codeSize), true};
        }
    }();

    // If we did not get a NativeModule back, allocation failed:
    if (insertionResult.first.empty())
        return {CodeGenCompilationResult::AllocationFailed};

    // If we allocated a new module, log the function code ranges for perf:
    if (insertionResult.second)
        logPerfFunctions(moduleProtos, insertionResult.first->getModuleBaseAddress(), insertionResult.first->getNativeProtos());

    // Bind the native protos and acquire an owning reference for each:
    const uint32_t protosBound = bindNativeProtos<false>(moduleProtos, insertionResult.first->getNativeProtos());
    insertionResult.first->addRefs(protosBound);

    return {CodeGenCompilationResult::Success, protosBound};
}

void SharedCodeGenContext::onCloseState() noexcept
{
    // The lifetime of the SharedCodeGenContext is managed separately from the
    // VMs that use it. When a VM is destroyed, we don't need to do anything
    // here.
}

void SharedCodeGenContext::onDestroyFunction(void* execdata) noexcept
{
    getNativeProtoExecDataHeader(static_cast<const uint32_t*>(execdata)).nativeModule->release();
}

[[nodiscard]] UniqueSharedCodeGenContext createSharedCodeGenContext()
{
    return createSharedCodeGenContext(size_t(FInt::LuauCodeGenBlockSize), size_t(FInt::LuauCodeGenMaxTotalSize), nullptr, nullptr);
}

[[nodiscard]] UniqueSharedCodeGenContext createSharedCodeGenContext(AllocationCallback* allocationCallback, void* allocationCallbackContext)
{
    return createSharedCodeGenContext(
        size_t(FInt::LuauCodeGenBlockSize), size_t(FInt::LuauCodeGenMaxTotalSize), allocationCallback, allocationCallbackContext);
}

[[nodiscard]] UniqueSharedCodeGenContext createSharedCodeGenContext(
    size_t blockSize, size_t maxTotalSize, AllocationCallback* allocationCallback, void* allocationCallbackContext)
{
    UniqueSharedCodeGenContext codeGenContext{new SharedCodeGenContext{blockSize, maxTotalSize, allocationCallback, allocationCallbackContext}};

    if (!codeGenContext->initHeaderFunctions())
        return {};

    return codeGenContext;
}

void destroySharedCodeGenContext(const SharedCodeGenContext* codeGenContext) noexcept
{
    delete codeGenContext;
}

void SharedCodeGenContextDeleter::operator()(const SharedCodeGenContext* codeGenContext) const noexcept
{
    destroySharedCodeGenContext(codeGenContext);
}
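
// Illustrative sketch (not part of the library): a host that wants several VMs
// to share generated code can create one shared context and attach it to each
// state; luaL_newstate comes from the Luau C API, everything else is defined in
// this file. The shared context must outlive every VM attached to it.
//
//     UniqueSharedCodeGenContext sharedCodeGen = createSharedCodeGenContext();
//     lua_State* L1 = luaL_newstate();
//     lua_State* L2 = luaL_newstate();
//     create(L1, sharedCodeGen.get());
//     create(L2, sharedCodeGen.get());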

[[nodiscard]] static BaseCodeGenContext* getCodeGenContext(lua_State* L) noexcept
{
    return static_cast<BaseCodeGenContext*>(L->global->ecb.context);
}
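
// Callbacks installed into lua_ExecutionCallbacks; they dispatch to the
// BaseCodeGenContext stored on the VM.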
static void onCloseState(lua_State* L) noexcept
{
    getCodeGenContext(L)->onCloseState();
    L->global->ecb = lua_ExecutionCallbacks{};
}

static void onDestroyFunction(lua_State* L, Proto* proto) noexcept
{
    getCodeGenContext(L)->onDestroyFunction(proto->execdata);
    proto->execdata = nullptr;
    proto->exectarget = 0;
    proto->codeentry = proto->code;
}

static int onEnter(lua_State* L, Proto* proto)
{
    BaseCodeGenContext* codeGenContext = getCodeGenContext(L);

    CODEGEN_ASSERT(proto->execdata);
    CODEGEN_ASSERT(L->ci->savedpc >= proto->code && L->ci->savedpc < proto->code + proto->sizecode);

    uintptr_t target = proto->exectarget + static_cast<uint32_t*>(proto->execdata)[L->ci->savedpc - proto->code];

    // Returns 1 to finish the function in the VM
    return GateFn(codeGenContext->context.gateEntry)(L, proto, target, &codeGenContext->context);
}

static int onEnterDisabled(lua_State* L, Proto* proto)
{
    return 1;
}

// Defined in CodeGen.cpp
void onDisable(lua_State* L, Proto* proto);

static size_t getMemorySize(lua_State* L, Proto* proto)
{
    const NativeProtoExecDataHeader& execDataHeader = getNativeProtoExecDataHeader(static_cast<const uint32_t*>(proto->execdata));

    const size_t execDataSize = sizeof(NativeProtoExecDataHeader) + execDataHeader.bytecodeInstructionCount * sizeof(Instruction);

    // While execDataSize is exactly the size of the allocation we hold for the 'execdata' field, the code size is approximate:
    // the code+data pages are shared and owned by all Protos from a single module, and any one of them can keep the whole region alive.
    // As a result, an individual Proto being freed by the GC will not reflect memory use by native code correctly.
    return execDataSize + execDataHeader.nativeCodeSize;
}

static void initializeExecutionCallbacks(lua_State* L, BaseCodeGenContext* codeGenContext) noexcept
{
    CODEGEN_ASSERT(!FFlag::LuauCodegenCheckNullContext || codeGenContext != nullptr);

    lua_ExecutionCallbacks* ecb = &L->global->ecb;

    ecb->context = codeGenContext;
    ecb->close = onCloseState;
    ecb->destroy = onDestroyFunction;
    ecb->enter = onEnter;
    ecb->disable = onDisable;
    ecb->getmemorysize = getMemorySize;
}

void create(lua_State* L)
{
    return create(L, size_t(FInt::LuauCodeGenBlockSize), size_t(FInt::LuauCodeGenMaxTotalSize), nullptr, nullptr);
}

void create(lua_State* L, AllocationCallback* allocationCallback, void* allocationCallbackContext)
{
    return create(L, size_t(FInt::LuauCodeGenBlockSize), size_t(FInt::LuauCodeGenMaxTotalSize), allocationCallback, allocationCallbackContext);
}

void create(lua_State* L, size_t blockSize, size_t maxTotalSize, AllocationCallback* allocationCallback, void* allocationCallbackContext)
{
    std::unique_ptr<StandaloneCodeGenContext> codeGenContext =
        std::make_unique<StandaloneCodeGenContext>(blockSize, maxTotalSize, allocationCallback, allocationCallbackContext);

    if (!codeGenContext->initHeaderFunctions())
        return;

    initializeExecutionCallbacks(L, codeGenContext.release());
}

void create(lua_State* L, SharedCodeGenContext* codeGenContext)
{
    initializeExecutionCallbacks(L, codeGenContext);
}
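
// Builds the instruction offset map for one function: entry i holds the offset
// of bytecode instruction i's native code relative to the function entry, and
// the header records the entry offset, bytecode id, and instruction count.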
[[nodiscard]] static NativeProtoExecDataPtr createNativeProtoExecData(Proto* proto, const IrBuilder& ir)
{
    NativeProtoExecDataPtr nativeExecData = createNativeProtoExecData(proto->sizecode);

    uint32_t instTarget = ir.function.entryLocation;

    for (int i = 0; i < proto->sizecode; ++i)
    {
        CODEGEN_ASSERT(ir.function.bcMapping[i].asmLocation >= instTarget);

        nativeExecData[i] = ir.function.bcMapping[i].asmLocation - instTarget;
    }

    // Set first instruction offset to 0 so that entering this function still
    // executes any generated entry code.
    nativeExecData[0] = 0;

    NativeProtoExecDataHeader& header = getNativeProtoExecDataHeader(nativeExecData.get());
    header.entryOffsetOrAddress = reinterpret_cast<const uint8_t*>(static_cast<uintptr_t>(instTarget));
    header.bytecodeId = uint32_t(proto->bytecodeid);
    header.bytecodeInstructionCount = proto->sizecode;

    return nativeExecData;
}

template<typename AssemblyBuilder>
[[nodiscard]] static NativeProtoExecDataPtr createNativeFunction(AssemblyBuilder& build, ModuleHelpers& helpers, Proto* proto,
    uint32_t& totalIrInstCount, const HostIrHooks& hooks, CodeGenCompilationResult& result)
{
    IrBuilder ir(hooks);
    ir.buildFunctionIr(proto);

    unsigned instCount = unsigned(ir.function.instructions.size());

    if (totalIrInstCount + instCount >= unsigned(FInt::CodegenHeuristicsInstructionLimit.value))
    {
        result = CodeGenCompilationResult::CodeGenOverflowInstructionLimit;
        return {};
    }

    totalIrInstCount += instCount;

    if (!lowerFunction(ir, build, helpers, proto, {}, /* stats */ nullptr, result))
    {
        return {};
    }

    return createNativeProtoExecData(proto, ir);
}
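
// Shared implementation behind the public compile() overloads: gathers the
// protos reachable from the function at 'idx', compiles each one into the
// assembly builder, and binds the resulting module into the code gen context.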
[[nodiscard]] static CompilationResult compileInternal(
    const std::optional<ModuleId>& moduleId, lua_State* L, int idx, const CompilationOptions& options, CompilationStats* stats)
{
    CODEGEN_ASSERT(lua_isLfunction(L, idx));
    const TValue* func = luaA_toobject(L, idx);

    Proto* root = clvalue(func)->l.p;

    if ((options.flags & CodeGen_OnlyNativeModules) != 0 && (root->flags & LPF_NATIVE_MODULE) == 0 && (root->flags & LPF_NATIVE_FUNCTION) == 0)
        return CompilationResult{CodeGenCompilationResult::NotNativeModule};

    BaseCodeGenContext* codeGenContext = getCodeGenContext(L);
    if (codeGenContext == nullptr)
        return CompilationResult{CodeGenCompilationResult::CodeGenNotInitialized};

    std::vector<Proto*> protos;
    if (FFlag::LuauNativeAttribute)
        gatherFunctions(protos, root, options.flags, root->flags & LPF_NATIVE_FUNCTION);
    else
        gatherFunctions_DEPRECATED(protos, root, options.flags);

    // Skip protos that have been compiled during previous invocations of CodeGen::compile
    protos.erase(std::remove_if(protos.begin(), protos.end(),
                     [](Proto* p) {
                         return p == nullptr || p->execdata != nullptr;
                     }),
        protos.end());

    if (protos.empty())
        return CompilationResult{CodeGenCompilationResult::NothingToCompile};

    if (stats != nullptr)
        stats->functionsTotal = uint32_t(protos.size());

    if (moduleId.has_value())
    {
        if (std::optional<ModuleBindResult> existingModuleBindResult = codeGenContext->tryBindExistingModule(*moduleId, protos))
        {
            if (stats != nullptr)
                stats->functionsBound = existingModuleBindResult->functionsBound;

            return CompilationResult{existingModuleBindResult->compilationResult};
        }
    }

#if defined(CODEGEN_TARGET_A64)
    static unsigned int cpuFeatures = getCpuFeaturesA64();
    A64::AssemblyBuilderA64 build(/* logText= */ false, cpuFeatures);
#else
    X64::AssemblyBuilderX64 build(/* logText= */ false);
#endif

    ModuleHelpers helpers;
#if defined(CODEGEN_TARGET_A64)
    A64::assembleHelpers(build, helpers);
#else
    X64::assembleHelpers(build, helpers);
#endif

    CompilationResult compilationResult;

    std::vector<NativeProtoExecDataPtr> nativeProtos;
    nativeProtos.reserve(protos.size());

    uint32_t totalIrInstCount = 0;

    for (size_t i = 0; i != protos.size(); ++i)
    {
        CodeGenCompilationResult protoResult = CodeGenCompilationResult::Success;

        NativeProtoExecDataPtr nativeExecData = createNativeFunction(build, helpers, protos[i], totalIrInstCount, options.hooks, protoResult);
        if (nativeExecData != nullptr)
        {
            nativeProtos.push_back(std::move(nativeExecData));
        }
        else
        {
            compilationResult.protoFailures.push_back(
                {protoResult, protos[i]->debugname ? getstr(protos[i]->debugname) : "", protos[i]->linedefined});
        }
    }

    // Very large modules might result in overflowing a jump offset; in this
    // case we currently abandon the entire module
    if (!build.finalize())
    {
        compilationResult.result = CodeGenCompilationResult::CodeGenAssemblerFinalizationFailure;
        return compilationResult;
    }

    // If no functions were assembled, we don't need to allocate/copy executable pages for helpers
    if (nativeProtos.empty())
        return compilationResult;

    if (stats != nullptr)
    {
        for (const NativeProtoExecDataPtr& nativeExecData : nativeProtos)
        {
            NativeProtoExecDataHeader& header = getNativeProtoExecDataHeader(nativeExecData.get());

            stats->bytecodeSizeBytes += header.bytecodeInstructionCount * sizeof(Instruction);

            // Account for the native -> bytecode instruction offsets mapping:
            stats->nativeMetadataSizeBytes += header.bytecodeInstructionCount * sizeof(uint32_t);
        }

        stats->functionsCompiled += uint32_t(nativeProtos.size());
        stats->nativeCodeSizeBytes += build.code.size() * sizeof(build.code[0]);
        stats->nativeDataSizeBytes += build.data.size();
    }
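
    // Compute each function's native code size: a function's code is assumed to
    // run from its entry offset to the next function's entry offset, or to the
    // end of the module code for the last one.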
    for (size_t i = 0; i < nativeProtos.size(); ++i)
    {
        NativeProtoExecDataHeader& header = getNativeProtoExecDataHeader(nativeProtos[i].get());

        uint32_t begin = uint32_t(reinterpret_cast<uintptr_t>(header.entryOffsetOrAddress));
        uint32_t end = i + 1 < nativeProtos.size() ? uint32_t(uintptr_t(getNativeProtoExecDataHeader(nativeProtos[i + 1].get()).entryOffsetOrAddress))
                                                   : uint32_t(build.code.size() * sizeof(build.code[0]));

        CODEGEN_ASSERT(begin < end);

        header.nativeCodeSize = end - begin;
    }

    const ModuleBindResult bindResult =
        codeGenContext->bindModule(moduleId, protos, std::move(nativeProtos), reinterpret_cast<const uint8_t*>(build.data.data()), build.data.size(),
            reinterpret_cast<const uint8_t*>(build.code.data()), build.code.size() * sizeof(build.code[0]));

    if (stats != nullptr)
        stats->functionsBound = bindResult.functionsBound;

    if (bindResult.compilationResult != CodeGenCompilationResult::Success)
        compilationResult.result = bindResult.compilationResult;

    return compilationResult;
}

CompilationResult compile(const ModuleId& moduleId, lua_State* L, int idx, const CompilationOptions& options, CompilationStats* stats)
{
    return compileInternal(moduleId, L, idx, options, stats);
}

CompilationResult compile(lua_State* L, int idx, const CompilationOptions& options, CompilationStats* stats)
{
    return compileInternal({}, L, idx, options, stats);
}

CompilationResult compile(lua_State* L, int idx, unsigned int flags, CompilationStats* stats)
{
    return compileInternal({}, L, idx, CompilationOptions{flags}, stats);
}

CompilationResult compile(const ModuleId& moduleId, lua_State* L, int idx, unsigned int flags, CompilationStats* stats)
{
    return compileInternal(moduleId, L, idx, CompilationOptions{flags}, stats);
}

[[nodiscard]] bool isNativeExecutionEnabled(lua_State* L)
{
    return getCodeGenContext(L) != nullptr && L->global->ecb.enter == onEnter;
}

void setNativeExecutionEnabled(lua_State* L, bool enabled)
{
    if (getCodeGenContext(L) != nullptr)
        L->global->ecb.enter = enabled ? onEnter : onEnterDisabled;
}
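
// Maps a userdata type name to a tagged bytecode type through the host-provided
// remapping callback; anything outside the tagged range falls back to the
// generic userdata type.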
static uint8_t userdataRemapperWrap(lua_State* L, const char* str, size_t len)
{
    if (BaseCodeGenContext* codegenCtx = getCodeGenContext(L))
    {
        uint8_t index = codegenCtx->userdataRemapper(codegenCtx->userdataRemappingContext, str, len);

        if (index < (LBC_TYPE_TAGGED_USERDATA_END - LBC_TYPE_TAGGED_USERDATA_BASE))
            return LBC_TYPE_TAGGED_USERDATA_BASE + index;
    }

    return LBC_TYPE_USERDATA;
}

void setUserdataRemapper(lua_State* L, void* context, UserdataRemapperCallback cb)
{
    if (BaseCodeGenContext* codegenCtx = getCodeGenContext(L))
    {
        codegenCtx->userdataRemappingContext = context;
        codegenCtx->userdataRemapper = cb;

        L->global->ecb.gettypemapping = cb ? userdataRemapperWrap : nullptr;
    }
}

} // namespace CodeGen
} // namespace Luau