// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "CodeGenA64.h"
#include "Luau/AssemblyBuilderA64.h"
#include "Luau/UnwindBuilder.h"
2023-05-19 11:59:59 -07:00
#include "BitUtils.h"
2024-03-30 15:49:03 -07:00
#include "CodeGenContext.h"
2023-10-06 10:31:16 -07:00
#include "CodeGenUtils.h"
2023-03-17 16:59:30 +02:00
#include "NativeState.h"
2023-03-24 10:34:14 -07:00
#include "EmitCommonA64.h"
2023-03-17 16:59:30 +02:00
#include "lstate.h"
namespace Luau
{
namespace CodeGen
{
namespace A64
{

struct EntryLocations
{
    Label start;
    Label prologueEnd;
    Label epilogueStart;
};
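
// All exits from native code funnel through the exit gate stored in NativeContext;
// x0 carries continueInVm, telling the gate whether to resume execution in the interpreter.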
static void emitExit(AssemblyBuilderA64& build, bool continueInVm)
{
    build.mov(x0, continueInVm);
    build.ldr(x1, mem(rNativeContext, offsetof(NativeContext, gateExit)));
    build.br(x1);
}
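
// Equivalent C for the helper below (a sketch; x0 arrives pre-scaled by sizeof(Instruction)):
//   L->ci->savedpc = proto->code + pcpos;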
static void emitUpdatePcForExit(AssemblyBuilderA64& build)
{
    // x0 = pcpos * sizeof(Instruction)
    build.add(x0, rCode, x0);

    build.ldr(x1, mem(rState, offsetof(lua_State, ci)));
    build.str(x0, mem(x1, offsetof(CallInfo, savedpc)));
}
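
// Equivalent C for the helper below (a sketch): L->ci->flags &= ~LUA_CALLINFO_NATIVE;
// i.e. the current frame is handed back to the interpreter.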
static void emitClearNativeFlag(AssemblyBuilderA64& build)
{
    build.ldr(x0, mem(rState, offsetof(lua_State, ci)));
    build.ldr(w1, mem(x0, offsetof(CallInfo, flags)));
    build.mov(w2, ~LUA_CALLINFO_NATIVE);
    build.and_(w1, w1, w2);
    build.str(w1, mem(x0, offsetof(CallInfo, flags)));
}
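
// Interrupt thunk, entered from generated code via a custom convention
// (x0 = pc offset in bytes, x1 = native return address). Roughly equivalent C (a sketch):
//
//   if (L->global->cb.interrupt)
//   {
//       L->ci->savedpc = proto->code + pcpos; // so an error in the handler has a location
//       L->global->cb.interrupt(L, -1);
//       if (L->status != 0) // the handler requested an exit
//       {
//           L->ci->savedpc--;
//           // exit to the host without continuing in the VM
//       }
//   }
//   // otherwise return to the native caller (after reloading rBase)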
static void emitInterrupt(AssemblyBuilderA64& build)
{
    // x0 = pc offset
    // x1 = return address in native code

    Label skip;

    // Stash return address in rBase; we need to reload rBase anyway
    build.mov(rBase, x1);

    // Load interrupt handler; it may be nullptr in case the update raced with the check before we got here
    build.ldr(x2, mem(rState, offsetof(lua_State, global)));
    build.ldr(x2, mem(x2, offsetof(global_State, cb.interrupt)));
    build.cbz(x2, skip);

    // Update savedpc; required in case the interrupt errors
    build.add(x0, rCode, x0);
    build.ldr(x1, mem(rState, offsetof(lua_State, ci)));
    build.str(x0, mem(x1, offsetof(CallInfo, savedpc)));

    // Call the interrupt handler
    build.mov(x0, rState);
    build.mov(w1, -1);
    build.blr(x2);

    // Check if we need to exit
    build.ldrb(w0, mem(rState, offsetof(lua_State, status)));
    build.cbz(w0, skip);

    // L->ci->savedpc--
    // note: recomputing this avoids having to stash x0
    build.ldr(x1, mem(rState, offsetof(lua_State, ci)));
    build.ldr(x0, mem(x1, offsetof(CallInfo, savedpc)));
    build.sub(x0, x0, sizeof(Instruction));
    build.str(x0, mem(x1, offsetof(CallInfo, savedpc)));

    emitExit(build, /* continueInVm */ false);

    build.setLabel(skip);

    // Return to caller; rBase has the stashed return address
    build.mov(x0, rBase);

    emitUpdateBase(build); // interrupt may have reallocated the stack
    build.br(x0);
}
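
// Continuation for calls that went through the C fallback; dispatches back into
// native code. Sketch of the flow below: if the fallback yielded (LSB of x0 set),
// exit to the host; if the callee has no compiled code (exectarget == 0), continue
// in the VM; otherwise set up rClosure/rConstants/rCode and jump to the callee.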
static void emitContinueCall(AssemblyBuilderA64& build, ModuleHelpers& helpers)
{
    // x0 = closure object to re-enter (equal to clvalue(L->ci->func))

    // If the fallback yielded, we need to do this right away
    // note: it's slightly cheaper to check the x0 LSB; a valid Closure pointer must be aligned to 8 bytes
    CODEGEN_ASSERT(CALL_FALLBACK_YIELD == 1);
    build.tbnz(x0, 0, helpers.exitNoContinueVm);

    // Need to update the state of the current function before we jump away
    build.ldr(x1, mem(x0, offsetof(Closure, l.p))); // cl->l.p aka proto

    build.ldr(x2, mem(x1, offsetof(Proto, exectarget)));
    build.cbz(x2, helpers.exitContinueVm);

    build.mov(rClosure, x0);

    CODEGEN_ASSERT(offsetof(Proto, code) == offsetof(Proto, k) + 8);
    build.ldp(rConstants, rCode, mem(x1, offsetof(Proto, k))); // proto->k, proto->code

    build.br(x2);
}
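
// Shared return path for native code: fills any missing results with nil, pops the
// current CallInfo, and either dispatches into the caller's native code (looked up
// through proto->execdata) or exits to the VM.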
void emitReturn(AssemblyBuilderA64& build, ModuleHelpers& helpers)
{
    // x1 = res
    // w2 = number of written values

    // x0 = ci
    build.ldr(x0, mem(rState, offsetof(lua_State, ci)));
    // w3 = ci->nresults
    build.ldr(w3, mem(x0, offsetof(CallInfo, nresults)));

    Label skipResultCopy;

    // Fill the rest of the expected results (nresults - written) with 'nil'
    build.cmp(w2, w3);
    build.b(ConditionA64::GreaterEqual, skipResultCopy);

    // TODO: the cmp above could compute this and the flags using subs
    build.sub(w2, w3, w2); // counter = nresults - written
    build.mov(w4, LUA_TNIL);

    Label repeatNilLoop = build.setLabel();
    build.str(w4, mem(x1, offsetof(TValue, tt)));
    build.add(x1, x1, sizeof(TValue));
    build.sub(w2, w2, 1);
    build.cbnz(w2, repeatNilLoop);

    build.setLabel(skipResultCopy);

    // x2 = cip = ci - 1
    build.sub(x2, x0, sizeof(CallInfo));

    // res = cip->top when nresults >= 0
    Label skipFixedRetTop;
    build.tbnz(w3, 31, skipFixedRetTop);
    build.ldr(x1, mem(x2, offsetof(CallInfo, top))); // res = cip->top
    build.setLabel(skipFixedRetTop);

    // Update VM state (ci, base, top)
    build.str(x2, mem(rState, offsetof(lua_State, ci)));      // L->ci = cip
    build.ldr(rBase, mem(x2, offsetof(CallInfo, base)));      // sync base = L->base while we have a chance
    build.str(rBase, mem(rState, offsetof(lua_State, base))); // L->base = cip->base
    build.str(x1, mem(rState, offsetof(lua_State, top)));     // L->top = res

    // Unlikely, but this might be the last return from the VM
    build.ldr(w4, mem(x0, offsetof(CallInfo, flags)));
    build.tbnz(w4, countrz(LUA_CALLINFO_RETURN), helpers.exitNoContinueVm);

    // Continue in the interpreter if the function has no native data
    build.ldr(w4, mem(x2, offsetof(CallInfo, flags)));
    build.tbz(w4, countrz(LUA_CALLINFO_NATIVE), helpers.exitContinueVm);

    // Need to update the state of the current function before we jump away
    build.ldr(rClosure, mem(x2, offsetof(CallInfo, func)));
    build.ldr(rClosure, mem(rClosure, offsetof(TValue, value.gc)));
    build.ldr(x1, mem(rClosure, offsetof(Closure, l.p))); // cl->l.p aka proto

    CODEGEN_ASSERT(offsetof(Proto, code) == offsetof(Proto, k) + 8);
    build.ldp(rConstants, rCode, mem(x1, offsetof(Proto, k))); // proto->k, proto->code

    // Get instruction index from the instruction pointer
    // To get the instruction index we need to divide the byte offset by 4, but since we
    // would have to scale it by 4 again to index execdata, the two operations cancel out
    build.ldr(x2, mem(x2, offsetof(CallInfo, savedpc))); // cip->savedpc
    build.sub(x2, x2, rCode);
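    // note: each execdata entry is 32 bits wide (hence the w2 load below) with one entry per
    // bytecode instruction, so the byte offset into proto->code doubles as a byte offset into execdata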

    // Get new instruction location and jump to it
    CODEGEN_ASSERT(offsetof(Proto, exectarget) == offsetof(Proto, execdata) + 8);
    build.ldp(x3, x4, mem(x1, offsetof(Proto, execdata)));
    build.ldr(w2, mem(x3, x2));
    build.add(x4, x4, x2);
    build.br(x4);
}
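
// Builds the entry gate: a C-callable function that saves callee-saved registers, sets up
// the native execution environment (rState, rNativeContext, rBase, rClosure, rConstants,
// rCode) and jumps into compiled code; the epilogue after the jump is where emitExit lands
// (via the gateExit pointer published below).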
static EntryLocations buildEntryFunction(AssemblyBuilderA64& build, UnwindBuilder& unwind)
{
    EntryLocations locations;

    // Arguments: x0 = lua_State*, x1 = Proto*, x2 = native code pointer to jump to, x3 = NativeContext*

    locations.start = build.setLabel();

    // prologue
    build.sub(sp, sp, kStackSize);
    build.stp(x29, x30, mem(sp)); // fp, lr

    // stash non-volatile registers used for the execution environment
    build.stp(x19, x20, mem(sp, 16));
    build.stp(x21, x22, mem(sp, 32));
    build.stp(x23, x24, mem(sp, 48));
    build.str(x25, mem(sp, 64));

    build.mov(x29, sp); // this is only necessary if we maintain frame pointers, which we do in the JIT for now

    locations.prologueEnd = build.setLabel();

    uint32_t prologueSize = build.getLabelOffset(locations.prologueEnd) - build.getLabelOffset(locations.start);

    // Set up the native execution environment
    build.mov(rState, x0);
    build.mov(rNativeContext, x3);
    build.ldr(rGlobalState, mem(x0, offsetof(lua_State, global)));

    build.ldr(rBase, mem(x0, offsetof(lua_State, base))); // L->base

    CODEGEN_ASSERT(offsetof(Proto, code) == offsetof(Proto, k) + 8);
    build.ldp(rConstants, rCode, mem(x1, offsetof(Proto, k))); // proto->k, proto->code

    build.ldr(x9, mem(x0, offsetof(lua_State, ci)));          // L->ci
    build.ldr(x9, mem(x9, offsetof(CallInfo, func)));         // L->ci->func
    build.ldr(rClosure, mem(x9, offsetof(TValue, value.gc))); // L->ci->func->value.gc aka cl

    // Jump to the specified instruction; further control flow will be handled with a custom ABI, with the register setup from EmitCommonA64.h
    build.br(x2);

    // Even though we jumped away, we will return here in the end
    locations.epilogueStart = build.setLabel();

    // Cleanup and exit
    build.ldr(x25, mem(sp, 64));
    build.ldp(x23, x24, mem(sp, 48));
    build.ldp(x21, x22, mem(sp, 32));
    build.ldp(x19, x20, mem(sp, 16));
    build.ldp(x29, x30, mem(sp)); // fp, lr
    build.add(sp, sp, kStackSize);

    build.ret();

    // Our entry function is special, it spans the whole remaining code area
    unwind.startFunction();
    unwind.prologueA64(prologueSize, kStackSize, {x29, x30, x19, x20, x21, x22, x23, x24, x25});
    unwind.finishFunction(build.getLabelOffset(locations.start), kFullBlockFunction);

    return locations;
}
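
// One-time setup for the entry/exit gates: assembles the entry function above, registers
// its unwind information, and publishes the resulting gateEntry/gateExit addresses in the
// native context.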
bool initHeaderFunctions(BaseCodeGenContext& codeGenContext)
{
    AssemblyBuilderA64 build(/* logText= */ false);
    UnwindBuilder& unwind = *codeGenContext.unwindBuilder.get();

    unwind.startInfo(UnwindBuilder::A64);

    EntryLocations entryLocations = buildEntryFunction(build, unwind);
    build.finalize();

    unwind.finishInfo();

    CODEGEN_ASSERT(build.data.empty());

    uint8_t* codeStart = nullptr;
    if (!codeGenContext.codeAllocator.allocate(
            build.data.data(),
            int(build.data.size()),
            reinterpret_cast<const uint8_t*>(build.code.data()),
            int(build.code.size() * sizeof(build.code[0])),
            codeGenContext.gateData,
            codeGenContext.gateDataSize,
            codeStart
        ))
    {
        CODEGEN_ASSERT(!"Failed to create entry function");
        return false;
    }

    // Set the offset at the beginning so that functions in new blocks will not overlay the locations
    // specified by the unwind information of the entry function
    unwind.setBeginOffset(build.getLabelOffset(entryLocations.prologueEnd));

    codeGenContext.context.gateEntry = codeStart + build.getLabelOffset(entryLocations.start);
    codeGenContext.context.gateExit = codeStart + build.getLabelOffset(entryLocations.epilogueStart);

    return true;
}
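
// Emits the shared helper thunks for a module; generated code branches to these labels
// instead of duplicating the exit/interrupt/return/call sequences inline.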
void assembleHelpers(AssemblyBuilderA64& build, ModuleHelpers& helpers)
{
    if (build.logText)
        build.logAppend("; updatePcAndContinueInVm\n");
    build.setLabel(helpers.updatePcAndContinueInVm);
    emitUpdatePcForExit(build);

    if (build.logText)
        build.logAppend("; exitContinueVmClearNativeFlag\n");
    build.setLabel(helpers.exitContinueVmClearNativeFlag);
    emitClearNativeFlag(build);

    if (build.logText)
        build.logAppend("; exitContinueVm\n");
    build.setLabel(helpers.exitContinueVm);
    emitExit(build, /* continueInVm */ true);

    if (build.logText)
        build.logAppend("; exitNoContinueVm\n");
    build.setLabel(helpers.exitNoContinueVm);
    emitExit(build, /* continueInVm */ false);

    if (build.logText)
        build.logAppend("; interrupt\n");
    build.setLabel(helpers.interrupt);
    emitInterrupt(build);

    if (build.logText)
        build.logAppend("; return\n");
    build.setLabel(helpers.return_);
    emitReturn(build, helpers);

    if (build.logText)
        build.logAppend("; continueCall\n");
    build.setLabel(helpers.continueCall);
    emitContinueCall(build, helpers);
}

} // namespace A64
} // namespace CodeGen
} // namespace Luau