
Sync to upstream/release/569

All of our changes this week have been focused on the new type solver
and the JIT.

As we march toward feature parity with the old solver, we've tightened
up a number of lingering issues with overload resolution, unsealed
tables, and type normalization. We've also fixed several crashes and
assertion failures in the new solver.

On the JIT front, we've started work on an A64 backend, improved IR
analysis in a number of cases, and implemented assembly generation for
the builtin functions `type()` and `typeof()`.

---------

Co-authored-by: Arseny Kapoulkine <arseny.kapoulkine@gmail.com>
Co-authored-by: Vyacheslav Egorov <vegorov@roblox.com>
Andy Friesen committed 2023-03-24 11:03:04 -07:00 (committed by GitHub)
parent 0d6aacf407
commit b4ebad4862
67 changed files with 1958 additions and 939 deletions


@@ -26,7 +26,4 @@ TypePackId clone(TypePackId tp, TypeArena& dest, CloneState& cloneState);
 TypeId clone(TypeId tp, TypeArena& dest, CloneState& cloneState);
 TypeFun clone(const TypeFun& typeFun, TypeArena& dest, CloneState& cloneState);
-TypeId shallowClone(TypeId ty, TypeArena& dest, const TxnLog* log, bool alwaysClone = false);
-TypeId shallowClone(TypeId ty, NotNull<TypeArena> dest);
 } // namespace Luau


@@ -7,7 +7,7 @@
 #include "Luau/Unifiable.h"
 LUAU_FASTFLAG(DebugLuauCopyBeforeNormalizing)
-LUAU_FASTFLAG(LuauClonePublicInterfaceLess)
+LUAU_FASTFLAG(LuauClonePublicInterfaceLess2)
 LUAU_FASTINTVARIABLE(LuauTypeCloneRecursionLimit, 300)
@@ -422,86 +422,4 @@ TypeFun clone(const TypeFun& typeFun, TypeArena& dest, CloneState& cloneState)
     return result;
 }
-TypeId shallowClone(TypeId ty, TypeArena& dest, const TxnLog* log, bool alwaysClone)
-{
-    ty = log->follow(ty);
-    TypeId result = ty;
-    if (auto pty = log->pending(ty))
-        ty = &pty->pending;
-    if (const FunctionType* ftv = get<FunctionType>(ty))
-    {
-        FunctionType clone = FunctionType{ftv->level, ftv->scope, ftv->argTypes, ftv->retTypes, ftv->definition, ftv->hasSelf};
-        clone.generics = ftv->generics;
-        clone.genericPacks = ftv->genericPacks;
-        clone.magicFunction = ftv->magicFunction;
-        clone.dcrMagicFunction = ftv->dcrMagicFunction;
-        clone.dcrMagicRefinement = ftv->dcrMagicRefinement;
-        clone.tags = ftv->tags;
-        clone.argNames = ftv->argNames;
-        result = dest.addType(std::move(clone));
-    }
-    else if (const TableType* ttv = get<TableType>(ty))
-    {
-        LUAU_ASSERT(!ttv->boundTo);
-        TableType clone = TableType{ttv->props, ttv->indexer, ttv->level, ttv->scope, ttv->state};
-        clone.definitionModuleName = ttv->definitionModuleName;
-        clone.definitionLocation = ttv->definitionLocation;
-        clone.name = ttv->name;
-        clone.syntheticName = ttv->syntheticName;
-        clone.instantiatedTypeParams = ttv->instantiatedTypeParams;
-        clone.instantiatedTypePackParams = ttv->instantiatedTypePackParams;
-        clone.tags = ttv->tags;
-        result = dest.addType(std::move(clone));
-    }
-    else if (const MetatableType* mtv = get<MetatableType>(ty))
-    {
-        MetatableType clone = MetatableType{mtv->table, mtv->metatable};
-        clone.syntheticName = mtv->syntheticName;
-        result = dest.addType(std::move(clone));
-    }
-    else if (const UnionType* utv = get<UnionType>(ty))
-    {
-        UnionType clone;
-        clone.options = utv->options;
-        result = dest.addType(std::move(clone));
-    }
-    else if (const IntersectionType* itv = get<IntersectionType>(ty))
-    {
-        IntersectionType clone;
-        clone.parts = itv->parts;
-        result = dest.addType(std::move(clone));
-    }
-    else if (const PendingExpansionType* petv = get<PendingExpansionType>(ty))
-    {
-        PendingExpansionType clone{petv->prefix, petv->name, petv->typeArguments, petv->packArguments};
-        result = dest.addType(std::move(clone));
-    }
-    else if (const ClassType* ctv = get<ClassType>(ty); FFlag::LuauClonePublicInterfaceLess && ctv && alwaysClone)
-    {
-        ClassType clone{ctv->name, ctv->props, ctv->parent, ctv->metatable, ctv->tags, ctv->userData, ctv->definitionModuleName};
-        result = dest.addType(std::move(clone));
-    }
-    else if (FFlag::LuauClonePublicInterfaceLess && alwaysClone)
-    {
-        result = dest.addType(*ty);
-    }
-    else if (const NegationType* ntv = get<NegationType>(ty))
-    {
-        result = dest.addType(NegationType{ntv->ty});
-    }
-    else
-        return result;
-    asMutable(result)->documentationSymbol = ty->documentationSymbol;
-    return result;
-}
-TypeId shallowClone(TypeId ty, NotNull<TypeArena> dest)
-{
-    return shallowClone(ty, *dest, TxnLog::empty());
-}
 } // namespace Luau
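
The shallowClone implementation removed here is not deleted outright: it reappears below in Substitution.cpp as a file-local helper, gated behind the renamed flag. Renaming a fast flag when its behavior changes (LuauClonePublicInterfaceLess to LuauClonePublicInterfaceLess2) presumably restages the rollout: prior enablements of the old name no longer apply, and the new behavior ships default-off again. A minimal sketch of the gating idiom, with hypothetical oldClone/newClone standing in for the two paths:

LUAU_FASTFLAG(LuauClonePublicInterfaceLess2) // declared with LUAU_FASTFLAGVARIABLE in Module.cpp below

TypeId clonePublicType(TypeId ty) // hypothetical caller
{
    if (FFlag::LuauClonePublicInterfaceLess2)
        return newClone(ty); // hypothetical: the new shallow-clone path
    return oldClone(ty);     // hypothetical: the old deep-clone path
}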


@@ -1359,10 +1359,34 @@ InferencePack ConstraintGraphBuilder::checkPack(const ScopePtr& scope, AstExprCa
 if (argTail && args.size() < 2)
     argTailPack = extendTypePack(*arena, builtinTypes, *argTail, 2 - args.size());
-LUAU_ASSERT(args.size() + argTailPack.head.size() == 2);
-TypeId target = args.size() > 0 ? args[0] : argTailPack.head[0];
-TypeId mt = args.size() > 1 ? args[1] : argTailPack.head[args.size() == 0 ? 1 : 0];
+TypeId target = nullptr;
+TypeId mt = nullptr;
+if (args.size() + argTailPack.head.size() == 2)
+{
+    target = args.size() > 0 ? args[0] : argTailPack.head[0];
+    mt = args.size() > 1 ? args[1] : argTailPack.head[args.size() == 0 ? 1 : 0];
+}
+else
+{
+    std::vector<TypeId> unpackedTypes;
+    if (args.size() > 0)
+        target = args[0];
+    else
+    {
+        target = arena->addType(BlockedType{});
+        unpackedTypes.emplace_back(target);
+    }
+    mt = arena->addType(BlockedType{});
+    unpackedTypes.emplace_back(mt);
+    TypePackId mtPack = arena->addTypePack(std::move(unpackedTypes));
+    addConstraint(scope, call->location, UnpackConstraint{mtPack, *argTail});
+}
+LUAU_ASSERT(target);
+LUAU_ASSERT(mt);
 AstExpr* targetExpr = call->args.data[0];
@@ -2090,6 +2114,19 @@ ConstraintGraphBuilder::FunctionSignature ConstraintGraphBuilder::checkFunctionS
 TypePack expectedArgPack;
 const FunctionType* expectedFunction = expectedType ? get<FunctionType>(*expectedType) : nullptr;
+// This check ensures that expectedType is precisely optional and not any (since any is also an optional type)
+if (expectedType && isOptional(*expectedType) && !get<AnyType>(*expectedType))
+{
+    auto ut = get<UnionType>(*expectedType);
+    for (auto u : ut)
+    {
+        if (get<FunctionType>(u) && !isNil(u))
+        {
+            expectedFunction = get<FunctionType>(u);
+            break;
+        }
+    }
+}
 if (expectedFunction)
 {
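
The first hunk above covers the case where one or both arguments of this two-argument call arrive through a pack tail such as `...`, so there is no per-argument type to read. A condensed sketch of the placeholder pattern it uses (names as in the hunk): allocate BlockedType placeholders now and let an UnpackConstraint fill them in once the tail pack resolves.

TypeId target = arena->addType(BlockedType{});
TypeId mt = arena->addType(BlockedType{});
TypePackId mtPack = arena->addTypePack({target, mt});
addConstraint(scope, call->location, UnpackConstraint{mtPack, *argTail});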


@@ -3,6 +3,7 @@
 #include "Luau/Anyification.h"
 #include "Luau/ApplyTypeFunction.h"
 #include "Luau/Clone.h"
+#include "Luau/Common.h"
 #include "Luau/ConstraintSolver.h"
 #include "Luau/DcrLogger.h"
 #include "Luau/Instantiation.h"
@@ -221,17 +222,6 @@ void dump(ConstraintSolver* cs, ToStringOptions& opts)
 auto it = cs->blockedConstraints.find(c);
 int blockCount = it == cs->blockedConstraints.end() ? 0 : int(it->second);
 printf("\t%d\t%s\n", blockCount, toString(*c, opts).c_str());
-for (NotNull<Constraint> dep : c->dependencies)
-{
-    auto unsolvedIter = std::find(begin(cs->unsolvedConstraints), end(cs->unsolvedConstraints), dep);
-    if (unsolvedIter == cs->unsolvedConstraints.end())
-        continue;
-    auto it = cs->blockedConstraints.find(dep);
-    int blockCount = it == cs->blockedConstraints.end() ? 0 : int(it->second);
-    printf("\t%d\t\t%s\n", blockCount, toString(*dep, opts).c_str());
-}
 }
 }
@@ -578,12 +568,16 @@ bool ConstraintSolver::tryDispatch(const UnaryConstraint& c, NotNull<const Const
 case AstExprUnary::Not:
 {
     asMutable(c.resultType)->ty.emplace<BoundType>(builtinTypes->booleanType);
+    unblock(c.resultType);
     return true;
 }
 case AstExprUnary::Len:
 {
     // __len must return a number.
     asMutable(c.resultType)->ty.emplace<BoundType>(builtinTypes->numberType);
+    unblock(c.resultType);
     return true;
 }
 case AstExprUnary::Minus:
@@ -613,6 +607,7 @@ bool ConstraintSolver::tryDispatch(const UnaryConstraint& c, NotNull<const Const
     asMutable(c.resultType)->ty.emplace<BoundType>(builtinTypes->errorRecoveryType());
 }
+unblock(c.resultType);
 return true;
 }
 }
@@ -868,7 +863,7 @@ bool ConstraintSolver::tryDispatch(const IterableConstraint& c, NotNull<const Co
 };
 auto [iteratorTypes, iteratorTail] = flatten(c.iterator);
-if (iteratorTail)
+if (iteratorTail && isBlocked(*iteratorTail))
     return block_(*iteratorTail);
 {
@@ -1249,35 +1244,63 @@ bool ConstraintSolver::tryDispatch(const FunctionCallConstraint& c, NotNull<cons
     *asMutable(follow(*ty)) = BoundType{builtinTypes->anyType};
 }
-TypeId instantiatedTy = arena->addType(BlockedType{});
 TypeId inferredTy = arena->addType(FunctionType{TypeLevel{}, constraint->scope.get(), argsPack, c.result});
-auto pushConstraintGreedy = [this, constraint](ConstraintV cv) -> Constraint* {
-    std::unique_ptr<Constraint> c = std::make_unique<Constraint>(constraint->scope, constraint->location, std::move(cv));
-    NotNull<Constraint> borrow{c.get()};
-    bool ok = tryDispatch(borrow, false);
-    if (ok)
-        return nullptr;
-    solverConstraints.push_back(std::move(c));
-    unsolvedConstraints.push_back(borrow);
-    return borrow;
-};
-// HACK: We don't want other constraints to act on the free type pack
-// created above until after these two constraints are solved, so we try to
-// dispatch them directly.
-auto ic = pushConstraintGreedy(InstantiationConstraint{instantiatedTy, fn});
-auto sc = pushConstraintGreedy(SubtypeConstraint{instantiatedTy, inferredTy});
-if (ic)
-    inheritBlocks(constraint, NotNull{ic});
-if (sc)
-    inheritBlocks(constraint, NotNull{sc});
+std::vector<TypeId> overloads = flattenIntersection(fn);
+Instantiation inst(TxnLog::empty(), arena, TypeLevel{}, constraint->scope);
+for (TypeId overload : overloads)
+{
+    overload = follow(overload);
+    std::optional<TypeId> instantiated = inst.substitute(overload);
+    LUAU_ASSERT(instantiated); // TODO FIXME HANDLE THIS
+    Unifier u{normalizer, Mode::Strict, constraint->scope, Location{}, Covariant};
+    u.useScopes = true;
+    u.tryUnify(*instantiated, inferredTy, /* isFunctionCall */ true);
+    if (!u.blockedTypes.empty() || !u.blockedTypePacks.empty())
+    {
+        for (TypeId bt : u.blockedTypes)
+            block(bt, constraint);
+        for (TypePackId btp : u.blockedTypePacks)
+            block(btp, constraint);
+        return false;
+    }
+    if (const auto& e = hasUnificationTooComplex(u.errors))
+        reportError(*e);
+    if (u.errors.empty())
+    {
+        // We found a matching overload.
+        const auto [changedTypes, changedPacks] = u.log.getChanges();
+        u.log.commit();
+        unblock(changedTypes);
+        unblock(changedPacks);
+        unblock(c.result);
+        return true;
+    }
+}
+// We found no matching overloads.
+Unifier u{normalizer, Mode::Strict, constraint->scope, Location{}, Covariant};
+u.useScopes = true;
+u.tryUnify(inferredTy, builtinTypes->anyType);
+u.tryUnify(fn, builtinTypes->anyType);
+LUAU_ASSERT(u.errors.empty()); // unifying with any should never fail
+const auto [changedTypes, changedPacks] = u.log.getChanges();
+u.log.commit();
+unblock(changedTypes);
+unblock(changedPacks);
 unblock(c.result);
 return true;
@@ -1291,6 +1314,7 @@ bool ConstraintSolver::tryDispatch(const PrimitiveTypeConstraint& c, NotNull<con
 TypeId bindTo = maybeSingleton(expectedType) ? c.singletonType : c.multitonType;
 asMutable(c.resultType)->ty.emplace<BoundType>(bindTo);
+unblock(c.resultType);
 return true;
 }
@@ -1335,20 +1359,17 @@ static bool isUnsealedTable(TypeId ty)
 }
 /**
- * Create a shallow copy of `ty` and its properties along `path`. Insert a new
- * property (the last segment of `path`) into the tail table with the value `t`.
+ * Given a path into a set of nested unsealed tables `ty`, insert a new property `replaceTy` as the leaf-most property.
 *
- * On success, returns the new outermost table type. If the root table or any
- * of its subkeys are not unsealed tables, the function fails and returns
- * std::nullopt.
+ * Fails and does nothing if every table along the way is not unsealed.
 *
- * TODO: Prove that we completely give up in the face of indexers and
- * metatables.
+ * Mutates the innermost table type in-place.
 */
-static std::optional<TypeId> updateTheTableType(NotNull<TypeArena> arena, TypeId ty, const std::vector<std::string>& path, TypeId replaceTy)
+static void updateTheTableType(
+    NotNull<BuiltinTypes> builtinTypes, NotNull<TypeArena> arena, TypeId ty, const std::vector<std::string>& path, TypeId replaceTy)
 {
     if (path.empty())
-        return std::nullopt;
+        return;
     // First walk the path and ensure that it's unsealed tables all the way
     // to the end.
@@ -1357,12 +1378,12 @@ static std::optional<TypeId> updateTheTableType(NotNull<TypeArena> arena, TypeId
     for (size_t i = 0; i < path.size() - 1; ++i)
     {
         if (!isUnsealedTable(t))
-            return std::nullopt;
+            return;
         const TableType* tbl = get<TableType>(t);
         auto it = tbl->props.find(path[i]);
         if (it == tbl->props.end())
-            return std::nullopt;
+            return;
         t = follow(it->second.type);
     }
@@ -1371,40 +1392,37 @@ static std::optional<TypeId> updateTheTableType(NotNull<TypeArena> arena, TypeId
     // We are not changing property types. We are only admitting this one
     // new property to be appended.
     if (!isUnsealedTable(t))
-        return std::nullopt;
+        return;
     const TableType* tbl = get<TableType>(t);
     if (0 != tbl->props.count(path.back()))
-        return std::nullopt;
+        return;
 }
-const TypeId res = shallowClone(ty, arena);
-TypeId t = res;
+TypeId t = ty;
+ErrorVec dummy;
 for (size_t i = 0; i < path.size() - 1; ++i)
 {
-    const std::string segment = path[i];
-    TableType* ttv = getMutable<TableType>(t);
-    LUAU_ASSERT(ttv);
-    auto propIt = ttv->props.find(segment);
-    if (propIt != ttv->props.end())
-    {
-        LUAU_ASSERT(isUnsealedTable(propIt->second.type));
-        t = shallowClone(follow(propIt->second.type), arena);
-        ttv->props[segment].type = t;
-    }
-    else
-        return std::nullopt;
+    auto propTy = findTablePropertyRespectingMeta(builtinTypes, dummy, t, path[i], Location{});
+    dummy.clear();
+    if (!propTy)
+        return;
+    t = *propTy;
 }
-TableType* ttv = getMutable<TableType>(t);
-LUAU_ASSERT(ttv);
-const std::string lastSegment = path.back();
-LUAU_ASSERT(0 == ttv->props.count(lastSegment));
-ttv->props[lastSegment] = Property{replaceTy};
-return res;
+const std::string& lastSegment = path.back();
+t = follow(t);
+TableType* tt = getMutable<TableType>(t);
+if (auto mt = get<MetatableType>(t))
+    tt = getMutable<TableType>(mt->table);
+if (!tt)
+    return;
+tt->props[lastSegment].type = replaceTy;
 }
 bool ConstraintSolver::tryDispatch(const SetPropConstraint& c, NotNull<const Constraint> constraint, bool force)
@@ -1443,6 +1461,7 @@ bool ConstraintSolver::tryDispatch(const SetPropConstraint& c, NotNull<const Con
 if (!isBlocked(c.propType))
     unify(c.propType, *existingPropType, constraint->scope);
 bind(c.resultType, c.subjectType);
+unblock(c.resultType);
 return true;
 }
@@ -1467,6 +1486,8 @@ bool ConstraintSolver::tryDispatch(const SetPropConstraint& c, NotNull<const Con
 bind(subjectType, ty);
 if (follow(c.resultType) != follow(ty))
     bind(c.resultType, ty);
+unblock(subjectType);
+unblock(c.resultType);
 return true;
 }
 else if (auto ttv = getMutable<TableType>(subjectType))
@@ -1477,20 +1498,23 @@ bool ConstraintSolver::tryDispatch(const SetPropConstraint& c, NotNull<const Con
 ttv->props[c.path[0]] = Property{c.propType};
 bind(c.resultType, c.subjectType);
+unblock(c.resultType);
 return true;
 }
 else if (ttv->state == TableState::Unsealed)
 {
     LUAU_ASSERT(!subjectType->persistent);
-    std::optional<TypeId> augmented = updateTheTableType(NotNull{arena}, subjectType, c.path, c.propType);
-    bind(c.resultType, augmented.value_or(subjectType));
-    bind(subjectType, c.resultType);
+    updateTheTableType(builtinTypes, NotNull{arena}, subjectType, c.path, c.propType);
+    bind(c.resultType, c.subjectType);
+    unblock(subjectType);
+    unblock(c.resultType);
     return true;
 }
 else
 {
     bind(c.resultType, subjectType);
+    unblock(c.resultType);
     return true;
 }
 }
@@ -1499,6 +1523,7 @@ bool ConstraintSolver::tryDispatch(const SetPropConstraint& c, NotNull<const Con
 // Other kinds of types don't change shape when properties are assigned
 // to them. (if they allow properties at all!)
 bind(c.resultType, subjectType);
+unblock(c.resultType);
 return true;
 }
 }
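
The rewritten FunctionCallConstraint path above resolves overloads directly instead of pushing greedy instantiation/subtype constraints: each overload is tried in a scratch Unifier, and only the winning attempt's transaction log is committed. A condensed sketch of that speculative-commit pattern, reusing the names from the hunk:

Unifier u{normalizer, Mode::Strict, constraint->scope, Location{}, Covariant};
u.useScopes = true;
u.tryUnify(*instantiated, inferredTy, /* isFunctionCall */ true);
if (u.errors.empty())
{
    const auto [changedTypes, changedPacks] = u.log.getChanges();
    u.log.commit(); // bindings become real only for the overload that matched
    unblock(changedTypes);
    unblock(changedPacks);
}
// on failure the log is simply dropped and the next overload gets a fresh Unifier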


@@ -32,7 +32,6 @@ LUAU_FASTFLAGVARIABLE(LuauKnowsTheDataModel3, false)
 LUAU_FASTFLAGVARIABLE(LuauLintInTypecheck, false)
 LUAU_FASTINTVARIABLE(LuauAutocompleteCheckTimeoutMs, 100)
 LUAU_FASTFLAGVARIABLE(DebugLuauDeferredConstraintResolution, false)
-LUAU_FASTFLAGVARIABLE(LuauDefinitionFileSourceModule, false)
 LUAU_FASTFLAGVARIABLE(DebugLuauLogSolverToJson, false);
 namespace Luau
@@ -144,70 +143,9 @@ LoadDefinitionFileResult Frontend::loadDefinitionFile(std::string_view source, c
 return LoadDefinitionFileResult{true, parseResult, sourceModule, checkedModule};
 }
-LoadDefinitionFileResult loadDefinitionFile_DEPRECATED(
-    TypeChecker& typeChecker, GlobalTypes& globals, ScopePtr targetScope, std::string_view source, const std::string& packageName)
-{
-    LUAU_TIMETRACE_SCOPE("loadDefinitionFile", "Frontend");
-    Luau::Allocator allocator;
-    Luau::AstNameTable names(allocator);
-    ParseOptions options;
-    options.allowDeclarationSyntax = true;
-    Luau::ParseResult parseResult = Luau::Parser::parse(source.data(), source.size(), names, allocator, options);
-    if (parseResult.errors.size() > 0)
-        return LoadDefinitionFileResult{false, parseResult, {}, nullptr};
-    Luau::SourceModule module;
-    module.root = parseResult.root;
-    module.mode = Mode::Definition;
-    ModulePtr checkedModule = typeChecker.check(module, Mode::Definition);
-    if (checkedModule->errors.size() > 0)
-        return LoadDefinitionFileResult{false, parseResult, {}, checkedModule};
-    CloneState cloneState;
-    std::vector<TypeId> typesToPersist;
-    typesToPersist.reserve(checkedModule->declaredGlobals.size() + checkedModule->exportedTypeBindings.size());
-    for (const auto& [name, ty] : checkedModule->declaredGlobals)
-    {
-        TypeId globalTy = clone(ty, globals.globalTypes, cloneState);
-        std::string documentationSymbol = packageName + "/global/" + name;
-        generateDocumentationSymbols(globalTy, documentationSymbol);
-        targetScope->bindings[globals.globalNames.names->getOrAdd(name.c_str())] = {globalTy, Location(), false, {}, documentationSymbol};
-        typesToPersist.push_back(globalTy);
-    }
-    for (const auto& [name, ty] : checkedModule->exportedTypeBindings)
-    {
-        TypeFun globalTy = clone(ty, globals.globalTypes, cloneState);
-        std::string documentationSymbol = packageName + "/globaltype/" + name;
-        generateDocumentationSymbols(globalTy.type, documentationSymbol);
-        targetScope->exportedTypeBindings[name] = globalTy;
-        typesToPersist.push_back(globalTy.type);
-    }
-    for (TypeId ty : typesToPersist)
-    {
-        persist(ty);
-    }
-    return LoadDefinitionFileResult{true, parseResult, {}, checkedModule};
-}
 LoadDefinitionFileResult loadDefinitionFile(TypeChecker& typeChecker, GlobalTypes& globals, ScopePtr targetScope, std::string_view source,
     const std::string& packageName, bool captureComments)
 {
-    if (!FFlag::LuauDefinitionFileSourceModule)
-        return loadDefinitionFile_DEPRECATED(typeChecker, globals, targetScope, source, packageName);
 LUAU_TIMETRACE_SCOPE("loadDefinitionFile", "Frontend");
 Luau::SourceModule sourceModule;


@@ -127,7 +127,7 @@ TypeId ReplaceGenerics::clean(TypeId ty)
 TypePackId ReplaceGenerics::clean(TypePackId tp)
 {
     LUAU_ASSERT(isDirty(tp));
-    return addTypePack(TypePackVar(FreeTypePack{level}));
+    return addTypePack(TypePackVar(FreeTypePack{scope, level}));
 }
 } // namespace Luau


@@ -16,7 +16,7 @@
 #include <algorithm>
 LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution);
-LUAU_FASTFLAGVARIABLE(LuauClonePublicInterfaceLess, false);
+LUAU_FASTFLAGVARIABLE(LuauClonePublicInterfaceLess2, false);
 LUAU_FASTFLAG(LuauSubstitutionReentrant);
 LUAU_FASTFLAG(LuauClassTypeVarsInSubstitution);
 LUAU_FASTFLAG(LuauSubstitutionFixMissingFields);
@@ -194,7 +194,7 @@ void Module::clonePublicInterface(NotNull<BuiltinTypes> builtinTypes, InternalEr
 TxnLog log;
 ClonePublicInterface clonePublicInterface{&log, builtinTypes, this};
-if (FFlag::LuauClonePublicInterfaceLess)
+if (FFlag::LuauClonePublicInterfaceLess2)
     returnType = clonePublicInterface.cloneTypePack(returnType);
 else
     returnType = clone(returnType, interfaceTypes, cloneState);
@@ -202,7 +202,7 @@ void Module::clonePublicInterface(NotNull<BuiltinTypes> builtinTypes, InternalEr
 moduleScope->returnType = returnType;
 if (varargPack)
 {
-    if (FFlag::LuauClonePublicInterfaceLess)
+    if (FFlag::LuauClonePublicInterfaceLess2)
         varargPack = clonePublicInterface.cloneTypePack(*varargPack);
     else
         varargPack = clone(*varargPack, interfaceTypes, cloneState);
@@ -211,7 +211,7 @@ void Module::clonePublicInterface(NotNull<BuiltinTypes> builtinTypes, InternalEr
 for (auto& [name, tf] : moduleScope->exportedTypeBindings)
 {
-    if (FFlag::LuauClonePublicInterfaceLess)
+    if (FFlag::LuauClonePublicInterfaceLess2)
         tf = clonePublicInterface.cloneTypeFun(tf);
     else
         tf = clone(tf, interfaceTypes, cloneState);
@@ -219,7 +219,7 @@ void Module::clonePublicInterface(NotNull<BuiltinTypes> builtinTypes, InternalEr
 for (auto& [name, ty] : declaredGlobals)
 {
-    if (FFlag::LuauClonePublicInterfaceLess)
+    if (FFlag::LuauClonePublicInterfaceLess2)
         ty = clonePublicInterface.cloneType(ty);
     else
         ty = clone(ty, interfaceTypes, cloneState);


@@ -533,7 +533,7 @@ static bool areNormalizedClasses(const NormalizedClassType& tys)
 static bool isPlainTyvar(TypeId ty)
 {
-    return (get<FreeType>(ty) || get<GenericType>(ty) || (FFlag::LuauNormalizeBlockedTypes && get<BlockedType>(ty)));
+    return (get<FreeType>(ty) || get<GenericType>(ty) || (FFlag::LuauNormalizeBlockedTypes && get<BlockedType>(ty)) || get<PendingExpansionType>(ty));
 }
 static bool isNormalizedTyvar(const NormalizedTyvars& tyvars)
@@ -1380,7 +1380,8 @@ bool Normalizer::unionNormalWithTy(NormalizedType& here, TypeId there, int ignor
 }
 else if (FFlag::LuauTransitiveSubtyping && get<UnknownType>(here.tops))
     return true;
-else if (get<GenericType>(there) || get<FreeType>(there) || (FFlag::LuauNormalizeBlockedTypes && get<BlockedType>(there)))
+else if (get<GenericType>(there) || get<FreeType>(there) || (FFlag::LuauNormalizeBlockedTypes && get<BlockedType>(there)) ||
+    get<PendingExpansionType>(there))
 {
     if (tyvarIndex(there) <= ignoreSmallerTyvars)
         return true;
@@ -1460,6 +1461,10 @@ bool Normalizer::unionNormalWithTy(NormalizedType& here, TypeId there, int ignor
 }
 else if (!FFlag::LuauNormalizeBlockedTypes && get<BlockedType>(there))
     LUAU_ASSERT(!"Internal error: Trying to normalize a BlockedType");
+else if (get<PendingExpansionType>(there))
+{
+    // nothing
+}
 else
     LUAU_ASSERT(!"Unreachable");
@@ -2544,7 +2549,8 @@ bool Normalizer::intersectNormalWithTy(NormalizedType& here, TypeId there)
     return false;
 return true;
 }
-else if (get<GenericType>(there) || get<FreeType>(there) || (FFlag::LuauNormalizeBlockedTypes && get<BlockedType>(there)))
+else if (get<GenericType>(there) || get<FreeType>(there) || (FFlag::LuauNormalizeBlockedTypes && get<BlockedType>(there)) ||
+    get<PendingExpansionType>(there))
 {
     NormalizedType thereNorm{builtinTypes};
     NormalizedType topNorm{builtinTypes};
@@ -2856,7 +2862,8 @@ bool isConsistentSubtype(TypeId subTy, TypeId superTy, NotNull<Scope> scope, Not
 return ok;
 }
-bool isConsistentSubtype(TypePackId subPack, TypePackId superPack, NotNull<Scope> scope, NotNull<BuiltinTypes> builtinTypes, InternalErrorReporter& ice)
+bool isConsistentSubtype(
+    TypePackId subPack, TypePackId superPack, NotNull<Scope> scope, NotNull<BuiltinTypes> builtinTypes, InternalErrorReporter& ice)
 {
     UnifierSharedState sharedState{&ice};
     TypeArena arena;


@@ -9,7 +9,7 @@
 #include <stdexcept>
 LUAU_FASTFLAGVARIABLE(LuauSubstitutionFixMissingFields, false)
-LUAU_FASTFLAG(LuauClonePublicInterfaceLess)
+LUAU_FASTFLAG(LuauClonePublicInterfaceLess2)
 LUAU_FASTINTVARIABLE(LuauTarjanChildLimit, 10000)
 LUAU_FASTFLAGVARIABLE(LuauClassTypeVarsInSubstitution, false)
 LUAU_FASTFLAGVARIABLE(LuauSubstitutionReentrant, false)
@@ -17,6 +17,181 @@ LUAU_FASTFLAGVARIABLE(LuauSubstitutionReentrant, false)
 namespace Luau
 {
+static TypeId DEPRECATED_shallowClone(TypeId ty, TypeArena& dest, const TxnLog* log, bool alwaysClone)
+{
+    ty = log->follow(ty);
+    TypeId result = ty;
+    if (auto pty = log->pending(ty))
+        ty = &pty->pending;
+    if (const FunctionType* ftv = get<FunctionType>(ty))
+    {
+        FunctionType clone = FunctionType{ftv->level, ftv->scope, ftv->argTypes, ftv->retTypes, ftv->definition, ftv->hasSelf};
+        clone.generics = ftv->generics;
+        clone.genericPacks = ftv->genericPacks;
+        clone.magicFunction = ftv->magicFunction;
+        clone.dcrMagicFunction = ftv->dcrMagicFunction;
+        clone.dcrMagicRefinement = ftv->dcrMagicRefinement;
+        clone.tags = ftv->tags;
+        clone.argNames = ftv->argNames;
+        result = dest.addType(std::move(clone));
+    }
+    else if (const TableType* ttv = get<TableType>(ty))
+    {
+        LUAU_ASSERT(!ttv->boundTo);
+        TableType clone = TableType{ttv->props, ttv->indexer, ttv->level, ttv->scope, ttv->state};
+        clone.definitionModuleName = ttv->definitionModuleName;
+        clone.definitionLocation = ttv->definitionLocation;
+        clone.name = ttv->name;
+        clone.syntheticName = ttv->syntheticName;
+        clone.instantiatedTypeParams = ttv->instantiatedTypeParams;
+        clone.instantiatedTypePackParams = ttv->instantiatedTypePackParams;
+        clone.tags = ttv->tags;
+        result = dest.addType(std::move(clone));
+    }
+    else if (const MetatableType* mtv = get<MetatableType>(ty))
+    {
+        MetatableType clone = MetatableType{mtv->table, mtv->metatable};
+        clone.syntheticName = mtv->syntheticName;
+        result = dest.addType(std::move(clone));
+    }
+    else if (const UnionType* utv = get<UnionType>(ty))
+    {
+        UnionType clone;
+        clone.options = utv->options;
+        result = dest.addType(std::move(clone));
+    }
+    else if (const IntersectionType* itv = get<IntersectionType>(ty))
+    {
+        IntersectionType clone;
+        clone.parts = itv->parts;
+        result = dest.addType(std::move(clone));
+    }
+    else if (const PendingExpansionType* petv = get<PendingExpansionType>(ty))
+    {
+        PendingExpansionType clone{petv->prefix, petv->name, petv->typeArguments, petv->packArguments};
+        result = dest.addType(std::move(clone));
+    }
+    else if (const NegationType* ntv = get<NegationType>(ty))
+    {
+        result = dest.addType(NegationType{ntv->ty});
+    }
+    else
+        return result;
+    asMutable(result)->documentationSymbol = ty->documentationSymbol;
+    return result;
+}
+static TypeId shallowClone(TypeId ty, TypeArena& dest, const TxnLog* log, bool alwaysClone)
+{
+    if (!FFlag::LuauClonePublicInterfaceLess2)
+        return DEPRECATED_shallowClone(ty, dest, log, alwaysClone);
+    auto go = [ty, &dest, alwaysClone](auto&& a) {
+        using T = std::decay_t<decltype(a)>;
+        if constexpr (std::is_same_v<T, FreeType>)
+            return ty;
+        else if constexpr (std::is_same_v<T, BoundType>)
+        {
+            // This should never happen, but visit() cannot see it.
+            LUAU_ASSERT(!"shallowClone didn't follow its argument!");
+            return dest.addType(BoundType{a.boundTo});
+        }
+        else if constexpr (std::is_same_v<T, GenericType>)
+            return dest.addType(a);
+        else if constexpr (std::is_same_v<T, BlockedType>)
+            return ty;
+        else if constexpr (std::is_same_v<T, PrimitiveType>)
+            return ty;
+        else if constexpr (std::is_same_v<T, PendingExpansionType>)
+            return ty;
+        else if constexpr (std::is_same_v<T, AnyType>)
+            return ty;
+        else if constexpr (std::is_same_v<T, ErrorType>)
+            return ty;
+        else if constexpr (std::is_same_v<T, UnknownType>)
+            return ty;
+        else if constexpr (std::is_same_v<T, NeverType>)
+            return ty;
+        else if constexpr (std::is_same_v<T, LazyType>)
+            return ty;
+        else if constexpr (std::is_same_v<T, SingletonType>)
+            return dest.addType(a);
+        else if constexpr (std::is_same_v<T, FunctionType>)
+        {
+            FunctionType clone = FunctionType{a.level, a.scope, a.argTypes, a.retTypes, a.definition, a.hasSelf};
+            clone.generics = a.generics;
+            clone.genericPacks = a.genericPacks;
+            clone.magicFunction = a.magicFunction;
+            clone.dcrMagicFunction = a.dcrMagicFunction;
+            clone.dcrMagicRefinement = a.dcrMagicRefinement;
+            clone.tags = a.tags;
+            clone.argNames = a.argNames;
+            return dest.addType(std::move(clone));
+        }
+        else if constexpr (std::is_same_v<T, TableType>)
+        {
+            LUAU_ASSERT(!a.boundTo);
+            TableType clone = TableType{a.props, a.indexer, a.level, a.scope, a.state};
+            clone.definitionModuleName = a.definitionModuleName;
+            clone.definitionLocation = a.definitionLocation;
+            clone.name = a.name;
+            clone.syntheticName = a.syntheticName;
+            clone.instantiatedTypeParams = a.instantiatedTypeParams;
+            clone.instantiatedTypePackParams = a.instantiatedTypePackParams;
+            clone.tags = a.tags;
+            return dest.addType(std::move(clone));
+        }
+        else if constexpr (std::is_same_v<T, MetatableType>)
+        {
+            MetatableType clone = MetatableType{a.table, a.metatable};
+            clone.syntheticName = a.syntheticName;
+            return dest.addType(std::move(clone));
+        }
+        else if constexpr (std::is_same_v<T, UnionType>)
+        {
+            UnionType clone;
+            clone.options = a.options;
+            return dest.addType(std::move(clone));
+        }
+        else if constexpr (std::is_same_v<T, IntersectionType>)
+        {
+            IntersectionType clone;
+            clone.parts = a.parts;
+            return dest.addType(std::move(clone));
+        }
+        else if constexpr (std::is_same_v<T, ClassType>)
+        {
+            if (alwaysClone)
+            {
+                ClassType clone{a.name, a.props, a.parent, a.metatable, a.tags, a.userData, a.definitionModuleName};
+                return dest.addType(std::move(clone));
+            }
+            else
+                return ty;
+        }
+        else if constexpr (std::is_same_v<T, NegationType>)
+            return dest.addType(NegationType{a.ty});
+        else
+            static_assert(always_false_v<T>, "Non-exhaustive shallowClone switch");
+    };
+    ty = log->follow(ty);
+    if (auto pty = log->pending(ty))
+        ty = &pty->pending;
+    TypeId resTy = visit(go, ty->ty);
+    if (resTy != ty)
+        asMutable(resTy)->documentationSymbol = ty->documentationSymbol;
+    return resTy;
+}
 void Tarjan::visitChildren(TypeId ty, int index)
 {
     LUAU_ASSERT(ty == log->follow(ty));
@@ -469,7 +644,7 @@ std::optional<TypePackId> Substitution::substitute(TypePackId tp)
 TypeId Substitution::clone(TypeId ty)
 {
-    return shallowClone(ty, *arena, log, /* alwaysClone */ FFlag::LuauClonePublicInterfaceLess);
+    return shallowClone(ty, *arena, log, /* alwaysClone */ FFlag::LuauClonePublicInterfaceLess2);
 }
 TypePackId Substitution::clone(TypePackId tp)
@@ -494,7 +669,7 @@ TypePackId Substitution::clone(TypePackId tp)
     clone.hidden = vtp->hidden;
     return addTypePack(std::move(clone));
 }
-else if (FFlag::LuauClonePublicInterfaceLess)
+else if (FFlag::LuauClonePublicInterfaceLess2)
 {
     return addTypePack(*tp);
 }
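
The flagged shallowClone above leans on a standard C++17 idiom: std::visit with a generic lambda, an if constexpr chain per alternative, and a dependent-false static_assert so that adding a new variant alternative without handling it becomes a compile error. A self-contained illustration of the idiom:

#include <cstdio>
#include <type_traits>
#include <variant>

template<typename T>
inline constexpr bool always_false_v = false; // dependent false: only instantiated if reached

using Node = std::variant<int, double>;

void describe(const Node& n)
{
    std::visit(
        [](auto&& a) {
            using T = std::decay_t<decltype(a)>;
            if constexpr (std::is_same_v<T, int>)
                std::printf("int %d\n", a);
            else if constexpr (std::is_same_v<T, double>)
                std::printf("double %f\n", a);
            else
                static_assert(always_false_v<T>, "non-exhaustive visitor"); // fires if Node grows an unhandled alternative
        },
        n);
}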


@@ -85,6 +85,11 @@ struct FindCyclicTypes final : TypeVisitor
 {
     return false;
 }
+bool visit(TypeId, const PendingExpansionType&) override
+{
+    return false;
+}
 };
 template<typename TID>
@@ -1518,7 +1523,7 @@ std::string toString(const Constraint& constraint, ToStringOptions& opts)
 }
 else if constexpr (std::is_same_v<T, FunctionCallConstraint>)
 {
-    return "call " + tos(c.fn) + " with { result = " + tos(c.result) + " }";
+    return "call " + tos(c.fn) + "( " + tos(c.argsPack) + " )" + " with { result = " + tos(c.result) + " }";
 }
 else if constexpr (std::is_same_v<T, PrimitiveTypeConstraint>)
 {


@@ -26,7 +26,6 @@ LUAU_FASTINTVARIABLE(LuauTableTypeMaximumStringifierLength, 0)
 LUAU_FASTINT(LuauTypeInferRecursionLimit)
 LUAU_FASTFLAG(LuauInstantiateInSubtyping)
 LUAU_FASTFLAG(LuauNormalizeBlockedTypes)
-LUAU_FASTFLAGVARIABLE(LuauMatchReturnsOptionalString, false);
 namespace Luau
 {
@@ -1219,12 +1218,12 @@ static std::vector<TypeId> parsePatternString(NotNull<BuiltinTypes> builtinTypes
 if (i + 1 < size && data[i + 1] == ')')
 {
     i++;
-    result.push_back(FFlag::LuauMatchReturnsOptionalString ? builtinTypes->optionalNumberType : builtinTypes->numberType);
+    result.push_back(builtinTypes->optionalNumberType);
     continue;
 }
 ++depth;
-result.push_back(FFlag::LuauMatchReturnsOptionalString ? builtinTypes->optionalStringType : builtinTypes->stringType);
+result.push_back(builtinTypes->optionalStringType);
 }
 else if (data[i] == ')')
 {
@@ -1242,7 +1241,7 @@ static std::vector<TypeId> parsePatternString(NotNull<BuiltinTypes> builtinTypes
 return std::vector<TypeId>();
 if (result.empty())
-    result.push_back(FFlag::LuauMatchReturnsOptionalString ? builtinTypes->optionalStringType : builtinTypes->stringType);
+    result.push_back(builtinTypes->optionalStringType);
 return result;
 }


@@ -568,6 +568,10 @@ struct TypeChecker2
 {
     // nothing
 }
+else if (isOptional(iteratorTy))
+{
+    reportError(OptionalValueAccess{iteratorTy}, forInStatement->values.data[0]->location);
+}
 else if (std::optional<TypeId> iterMmTy =
     findMetatableEntry(builtinTypes, module->errors, iteratorTy, "__iter", forInStatement->values.data[0]->location))
 {
@@ -973,6 +977,12 @@ struct TypeChecker2
 else if (auto utv = get<UnionType>(functionType))
 {
     // Sometimes it's okay to call a union of functions, but only if all of the functions are the same.
+    // Another scenario we might run into it is if the union has a nil member. In this case, we want to throw an error
+    if (isOptional(functionType))
+    {
+        reportError(OptionalValueAccess{functionType}, call->location);
+        return;
+    }
     std::optional<TypeId> fst;
     for (TypeId ty : utv)
     {
@@ -1187,6 +1197,8 @@ struct TypeChecker2
 else
     reportError(CannotExtendTable{exprType, CannotExtendTable::Indexer, "indexer??"}, indexExpr->location);
 }
+else if (get<UnionType>(exprType) && isOptional(exprType))
+    reportError(OptionalValueAccess{exprType}, indexExpr->location);
 }
 void visit(AstExprFunction* fn)
@@ -1297,8 +1309,12 @@ struct TypeChecker2
 DenseHashSet<TypeId> seen{nullptr};
 int recursionCount = 0;
 if (!hasLength(operandType, seen, &recursionCount))
 {
+    if (isOptional(operandType))
+        reportError(OptionalValueAccess{operandType}, expr->location);
+    else
         reportError(NotATable{operandType}, expr->location);
 }
 }


@@ -689,11 +689,10 @@ LUAU_NOINLINE void TypeChecker::checkBlockTypeAliases(const ScopePtr& scope, std
 if (duplicateTypeAliases.contains({typealias->exported, name}))
     continue;
-TypeId type = bindings[name].type;
-if (get<FreeType>(follow(type)))
+TypeId type = follow(bindings[name].type);
+if (get<FreeType>(type))
 {
-    Type* mty = asMutable(follow(type));
-    mty->reassign(*errorRecoveryType(anyType));
+    asMutable(type)->ty.emplace<BoundType>(errorRecoveryType(anyType));
     reportError(TypeError{typealias->location, OccursCheckFailed{}});
 }


@@ -331,7 +331,7 @@ TypeId TypeReducer::reduce(TypeId ty)
 if (edge->irreducible)
     return edge->type;
 else
-    ty = edge->type;
+    ty = follow(edge->type);
 }
 else if (cyclics->contains(ty))
     return ty;


@@ -192,6 +192,18 @@ struct SkipCacheForType final : TypeOnceVisitor
     return false;
 }
+bool visit(TypeId, const BlockedType&) override
+{
+    result = true;
+    return false;
+}
+bool visit(TypeId, const PendingExpansionType&) override
+{
+    result = true;
+    return false;
+}
 bool visit(TypeId ty, const TableType&) override
 {
     // Types from other modules don't contain mutable elements and are ok to cache
@@ -259,6 +271,12 @@ struct SkipCacheForType final : TypeOnceVisitor
     return false;
 }
+bool visit(TypePackId tp, const BlockedTypePack&) override
+{
+    result = true;
+    return false;
+}
 const DenseHashMap<TypeId, bool>& skipCacheForType;
 const TypeArena* typeArena = nullptr;
 bool result = false;
@@ -386,6 +404,12 @@ void Unifier::tryUnify(TypeId subTy, TypeId superTy, bool isFunctionCall, bool i
 tryUnify_(subTy, superTy, isFunctionCall, isIntersection);
 }
+static bool isBlocked(const TxnLog& log, TypeId ty)
+{
+    ty = log.follow(ty);
+    return get<BlockedType>(ty) || get<PendingExpansionType>(ty);
+}
 void Unifier::tryUnify_(TypeId subTy, TypeId superTy, bool isFunctionCall, bool isIntersection)
 {
     RecursionLimiter _ra(&sharedState.counters.recursionCount, sharedState.counters.recursionLimit);
@@ -531,11 +555,15 @@ void Unifier::tryUnify_(TypeId subTy, TypeId superTy, bool isFunctionCall, bool
 size_t errorCount = errors.size();
-if (log.getMutable<BlockedType>(subTy) && log.getMutable<BlockedType>(superTy))
+if (isBlocked(log, subTy) && isBlocked(log, superTy))
 {
     blockedTypes.push_back(subTy);
     blockedTypes.push_back(superTy);
 }
+else if (isBlocked(log, subTy))
+    blockedTypes.push_back(subTy);
+else if (isBlocked(log, superTy))
+    blockedTypes.push_back(superTy);
 else if (const UnionType* subUnion = log.getMutable<UnionType>(subTy))
 {
     tryUnifyUnionWithType(subTy, subUnion, superTy);
@@ -890,7 +918,8 @@ void Unifier::tryUnifyTypeWithUnion(TypeId subTy, TypeId superTy, const UnionTyp
 if (!subNorm || !superNorm)
     return reportError(location, UnificationTooComplex{});
 else if ((failedOptionCount == 1 || foundHeuristic) && failedOption)
-    innerState.tryUnifyNormalizedTypes(subTy, superTy, *subNorm, *superNorm, "None of the union options are compatible. For example:", *failedOption);
+    innerState.tryUnifyNormalizedTypes(
+        subTy, superTy, *subNorm, *superNorm, "None of the union options are compatible. For example:", *failedOption);
 else
     innerState.tryUnifyNormalizedTypes(subTy, superTy, *subNorm, *superNorm, "none of the union options are compatible");
 if (!innerState.failure)


@@ -24,20 +24,24 @@ public:
 // Moves
 void mov(RegisterA64 dst, RegisterA64 src);
-void mov(RegisterA64 dst, uint16_t src, int shift = 0);
+void mov(RegisterA64 dst, int src); // macro
+// Moves of 32-bit immediates get decomposed into one or more of these
+void movz(RegisterA64 dst, uint16_t src, int shift = 0);
+void movn(RegisterA64 dst, uint16_t src, int shift = 0);
 void movk(RegisterA64 dst, uint16_t src, int shift = 0);
 // Arithmetics
 void add(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, int shift = 0);
-void add(RegisterA64 dst, RegisterA64 src1, int src2);
+void add(RegisterA64 dst, RegisterA64 src1, uint16_t src2);
 void sub(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, int shift = 0);
-void sub(RegisterA64 dst, RegisterA64 src1, int src2);
+void sub(RegisterA64 dst, RegisterA64 src1, uint16_t src2);
 void neg(RegisterA64 dst, RegisterA64 src);
 // Comparisons
 // Note: some arithmetic instructions also have versions that update flags (ADDS etc) but we aren't using them atm
 void cmp(RegisterA64 src1, RegisterA64 src2);
-void cmp(RegisterA64 src1, int src2);
+void cmp(RegisterA64 src1, uint16_t src2);
 // Bitwise
 // Note: shifted-register support and bitfield operations are omitted for simplicity
@@ -63,11 +67,13 @@ public:
 void ldrsb(RegisterA64 dst, AddressA64 src);
 void ldrsh(RegisterA64 dst, AddressA64 src);
 void ldrsw(RegisterA64 dst, AddressA64 src);
+void ldp(RegisterA64 dst1, RegisterA64 dst2, AddressA64 src);
 // Store
 void str(RegisterA64 src, AddressA64 dst);
 void strb(RegisterA64 src, AddressA64 dst);
 void strh(RegisterA64 src, AddressA64 dst);
+void stp(RegisterA64 src1, RegisterA64 src2, AddressA64 dst);
 // Control flow
 // Note: tbz/tbnz are currently not supported because they have 15-bit offsets and we don't support branch thunks
@@ -84,6 +90,9 @@ public:
 void adr(RegisterA64 dst, uint64_t value);
 void adr(RegisterA64 dst, double value);
+// Address of code (label)
+void adr(RegisterA64 dst, Label& label);
 // Run final checks
 bool finalize();
@@ -113,6 +122,9 @@ public:
 const bool logText = false;
+// Maximum immediate argument to functions like add/sub/cmp
+static constexpr size_t kMaxImmediate = (1 << 12) - 1;
 private:
 // Instruction archetypes
 void place0(const char* name, uint32_t word);
@@ -127,6 +139,8 @@ private:
 void placeBCR(const char* name, Label& label, uint8_t op, RegisterA64 cond);
 void placeBR(const char* name, RegisterA64 src, uint32_t op);
 void placeADR(const char* name, RegisterA64 src, uint8_t op);
+void placeADR(const char* name, RegisterA64 src, uint8_t op, Label& label);
+void placeP(const char* name, RegisterA64 dst1, RegisterA64 dst2, AddressA64 src, uint8_t op, uint8_t size);
 void place(uint32_t word);
@@ -146,6 +160,7 @@ private:
 LUAU_NOINLINE void log(const char* opcode, RegisterA64 dst, RegisterA64 src);
 LUAU_NOINLINE void log(const char* opcode, RegisterA64 dst, int src, int shift = 0);
 LUAU_NOINLINE void log(const char* opcode, RegisterA64 dst, AddressA64 src);
+LUAU_NOINLINE void log(const char* opcode, RegisterA64 dst1, RegisterA64 dst2, AddressA64 src);
 LUAU_NOINLINE void log(const char* opcode, RegisterA64 src, Label label);
 LUAU_NOINLINE void log(const char* opcode, RegisterA64 src);
 LUAU_NOINLINE void log(const char* opcode, Label label);
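
kMaxImmediate documents the hard limit baked into the immediate forms above: add/sub/cmp carry a 12-bit unsigned immediate, so values over 4095 must be materialized in a register first. A hedged sketch of how a caller would branch on it ('build' and the scratch register choice are assumptions, not part of this diff):

if (offset <= AssemblyBuilderA64::kMaxImmediate)
    build.add(x1, x2, uint16_t(offset)); // 12-bit immediate form
else
{
    build.mov(w3, int(offset)); // macro mov: expands to movz/movk (see the .cpp below)
    build.add(x1, x2, x3);      // register form; writes to w3 zero-extend into x3
}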


@@ -52,8 +52,8 @@ void computeCfgInfo(IrFunction& function);
 struct BlockIteratorWrapper
 {
-    uint32_t* itBegin = nullptr;
-    uint32_t* itEnd = nullptr;
+    const uint32_t* itBegin = nullptr;
+    const uint32_t* itEnd = nullptr;
 bool empty() const
 {
@@ -65,19 +65,19 @@ struct BlockIteratorWrapper
     return size_t(itEnd - itBegin);
 }
-uint32_t* begin() const
+const uint32_t* begin() const
 {
     return itBegin;
 }
-uint32_t* end() const
+const uint32_t* end() const
 {
     return itEnd;
 }
 };
-BlockIteratorWrapper predecessors(CfgInfo& cfg, uint32_t blockIdx);
-BlockIteratorWrapper successors(CfgInfo& cfg, uint32_t blockIdx);
+BlockIteratorWrapper predecessors(const CfgInfo& cfg, uint32_t blockIdx);
+BlockIteratorWrapper successors(const CfgInfo& cfg, uint32_t blockIdx);
 } // namespace CodeGen
 } // namespace Luau
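
Const-qualifying the wrapper is what lets analysis and dump passes take const CfgInfo& yet keep using range-based for, which only requires a matching begin()/end() pair. A hedged usage sketch (visitEdge is a hypothetical callback):

for (uint32_t pred : predecessors(cfg, blockIdx))
    visitEdge(pred, blockIdx); // incoming edges

for (uint32_t succ : successors(cfg, blockIdx))
    visitEdge(blockIdx, succ); // outgoing edges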


@@ -385,17 +385,15 @@ enum class IrCmd : uint8_t
 LOP_SETLIST,
 // Call specified function
-// A: unsigned int (bytecode instruction index)
-// B: Rn (function, followed by arguments)
-// C: int (argument count or -1 to use all arguments up to stack top)
-// D: int (result count or -1 to preserve all results and adjust stack top)
-// Note: return values are placed starting from Rn specified in 'B'
+// A: Rn (function, followed by arguments)
+// B: int (argument count or -1 to use all arguments up to stack top)
+// C: int (result count or -1 to preserve all results and adjust stack top)
+// Note: return values are placed starting from Rn specified in 'A'
 LOP_CALL,
 // Return specified values from the function
-// A: unsigned int (bytecode instruction index)
-// B: Rn (value start)
-// C: int (result count or -1 to return all values up to stack top)
+// A: Rn (value start)
+// B: int (result count or -1 to return all values up to stack top)
 LOP_RETURN,
 // Adjust loop variables for one iteration of a generic for loop, jump back to the loop header if loop needs to continue
@@ -421,10 +419,9 @@ enum class IrCmd : uint8_t
 LOP_FORGPREP_XNEXT_FALLBACK,
 // Perform `and` or `or` operation (selecting lhs or rhs based on whether the lhs is truthy) and put the result into target register
-// A: unsigned int (bytecode instruction index)
-// B: Rn (target)
-// C: Rn (lhs)
-// D: Rn or Kn (rhs)
+// A: Rn (target)
+// B: Rn (lhs)
+// C: Rn or Kn (rhs)
 LOP_AND,
 LOP_ANDK,
 LOP_OR,
@@ -790,12 +787,6 @@ struct IrFunction
     return value.valueDouble;
 }
-IrCondition conditionOp(IrOp op)
-{
-    LUAU_ASSERT(op.kind == IrOpKind::Condition);
-    return IrCondition(op.index);
-}
 uint32_t getBlockIndex(const IrBlock& block)
 {
     // Can only be called with blocks from our vector
@@ -804,5 +795,29 @@ struct IrFunction
 }
 };
+inline IrCondition conditionOp(IrOp op)
+{
+    LUAU_ASSERT(op.kind == IrOpKind::Condition);
+    return IrCondition(op.index);
+}
+inline int vmRegOp(IrOp op)
+{
+    LUAU_ASSERT(op.kind == IrOpKind::VmReg);
+    return op.index;
+}
+inline int vmConstOp(IrOp op)
+{
+    LUAU_ASSERT(op.kind == IrOpKind::VmConst);
+    return op.index;
+}
+inline int vmUpvalueOp(IrOp op)
+{
+    LUAU_ASSERT(op.kind == IrOpKind::VmUpvalue);
+    return op.index;
+}
 } // namespace CodeGen
 } // namespace Luau
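
The comment reshuffle above records that LOP_CALL/LOP_RETURN/LOP_AND no longer carry a leading bytecode-instruction-index operand, so the register operand moves up to A; and the operand accessors are now free functions usable without an IrFunction at hand. A hedged sketch combining both (IrBuilder's vmReg/constInt/inst helpers are assumed from elsewhere in the codebase):

// LOP_CALL under the new layout: A = function register, B = nargs, C = nresults
IrOp fn = build.vmReg(ra);
build.inst(IrCmd::LOP_CALL, fn, build.constInt(nargs), build.constInt(nresults));

int reg = vmRegOp(fn); // asserts fn.kind == IrOpKind::VmReg, then yields ra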


@@ -19,9 +19,9 @@ const char* getBlockKindName(IrBlockKind kind);
 struct IrToStringContext
 {
     std::string& result;
-    std::vector<IrBlock>& blocks;
-    std::vector<IrConst>& constants;
-    CfgInfo& cfg;
+    const std::vector<IrBlock>& blocks;
+    const std::vector<IrConst>& constants;
+    const CfgInfo& cfg;
 };
 void toString(IrToStringContext& ctx, const IrInst& inst, uint32_t index);
@@ -33,13 +33,13 @@ void toString(std::string& result, IrConst constant);
 void toStringDetailed(IrToStringContext& ctx, const IrInst& inst, uint32_t index, bool includeUseInfo);
 void toStringDetailed(IrToStringContext& ctx, const IrBlock& block, uint32_t index, bool includeUseInfo); // Block title
-std::string toString(IrFunction& function, bool includeUseInfo);
-std::string dump(IrFunction& function);
-std::string toDot(IrFunction& function, bool includeInst);
-std::string dumpDot(IrFunction& function, bool includeInst);
+std::string toString(const IrFunction& function, bool includeUseInfo);
+std::string dump(const IrFunction& function);
+std::string toDot(const IrFunction& function, bool includeInst);
+std::string dumpDot(const IrFunction& function, bool includeInst);
 } // namespace CodeGen
 } // namespace Luau


@@ -45,9 +45,30 @@ void AssemblyBuilderA64::mov(RegisterA64 dst, RegisterA64 src)
 placeSR2("mov", dst, src, 0b01'01010);
 }
-void AssemblyBuilderA64::mov(RegisterA64 dst, uint16_t src, int shift)
+void AssemblyBuilderA64::mov(RegisterA64 dst, int src)
 {
-    placeI16("mov", dst, src, 0b10'100101, shift);
+    if (src >= 0)
+    {
+        movz(dst, src & 0xffff);
+        if (src > 0xffff)
+            movk(dst, src >> 16, 16);
+    }
+    else
+    {
+        movn(dst, ~src & 0xffff);
+        if (src < -0x10000)
+            movk(dst, (src >> 16) & 0xffff, 16);
+    }
+}
+void AssemblyBuilderA64::movz(RegisterA64 dst, uint16_t src, int shift)
+{
+    placeI16("movz", dst, src, 0b10'100101, shift);
+}
+void AssemblyBuilderA64::movn(RegisterA64 dst, uint16_t src, int shift)
+{
+    placeI16("movn", dst, src, 0b00'100101, shift);
 }
 void AssemblyBuilderA64::movk(RegisterA64 dst, uint16_t src, int shift)
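
Since MOVZ writes a 16-bit chunk (zeroing the rest) and MOVN writes the bitwise NOT of a chunk, any 32-bit immediate fits in at most two instructions. A worked trace of the macro above (assuming a builder named `build`):

build.mov(x0, 0x12345678);
// emits: movz x0, #0x5678           ; low half, upper bits zeroed
//        movk x0, #0x1234, lsl #16  ; src > 0xffff, so patch the high half
build.mov(x0, -2);
// emits: movn x0, #1                ; writes ~1 = 0xffff...fffe, i.e. -2
//        (no movk: -2 >= -0x10000, the high half is already all ones)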
@@ -60,7 +81,7 @@ void AssemblyBuilderA64::add(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2
 placeSR3("add", dst, src1, src2, 0b00'01011, shift);
 }
-void AssemblyBuilderA64::add(RegisterA64 dst, RegisterA64 src1, int src2)
+void AssemblyBuilderA64::add(RegisterA64 dst, RegisterA64 src1, uint16_t src2)
 {
     placeI12("add", dst, src1, src2, 0b00'10001);
 }
@@ -70,7 +91,7 @@ void AssemblyBuilderA64::sub(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2
 placeSR3("sub", dst, src1, src2, 0b10'01011, shift);
 }
-void AssemblyBuilderA64::sub(RegisterA64 dst, RegisterA64 src1, int src2)
+void AssemblyBuilderA64::sub(RegisterA64 dst, RegisterA64 src1, uint16_t src2)
 {
     placeI12("sub", dst, src1, src2, 0b10'10001);
 }
@@ -87,7 +108,7 @@ void AssemblyBuilderA64::cmp(RegisterA64 src1, RegisterA64 src2)
 placeSR3("cmp", dst, src1, src2, 0b11'01011);
 }
-void AssemblyBuilderA64::cmp(RegisterA64 src1, int src2)
+void AssemblyBuilderA64::cmp(RegisterA64 src1, uint16_t src2)
 {
     RegisterA64 dst = src1.kind == KindA64::x ? xzr : wzr;
@@ -186,6 +207,14 @@ void AssemblyBuilderA64::ldrsw(RegisterA64 dst, AddressA64 src)
 placeA("ldrsw", dst, src, 0b11100010, 0b10);
 }
+void AssemblyBuilderA64::ldp(RegisterA64 dst1, RegisterA64 dst2, AddressA64 src)
+{
+    LUAU_ASSERT(dst1.kind == KindA64::x || dst1.kind == KindA64::w);
+    LUAU_ASSERT(dst1.kind == dst2.kind);
+    placeP("ldp", dst1, dst2, src, 0b101'0'010'1, 0b10 | uint8_t(dst1.kind == KindA64::x));
+}
 void AssemblyBuilderA64::str(RegisterA64 src, AddressA64 dst)
 {
     LUAU_ASSERT(src.kind == KindA64::x || src.kind == KindA64::w);
@@ -207,6 +236,14 @@ void AssemblyBuilderA64::strh(RegisterA64 src, AddressA64 dst)
 placeA("strh", src, dst, 0b11100000, 0b01);
 }
+void AssemblyBuilderA64::stp(RegisterA64 src1, RegisterA64 src2, AddressA64 dst)
+{
+    LUAU_ASSERT(src1.kind == KindA64::x || src1.kind == KindA64::w);
+    LUAU_ASSERT(src1.kind == src2.kind);
+    placeP("stp", src1, src2, dst, 0b101'0'010'0, 0b10 | uint8_t(src1.kind == KindA64::x));
+}
 void AssemblyBuilderA64::b(Label& label)
 {
     // Note: we aren't using 'b' form since it has a 26-bit immediate which requires custom fixup logic
@@ -276,6 +313,11 @@ void AssemblyBuilderA64::adr(RegisterA64 dst, double value)
 patchImm19(location, -int(location) - int((data.size() - pos) / 4));
 }
+void AssemblyBuilderA64::adr(RegisterA64 dst, Label& label)
+{
+    placeADR("adr", dst, 0b10000, label);
+}
 bool AssemblyBuilderA64::finalize()
 {
     code.resize(codePos - code.data());
@@ -511,6 +553,32 @@ void AssemblyBuilderA64::placeADR(const char* name, RegisterA64 dst, uint8_t op)
 commit();
 }
+void AssemblyBuilderA64::placeADR(const char* name, RegisterA64 dst, uint8_t op, Label& label)
+{
+    LUAU_ASSERT(dst.kind == KindA64::x);
+    place(dst.index | (op << 24));
+    commit();
+    patchLabel(label);
+    if (logText)
+        log(name, dst, label);
+}
+void AssemblyBuilderA64::placeP(const char* name, RegisterA64 src1, RegisterA64 src2, AddressA64 dst, uint8_t op, uint8_t size)
+{
+    if (logText)
+        log(name, src1, src2, dst);
+    LUAU_ASSERT(dst.kind == AddressKindA64::imm);
+    LUAU_ASSERT(dst.data >= -128 * (1 << size) && dst.data <= 127 * (1 << size));
+    LUAU_ASSERT(dst.data % (1 << size) == 0);
+    place(src1.index | (dst.base.index << 5) | (src2.index << 10) | (((dst.data >> size) & 127) << 15) | (op << 22) | (size << 31));
+    commit();
+}
void AssemblyBuilderA64::place(uint32_t word) void AssemblyBuilderA64::place(uint32_t word)
{ {
LUAU_ASSERT(codePos < codeEnd); LUAU_ASSERT(codePos < codeEnd);
@ -628,6 +696,17 @@ void AssemblyBuilderA64::log(const char* opcode, RegisterA64 dst, AddressA64 src
text.append("\n"); text.append("\n");
} }
void AssemblyBuilderA64::log(const char* opcode, RegisterA64 dst1, RegisterA64 dst2, AddressA64 src)
{
logAppend(" %-12s", opcode);
log(dst1);
text.append(",");
log(dst2);
text.append(",");
log(src);
text.append("\n");
}
void AssemblyBuilderA64::log(const char* opcode, RegisterA64 dst, RegisterA64 src) void AssemblyBuilderA64::log(const char* opcode, RegisterA64 dst, RegisterA64 src)
{ {
logAppend(" %-12s", opcode); logAppend(" %-12s", opcode);
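One more note on placeP: the imm7 field of LDP/STP stores the offset divided by the access size, so only scaled offsets in the signed 7-bit range encode. A quick check of that rule as a plain function (our own helper, mirroring the assertions above):

#include <cassert>
#include <cstdio>

// Returns true when 'offset' can be encoded as the scaled imm7 field of
// LDP/STP: 'size' is log2 of the access width (2 for w pairs, 3 for x pairs).
static bool fitsImm7(int offset, int size)
{
    return offset % (1 << size) == 0 && offset >= -128 * (1 << size) && offset <= 127 * (1 << size);
}

int main()
{
    assert(fitsImm7(16, 3));    // stp x19, x20, [sp, #16] encodes fine
    assert(!fitsImm7(12, 3));   // not a multiple of 8 for an x pair
    assert(!fitsImm7(1024, 3)); // out of the signed 7-bit scaled range
    printf("imm7 checks passed\n");
}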
View file
@@ -6,6 +6,8 @@
 #include "Luau/CodeBlockUnwind.h"
 #include "Luau/IrAnalysis.h"
 #include "Luau/IrBuilder.h"
+#include "Luau/IrDump.h"
+#include "Luau/IrUtils.h"
 #include "Luau/OptimizeConstProp.h"
 #include "Luau/OptimizeFinalX64.h"
@@ -13,19 +15,24 @@
 #include "Luau/UnwindBuilderDwarf2.h"
 #include "Luau/UnwindBuilderWin.h"

-#include "Luau/AssemblyBuilderX64.h"
 #include "Luau/AssemblyBuilderA64.h"
+#include "Luau/AssemblyBuilderX64.h"

 #include "CustomExecUtils.h"
-#include "CodeGenX64.h"
+#include "NativeState.h"
+
 #include "CodeGenA64.h"
+#include "EmitCommonA64.h"
+#include "IrLoweringA64.h"
+
+#include "CodeGenX64.h"
 #include "EmitCommonX64.h"
 #include "EmitInstructionX64.h"
 #include "IrLoweringX64.h"
-#include "NativeState.h"

 #include "lapi.h"

+#include <algorithm>
 #include <memory>

 #if defined(__x86_64__) || defined(_M_X64)
@@ -60,6 +67,148 @@ static NativeProto* createNativeProto(Proto* proto, const IrBuilder& ir)
     return result;
 }
template<typename AssemblyBuilder, typename IrLowering>
static void lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction& function, int bytecodeid, AssemblyOptions options)
{
// While we will need a better block ordering in the future, right now we want to mostly preserve build order with fallbacks outlined
std::vector<uint32_t> sortedBlocks;
sortedBlocks.reserve(function.blocks.size());
for (uint32_t i = 0; i < function.blocks.size(); i++)
sortedBlocks.push_back(i);
std::sort(sortedBlocks.begin(), sortedBlocks.end(), [&](uint32_t idxA, uint32_t idxB) {
const IrBlock& a = function.blocks[idxA];
const IrBlock& b = function.blocks[idxB];
// Place fallback blocks at the end
if ((a.kind == IrBlockKind::Fallback) != (b.kind == IrBlockKind::Fallback))
return (a.kind == IrBlockKind::Fallback) < (b.kind == IrBlockKind::Fallback);
// Try to order by instruction order
return a.start < b.start;
});
DenseHashMap<uint32_t, uint32_t> bcLocations{~0u};
// Create keys for IR assembly locations that original bytecode instruction are interested in
for (const auto& [irLocation, asmLocation] : function.bcMapping)
{
if (irLocation != ~0u)
bcLocations[irLocation] = 0;
}
DenseHashMap<uint32_t, uint32_t> indexIrToBc{~0u};
bool outputEnabled = options.includeAssembly || options.includeIr;
if (outputEnabled && options.annotator)
{
// Create reverse mapping from IR location to bytecode location
for (size_t i = 0; i < function.bcMapping.size(); ++i)
{
uint32_t irLocation = function.bcMapping[i].irLocation;
if (irLocation != ~0u)
indexIrToBc[irLocation] = uint32_t(i);
}
}
IrToStringContext ctx{build.text, function.blocks, function.constants, function.cfg};
// We use this to skip outlined fallback blocks from IR/asm text output
size_t textSize = build.text.length();
uint32_t codeSize = build.getCodeSize();
bool seenFallback = false;
IrBlock dummy;
dummy.start = ~0u;
for (size_t i = 0; i < sortedBlocks.size(); ++i)
{
uint32_t blockIndex = sortedBlocks[i];
IrBlock& block = function.blocks[blockIndex];
if (block.kind == IrBlockKind::Dead)
continue;
LUAU_ASSERT(block.start != ~0u);
LUAU_ASSERT(block.finish != ~0u);
// If we want to skip fallback code IR/asm, we'll record when those blocks start once we see them
if (block.kind == IrBlockKind::Fallback && !seenFallback)
{
textSize = build.text.length();
codeSize = build.getCodeSize();
seenFallback = true;
}
if (options.includeIr)
{
build.logAppend("# ");
toStringDetailed(ctx, block, blockIndex, /* includeUseInfo */ true);
}
build.setLabel(block.label);
for (uint32_t index = block.start; index <= block.finish; index++)
{
LUAU_ASSERT(index < function.instructions.size());
// If IR instruction is the first one for the original bytecode, we can annotate it with source code text
if (outputEnabled && options.annotator)
{
if (uint32_t* bcIndex = indexIrToBc.find(index))
options.annotator(options.annotatorContext, build.text, bytecodeid, *bcIndex);
}
// If bytecode needs the location of this instruction for jumps, record it
if (uint32_t* bcLocation = bcLocations.find(index))
{
Label label = (index == block.start) ? block.label : build.setLabel();
*bcLocation = build.getLabelOffset(label);
}
IrInst& inst = function.instructions[index];
// Skip pseudo instructions, but make sure they are not used at this stage
// This also prevents them from getting into text output when that's enabled
if (isPseudo(inst.cmd))
{
LUAU_ASSERT(inst.useCount == 0);
continue;
}
if (options.includeIr)
{
build.logAppend("# ");
toStringDetailed(ctx, inst, index, /* includeUseInfo */ true);
}
IrBlock& next = i + 1 < sortedBlocks.size() ? function.blocks[sortedBlocks[i + 1]] : dummy;
lowering.lowerInst(inst, index, next);
}
if (options.includeIr)
build.logAppend("#\n");
}
if (outputEnabled && !options.includeOutlinedCode && seenFallback)
{
build.text.resize(textSize);
if (options.includeAssembly)
build.logAppend("; skipping %u bytes of outlined code\n", unsigned((build.getCodeSize() - codeSize) * sizeof(build.code[0])));
}
// Copy assembly locations of IR instructions that are mapped to bytecode instructions
for (auto& [irLocation, asmLocation] : function.bcMapping)
{
if (irLocation != ~0u)
asmLocation = bcLocations[irLocation];
}
}
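The block ordering in lowerImpl is worth a second look: the comparator sorts by a bool key first, so fallback blocks sink to the end while everything else keeps bytecode order. A toy reproduction of just that sort (our own Block stand-in, not the real IR types):

#include <algorithm>
#include <cstdio>
#include <vector>

struct Block
{
    bool fallback; // stands in for IrBlockKind::Fallback
    unsigned start;
};

int main()
{
    // Mixed order on purpose: fallback blocks should sink to the end,
    // everything else stays in instruction order.
    std::vector<Block> blocks = {{false, 30}, {true, 10}, {false, 0}, {true, 40}};

    std::sort(blocks.begin(), blocks.end(), [](const Block& a, const Block& b) {
        if (a.fallback != b.fallback)
            return a.fallback < b.fallback;
        return a.start < b.start;
    });

    for (const Block& b : blocks)
        printf("%s block at %u\n", b.fallback ? "fallback" : "normal", b.start);
    // prints: normal 0, normal 30, fallback 10, fallback 40
}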
 [[maybe_unused]] static void lowerIr(
     X64::AssemblyBuilderX64& build, IrBuilder& ir, NativeState& data, ModuleHelpers& helpers, Proto* proto, AssemblyOptions options)
 {
@@ -69,24 +218,34 @@ static NativeProto* createNativeProto(Proto* proto, const IrBuilder& ir)
     build.align(kFunctionAlignment, X64::AlignmentDataX64::Ud2);

-    X64::IrLoweringX64 lowering(build, helpers, data, proto, ir.function);
-    lowering.lower(options);
+    X64::IrLoweringX64 lowering(build, helpers, data, ir.function);
+
+    lowerImpl(build, lowering, ir.function, proto->bytecodeid, options);
 }

 [[maybe_unused]] static void lowerIr(
     A64::AssemblyBuilderA64& build, IrBuilder& ir, NativeState& data, ModuleHelpers& helpers, Proto* proto, AssemblyOptions options)
 {
-    Label start = build.setLabel();
-
-    build.mov(A64::x0, 1); // finish function in VM
-    build.ret();
-
-    // TODO: This is only needed while we don't support all IR opcodes
-    // When we can't translate some parts of the function, we instead encode a dummy assembly sequence that hands off control to VM
-    // In the future we could return nullptr from assembleFunction and handle it because there may be other reasons for why we refuse to assemble.
-    for (int i = 0; i < proto->sizecode; i++)
-        ir.function.bcMapping[i].asmLocation = build.getLabelOffset(start);
+    if (A64::IrLoweringA64::canLower(ir.function))
+    {
+        A64::IrLoweringA64 lowering(build, helpers, data, proto, ir.function);
+
+        lowerImpl(build, lowering, ir.function, proto->bytecodeid, options);
+    }
+    else
+    {
+        // TODO: This is only needed while we don't support all IR opcodes
+        // When we can't translate some parts of the function, we instead encode a dummy assembly sequence that hands off control to VM
+        // In the future we could return nullptr from assembleFunction and handle it because there may be other reasons for why we refuse to assemble.
+        Label start = build.setLabel();
+
+        build.mov(A64::x0, 1); // finish function in VM
+        build.ldr(A64::x1, A64::mem(A64::rNativeContext, offsetof(NativeContext, gateExit)));
+        build.br(A64::x1);
+
+        for (int i = 0; i < proto->sizecode; i++)
+            ir.function.bcMapping[i].asmLocation = build.getLabelOffset(start);
+    }
 }

 template<typename AssemblyBuilder>
@@ -123,15 +282,13 @@ static NativeProto* assembleFunction(AssemblyBuilder& build, NativeState& data,
     IrBuilder ir;
     ir.buildFunctionIr(proto);

+    computeCfgInfo(ir.function);
+
     if (!FFlag::DebugCodegenNoOpt)
     {
         constPropInBlockChains(ir);
     }

-    // TODO: cfg info has to be computed earlier to use in optimizations
-    // It's done here to appear in text output and to measure performance impact on code generation
-    computeCfgInfo(ir.function);
-
     lowerIr(build, ir, data, helpers, proto, options);

     if (build.logText)
@@ -217,7 +374,8 @@ bool isSupported()
     return true;
 #elif defined(__aarch64__)
-    return true;
+    // TODO: A64 codegen does not generate correct unwind info at the moment so it requires longjmp instead of C++ exceptions
+    return bool(LUA_USE_LONGJMP);
 #else
     return false;
 #endif
@@ -300,7 +458,9 @@ void compile(lua_State* L, int idx)
     gatherFunctions(protos, clvalue(func)->l.p);

     ModuleHelpers helpers;
-#if !defined(__aarch64__)
+#if defined(__aarch64__)
+    A64::assembleHelpers(build, helpers);
+#else
     X64::assembleHelpers(build, helpers);
 #endif
@@ -359,7 +519,9 @@ std::string getAssembly(lua_State* L, int idx, AssemblyOptions options)
     gatherFunctions(protos, clvalue(func)->l.p);

     ModuleHelpers helpers;
-#if !defined(__aarch64__)
+#if defined(__aarch64__)
+    A64::assembleHelpers(build, helpers);
+#else
     X64::assembleHelpers(build, helpers);
 #endif
@@ -373,8 +535,7 @@ std::string getAssembly(lua_State* L, int idx, AssemblyOptions options)
     build.finalize();

     if (options.outputBinary)
-        return std::string(
-            reinterpret_cast<const char*>(build.code.data()), reinterpret_cast<const char*>(build.code.data() + build.code.size())) +
+        return std::string(reinterpret_cast<const char*>(build.code.data()), reinterpret_cast<const char*>(build.code.data() + build.code.size())) +
               std::string(build.data.begin(), build.data.end());
     else
         return build.text;
View file
@@ -6,6 +6,7 @@
 #include "CustomExecUtils.h"
 #include "NativeState.h"

+#include "EmitCommonA64.h"
 #include "lstate.h"
@@ -21,26 +22,50 @@ bool initEntryFunction(NativeState& data)
     AssemblyBuilderA64 build(/* logText= */ false);
     UnwindBuilder& unwind = *data.unwindBuilder.get();

-    unwind.start();
-    unwind.allocStack(8); // TODO: this is only necessary to align stack by 16 bytes, as start() allocates 8b return pointer
+    // Arguments: x0 = lua_State*, x1 = Proto*, x2 = native code pointer to jump to, x3 = NativeContext*

-    // TODO: prologue goes here
+    unwind.start();
+    unwind.allocStack(8); // TODO: this is just a hack to make UnwindBuilder assertions cooperate
+
+    // prologue
+    build.sub(sp, sp, kStackSize);
+    build.stp(x29, x30, mem(sp)); // fp, lr
+
+    // stash non-volatile registers used for execution environment
+    build.stp(x19, x20, mem(sp, 16));
+    build.stp(x21, x22, mem(sp, 32));
+    build.stp(x23, x24, mem(sp, 48));
+
+    build.mov(x29, sp); // this is only necessary if we maintain frame pointers, which we do in the JIT for now
+
     unwind.finish();

     size_t prologueSize = build.setLabel().location;

     // Setup native execution environment
-    // TODO: figure out state layout
+    build.mov(rState, x0);
+    build.mov(rNativeContext, x3);

-    // Jump to the specified instruction; further control flow will be handled with custom ABI with register setup from EmitCommonX64.h
+    build.ldr(rBase, mem(x0, offsetof(lua_State, base))); // L->base
+    build.ldr(rConstants, mem(x1, offsetof(Proto, k))); // proto->k
+    build.ldr(rCode, mem(x1, offsetof(Proto, code))); // proto->code
+
+    build.ldr(x9, mem(x0, offsetof(lua_State, ci))); // L->ci
+    build.ldr(x9, mem(x9, offsetof(CallInfo, func))); // L->ci->func
+    build.ldr(rClosure, mem(x9, offsetof(TValue, value.gc))); // L->ci->func->value.gc aka cl
+
+    // Jump to the specified instruction; further control flow will be handled with custom ABI with register setup from EmitCommonA64.h
     build.br(x2);

     // Even though we jumped away, we will return here in the end
     Label returnOff = build.setLabel();

     // Cleanup and exit
-    // TODO: epilogue
+    build.ldp(x23, x24, mem(sp, 48));
+    build.ldp(x21, x22, mem(sp, 32));
+    build.ldp(x19, x20, mem(sp, 16));
+    build.ldp(x29, x30, mem(sp)); // fp, lr
+    build.add(sp, sp, kStackSize);

     build.ret();
@@ -59,11 +84,24 @@ bool initEntryFunction(NativeState& data)
     // specified by the unwind information of the entry function
     unwind.setBeginOffset(prologueSize);

-    data.context.gateExit = data.context.gateEntry + returnOff.location;
+    data.context.gateExit = data.context.gateEntry + build.getLabelOffset(returnOff);

     return true;
 }

+void assembleHelpers(AssemblyBuilderA64& build, ModuleHelpers& helpers)
+{
+    if (build.logText)
+        build.logAppend("; exitContinueVm\n");
+    helpers.exitContinueVm = build.setLabel();
+    emitExit(build, /* continueInVm */ true);
+
+    if (build.logText)
+        build.logAppend("; exitNoContinueVm\n");
+    helpers.exitNoContinueVm = build.setLabel();
+    emitExit(build, /* continueInVm */ false);
+}
+
 } // namespace A64
 } // namespace CodeGen
 } // namespace Luau
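For orientation, the prologue above lays out a 64-byte frame: fp/lr at sp+0 and the three stashed register pairs at sp+16/32/48. A compile-time sketch of that layout (EntryFrame is a hypothetical struct of ours mirroring the stp offsets, not a type from the codebase):

#include <cstddef>
#include <cstdio>

// Assumed frame layout for the A64 entry gate, derived from the stp/ldp
// sequence above: [sp+0] x29/x30, [sp+16] x19/x20, [sp+32] x21/x22, [sp+48] x23/x24.
struct EntryFrame
{
    unsigned long long fp, lr;
    unsigned long long x19, x20;
    unsigned long long x21, x22;
    unsigned long long x23, x24;
};

static_assert(sizeof(EntryFrame) == 64, "kStackSize stashes exactly 8 registers");

int main()
{
    printf("x21/x22 pair lives at sp+%zu\n", offsetof(EntryFrame, x21));
}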
View file
@@ -7,11 +7,15 @@ namespace CodeGen
 {

 struct NativeState;
+struct ModuleHelpers;

 namespace A64
 {

+class AssemblyBuilderA64;
+
 bool initEntryFunction(NativeState& data);
+void assembleHelpers(AssemblyBuilderA64& build, ModuleHelpers& helpers);

 } // namespace A64
 } // namespace CodeGen
View file
@@ -126,5 +126,42 @@ void callEpilogC(lua_State* L, int nresults, int n)
     L->top = (nresults == LUA_MULTRET) ? res : cip->top;
 }
const Instruction* returnFallback(lua_State* L, StkId ra, int n)
{
// ci is our callinfo, cip is our parent
CallInfo* ci = L->ci;
CallInfo* cip = ci - 1;
StkId res = ci->func; // note: we assume CALL always puts func+args and expects results to start at func
StkId vali = ra;
StkId valend = (n == LUA_MULTRET) ? L->top : ra + n; // copy as much as possible for MULTRET calls, and only as much as needed otherwise
int nresults = ci->nresults;
// copy return values into parent stack (but only up to nresults!), fill the rest with nil
// note: in MULTRET context nresults starts as -1 so i != 0 condition never activates intentionally
int i;
for (i = nresults; i != 0 && vali < valend; i--)
setobj2s(L, res++, vali++);
while (i-- > 0)
setnilvalue(res++);
// pop the stack frame
L->ci = cip;
L->base = cip->base;
L->top = (nresults == LUA_MULTRET) ? res : cip->top;
// we're done!
if (LUAU_UNLIKELY(ci->flags & LUA_CALLINFO_RETURN))
{
L->top = res;
return NULL;
}
LUAU_ASSERT(isLua(cip));
return cip->savedpc;
}
 } // namespace CodeGen
 } // namespace Luau
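The copy loop in returnFallback is easy to misread: nresults counts down to zero for fixed-result calls, but starts at -1 for MULTRET so the countdown never truncates the copy. A small standalone model of just this loop (plain ints standing in for TValues; copyResults is our own name):

#include <cstdio>
#include <vector>

// Models the copy-then-fill loop above: 'vals' are the returned values,
// 'nresults' is what the caller expects (-1 plays the role of LUA_MULTRET).
static std::vector<int> copyResults(const std::vector<int>& vals, int nresults)
{
    std::vector<int> res;
    size_t vali = 0;

    int i;
    for (i = nresults; i != 0 && vali < vals.size(); i--)
        res.push_back(vals[vali++]);

    while (i-- > 0)
        res.push_back(0); // stands in for setnilvalue

    return res;
}

int main()
{
    printf("%zu\n", copyResults({1, 2, 3}, 2).size());  // 2: truncated
    printf("%zu\n", copyResults({1}, 3).size());        // 3: padded with nils
    printf("%zu\n", copyResults({1, 2, 3}, -1).size()); // 3: MULTRET copies all
}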
View file
@@ -16,5 +16,7 @@ void forgPrepXnextFallback(lua_State* L, TValue* ra, int pc);
 Closure* callProlog(lua_State* L, TValue* ra, StkId argtop, int nresults);
 void callEpilogC(lua_State* L, int nresults, int n);

+const Instruction* returnFallback(lua_State* L, StkId ra, int n);
+
 } // namespace CodeGen
 } // namespace Luau
View file
@@ -0,0 +1,75 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "EmitCommonA64.h"
#include "NativeState.h"
#include "CustomExecUtils.h"
namespace Luau
{
namespace CodeGen
{
namespace A64
{
void emitExit(AssemblyBuilderA64& build, bool continueInVm)
{
build.mov(x0, continueInVm);
build.ldr(x1, mem(rNativeContext, offsetof(NativeContext, gateExit)));
build.br(x1);
}
void emitUpdateBase(AssemblyBuilderA64& build)
{
build.ldr(rBase, mem(rState, offsetof(lua_State, base)));
}
void emitSetSavedPc(AssemblyBuilderA64& build, int pcpos)
{
if (pcpos * sizeof(Instruction) <= AssemblyBuilderA64::kMaxImmediate)
{
build.add(x0, rCode, uint16_t(pcpos * sizeof(Instruction)));
}
else
{
build.mov(x0, pcpos * sizeof(Instruction));
build.add(x0, rCode, x0);
}
build.ldr(x1, mem(rState, offsetof(lua_State, ci)));
build.str(x0, mem(x1, offsetof(CallInfo, savedpc)));
}
void emitInterrupt(AssemblyBuilderA64& build, int pcpos)
{
Label skip;
build.ldr(x2, mem(rState, offsetof(lua_State, global)));
build.ldr(x2, mem(x2, offsetof(global_State, cb.interrupt)));
build.cbz(x2, skip);
emitSetSavedPc(build, pcpos + 1); // uses x0/x1
// Call interrupt
// TODO: This code should be outlined so that it can be shared by multiple interruptible instructions
build.mov(x0, rState);
build.mov(w1, -1);
build.blr(x2);
// Check if we need to exit
build.ldrb(w0, mem(rState, offsetof(lua_State, status)));
build.cbz(w0, skip);
// L->ci->savedpc--
build.ldr(x0, mem(rState, offsetof(lua_State, ci)));
build.ldr(x1, mem(x0, offsetof(CallInfo, savedpc)));
build.sub(x1, x1, sizeof(Instruction));
build.str(x1, mem(x0, offsetof(CallInfo, savedpc)));
emitExit(build, /* continueInVm */ false);
build.setLabel(skip);
}
} // namespace A64
} // namespace CodeGen
} // namespace Luau
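The emitInterrupt sequence above reads more easily as C control flow; roughly the following, with simplified stand-ins for the VM structures (our own types, and the savedpc rollback is omitted):

#include <cstdio>

// Simplified stand-ins for the VM state consulted by emitInterrupt.
struct FakeState
{
    void (*interrupt)(FakeState* L, int gc); // plays the role of global_State::cb.interrupt
    int status;
};

// What the emitted code does: skip everything when no interrupt callback is
// installed, otherwise call it and exit to the VM if it set an error status.
static bool runInterrupt(FakeState* L)
{
    if (!L->interrupt)
        return true; // cbz x2, skip

    L->interrupt(L, -1); // blr x2 with w1 = -1

    if (L->status != 0)
        return false; // emitExit(build, /* continueInVm */ false)

    return true;
}

int main()
{
    FakeState L = {nullptr, 0};
    printf("no callback: %s\n", runInterrupt(&L) ? "continue" : "exit");
}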
View file
@@ -0,0 +1,77 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/AssemblyBuilderA64.h"
#include "EmitCommon.h"
#include "lobject.h"
#include "ltm.h"
// AArch64 ABI reminder:
// Arguments: x0-x7, v0-v7
// Return: x0, v0 (or x8 that points to the address of the resulting structure)
// Volatile: x9-x14, v16-v31 ("caller-saved", any call may change them)
// Non-volatile: x19-x28, v8-v15 ("callee-saved", preserved after calls, only bottom half of SIMD registers is preserved!)
// Reserved: x16-x18: reserved for linker/platform use; x29: frame pointer (unless omitted); x30: link register; x31: stack pointer
namespace Luau
{
namespace CodeGen
{
struct NativeState;
namespace A64
{
// Data that is very common to access is placed in non-volatile registers
constexpr RegisterA64 rState = x19; // lua_State* L
constexpr RegisterA64 rBase = x20; // StkId base
constexpr RegisterA64 rNativeContext = x21; // NativeContext* context
constexpr RegisterA64 rConstants = x22; // TValue* k
constexpr RegisterA64 rClosure = x23; // Closure* cl
constexpr RegisterA64 rCode = x24; // Instruction* code
// Native code is as stackless as the interpreter, so we can place some data on the stack once and have it accessible at any point
// See CodeGenA64.cpp for layout
constexpr unsigned kStackSize = 64; // 8 stashed registers
inline AddressA64 luauReg(int ri)
{
return mem(rBase, ri * sizeof(TValue));
}
inline AddressA64 luauRegValue(int ri)
{
return mem(rBase, ri * sizeof(TValue) + offsetof(TValue, value));
}
inline AddressA64 luauRegTag(int ri)
{
return mem(rBase, ri * sizeof(TValue) + offsetof(TValue, tt));
}
inline AddressA64 luauConstant(int ki)
{
return mem(rConstants, ki * sizeof(TValue));
}
inline AddressA64 luauConstantTag(int ki)
{
return mem(rConstants, ki * sizeof(TValue) + offsetof(TValue, tt));
}
inline AddressA64 luauConstantValue(int ki)
{
return mem(rConstants, ki * sizeof(TValue) + offsetof(TValue, value));
}
void emitExit(AssemblyBuilderA64& build, bool continueInVm);
void emitUpdateBase(AssemblyBuilderA64& build);
void emitSetSavedPc(AssemblyBuilderA64& build, int pcpos); // invalidates x0/x1
void emitInterrupt(AssemblyBuilderA64& build, int pcpos);
} // namespace A64
} // namespace CodeGen
} // namespace Luau
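These luauReg*/luauConstant* helpers are plain base-plus-offset arithmetic over the TValue array. A sketch of the same computation with a miniature stand-in type (field sizes differ from the real TValue; the shape of the math is the point):

#include <cstddef>
#include <cstdio>

// Miniature stand-in for the VM's TValue, just to show the addressing math.
struct MiniTValue
{
    double value;
    int extra;
    int tt;
};

// Mirrors luauRegTag: byte offset of register ri's tag relative to rBase.
static size_t regTagOffset(int ri)
{
    return ri * sizeof(MiniTValue) + offsetof(MiniTValue, tt);
}

int main()
{
    // With this 16-byte stand-in, register 3's tag lives 60 bytes past base.
    printf("tag offset of r3: %zu\n", regTagOffset(3));
}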
View file
@@ -0,0 +1,59 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "EmitInstructionA64.h"
#include "Luau/AssemblyBuilderA64.h"
#include "EmitCommonA64.h"
#include "NativeState.h"
#include "CustomExecUtils.h"
namespace Luau
{
namespace CodeGen
{
namespace A64
{
void emitInstReturn(AssemblyBuilderA64& build, ModuleHelpers& helpers, int ra, int n)
{
// callFallback(L, ra, n)
build.mov(x0, rState);
build.add(x1, rBase, uint16_t(ra * sizeof(TValue)));
build.mov(w2, n);
build.ldr(x3, mem(rNativeContext, offsetof(NativeContext, returnFallback)));
build.blr(x3);
emitUpdateBase(build);
// If the fallback requested an exit, we need to do this right away
build.cbz(x0, helpers.exitNoContinueVm);
// Need to update state of the current function before we jump away
build.ldr(x1, mem(rState, offsetof(lua_State, ci))); // L->ci
build.ldr(x1, mem(x1, offsetof(CallInfo, func))); // L->ci->func
build.ldr(rClosure, mem(x1, offsetof(TValue, value.gc))); // L->ci->func->value.gc aka cl
build.ldr(x1, mem(rClosure, offsetof(Closure, l.p))); // cl->l.p aka proto
build.ldr(rConstants, mem(x1, offsetof(Proto, k))); // proto->k
build.ldr(rCode, mem(x1, offsetof(Proto, code))); // proto->code
// Get instruction index from instruction pointer
// To get instruction index from instruction pointer, we need to divide byte offset by 4
// But we will actually need to scale instruction index by 8 back to byte offset later so it cancels out
build.sub(x2, x0, rCode);
build.add(x2, x2, x2); // TODO: this would not be necessary if we supported shifted register offsets in loads
// We need to check if the new function can be executed natively
build.ldr(x1, mem(x1, offsetofProtoExecData));
build.cbz(x1, helpers.exitContinueVm);
// Get new instruction location and jump to it
build.ldr(x1, mem(x1, offsetof(NativeProto, instTargets)));
build.ldr(x1, mem(x1, x2));
build.br(x1);
}
} // namespace A64
} // namespace CodeGen
} // namespace Luau
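The index math near the end of emitInstReturn relies on a cancellation: bytecode instructions are 4 bytes and instTargets entries are 8, so doubling the byte difference equals dividing by 4 and multiplying by 8. A standalone check (toy buffer and names of ours):

#include <cstdint>
#include <cstdio>

int main()
{
    uint32_t code[16] = {};
    uint32_t* pc = code + 5; // pretend execution resumed at instruction 5

    uintptr_t byteDiff = uintptr_t(pc) - uintptr_t(code); // sub x2, x0, rCode
    uintptr_t tableOffset = byteDiff + byteDiff;          // add x2, x2, x2

    // Equivalent to: instruction index (byteDiff / 4) scaled by 8 for a
    // table of 8-byte pointers.
    printf("%lu == %lu\n", (unsigned long)tableOffset, (unsigned long)((byteDiff / 4) * 8));
}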
View file
@@ -0,0 +1,20 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
namespace Luau
{
namespace CodeGen
{
struct ModuleHelpers;
namespace A64
{
class AssemblyBuilderA64;
void emitInstReturn(AssemblyBuilderA64& build, ModuleHelpers& helpers, int ra, int n);
} // namespace A64
} // namespace CodeGen
} // namespace Luau
View file
@@ -4,12 +4,7 @@
 #include "Luau/AssemblyBuilderX64.h"

 #include "CustomExecUtils.h"
-#include "EmitBuiltinsX64.h"
 #include "EmitCommonX64.h"
-#include "NativeState.h"
-
-#include "lobject.h"
-#include "ltm.h"

 namespace Luau
 {
@@ -18,16 +13,8 @@ namespace CodeGen
 namespace X64
 {

-void emitInstCall(AssemblyBuilderX64& build, ModuleHelpers& helpers, const Instruction* pc, int pcpos)
+void emitInstCall(AssemblyBuilderX64& build, ModuleHelpers& helpers, int ra, int nparams, int nresults)
 {
-    int ra = LUAU_INSN_A(*pc);
-    int nparams = LUAU_INSN_B(*pc) - 1;
-    int nresults = LUAU_INSN_C(*pc) - 1;
-
-    emitInterrupt(build, pcpos);
-    emitSetSavedPc(build, pcpos + 1);
-
     build.mov(rArg1, rState);
     build.lea(rArg2, luauRegAddress(ra));
@@ -171,13 +158,8 @@ void emitInstCall(AssemblyBuilderX64& build, ModuleHelpers& helpers, const Instr
     }
 }

-void emitInstReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers, const Instruction* pc, int pcpos)
+void emitInstReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers, int ra, int actualResults)
 {
-    emitInterrupt(build, pcpos);
-
-    int ra = LUAU_INSN_A(*pc);
-    int b = LUAU_INSN_B(*pc) - 1;
-
     RegisterX64 ci = r8;
     RegisterX64 cip = r9;
     RegisterX64 res = rdi;
@@ -196,7 +178,7 @@ void emitInstReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers, const Ins
     RegisterX64 counter = ecx;

-    if (b == 0)
+    if (actualResults == 0)
     {
         // Our instruction doesn't have any results, so just fill results expected in parent with 'nil'
         build.test(nresults, nresults); // test here will set SF=1 for a negative number, ZF=1 for zero and OF=0
@@ -210,7 +192,7 @@ void emitInstReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers, const Ins
         build.dec(counter);
         build.jcc(ConditionX64::NotZero, repeatNilLoop);
     }
-    else if (b == 1)
+    else if (actualResults == 1)
     {
         // Try setting our 1 result
         build.test(nresults, nresults);
@@ -245,10 +227,10 @@ void emitInstReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers, const Ins
     build.lea(vali, luauRegAddress(ra));

     // Copy as much as possible for MULTRET calls, and only as much as needed otherwise
-    if (b == LUA_MULTRET)
+    if (actualResults == LUA_MULTRET)
         build.mov(valend, qword[rState + offsetof(lua_State, top)]); // valend = L->top
     else
-        build.lea(valend, luauRegAddress(ra + b)); // valend = ra + b
+        build.lea(valend, luauRegAddress(ra + actualResults)); // valend = ra + actualResults

     build.mov(counter, nresults);
@@ -333,24 +315,19 @@ void emitInstReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers, const Ins
     build.jmp(qword[rdx + rax * 2]);
 }

-void emitInstSetList(AssemblyBuilderX64& build, const Instruction* pc, Label& next)
+void emitInstSetList(AssemblyBuilderX64& build, Label& next, int ra, int rb, int count, uint32_t index)
 {
-    int ra = LUAU_INSN_A(*pc);
-    int rb = LUAU_INSN_B(*pc);
-    int c = LUAU_INSN_C(*pc) - 1;
-    uint32_t index = pc[1];
-
-    OperandX64 last = index + c - 1;
+    OperandX64 last = index + count - 1;

-    // Using non-volatile 'rbx' for dynamic 'c' value (for LUA_MULTRET) to skip later recomputation
-    // We also keep 'c' scaled by sizeof(TValue) here as it helps in the loop below
+    // Using non-volatile 'rbx' for dynamic 'count' value (for LUA_MULTRET) to skip later recomputation
+    // We also keep 'count' scaled by sizeof(TValue) here as it helps in the loop below
     RegisterX64 cscaled = rbx;

-    if (c == LUA_MULTRET)
+    if (count == LUA_MULTRET)
     {
         RegisterX64 tmp = rax;

-        // c = L->top - rb
+        // count = L->top - rb
         build.mov(cscaled, qword[rState + offsetof(lua_State, top)]);
         build.lea(tmp, luauRegAddress(rb));
         build.sub(cscaled, tmp); // Using byte difference
@@ -360,7 +337,7 @@ void emitInstSetList(AssemblyBuilderX64& build, const Instruction* pc, Label& ne
         build.mov(tmp, qword[tmp + offsetof(CallInfo, top)]);
         build.mov(qword[rState + offsetof(lua_State, top)], tmp);

-        // last = index + c - 1;
+        // last = index + count - 1;
         last = edx;
         build.mov(last, dwordReg(cscaled));
         build.shr(last, kTValueSizeLog2);
@@ -394,9 +371,9 @@ void emitInstSetList(AssemblyBuilderX64& build, const Instruction* pc, Label& ne
     const int kUnrollSetListLimit = 4;

-    if (c != LUA_MULTRET && c <= kUnrollSetListLimit)
+    if (count != LUA_MULTRET && count <= kUnrollSetListLimit)
     {
-        for (int i = 0; i < c; ++i)
+        for (int i = 0; i < count; ++i)
         {
             // setobj2t(L, &array[index + i - 1], rb + i);
             build.vmovups(xmm0, luauRegValue(rb + i));
@@ -405,17 +382,17 @@ void emitInstSetList(AssemblyBuilderX64& build, const Instruction* pc, Label& ne
     }
     else
     {
-        LUAU_ASSERT(c != 0);
+        LUAU_ASSERT(count != 0);

         build.xor_(offset, offset);
         if (index != 1)
             build.add(arrayDst, (index - 1) * sizeof(TValue));

         Label repeatLoop, endLoop;
-        OperandX64 limit = c == LUA_MULTRET ? cscaled : OperandX64(c * sizeof(TValue));
+        OperandX64 limit = count == LUA_MULTRET ? cscaled : OperandX64(count * sizeof(TValue));

         // If c is static, we will always do at least one iteration
-        if (c == LUA_MULTRET)
+        if (count == LUA_MULTRET)
         {
             build.cmp(offset, limit);
             build.jcc(ConditionX64::NotBelow, endLoop);
@@ -556,14 +533,14 @@ static void emitInstAndX(AssemblyBuilderX64& build, int ra, int rb, OperandX64 c
     }
 }

-void emitInstAnd(AssemblyBuilderX64& build, const Instruction* pc)
+void emitInstAnd(AssemblyBuilderX64& build, int ra, int rb, int rc)
 {
-    emitInstAndX(build, LUAU_INSN_A(*pc), LUAU_INSN_B(*pc), luauReg(LUAU_INSN_C(*pc)));
+    emitInstAndX(build, ra, rb, luauReg(rc));
 }

-void emitInstAndK(AssemblyBuilderX64& build, const Instruction* pc)
+void emitInstAndK(AssemblyBuilderX64& build, int ra, int rb, int kc)
 {
-    emitInstAndX(build, LUAU_INSN_A(*pc), LUAU_INSN_B(*pc), luauConstant(LUAU_INSN_C(*pc)));
+    emitInstAndX(build, ra, rb, luauConstant(kc));
 }

 static void emitInstOrX(AssemblyBuilderX64& build, int ra, int rb, OperandX64 c)
@@ -594,14 +571,14 @@ static void emitInstOrX(AssemblyBuilderX64& build, int ra, int rb, OperandX64 c)
     }
 }

-void emitInstOr(AssemblyBuilderX64& build, const Instruction* pc)
+void emitInstOr(AssemblyBuilderX64& build, int ra, int rb, int rc)
 {
-    emitInstOrX(build, LUAU_INSN_A(*pc), LUAU_INSN_B(*pc), luauReg(LUAU_INSN_C(*pc)));
+    emitInstOrX(build, ra, rb, luauReg(rc));
 }

-void emitInstOrK(AssemblyBuilderX64& build, const Instruction* pc)
+void emitInstOrK(AssemblyBuilderX64& build, int ra, int rb, int kc)
 {
-    emitInstOrX(build, LUAU_INSN_A(*pc), LUAU_INSN_B(*pc), luauConstant(LUAU_INSN_C(*pc)));
+    emitInstOrX(build, ra, rb, luauConstant(kc));
 }

 void emitInstGetImportFallback(AssemblyBuilderX64& build, int ra, uint32_t aux)
View file
@@ -3,11 +3,6 @@
 #include <stdint.h>

-#include "ltm.h"
-
-typedef uint32_t Instruction;
-typedef struct lua_TValue TValue;
-
 namespace Luau
 {
 namespace CodeGen
@@ -21,16 +16,16 @@ namespace X64

 class AssemblyBuilderX64;

-void emitInstCall(AssemblyBuilderX64& build, ModuleHelpers& helpers, const Instruction* pc, int pcpos);
-void emitInstReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers, const Instruction* pc, int pcpos);
-void emitInstSetList(AssemblyBuilderX64& build, const Instruction* pc, Label& next);
+void emitInstCall(AssemblyBuilderX64& build, ModuleHelpers& helpers, int ra, int nparams, int nresults);
+void emitInstReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers, int ra, int actualResults);
+void emitInstSetList(AssemblyBuilderX64& build, Label& next, int ra, int rb, int count, uint32_t index);
 void emitinstForGLoop(AssemblyBuilderX64& build, int ra, int aux, Label& loopRepeat, Label& loopExit);
 void emitinstForGLoopFallback(AssemblyBuilderX64& build, int pcpos, int ra, int aux, Label& loopRepeat);
 void emitInstForGPrepXnextFallback(AssemblyBuilderX64& build, int pcpos, int ra, Label& target);
-void emitInstAnd(AssemblyBuilderX64& build, const Instruction* pc);
-void emitInstAndK(AssemblyBuilderX64& build, const Instruction* pc);
-void emitInstOr(AssemblyBuilderX64& build, const Instruction* pc);
-void emitInstOrK(AssemblyBuilderX64& build, const Instruction* pc);
+void emitInstAnd(AssemblyBuilderX64& build, int ra, int rb, int rc);
+void emitInstAndK(AssemblyBuilderX64& build, int ra, int rb, int kc);
+void emitInstOr(AssemblyBuilderX64& build, int ra, int rb, int rc);
+void emitInstOrK(AssemblyBuilderX64& build, int ra, int rb, int kc);
 void emitInstGetImportFallback(AssemblyBuilderX64& build, int ra, uint32_t aux);
 void emitInstCoverage(AssemblyBuilderX64& build, int pcpos);
View file
@@ -244,12 +244,16 @@ static RegisterSet computeBlockLiveInRegSet(IrFunction& function, const IrBlock&
     // A <- B, C
     case IrCmd::DO_ARITH:
     case IrCmd::GET_TABLE:
-    case IrCmd::SET_TABLE:
         use(inst.b);
         maybeUse(inst.c); // Argument can also be a VmConst

         def(inst.a);
         break;
+    case IrCmd::SET_TABLE:
+        use(inst.a);
+        use(inst.b);
+        maybeUse(inst.c); // Argument can also be a VmConst
+        break;
     // A <- B
     case IrCmd::DO_LEN:
         use(inst.b);
@@ -301,13 +305,13 @@ static RegisterSet computeBlockLiveInRegSet(IrFunction& function, const IrBlock&
         useRange(inst.c.index, function.intOp(inst.d));
         break;
     case IrCmd::LOP_CALL:
-        use(inst.b);
-        useRange(inst.b.index + 1, function.intOp(inst.c));
-        defRange(inst.b.index, function.intOp(inst.d));
+        use(inst.a);
+        useRange(inst.a.index + 1, function.intOp(inst.b));
+        defRange(inst.a.index, function.intOp(inst.c));
         break;
     case IrCmd::LOP_RETURN:
-        useRange(inst.b.index, function.intOp(inst.c));
+        useRange(inst.a.index, function.intOp(inst.b));
         break;

     case IrCmd::FASTCALL:
     case IrCmd::INVOKE_FASTCALL:
@@ -333,7 +337,9 @@ static RegisterSet computeBlockLiveInRegSet(IrFunction& function, const IrBlock&
             useVarargs(inst.c.index);
         }

-        defRange(inst.b.index, function.intOp(inst.f));
+        // Multiple return sequences (count == -1) are defined by ADJUST_STACK_TO_REG
+        if (int count = function.intOp(inst.f); count != -1)
+            defRange(inst.b.index, count);
         break;
     case IrCmd::LOP_FORGLOOP:
         // First register is not used by instruction, we check that it's still 'nil' with CHECK_TAG
@@ -352,20 +358,20 @@ static RegisterSet computeBlockLiveInRegSet(IrFunction& function, const IrBlock&
     case IrCmd::LOP_FORGPREP_XNEXT_FALLBACK:
         use(inst.b);
         break;
-    // B <- C, D
+    // A <- B, C
     case IrCmd::LOP_AND:
     case IrCmd::LOP_OR:
+        use(inst.b);
         use(inst.c);
-        use(inst.d);

-        def(inst.b);
+        def(inst.a);
         break;
-    // B <- C
+    // A <- B
     case IrCmd::LOP_ANDK:
     case IrCmd::LOP_ORK:
-        use(inst.c);
+        use(inst.b);

-        def(inst.b);
+        def(inst.a);
         break;
     case IrCmd::FALLBACK_GETGLOBAL:
         def(inst.b);
@@ -405,8 +411,10 @@ static RegisterSet computeBlockLiveInRegSet(IrFunction& function, const IrBlock&
         defRange(inst.b.index, 3);
         break;
     case IrCmd::ADJUST_STACK_TO_REG:
+        defRange(inst.a.index, -1);
+        break;
     case IrCmd::ADJUST_STACK_TO_TOP:
-        // While these can be considered as vararg producers and consumers, it is already handled in fastcall instruction
+        // While this can be considered to be a vararg consumer, it is already handled in fastcall instructions
         break;

     default:
@@ -626,7 +634,7 @@ void computeCfgInfo(IrFunction& function)
     computeCfgLiveInOutRegSets(function);
 }

-BlockIteratorWrapper predecessors(CfgInfo& cfg, uint32_t blockIdx)
+BlockIteratorWrapper predecessors(const CfgInfo& cfg, uint32_t blockIdx)
 {
     LUAU_ASSERT(blockIdx < cfg.predecessorsOffsets.size());
@@ -636,7 +644,7 @@ BlockIteratorWrapper predecessors(CfgInfo& cfg, uint32_t blockIdx)
     return BlockIteratorWrapper{cfg.predecessors.data() + start, cfg.predecessors.data() + end};
 }

-BlockIteratorWrapper successors(CfgInfo& cfg, uint32_t blockIdx)
+BlockIteratorWrapper successors(const CfgInfo& cfg, uint32_t blockIdx)
 {
     LUAU_ASSERT(blockIdx < cfg.successorsOffsets.size());
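The SET_TABLE change above matters because a table store consumes its first operand instead of defining it, which flips what a backward liveness pass concludes. A toy def/use walk showing the effect (a simplified model of ours, not the real analysis):

#include <cstdio>
#include <set>

struct ToyInst
{
    std::set<int> uses;
    std::set<int> defs;
};

int main()
{
    // Backward liveness over a two-instruction block: r0 = r1 + r2; t[k] = r0
    ToyInst block[] = {
        {{1, 2}, {0}}, // DO_ARITH: defines r0
        {{0}, {}},     // SET_TABLE: uses r0, defines nothing
    };

    std::set<int> live;
    for (int i = 1; i >= 0; i--)
    {
        for (int d : block[i].defs)
            live.erase(d);
        for (int u : block[i].uses)
            live.insert(u);
    }

    // r0 dies inside the block; only r1 and r2 are live-in
    for (int r : live)
        printf("live-in: r%d\n", r);
}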
View file
@@ -132,7 +132,10 @@ void IrBuilder::translateInst(LuauOpcode op, const Instruction* pc, int i)
         translateInstSetGlobal(*this, pc, i);
         break;
     case LOP_CALL:
-        inst(IrCmd::LOP_CALL, constUint(i), vmReg(LUAU_INSN_A(*pc)), constInt(LUAU_INSN_B(*pc) - 1), constInt(LUAU_INSN_C(*pc) - 1));
+        inst(IrCmd::INTERRUPT, constUint(i));
+        inst(IrCmd::SET_SAVEDPC, constUint(i + 1));
+
+        inst(IrCmd::LOP_CALL, vmReg(LUAU_INSN_A(*pc)), constInt(LUAU_INSN_B(*pc) - 1), constInt(LUAU_INSN_C(*pc) - 1));

         if (activeFastcallFallback)
         {
@@ -144,7 +147,9 @@ void IrBuilder::translateInst(LuauOpcode op, const Instruction* pc, int i)
         }
         break;
     case LOP_RETURN:
-        inst(IrCmd::LOP_RETURN, constUint(i), vmReg(LUAU_INSN_A(*pc)), constInt(LUAU_INSN_B(*pc) - 1));
+        inst(IrCmd::INTERRUPT, constUint(i));
+
+        inst(IrCmd::LOP_RETURN, vmReg(LUAU_INSN_A(*pc)), constInt(LUAU_INSN_B(*pc) - 1));
         break;
     case LOP_GETTABLE:
         translateInstGetTable(*this, pc, i);
@@ -358,16 +363,16 @@ void IrBuilder::translateInst(LuauOpcode op, const Instruction* pc, int i)
         translateInstForGPrepInext(*this, pc, i);
         break;
     case LOP_AND:
-        inst(IrCmd::LOP_AND, constUint(i), vmReg(LUAU_INSN_A(*pc)), vmReg(LUAU_INSN_B(*pc)), vmReg(LUAU_INSN_C(*pc)));
+        inst(IrCmd::LOP_AND, vmReg(LUAU_INSN_A(*pc)), vmReg(LUAU_INSN_B(*pc)), vmReg(LUAU_INSN_C(*pc)));
         break;
     case LOP_ANDK:
-        inst(IrCmd::LOP_ANDK, constUint(i), vmReg(LUAU_INSN_A(*pc)), vmReg(LUAU_INSN_B(*pc)), vmConst(LUAU_INSN_C(*pc)));
+        inst(IrCmd::LOP_ANDK, vmReg(LUAU_INSN_A(*pc)), vmReg(LUAU_INSN_B(*pc)), vmConst(LUAU_INSN_C(*pc)));
         break;
     case LOP_OR:
-        inst(IrCmd::LOP_OR, constUint(i), vmReg(LUAU_INSN_A(*pc)), vmReg(LUAU_INSN_B(*pc)), vmReg(LUAU_INSN_C(*pc)));
+        inst(IrCmd::LOP_OR, vmReg(LUAU_INSN_A(*pc)), vmReg(LUAU_INSN_B(*pc)), vmReg(LUAU_INSN_C(*pc)));
         break;
     case LOP_ORK:
-        inst(IrCmd::LOP_ORK, constUint(i), vmReg(LUAU_INSN_A(*pc)), vmReg(LUAU_INSN_B(*pc)), vmConst(LUAU_INSN_C(*pc)));
+        inst(IrCmd::LOP_ORK, vmReg(LUAU_INSN_A(*pc)), vmReg(LUAU_INSN_B(*pc)), vmConst(LUAU_INSN_C(*pc)));
         break;
     case LOP_COVERAGE:
         inst(IrCmd::LOP_COVERAGE, constUint(i));
View file
@@ -455,7 +455,7 @@ void toStringDetailed(IrToStringContext& ctx, const IrBlock& block, uint32_t ind
     }

     // Predecessor list
-    if (!ctx.cfg.predecessors.empty())
+    if (index < ctx.cfg.predecessorsOffsets.size())
     {
         BlockIteratorWrapper pred = predecessors(ctx.cfg, index);
@@ -469,7 +469,7 @@ void toStringDetailed(IrToStringContext& ctx, const IrBlock& block, uint32_t ind
     }

     // Successor list
-    if (!ctx.cfg.successors.empty())
+    if (index < ctx.cfg.successorsOffsets.size())
     {
         BlockIteratorWrapper succ = successors(ctx.cfg, index);
@@ -509,14 +509,14 @@ void toStringDetailed(IrToStringContext& ctx, const IrBlock& block, uint32_t ind
     }
 }

-std::string toString(IrFunction& function, bool includeUseInfo)
+std::string toString(const IrFunction& function, bool includeUseInfo)
 {
     std::string result;
     IrToStringContext ctx{result, function.blocks, function.constants, function.cfg};

     for (size_t i = 0; i < function.blocks.size(); i++)
     {
-        IrBlock& block = function.blocks[i];
+        const IrBlock& block = function.blocks[i];

         if (block.kind == IrBlockKind::Dead)
             continue;
@@ -532,7 +532,7 @@ std::string toString(IrFunction& function, bool includeUseInfo)
         // To allow dumping blocks that are still being constructed, we can't rely on terminator and need a bounds check
         for (uint32_t index = block.start; index <= block.finish && index < uint32_t(function.instructions.size()); index++)
         {
-            IrInst& inst = function.instructions[index];
+            const IrInst& inst = function.instructions[index];

             // Skip pseudo instructions unless they are still referenced
             if (isPseudo(inst.cmd) && inst.useCount == 0)
@@ -548,7 +548,7 @@ std::string toString(IrFunction& function, bool includeUseInfo)
     return result;
 }

-std::string dump(IrFunction& function)
+std::string dump(const IrFunction& function)
 {
     std::string result = toString(function, /* includeUseInfo */ true);
@@ -557,12 +557,12 @@ std::string dump(IrFunction& function)
     return result;
 }

-std::string toDot(IrFunction& function, bool includeInst)
+std::string toDot(const IrFunction& function, bool includeInst)
 {
     std::string result;
     IrToStringContext ctx{result, function.blocks, function.constants, function.cfg};

-    auto appendLabelRegset = [&ctx](std::vector<RegisterSet>& regSets, size_t blockIdx, const char* name) {
+    auto appendLabelRegset = [&ctx](const std::vector<RegisterSet>& regSets, size_t blockIdx, const char* name) {
         if (blockIdx < regSets.size())
         {
             const RegisterSet& rs = regSets[blockIdx];
@@ -581,7 +581,7 @@ std::string toDot(IrFunction& function, bool includeInst)
     for (size_t i = 0; i < function.blocks.size(); i++)
     {
-        IrBlock& block = function.blocks[i];
+        const IrBlock& block = function.blocks[i];

         append(ctx.result, "b%u [", unsigned(i));
@@ -599,7 +599,7 @@ std::string toDot(IrFunction& function, bool includeInst)
     {
         for (uint32_t instIdx = block.start; instIdx <= block.finish; instIdx++)
         {
-            IrInst& inst = function.instructions[instIdx];
+            const IrInst& inst = function.instructions[instIdx];

             // Skip pseudo instructions unless they are still referenced
             if (isPseudo(inst.cmd) && inst.useCount == 0)
@@ -618,14 +618,14 @@ std::string toDot(IrFunction& function, bool includeInst)
     for (size_t i = 0; i < function.blocks.size(); i++)
     {
-        IrBlock& block = function.blocks[i];
+        const IrBlock& block = function.blocks[i];

         if (block.start == ~0u)
             continue;

         for (uint32_t instIdx = block.start; instIdx != ~0u && instIdx <= block.finish; instIdx++)
         {
-            IrInst& inst = function.instructions[instIdx];
+            const IrInst& inst = function.instructions[instIdx];

             auto checkOp = [&](IrOp op) {
                 if (op.kind == IrOpKind::Block)
@@ -651,7 +651,7 @@ std::string toDot(IrFunction& function, bool includeInst)
     return result;
 }

-std::string dumpDot(IrFunction& function, bool includeInst)
+std::string dumpDot(const IrFunction& function, bool includeInst)
 {
     std::string result = toDot(function, includeInst);
View file
@@ -0,0 +1,137 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "IrLoweringA64.h"
#include "Luau/CodeGen.h"
#include "Luau/DenseHash.h"
#include "Luau/IrAnalysis.h"
#include "Luau/IrDump.h"
#include "Luau/IrUtils.h"
#include "EmitCommonA64.h"
#include "EmitInstructionA64.h"
#include "NativeState.h"
#include "lstate.h"
namespace Luau
{
namespace CodeGen
{
namespace A64
{
IrLoweringA64::IrLoweringA64(AssemblyBuilderA64& build, ModuleHelpers& helpers, NativeState& data, Proto* proto, IrFunction& function)
: build(build)
, helpers(helpers)
, data(data)
, proto(proto)
, function(function)
{
// In order to allocate registers during lowering, we need to know where instruction results are last used
updateLastUseLocations(function);
}
// TODO: Eventually this can go away
bool IrLoweringA64::canLower(const IrFunction& function)
{
for (const IrInst& inst : function.instructions)
{
switch (inst.cmd)
{
case IrCmd::NOP:
case IrCmd::SUBSTITUTE:
case IrCmd::INTERRUPT:
case IrCmd::LOP_RETURN:
continue;
default:
return false;
}
}
return true;
}
void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
{
switch (inst.cmd)
{
case IrCmd::INTERRUPT:
{
emitInterrupt(build, uintOp(inst.a));
break;
}
case IrCmd::LOP_RETURN:
{
emitInstReturn(build, helpers, vmRegOp(inst.a), intOp(inst.b));
break;
}
default:
LUAU_ASSERT(!"Not supported yet");
break;
}
// TODO
// regs.freeLastUseRegs(inst, index);
}
bool IrLoweringA64::isFallthroughBlock(IrBlock target, IrBlock next)
{
return target.start == next.start;
}
void IrLoweringA64::jumpOrFallthrough(IrBlock& target, IrBlock& next)
{
if (!isFallthroughBlock(target, next))
build.b(target.label);
}
RegisterA64 IrLoweringA64::regOp(IrOp op) const
{
IrInst& inst = function.instOp(op);
LUAU_ASSERT(inst.regA64 != noreg);
return inst.regA64;
}
IrConst IrLoweringA64::constOp(IrOp op) const
{
return function.constOp(op);
}
uint8_t IrLoweringA64::tagOp(IrOp op) const
{
return function.tagOp(op);
}
bool IrLoweringA64::boolOp(IrOp op) const
{
return function.boolOp(op);
}
int IrLoweringA64::intOp(IrOp op) const
{
return function.intOp(op);
}
unsigned IrLoweringA64::uintOp(IrOp op) const
{
return function.uintOp(op);
}
double IrLoweringA64::doubleOp(IrOp op) const
{
return function.doubleOp(op);
}
IrBlock& IrLoweringA64::blockOp(IrOp op) const
{
return function.blockOp(op);
}
Label& IrLoweringA64::labelOp(IrOp op) const
{
return blockOp(op).label;
}
} // namespace A64
} // namespace CodeGen
} // namespace Luau
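canLower above acts as an opcode whitelist: one unsupported instruction sends the whole function down the VM-exit path in lowerIr. The same pattern in miniature (toy enum of ours, not the real IrCmd set):

#include <cstdio>
#include <vector>

enum class ToyCmd
{
    Nop,
    Interrupt,
    Return,
    Arith, // not supported by our toy backend
};

// Same shape as IrLoweringA64::canLower: one unsupported opcode rejects
// the whole function, and the caller falls back to the VM gate.
static bool canLower(const std::vector<ToyCmd>& insts)
{
    for (ToyCmd cmd : insts)
    {
        switch (cmd)
        {
        case ToyCmd::Nop:
        case ToyCmd::Interrupt:
        case ToyCmd::Return:
            continue;
        default:
            return false;
        }
    }
    return true;
}

int main()
{
    printf("%d\n", canLower({ToyCmd::Interrupt, ToyCmd::Return})); // 1
    printf("%d\n", canLower({ToyCmd::Arith, ToyCmd::Return}));     // 0
}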
View file
@@ -0,0 +1,60 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/AssemblyBuilderA64.h"
#include "Luau/IrData.h"
#include <vector>
struct Proto;
namespace Luau
{
namespace CodeGen
{
struct ModuleHelpers;
struct NativeState;
struct AssemblyOptions;
namespace A64
{
struct IrLoweringA64
{
IrLoweringA64(AssemblyBuilderA64& build, ModuleHelpers& helpers, NativeState& data, Proto* proto, IrFunction& function);
static bool canLower(const IrFunction& function);
void lowerInst(IrInst& inst, uint32_t index, IrBlock& next);
bool isFallthroughBlock(IrBlock target, IrBlock next);
void jumpOrFallthrough(IrBlock& target, IrBlock& next);
// Operand data lookup helpers
RegisterA64 regOp(IrOp op) const;
IrConst constOp(IrOp op) const;
uint8_t tagOp(IrOp op) const;
bool boolOp(IrOp op) const;
int intOp(IrOp op) const;
unsigned uintOp(IrOp op) const;
double doubleOp(IrOp op) const;
IrBlock& blockOp(IrOp op) const;
Label& labelOp(IrOp op) const;
AssemblyBuilderA64& build;
ModuleHelpers& helpers;
NativeState& data;
Proto* proto = nullptr; // Temporarily required to provide 'Instruction* pc' to old emitInst* methods
IrFunction& function;
// TODO:
// IrRegAllocA64 regs;
};
} // namespace A64
} // namespace CodeGen
} // namespace Luau
View file
@@ -14,8 +14,6 @@
 #include "lstate.h"

-#include <algorithm>
-
 namespace Luau
 {
 namespace CodeGen
@@ -23,11 +21,10 @@ namespace CodeGen
 namespace X64
 {

-IrLoweringX64::IrLoweringX64(AssemblyBuilderX64& build, ModuleHelpers& helpers, NativeState& data, Proto* proto, IrFunction& function)
+IrLoweringX64::IrLoweringX64(AssemblyBuilderX64& build, ModuleHelpers& helpers, NativeState& data, IrFunction& function)
     : build(build)
     , helpers(helpers)
     , data(data)
-    , proto(proto)
     , function(function)
     , regs(function)
 {
@@ -35,146 +32,6 @@ IrLoweringX64::IrLoweringX64(AssemblyBuilderX64& build, ModuleHelpers& helpers,
     updateLastUseLocations(function);
 }
void IrLoweringX64::lower(AssemblyOptions options)
{
// While we will need a better block ordering in the future, right now we want to mostly preserve build order with fallbacks outlined
std::vector<uint32_t> sortedBlocks;
sortedBlocks.reserve(function.blocks.size());
for (uint32_t i = 0; i < function.blocks.size(); i++)
sortedBlocks.push_back(i);
std::sort(sortedBlocks.begin(), sortedBlocks.end(), [&](uint32_t idxA, uint32_t idxB) {
const IrBlock& a = function.blocks[idxA];
const IrBlock& b = function.blocks[idxB];
// Place fallback blocks at the end
if ((a.kind == IrBlockKind::Fallback) != (b.kind == IrBlockKind::Fallback))
return (a.kind == IrBlockKind::Fallback) < (b.kind == IrBlockKind::Fallback);
// Try to order by instruction order
return a.start < b.start;
});
DenseHashMap<uint32_t, uint32_t> bcLocations{~0u};
// Create keys for IR assembly locations that original bytecode instruction are interested in
for (const auto& [irLocation, asmLocation] : function.bcMapping)
{
if (irLocation != ~0u)
bcLocations[irLocation] = 0;
}
DenseHashMap<uint32_t, uint32_t> indexIrToBc{~0u};
bool outputEnabled = options.includeAssembly || options.includeIr;
if (outputEnabled && options.annotator)
{
// Create reverse mapping from IR location to bytecode location
for (size_t i = 0; i < function.bcMapping.size(); ++i)
{
uint32_t irLocation = function.bcMapping[i].irLocation;
if (irLocation != ~0u)
indexIrToBc[irLocation] = uint32_t(i);
}
}
IrToStringContext ctx{build.text, function.blocks, function.constants, function.cfg};
// We use this to skip outlined fallback blocks from IR/asm text output
size_t textSize = build.text.length();
uint32_t codeSize = build.getCodeSize();
bool seenFallback = false;
IrBlock dummy;
dummy.start = ~0u;
for (size_t i = 0; i < sortedBlocks.size(); ++i)
{
uint32_t blockIndex = sortedBlocks[i];
IrBlock& block = function.blocks[blockIndex];
if (block.kind == IrBlockKind::Dead)
continue;
LUAU_ASSERT(block.start != ~0u);
LUAU_ASSERT(block.finish != ~0u);
// If we want to skip fallback code IR/asm, we'll record when those blocks start once we see them
if (block.kind == IrBlockKind::Fallback && !seenFallback)
{
textSize = build.text.length();
codeSize = build.getCodeSize();
seenFallback = true;
}
if (options.includeIr)
{
build.logAppend("# ");
toStringDetailed(ctx, block, blockIndex, /* includeUseInfo */ true);
}
build.setLabel(block.label);
for (uint32_t index = block.start; index <= block.finish; index++)
{
LUAU_ASSERT(index < function.instructions.size());
// If IR instruction is the first one for the original bytecode, we can annotate it with source code text
if (outputEnabled && options.annotator)
{
if (uint32_t* bcIndex = indexIrToBc.find(index))
options.annotator(options.annotatorContext, build.text, proto->bytecodeid, *bcIndex);
}
// If bytecode needs the location of this instruction for jumps, record it
if (uint32_t* bcLocation = bcLocations.find(index))
*bcLocation = build.getCodeSize();
IrInst& inst = function.instructions[index];
// Skip pseudo instructions, but make sure they are not used at this stage
// This also prevents them from getting into text output when that's enabled
if (isPseudo(inst.cmd))
{
LUAU_ASSERT(inst.useCount == 0);
continue;
}
if (options.includeIr)
{
build.logAppend("# ");
toStringDetailed(ctx, inst, index, /* includeUseInfo */ true);
}
IrBlock& next = i + 1 < sortedBlocks.size() ? function.blocks[sortedBlocks[i + 1]] : dummy;
lowerInst(inst, index, next);
regs.freeLastUseRegs(inst, index);
}
if (options.includeIr)
build.logAppend("#\n");
}
if (outputEnabled && !options.includeOutlinedCode && seenFallback)
{
build.text.resize(textSize);
if (options.includeAssembly)
build.logAppend("; skipping %u bytes of outlined code\n", build.getCodeSize() - codeSize);
}
// Copy assembly locations of IR instructions that are mapped to bytecode instructions
for (auto& [irLocation, asmLocation] : function.bcMapping)
{
if (irLocation != ~0u)
asmLocation = bcLocations[irLocation];
}
}
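
The dominant pattern in the hunks above and below is mechanical: a raw inst.a.index read guarded by a separate LUAU_ASSERT on the operand kind becomes a single checked accessor call (vmRegOp, vmConstOp, vmUpvalueOp, conditionOp). The header that defines these helpers is not part of the hunks shown here, so the sketch below is an assumption about their shape rather than a quote of the actual source:

// Sketch only: checked operand accessors, assumed to live in a shared IR utility header.
// Each folds the operand-kind assertion into the accessor so call sites stay short.
inline int vmRegOp(IrOp op)
{
    LUAU_ASSERT(op.kind == IrOpKind::VmReg);
    return op.index;
}

inline int vmConstOp(IrOp op)
{
    LUAU_ASSERT(op.kind == IrOpKind::VmConst);
    return op.index;
}

inline int vmUpvalueOp(IrOp op)
{
    LUAU_ASSERT(op.kind == IrOpKind::VmUpvalue);
    return op.index;
}

inline IrCondition conditionOp(IrOp op)
{
    LUAU_ASSERT(op.kind == IrOpKind::Condition);
    return IrCondition(op.index);
}

The net effect is the same assertions with far less repetition at each lowering site.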
void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next) void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
{ {
switch (inst.cmd) switch (inst.cmd)
@ -183,9 +40,9 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
inst.regX64 = regs.allocGprReg(SizeX64::dword); inst.regX64 = regs.allocGprReg(SizeX64::dword);
if (inst.a.kind == IrOpKind::VmReg) if (inst.a.kind == IrOpKind::VmReg)
build.mov(inst.regX64, luauRegTag(inst.a.index)); build.mov(inst.regX64, luauRegTag(vmRegOp(inst.a)));
else if (inst.a.kind == IrOpKind::VmConst) else if (inst.a.kind == IrOpKind::VmConst)
build.mov(inst.regX64, luauConstantTag(inst.a.index)); build.mov(inst.regX64, luauConstantTag(vmConstOp(inst.a)));
// If we have a register, we assume it's a pointer to TValue // If we have a register, we assume it's a pointer to TValue
// We might introduce explicit operand types in the future to make this more robust // We might introduce explicit operand types in the future to make this more robust
else if (inst.a.kind == IrOpKind::Inst) else if (inst.a.kind == IrOpKind::Inst)
@ -197,9 +54,9 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
inst.regX64 = regs.allocGprReg(SizeX64::qword); inst.regX64 = regs.allocGprReg(SizeX64::qword);
if (inst.a.kind == IrOpKind::VmReg) if (inst.a.kind == IrOpKind::VmReg)
build.mov(inst.regX64, luauRegValue(inst.a.index)); build.mov(inst.regX64, luauRegValue(vmRegOp(inst.a)));
else if (inst.a.kind == IrOpKind::VmConst) else if (inst.a.kind == IrOpKind::VmConst)
build.mov(inst.regX64, luauConstantValue(inst.a.index)); build.mov(inst.regX64, luauConstantValue(vmConstOp(inst.a)));
// If we have a register, we assume it's a pointer to TValue // If we have a register, we assume it's a pointer to TValue
// We might introduce explicit operand types in the future to make this more robust // We might introduce explicit operand types in the future to make this more robust
else if (inst.a.kind == IrOpKind::Inst) else if (inst.a.kind == IrOpKind::Inst)
@ -211,26 +68,24 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
inst.regX64 = regs.allocXmmReg(); inst.regX64 = regs.allocXmmReg();
if (inst.a.kind == IrOpKind::VmReg) if (inst.a.kind == IrOpKind::VmReg)
build.vmovsd(inst.regX64, luauRegValue(inst.a.index)); build.vmovsd(inst.regX64, luauRegValue(vmRegOp(inst.a)));
else if (inst.a.kind == IrOpKind::VmConst) else if (inst.a.kind == IrOpKind::VmConst)
build.vmovsd(inst.regX64, luauConstantValue(inst.a.index)); build.vmovsd(inst.regX64, luauConstantValue(vmConstOp(inst.a)));
else else
LUAU_ASSERT(!"Unsupported instruction form"); LUAU_ASSERT(!"Unsupported instruction form");
break; break;
case IrCmd::LOAD_INT: case IrCmd::LOAD_INT:
LUAU_ASSERT(inst.a.kind == IrOpKind::VmReg);
inst.regX64 = regs.allocGprReg(SizeX64::dword); inst.regX64 = regs.allocGprReg(SizeX64::dword);
build.mov(inst.regX64, luauRegValueInt(inst.a.index)); build.mov(inst.regX64, luauRegValueInt(vmRegOp(inst.a)));
break; break;
case IrCmd::LOAD_TVALUE: case IrCmd::LOAD_TVALUE:
inst.regX64 = regs.allocXmmReg(); inst.regX64 = regs.allocXmmReg();
if (inst.a.kind == IrOpKind::VmReg) if (inst.a.kind == IrOpKind::VmReg)
build.vmovups(inst.regX64, luauReg(inst.a.index)); build.vmovups(inst.regX64, luauReg(vmRegOp(inst.a)));
else if (inst.a.kind == IrOpKind::VmConst) else if (inst.a.kind == IrOpKind::VmConst)
build.vmovups(inst.regX64, luauConstant(inst.a.index)); build.vmovups(inst.regX64, luauConstant(vmConstOp(inst.a)));
else if (inst.a.kind == IrOpKind::Inst) else if (inst.a.kind == IrOpKind::Inst)
build.vmovups(inst.regX64, xmmword[regOp(inst.a)]); build.vmovups(inst.regX64, xmmword[regOp(inst.a)]);
else else
@ -301,31 +156,25 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
break; break;
}; };
case IrCmd::STORE_TAG: case IrCmd::STORE_TAG:
LUAU_ASSERT(inst.a.kind == IrOpKind::VmReg);
if (inst.b.kind == IrOpKind::Constant) if (inst.b.kind == IrOpKind::Constant)
build.mov(luauRegTag(inst.a.index), tagOp(inst.b)); build.mov(luauRegTag(vmRegOp(inst.a)), tagOp(inst.b));
else else
LUAU_ASSERT(!"Unsupported instruction form"); LUAU_ASSERT(!"Unsupported instruction form");
break; break;
case IrCmd::STORE_POINTER: case IrCmd::STORE_POINTER:
LUAU_ASSERT(inst.a.kind == IrOpKind::VmReg); build.mov(luauRegValue(vmRegOp(inst.a)), regOp(inst.b));
build.mov(luauRegValue(inst.a.index), regOp(inst.b));
break; break;
case IrCmd::STORE_DOUBLE: case IrCmd::STORE_DOUBLE:
LUAU_ASSERT(inst.a.kind == IrOpKind::VmReg);
if (inst.b.kind == IrOpKind::Constant) if (inst.b.kind == IrOpKind::Constant)
{ {
ScopedRegX64 tmp{regs, SizeX64::xmmword}; ScopedRegX64 tmp{regs, SizeX64::xmmword};
build.vmovsd(tmp.reg, build.f64(doubleOp(inst.b))); build.vmovsd(tmp.reg, build.f64(doubleOp(inst.b)));
build.vmovsd(luauRegValue(inst.a.index), tmp.reg); build.vmovsd(luauRegValue(vmRegOp(inst.a)), tmp.reg);
} }
else if (inst.b.kind == IrOpKind::Inst) else if (inst.b.kind == IrOpKind::Inst)
{ {
build.vmovsd(luauRegValue(inst.a.index), regOp(inst.b)); build.vmovsd(luauRegValue(vmRegOp(inst.a)), regOp(inst.b));
} }
else else
{ {
@ -334,19 +183,17 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
break; break;
case IrCmd::STORE_INT: case IrCmd::STORE_INT:
{ {
LUAU_ASSERT(inst.a.kind == IrOpKind::VmReg);
if (inst.b.kind == IrOpKind::Constant) if (inst.b.kind == IrOpKind::Constant)
build.mov(luauRegValueInt(inst.a.index), intOp(inst.b)); build.mov(luauRegValueInt(vmRegOp(inst.a)), intOp(inst.b));
else if (inst.b.kind == IrOpKind::Inst) else if (inst.b.kind == IrOpKind::Inst)
build.mov(luauRegValueInt(inst.a.index), regOp(inst.b)); build.mov(luauRegValueInt(vmRegOp(inst.a)), regOp(inst.b));
else else
LUAU_ASSERT(!"Unsupported instruction form"); LUAU_ASSERT(!"Unsupported instruction form");
break; break;
} }
case IrCmd::STORE_TVALUE: case IrCmd::STORE_TVALUE:
if (inst.a.kind == IrOpKind::VmReg) if (inst.a.kind == IrOpKind::VmReg)
build.vmovups(luauReg(inst.a.index), regOp(inst.b)); build.vmovups(luauReg(vmRegOp(inst.a)), regOp(inst.b));
else if (inst.a.kind == IrOpKind::Inst) else if (inst.a.kind == IrOpKind::Inst)
build.vmovups(xmmword[regOp(inst.a)], regOp(inst.b)); build.vmovups(xmmword[regOp(inst.a)], regOp(inst.b));
else else
@ -642,15 +489,11 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
jumpOrFallthrough(blockOp(inst.a), next); jumpOrFallthrough(blockOp(inst.a), next);
break; break;
case IrCmd::JUMP_IF_TRUTHY: case IrCmd::JUMP_IF_TRUTHY:
LUAU_ASSERT(inst.a.kind == IrOpKind::VmReg); jumpIfTruthy(build, vmRegOp(inst.a), labelOp(inst.b), labelOp(inst.c));
jumpIfTruthy(build, inst.a.index, labelOp(inst.b), labelOp(inst.c));
jumpOrFallthrough(blockOp(inst.c), next); jumpOrFallthrough(blockOp(inst.c), next);
break; break;
case IrCmd::JUMP_IF_FALSY: case IrCmd::JUMP_IF_FALSY:
LUAU_ASSERT(inst.a.kind == IrOpKind::VmReg); jumpIfFalsy(build, vmRegOp(inst.a), labelOp(inst.b), labelOp(inst.c));
jumpIfFalsy(build, inst.a.index, labelOp(inst.b), labelOp(inst.c));
jumpOrFallthrough(blockOp(inst.c), next); jumpOrFallthrough(blockOp(inst.c), next);
break; break;
case IrCmd::JUMP_EQ_TAG: case IrCmd::JUMP_EQ_TAG:
@ -686,9 +529,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
break; break;
case IrCmd::JUMP_CMP_NUM: case IrCmd::JUMP_CMP_NUM:
{ {
LUAU_ASSERT(inst.c.kind == IrOpKind::Condition); IrCondition cond = conditionOp(inst.c);
IrCondition cond = IrCondition(inst.c.index);
ScopedRegX64 tmp{regs, SizeX64::xmmword}; ScopedRegX64 tmp{regs, SizeX64::xmmword};
@ -698,24 +539,14 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
break; break;
} }
case IrCmd::JUMP_CMP_ANY: case IrCmd::JUMP_CMP_ANY:
{ jumpOnAnyCmpFallback(build, vmRegOp(inst.a), vmRegOp(inst.b), conditionOp(inst.c), labelOp(inst.d));
LUAU_ASSERT(inst.a.kind == IrOpKind::VmReg);
LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
LUAU_ASSERT(inst.c.kind == IrOpKind::Condition);
IrCondition cond = IrCondition(inst.c.index);
jumpOnAnyCmpFallback(build, inst.a.index, inst.b.index, cond, labelOp(inst.d));
jumpOrFallthrough(blockOp(inst.e), next); jumpOrFallthrough(blockOp(inst.e), next);
break; break;
}
case IrCmd::JUMP_SLOT_MATCH: case IrCmd::JUMP_SLOT_MATCH:
{ {
LUAU_ASSERT(inst.b.kind == IrOpKind::VmConst);
ScopedRegX64 tmp{regs, SizeX64::qword}; ScopedRegX64 tmp{regs, SizeX64::qword};
jumpIfNodeKeyNotInExpectedSlot(build, tmp.reg, regOp(inst.a), luauConstantValue(inst.b.index), labelOp(inst.d)); jumpIfNodeKeyNotInExpectedSlot(build, tmp.reg, regOp(inst.a), luauConstantValue(vmConstOp(inst.b)), labelOp(inst.d));
jumpOrFallthrough(blockOp(inst.c), next); jumpOrFallthrough(blockOp(inst.c), next);
break; break;
} }
@ -774,13 +605,11 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
break; break;
case IrCmd::ADJUST_STACK_TO_REG: case IrCmd::ADJUST_STACK_TO_REG:
{ {
LUAU_ASSERT(inst.a.kind == IrOpKind::VmReg);
if (inst.b.kind == IrOpKind::Constant) if (inst.b.kind == IrOpKind::Constant)
{ {
ScopedRegX64 tmp{regs, SizeX64::qword}; ScopedRegX64 tmp{regs, SizeX64::qword};
build.lea(tmp.reg, addr[rBase + (inst.a.index + intOp(inst.b)) * sizeof(TValue)]); build.lea(tmp.reg, addr[rBase + (vmRegOp(inst.a) + intOp(inst.b)) * sizeof(TValue)]);
build.mov(qword[rState + offsetof(lua_State, top)], tmp.reg); build.mov(qword[rState + offsetof(lua_State, top)], tmp.reg);
} }
else if (inst.b.kind == IrOpKind::Inst) else if (inst.b.kind == IrOpKind::Inst)
@ -788,7 +617,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
ScopedRegX64 tmp(regs, regs.allocGprRegOrReuse(SizeX64::dword, index, {inst.b})); ScopedRegX64 tmp(regs, regs.allocGprRegOrReuse(SizeX64::dword, index, {inst.b}));
build.shl(qwordReg(tmp.reg), kTValueSizeLog2); build.shl(qwordReg(tmp.reg), kTValueSizeLog2);
build.lea(qwordReg(tmp.reg), addr[rBase + qwordReg(tmp.reg) + inst.a.index * sizeof(TValue)]); build.lea(qwordReg(tmp.reg), addr[rBase + qwordReg(tmp.reg) + vmRegOp(inst.a) * sizeof(TValue)]);
build.mov(qword[rState + offsetof(lua_State, top)], qwordReg(tmp.reg)); build.mov(qword[rState + offsetof(lua_State, top)], qwordReg(tmp.reg));
} }
else else
@ -807,28 +636,23 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
} }
case IrCmd::FASTCALL: case IrCmd::FASTCALL:
LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg); emitBuiltin(regs, build, uintOp(inst.a), vmRegOp(inst.b), vmRegOp(inst.c), inst.d, intOp(inst.e), intOp(inst.f));
LUAU_ASSERT(inst.c.kind == IrOpKind::VmReg);
emitBuiltin(regs, build, uintOp(inst.a), inst.b.index, inst.c.index, inst.d, intOp(inst.e), intOp(inst.f));
break; break;
case IrCmd::INVOKE_FASTCALL: case IrCmd::INVOKE_FASTCALL:
{ {
LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
LUAU_ASSERT(inst.c.kind == IrOpKind::VmReg);
unsigned bfid = uintOp(inst.a); unsigned bfid = uintOp(inst.a);
OperandX64 args = 0; OperandX64 args = 0;
if (inst.d.kind == IrOpKind::VmReg) if (inst.d.kind == IrOpKind::VmReg)
args = luauRegAddress(inst.d.index); args = luauRegAddress(vmRegOp(inst.d));
else if (inst.d.kind == IrOpKind::VmConst) else if (inst.d.kind == IrOpKind::VmConst)
args = luauConstantAddress(inst.d.index); args = luauConstantAddress(vmConstOp(inst.d));
else else
LUAU_ASSERT(boolOp(inst.d) == false); LUAU_ASSERT(boolOp(inst.d) == false);
int ra = inst.b.index; int ra = vmRegOp(inst.b);
int arg = inst.c.index; int arg = vmRegOp(inst.c);
int nparams = intOp(inst.e); int nparams = intOp(inst.e);
int nresults = intOp(inst.f); int nresults = intOp(inst.f);
@ -889,34 +713,24 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
break; break;
} }
case IrCmd::DO_ARITH: case IrCmd::DO_ARITH:
LUAU_ASSERT(inst.a.kind == IrOpKind::VmReg);
LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
LUAU_ASSERT(inst.c.kind == IrOpKind::VmReg || inst.c.kind == IrOpKind::VmConst);
if (inst.c.kind == IrOpKind::VmReg) if (inst.c.kind == IrOpKind::VmReg)
callArithHelper(build, inst.a.index, inst.b.index, luauRegAddress(inst.c.index), TMS(intOp(inst.d))); callArithHelper(build, vmRegOp(inst.a), vmRegOp(inst.b), luauRegAddress(vmRegOp(inst.c)), TMS(intOp(inst.d)));
else else
callArithHelper(build, inst.a.index, inst.b.index, luauConstantAddress(inst.c.index), TMS(intOp(inst.d))); callArithHelper(build, vmRegOp(inst.a), vmRegOp(inst.b), luauConstantAddress(vmConstOp(inst.c)), TMS(intOp(inst.d)));
break; break;
case IrCmd::DO_LEN: case IrCmd::DO_LEN:
LUAU_ASSERT(inst.a.kind == IrOpKind::VmReg); callLengthHelper(build, vmRegOp(inst.a), vmRegOp(inst.b));
LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
callLengthHelper(build, inst.a.index, inst.b.index);
break; break;
case IrCmd::GET_TABLE: case IrCmd::GET_TABLE:
LUAU_ASSERT(inst.a.kind == IrOpKind::VmReg);
LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
if (inst.c.kind == IrOpKind::VmReg) if (inst.c.kind == IrOpKind::VmReg)
{ {
callGetTable(build, inst.b.index, luauRegAddress(inst.c.index), inst.a.index); callGetTable(build, vmRegOp(inst.b), luauRegAddress(vmRegOp(inst.c)), vmRegOp(inst.a));
} }
else if (inst.c.kind == IrOpKind::Constant) else if (inst.c.kind == IrOpKind::Constant)
{ {
TValue n; TValue n;
setnvalue(&n, uintOp(inst.c)); setnvalue(&n, uintOp(inst.c));
callGetTable(build, inst.b.index, build.bytes(&n, sizeof(n)), inst.a.index); callGetTable(build, vmRegOp(inst.b), build.bytes(&n, sizeof(n)), vmRegOp(inst.a));
} }
else else
{ {
@ -924,18 +738,15 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
} }
break; break;
case IrCmd::SET_TABLE: case IrCmd::SET_TABLE:
LUAU_ASSERT(inst.a.kind == IrOpKind::VmReg);
LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
if (inst.c.kind == IrOpKind::VmReg) if (inst.c.kind == IrOpKind::VmReg)
{ {
callSetTable(build, inst.b.index, luauRegAddress(inst.c.index), inst.a.index); callSetTable(build, vmRegOp(inst.b), luauRegAddress(vmRegOp(inst.c)), vmRegOp(inst.a));
} }
else if (inst.c.kind == IrOpKind::Constant) else if (inst.c.kind == IrOpKind::Constant)
{ {
TValue n; TValue n;
setnvalue(&n, uintOp(inst.c)); setnvalue(&n, uintOp(inst.c));
callSetTable(build, inst.b.index, build.bytes(&n, sizeof(n)), inst.a.index); callSetTable(build, vmRegOp(inst.b), build.bytes(&n, sizeof(n)), vmRegOp(inst.a));
} }
else else
{ {
@ -943,30 +754,23 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
} }
break; break;
case IrCmd::GET_IMPORT: case IrCmd::GET_IMPORT:
LUAU_ASSERT(inst.a.kind == IrOpKind::VmReg); emitInstGetImportFallback(build, vmRegOp(inst.a), uintOp(inst.b));
emitInstGetImportFallback(build, inst.a.index, uintOp(inst.b));
break; break;
case IrCmd::CONCAT: case IrCmd::CONCAT:
LUAU_ASSERT(inst.a.kind == IrOpKind::VmReg);
build.mov(rArg1, rState); build.mov(rArg1, rState);
build.mov(dwordReg(rArg2), uintOp(inst.b)); build.mov(dwordReg(rArg2), uintOp(inst.b));
build.mov(dwordReg(rArg3), inst.a.index + uintOp(inst.b) - 1); build.mov(dwordReg(rArg3), vmRegOp(inst.a) + uintOp(inst.b) - 1);
build.call(qword[rNativeContext + offsetof(NativeContext, luaV_concat)]); build.call(qword[rNativeContext + offsetof(NativeContext, luaV_concat)]);
emitUpdateBase(build); emitUpdateBase(build);
break; break;
case IrCmd::GET_UPVALUE: case IrCmd::GET_UPVALUE:
{ {
LUAU_ASSERT(inst.a.kind == IrOpKind::VmReg);
LUAU_ASSERT(inst.b.kind == IrOpKind::VmUpvalue);
ScopedRegX64 tmp1{regs, SizeX64::qword}; ScopedRegX64 tmp1{regs, SizeX64::qword};
ScopedRegX64 tmp2{regs, SizeX64::xmmword}; ScopedRegX64 tmp2{regs, SizeX64::xmmword};
build.mov(tmp1.reg, sClosure); build.mov(tmp1.reg, sClosure);
build.add(tmp1.reg, offsetof(Closure, l.uprefs) + sizeof(TValue) * inst.b.index); build.add(tmp1.reg, offsetof(Closure, l.uprefs) + sizeof(TValue) * vmUpvalueOp(inst.b));
// uprefs[] is either an actual value, or it points to UpVal object which has a pointer to value // uprefs[] is either an actual value, or it points to UpVal object which has a pointer to value
Label skip; Label skip;
@ -981,32 +785,29 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
build.setLabel(skip); build.setLabel(skip);
build.vmovups(tmp2.reg, xmmword[tmp1.reg]); build.vmovups(tmp2.reg, xmmword[tmp1.reg]);
build.vmovups(luauReg(inst.a.index), tmp2.reg); build.vmovups(luauReg(vmRegOp(inst.a)), tmp2.reg);
break; break;
} }
case IrCmd::SET_UPVALUE: case IrCmd::SET_UPVALUE:
{ {
LUAU_ASSERT(inst.a.kind == IrOpKind::VmUpvalue);
LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
Label next; Label next;
ScopedRegX64 tmp1{regs, SizeX64::qword}; ScopedRegX64 tmp1{regs, SizeX64::qword};
ScopedRegX64 tmp2{regs, SizeX64::qword}; ScopedRegX64 tmp2{regs, SizeX64::qword};
ScopedRegX64 tmp3{regs, SizeX64::xmmword}; ScopedRegX64 tmp3{regs, SizeX64::xmmword};
build.mov(tmp1.reg, sClosure); build.mov(tmp1.reg, sClosure);
build.mov(tmp2.reg, qword[tmp1.reg + offsetof(Closure, l.uprefs) + sizeof(TValue) * inst.a.index + offsetof(TValue, value.gc)]); build.mov(tmp2.reg, qword[tmp1.reg + offsetof(Closure, l.uprefs) + sizeof(TValue) * vmUpvalueOp(inst.a) + offsetof(TValue, value.gc)]);
build.mov(tmp1.reg, qword[tmp2.reg + offsetof(UpVal, v)]); build.mov(tmp1.reg, qword[tmp2.reg + offsetof(UpVal, v)]);
build.vmovups(tmp3.reg, luauReg(inst.b.index)); build.vmovups(tmp3.reg, luauReg(vmRegOp(inst.b)));
build.vmovups(xmmword[tmp1.reg], tmp3.reg); build.vmovups(xmmword[tmp1.reg], tmp3.reg);
callBarrierObject(build, tmp1.reg, tmp2.reg, inst.b.index, next); callBarrierObject(build, tmp1.reg, tmp2.reg, vmRegOp(inst.b), next);
build.setLabel(next); build.setLabel(next);
break; break;
} }
case IrCmd::PREPARE_FORN: case IrCmd::PREPARE_FORN:
callPrepareForN(build, inst.a.index, inst.b.index, inst.c.index); callPrepareForN(build, vmRegOp(inst.a), vmRegOp(inst.b), vmRegOp(inst.c));
break; break;
case IrCmd::CHECK_TAG: case IrCmd::CHECK_TAG:
if (inst.a.kind == IrOpKind::Inst) if (inst.a.kind == IrOpKind::Inst)
@ -1016,11 +817,11 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
} }
else if (inst.a.kind == IrOpKind::VmReg) else if (inst.a.kind == IrOpKind::VmReg)
{ {
jumpIfTagIsNot(build, inst.a.index, lua_Type(tagOp(inst.b)), labelOp(inst.c)); jumpIfTagIsNot(build, vmRegOp(inst.a), lua_Type(tagOp(inst.b)), labelOp(inst.c));
} }
else if (inst.a.kind == IrOpKind::VmConst) else if (inst.a.kind == IrOpKind::VmConst)
{ {
build.cmp(luauConstantTag(inst.a.index), tagOp(inst.b)); build.cmp(luauConstantTag(vmConstOp(inst.a)), tagOp(inst.b));
build.jcc(ConditionX64::NotEqual, labelOp(inst.c)); build.jcc(ConditionX64::NotEqual, labelOp(inst.c));
} }
else else
@ -1053,11 +854,9 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
break; break;
case IrCmd::CHECK_SLOT_MATCH: case IrCmd::CHECK_SLOT_MATCH:
{ {
LUAU_ASSERT(inst.b.kind == IrOpKind::VmConst);
ScopedRegX64 tmp{regs, SizeX64::qword}; ScopedRegX64 tmp{regs, SizeX64::qword};
jumpIfNodeKeyNotInExpectedSlot(build, tmp.reg, regOp(inst.a), luauConstantValue(inst.b.index), labelOp(inst.c)); jumpIfNodeKeyNotInExpectedSlot(build, tmp.reg, regOp(inst.a), luauConstantValue(vmConstOp(inst.b)), labelOp(inst.c));
break; break;
} }
case IrCmd::CHECK_NODE_NO_NEXT: case IrCmd::CHECK_NODE_NO_NEXT:
@ -1075,12 +874,10 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
} }
case IrCmd::BARRIER_OBJ: case IrCmd::BARRIER_OBJ:
{ {
LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
Label skip; Label skip;
ScopedRegX64 tmp{regs, SizeX64::qword}; ScopedRegX64 tmp{regs, SizeX64::qword};
callBarrierObject(build, tmp.reg, regOp(inst.a), inst.b.index, skip); callBarrierObject(build, tmp.reg, regOp(inst.a), vmRegOp(inst.b), skip);
build.setLabel(skip); build.setLabel(skip);
break; break;
} }
@ -1094,12 +891,10 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
} }
case IrCmd::BARRIER_TABLE_FORWARD: case IrCmd::BARRIER_TABLE_FORWARD:
{ {
LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
Label skip; Label skip;
ScopedRegX64 tmp{regs, SizeX64::qword}; ScopedRegX64 tmp{regs, SizeX64::qword};
callBarrierTable(build, tmp.reg, regOp(inst.a), inst.b.index, skip); callBarrierTable(build, tmp.reg, regOp(inst.a), vmRegOp(inst.b), skip);
build.setLabel(skip); build.setLabel(skip);
break; break;
} }
@ -1117,8 +912,6 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
} }
case IrCmd::CLOSE_UPVALS: case IrCmd::CLOSE_UPVALS:
{ {
LUAU_ASSERT(inst.a.kind == IrOpKind::VmReg);
Label next; Label next;
ScopedRegX64 tmp1{regs, SizeX64::qword}; ScopedRegX64 tmp1{regs, SizeX64::qword};
ScopedRegX64 tmp2{regs, SizeX64::qword}; ScopedRegX64 tmp2{regs, SizeX64::qword};
@ -1129,7 +922,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
build.jcc(ConditionX64::Zero, next); build.jcc(ConditionX64::Zero, next);
// ra <= L->openupval->v // ra <= L->openupval->v
build.lea(tmp2.reg, addr[rBase + inst.a.index * sizeof(TValue)]); build.lea(tmp2.reg, addr[rBase + vmRegOp(inst.a) * sizeof(TValue)]);
build.cmp(tmp2.reg, qword[tmp1.reg + offsetof(UpVal, v)]); build.cmp(tmp2.reg, qword[tmp1.reg + offsetof(UpVal, v)]);
build.jcc(ConditionX64::Above, next); build.jcc(ConditionX64::Above, next);
@ -1149,60 +942,38 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
// Fallbacks to non-IR instruction implementations // Fallbacks to non-IR instruction implementations
case IrCmd::LOP_SETLIST: case IrCmd::LOP_SETLIST:
{ {
const Instruction* pc = proto->code + uintOp(inst.a);
LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
LUAU_ASSERT(inst.c.kind == IrOpKind::VmReg);
LUAU_ASSERT(inst.d.kind == IrOpKind::Constant);
LUAU_ASSERT(inst.e.kind == IrOpKind::Constant);
Label next; Label next;
emitInstSetList(build, pc, next); emitInstSetList(build, next, vmRegOp(inst.b), vmRegOp(inst.c), intOp(inst.d), uintOp(inst.e));
build.setLabel(next); build.setLabel(next);
break; break;
} }
case IrCmd::LOP_CALL: case IrCmd::LOP_CALL:
{ emitInstCall(build, helpers, vmRegOp(inst.a), intOp(inst.b), intOp(inst.c));
const Instruction* pc = proto->code + uintOp(inst.a);
LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
LUAU_ASSERT(inst.c.kind == IrOpKind::Constant);
LUAU_ASSERT(inst.d.kind == IrOpKind::Constant);
emitInstCall(build, helpers, pc, uintOp(inst.a));
break; break;
}
case IrCmd::LOP_RETURN: case IrCmd::LOP_RETURN:
{ emitInstReturn(build, helpers, vmRegOp(inst.a), intOp(inst.b));
const Instruction* pc = proto->code + uintOp(inst.a);
LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
LUAU_ASSERT(inst.c.kind == IrOpKind::Constant);
emitInstReturn(build, helpers, pc, uintOp(inst.a));
break; break;
}
case IrCmd::LOP_FORGLOOP: case IrCmd::LOP_FORGLOOP:
LUAU_ASSERT(inst.a.kind == IrOpKind::VmReg); emitinstForGLoop(build, vmRegOp(inst.a), intOp(inst.b), labelOp(inst.c), labelOp(inst.d));
emitinstForGLoop(build, inst.a.index, intOp(inst.b), labelOp(inst.c), labelOp(inst.d));
break; break;
case IrCmd::LOP_FORGLOOP_FALLBACK: case IrCmd::LOP_FORGLOOP_FALLBACK:
LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg); emitinstForGLoopFallback(build, uintOp(inst.a), vmRegOp(inst.b), intOp(inst.c), labelOp(inst.d));
emitinstForGLoopFallback(build, uintOp(inst.a), inst.b.index, intOp(inst.c), labelOp(inst.d));
build.jmp(labelOp(inst.e)); build.jmp(labelOp(inst.e));
break; break;
case IrCmd::LOP_FORGPREP_XNEXT_FALLBACK: case IrCmd::LOP_FORGPREP_XNEXT_FALLBACK:
LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg); emitInstForGPrepXnextFallback(build, uintOp(inst.a), vmRegOp(inst.b), labelOp(inst.c));
emitInstForGPrepXnextFallback(build, uintOp(inst.a), inst.b.index, labelOp(inst.c));
break; break;
case IrCmd::LOP_AND: case IrCmd::LOP_AND:
emitInstAnd(build, proto->code + uintOp(inst.a)); emitInstAnd(build, vmRegOp(inst.a), vmRegOp(inst.b), vmRegOp(inst.c));
break; break;
case IrCmd::LOP_ANDK: case IrCmd::LOP_ANDK:
emitInstAndK(build, proto->code + uintOp(inst.a)); emitInstAndK(build, vmRegOp(inst.a), vmRegOp(inst.b), vmConstOp(inst.c));
break; break;
case IrCmd::LOP_OR: case IrCmd::LOP_OR:
emitInstOr(build, proto->code + uintOp(inst.a)); emitInstOr(build, vmRegOp(inst.a), vmRegOp(inst.b), vmRegOp(inst.c));
break; break;
case IrCmd::LOP_ORK: case IrCmd::LOP_ORK:
emitInstOrK(build, proto->code + uintOp(inst.a)); emitInstOrK(build, vmRegOp(inst.a), vmRegOp(inst.b), vmConstOp(inst.c));
break; break;
case IrCmd::LOP_COVERAGE: case IrCmd::LOP_COVERAGE:
emitInstCoverage(build, uintOp(inst.a)); emitInstCoverage(build, uintOp(inst.a));
@ -1272,6 +1043,8 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
LUAU_ASSERT(!"Not supported yet"); LUAU_ASSERT(!"Not supported yet");
break; break;
} }
regs.freeLastUseRegs(inst, index);
} }
bool IrLoweringX64::isFallthroughBlock(IrBlock target, IrBlock next) bool IrLoweringX64::isFallthroughBlock(IrBlock target, IrBlock next)
@ -1294,9 +1067,9 @@ OperandX64 IrLoweringX64::memRegDoubleOp(IrOp op) const
case IrOpKind::Constant: case IrOpKind::Constant:
return build.f64(doubleOp(op)); return build.f64(doubleOp(op));
case IrOpKind::VmReg: case IrOpKind::VmReg:
return luauRegValue(op.index); return luauRegValue(vmRegOp(op));
case IrOpKind::VmConst: case IrOpKind::VmConst:
return luauConstantValue(op.index); return luauConstantValue(vmConstOp(op));
default: default:
LUAU_ASSERT(!"Unsupported operand kind"); LUAU_ASSERT(!"Unsupported operand kind");
} }
@ -1311,9 +1084,9 @@ OperandX64 IrLoweringX64::memRegTagOp(IrOp op) const
case IrOpKind::Inst: case IrOpKind::Inst:
return regOp(op); return regOp(op);
case IrOpKind::VmReg: case IrOpKind::VmReg:
return luauRegTag(op.index); return luauRegTag(vmRegOp(op));
case IrOpKind::VmConst: case IrOpKind::VmConst:
return luauConstantTag(op.index); return luauConstantTag(vmConstOp(op));
default: default:
LUAU_ASSERT(!"Unsupported operand kind"); LUAU_ASSERT(!"Unsupported operand kind");
} }
@ -1323,7 +1096,9 @@ OperandX64 IrLoweringX64::memRegTagOp(IrOp op) const
RegisterX64 IrLoweringX64::regOp(IrOp op) const RegisterX64 IrLoweringX64::regOp(IrOp op) const
{ {
return function.instOp(op).regX64; IrInst& inst = function.instOp(op);
LUAU_ASSERT(inst.regX64 != noreg);
return inst.regX64;
} }
IrConst IrLoweringX64::constOp(IrOp op) const IrConst IrLoweringX64::constOp(IrOp op) const

View file

@ -24,10 +24,7 @@ namespace X64
struct IrLoweringX64 struct IrLoweringX64
{ {
// Some of these arguments are only required while we re-use old direct bytecode to x64 lowering IrLoweringX64(AssemblyBuilderX64& build, ModuleHelpers& helpers, NativeState& data, IrFunction& function);
IrLoweringX64(AssemblyBuilderX64& build, ModuleHelpers& helpers, NativeState& data, Proto* proto, IrFunction& function);
void lower(AssemblyOptions options);
void lowerInst(IrInst& inst, uint32_t index, IrBlock& next); void lowerInst(IrInst& inst, uint32_t index, IrBlock& next);
@ -52,7 +49,6 @@ struct IrLoweringX64
AssemblyBuilderX64& build; AssemblyBuilderX64& build;
ModuleHelpers& helpers; ModuleHelpers& helpers;
NativeState& data; NativeState& data;
Proto* proto = nullptr; // Temporarily required to provide 'Instruction* pc' to old emitInst* methods
IrFunction& function; IrFunction& function;

View file

@ -240,6 +240,10 @@ BuiltinImplResult translateBuiltinTypeof(IrBuilder& build, int nparams, int ra,
BuiltinImplResult translateBuiltin(IrBuilder& build, int bfid, int ra, int arg, IrOp args, int nparams, int nresults, IrOp fallback) BuiltinImplResult translateBuiltin(IrBuilder& build, int bfid, int ra, int arg, IrOp args, int nparams, int nresults, IrOp fallback)
{ {
// Builtins are not allowed to handle variadic arguments
if (nparams == LUA_MULTRET)
return {BuiltinImplType::None, -1};
switch (bfid) switch (bfid)
{ {
case LBF_ASSERT: case LBF_ASSERT:

View file

@ -501,10 +501,10 @@ void translateFastCallN(IrBuilder& build, const Instruction* pc, int pcpos, bool
if (br.type == BuiltinImplType::UsesFallback) if (br.type == BuiltinImplType::UsesFallback)
{ {
LUAU_ASSERT(nparams != LUA_MULTRET && "builtins are not allowed to handle variadic arguments");
if (nresults == LUA_MULTRET) if (nresults == LUA_MULTRET)
build.inst(IrCmd::ADJUST_STACK_TO_REG, build.vmReg(ra), build.constInt(br.actualResultCount)); build.inst(IrCmd::ADJUST_STACK_TO_REG, build.vmReg(ra), build.constInt(br.actualResultCount));
else if (nparams == LUA_MULTRET)
build.inst(IrCmd::ADJUST_STACK_TO_TOP);
} }
else else
{ {

View file

@ -354,7 +354,7 @@ void foldConstants(IrBuilder& build, IrFunction& function, IrBlock& block, uint3
case IrCmd::JUMP_CMP_NUM: case IrCmd::JUMP_CMP_NUM:
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant) if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
{ {
if (compare(function.doubleOp(inst.a), function.doubleOp(inst.b), function.conditionOp(inst.c))) if (compare(function.doubleOp(inst.a), function.doubleOp(inst.b), conditionOp(inst.c)))
replace(function, block, index, {IrCmd::JUMP, inst.d}); replace(function, block, index, {IrCmd::JUMP, inst.d});
else else
replace(function, block, index, {IrCmd::JUMP, inst.e}); replace(function, block, index, {IrCmd::JUMP, inst.e});

View file

@ -109,6 +109,7 @@ void initHelperFunctions(NativeState& data)
data.context.forgPrepXnextFallback = forgPrepXnextFallback; data.context.forgPrepXnextFallback = forgPrepXnextFallback;
data.context.callProlog = callProlog; data.context.callProlog = callProlog;
data.context.callEpilogC = callEpilogC; data.context.callEpilogC = callEpilogC;
data.context.returnFallback = returnFallback;
} }
} // namespace CodeGen } // namespace CodeGen

View file

@ -47,12 +47,6 @@ struct NativeContext
uint8_t* gateEntry = nullptr; uint8_t* gateEntry = nullptr;
uint8_t* gateExit = nullptr; uint8_t* gateExit = nullptr;
// Opcode fallbacks, implemented in C
NativeFallback fallback[LOP__COUNT] = {};
// Fast call methods, implemented in C
luau_FastFunction luauF_table[256] = {};
// Helper functions, implemented in C // Helper functions, implemented in C
int (*luaV_lessthan)(lua_State* L, const TValue* l, const TValue* r) = nullptr; int (*luaV_lessthan)(lua_State* L, const TValue* l, const TValue* r) = nullptr;
int (*luaV_lessequal)(lua_State* L, const TValue* l, const TValue* r) = nullptr; int (*luaV_lessequal)(lua_State* L, const TValue* l, const TValue* r) = nullptr;
@ -107,6 +101,13 @@ struct NativeContext
void (*forgPrepXnextFallback)(lua_State* L, TValue* ra, int pc) = nullptr; void (*forgPrepXnextFallback)(lua_State* L, TValue* ra, int pc) = nullptr;
Closure* (*callProlog)(lua_State* L, TValue* ra, StkId argtop, int nresults) = nullptr; Closure* (*callProlog)(lua_State* L, TValue* ra, StkId argtop, int nresults) = nullptr;
void (*callEpilogC)(lua_State* L, int nresults, int n) = nullptr; void (*callEpilogC)(lua_State* L, int nresults, int n) = nullptr;
const Instruction* (*returnFallback)(lua_State* L, StkId ra, int n) = nullptr;
// Opcode fallbacks, implemented in C
NativeFallback fallback[LOP__COUNT] = {};
// Fast call methods, implemented in C
luau_FastFunction luauF_table[256] = {};
}; };
struct NativeState struct NativeState

View file

@ -42,6 +42,11 @@ struct RegisterLink
// Data we know about the current VM state // Data we know about the current VM state
struct ConstPropState struct ConstPropState
{ {
ConstPropState(const IrFunction& function)
: function(function)
{
}
uint8_t tryGetTag(IrOp op) uint8_t tryGetTag(IrOp op)
{ {
if (RegisterInfo* info = tryGetRegisterInfo(op)) if (RegisterInfo* info = tryGetRegisterInfo(op))
@ -107,14 +112,29 @@ struct ConstPropState
invalidate(regs[regOp.index], /* invalidateTag */ true, /* invalidateValue */ true); invalidate(regs[regOp.index], /* invalidateTag */ true, /* invalidateValue */ true);
} }
void invalidateRegistersFrom(uint32_t firstReg) void invalidateRegistersFrom(int firstReg)
{ {
for (int i = int(firstReg); i <= maxReg; ++i) for (int i = firstReg; i <= maxReg; ++i)
invalidate(regs[i], /* invalidateTag */ true, /* invalidateValue */ true); invalidate(regs[i], /* invalidateTag */ true, /* invalidateValue */ true);
maxReg = int(firstReg) - 1; maxReg = int(firstReg) - 1;
} }
void invalidateRegisterRange(int firstReg, int count)
{
for (int i = firstReg; i < firstReg + count && i <= maxReg; ++i)
invalidate(regs[i], /* invalidateTag */ true, /* invalidateValue */ true);
}
void invalidateCapturedRegisters()
{
for (int i = 0; i <= maxReg; ++i)
{
if (function.cfg.captured.regs.test(i))
invalidate(regs[i], /* invalidateTag */ true, /* invalidateValue */ true);
}
}
void invalidateHeap() void invalidateHeap()
{ {
for (int i = 0; i <= maxReg; ++i) for (int i = 0; i <= maxReg; ++i)
@ -127,10 +147,10 @@ struct ConstPropState
reg.knownNoMetatable = false; reg.knownNoMetatable = false;
} }
void invalidateAll() void invalidateUserCall()
{ {
// Invalidating registers also invalidates what we know about the heap (stored in RegisterInfo) invalidateHeap();
invalidateRegistersFrom(0u); invalidateCapturedRegisters();
inSafeEnv = false; inSafeEnv = false;
} }
@ -175,6 +195,8 @@ struct ConstPropState
return nullptr; return nullptr;
} }
const IrFunction& function;
RegisterInfo regs[256]; RegisterInfo regs[256];
// For range/full invalidations, we only want to visit a limited amount of data that we have recorded // For range/full invalidations, we only want to visit a limited amount of data that we have recorded
@ -411,7 +433,7 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
if (valueA && valueB) if (valueA && valueB)
{ {
if (compare(*valueA, *valueB, function.conditionOp(inst.c))) if (compare(*valueA, *valueB, conditionOp(inst.c)))
replace(function, block, index, {IrCmd::JUMP, inst.d}); replace(function, block, index, {IrCmd::JUMP, inst.d});
else else
replace(function, block, index, {IrCmd::JUMP, inst.e}); replace(function, block, index, {IrCmd::JUMP, inst.e});
@ -485,7 +507,7 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
case IrCmd::LOP_ANDK: case IrCmd::LOP_ANDK:
case IrCmd::LOP_OR: case IrCmd::LOP_OR:
case IrCmd::LOP_ORK: case IrCmd::LOP_ORK:
state.invalidate(inst.b); state.invalidate(inst.a);
break; break;
case IrCmd::FASTCALL: case IrCmd::FASTCALL:
case IrCmd::INVOKE_FASTCALL: case IrCmd::INVOKE_FASTCALL:
@ -538,35 +560,93 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
case IrCmd::CHECK_FASTCALL_RES: // Changes stack top, but not the values case IrCmd::CHECK_FASTCALL_RES: // Changes stack top, but not the values
break; break;
// We don't model the following instructions, so we just clear all the knowledge we have built up
// Many of these call user functions that can change memory and captured registers
// Some of these might yield with similar effects
case IrCmd::JUMP_CMP_ANY: case IrCmd::JUMP_CMP_ANY:
state.invalidateUserCall(); // TODO: if arguments are strings, there will be no user calls
break;
case IrCmd::DO_ARITH: case IrCmd::DO_ARITH:
state.invalidate(inst.a);
state.invalidateUserCall();
break;
case IrCmd::DO_LEN: case IrCmd::DO_LEN:
state.invalidate(inst.a);
state.invalidateUserCall(); // TODO: if argument is a string, there will be no user call
state.saveTag(inst.a, LUA_TNUMBER);
break;
case IrCmd::GET_TABLE: case IrCmd::GET_TABLE:
state.invalidate(inst.a);
state.invalidateUserCall();
break;
case IrCmd::SET_TABLE: case IrCmd::SET_TABLE:
state.invalidateUserCall();
break;
case IrCmd::GET_IMPORT: case IrCmd::GET_IMPORT:
state.invalidate(inst.a);
state.invalidateUserCall();
break;
case IrCmd::CONCAT: case IrCmd::CONCAT:
state.invalidateRegisterRange(inst.a.index, function.uintOp(inst.b));
state.invalidateUserCall(); // TODO: if only strings and numbers are concatenated, there will be no user calls
break;
case IrCmd::PREPARE_FORN: case IrCmd::PREPARE_FORN:
case IrCmd::INTERRUPT: // TODO: it will be important to keep tag/value state, but we have to track register capture state.invalidateValue(inst.a);
state.saveTag(inst.a, LUA_TNUMBER);
state.invalidateValue(inst.b);
state.saveTag(inst.b, LUA_TNUMBER);
state.invalidateValue(inst.c);
state.saveTag(inst.c, LUA_TNUMBER);
break;
case IrCmd::INTERRUPT:
state.invalidateUserCall();
break;
case IrCmd::LOP_CALL: case IrCmd::LOP_CALL:
state.invalidateRegistersFrom(inst.a.index);
state.invalidateUserCall();
break;
case IrCmd::LOP_FORGLOOP: case IrCmd::LOP_FORGLOOP:
state.invalidateRegistersFrom(inst.a.index + 2); // Rn and Rn+1 are not modified
break;
case IrCmd::LOP_FORGLOOP_FALLBACK: case IrCmd::LOP_FORGLOOP_FALLBACK:
state.invalidateRegistersFrom(inst.b.index + 2); // Rn and Rn+1 are not modified
state.invalidateUserCall();
break;
case IrCmd::LOP_FORGPREP_XNEXT_FALLBACK: case IrCmd::LOP_FORGPREP_XNEXT_FALLBACK:
// This fallback only conditionally throws an exception
break;
case IrCmd::FALLBACK_GETGLOBAL: case IrCmd::FALLBACK_GETGLOBAL:
state.invalidate(inst.b);
state.invalidateUserCall();
break;
case IrCmd::FALLBACK_SETGLOBAL: case IrCmd::FALLBACK_SETGLOBAL:
state.invalidateUserCall();
break;
case IrCmd::FALLBACK_GETTABLEKS: case IrCmd::FALLBACK_GETTABLEKS:
state.invalidate(inst.b);
state.invalidateUserCall();
break;
case IrCmd::FALLBACK_SETTABLEKS: case IrCmd::FALLBACK_SETTABLEKS:
state.invalidateUserCall();
break;
case IrCmd::FALLBACK_NAMECALL: case IrCmd::FALLBACK_NAMECALL:
state.invalidate(IrOp{inst.b.kind, inst.b.index + 0u});
state.invalidate(IrOp{inst.b.kind, inst.b.index + 1u});
state.invalidateUserCall();
break;
case IrCmd::FALLBACK_PREPVARARGS: case IrCmd::FALLBACK_PREPVARARGS:
break;
case IrCmd::FALLBACK_GETVARARGS: case IrCmd::FALLBACK_GETVARARGS:
state.invalidateRegistersFrom(inst.b.index);
break;
case IrCmd::FALLBACK_NEWCLOSURE: case IrCmd::FALLBACK_NEWCLOSURE:
state.invalidate(inst.b);
break;
case IrCmd::FALLBACK_DUPCLOSURE: case IrCmd::FALLBACK_DUPCLOSURE:
state.invalidate(inst.b);
break;
case IrCmd::FALLBACK_FORGPREP: case IrCmd::FALLBACK_FORGPREP:
// TODO: this is very conservative, some of these instructions can be tracked better state.invalidate(IrOp{inst.b.kind, inst.b.index + 1u});
// TODO: non-captured register tags and values should not be cleared here state.invalidate(IrOp{inst.b.kind, inst.b.index + 1u});
state.invalidateAll(); state.invalidate(IrOp{inst.b.kind, inst.b.index + 2u});
break; break;
} }
} }
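
The finer-grained invalidation above depends on knowing which VM registers are captured by closures: during an unmodeled user call (a metamethod invoked by DO_ARITH, an __index lookup behind GET_TABLE, and so on), only the heap and the captured registers can change behind the pass's back, so invalidateUserCall drops exactly that instead of the old invalidateAll. The analysis that fills in cfg.captured is not part of the hunks shown, so the following is a hedged sketch of where that set presumably comes from; the exact operands of IrCmd::CAPTURE are an assumption:

// Sketch only: record registers captured by reference so that
// invalidateCapturedRegisters() can visit exactly those.
// Assumes CAPTURE carries the register in operand 'a' and a
// by-reference/mutable flag in operand 'b'.
void markCapturedRegisters(IrFunction& function)
{
    for (const IrInst& inst : function.instructions)
    {
        if (inst.cmd == IrCmd::CAPTURE && inst.a.kind == IrOpKind::VmReg && function.boolOp(inst.b))
            function.cfg.captured.regs.set(vmRegOp(inst.a));
    }
}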
@ -592,7 +672,7 @@ static void constPropInBlockChain(IrBuilder& build, std::vector<uint8_t>& visite
{ {
IrFunction& function = build.function; IrFunction& function = build.function;
ConstPropState state; ConstPropState state{function};
while (block) while (block)
{ {
@ -698,7 +778,7 @@ static void tryCreateLinearBlock(IrBuilder& build, std::vector<uint8_t>& visited
return; return;
// Initialize state with the knowledge of our current block // Initialize state with the knowledge of our current block
ConstPropState state; ConstPropState state{function};
constPropInBlock(build, startingBlock, state); constPropInBlock(build, startingBlock, state);
// Verify that target hasn't changed // Verify that target hasn't changed

View file

@ -117,6 +117,11 @@ ifneq ($(native),)
TESTS_ARGS+=--codegen TESTS_ARGS+=--codegen
endif endif
ifneq ($(nativelj),)
CXXFLAGS+=-DLUA_CUSTOM_EXECUTION=1 -DLUA_USE_LONGJMP=1
TESTS_ARGS+=--codegen
endif
# target-specific flags # target-specific flags
$(AST_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include $(AST_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include
$(COMPILER_OBJECTS): CXXFLAGS+=-std=c++17 -ICompiler/include -ICommon/include -IAst/include $(COMPILER_OBJECTS): CXXFLAGS+=-std=c++17 -ICompiler/include -ICommon/include -IAst/include

View file

@ -84,14 +84,18 @@ target_sources(Luau.CodeGen PRIVATE
CodeGen/src/CodeBlockUnwind.cpp CodeGen/src/CodeBlockUnwind.cpp
CodeGen/src/CodeGen.cpp CodeGen/src/CodeGen.cpp
CodeGen/src/CodeGenUtils.cpp CodeGen/src/CodeGenUtils.cpp
CodeGen/src/CodeGenA64.cpp
CodeGen/src/CodeGenX64.cpp CodeGen/src/CodeGenX64.cpp
CodeGen/src/EmitBuiltinsX64.cpp CodeGen/src/EmitBuiltinsX64.cpp
CodeGen/src/EmitCommonA64.cpp
CodeGen/src/EmitCommonX64.cpp CodeGen/src/EmitCommonX64.cpp
CodeGen/src/EmitInstructionA64.cpp
CodeGen/src/EmitInstructionX64.cpp CodeGen/src/EmitInstructionX64.cpp
CodeGen/src/Fallbacks.cpp CodeGen/src/Fallbacks.cpp
CodeGen/src/IrAnalysis.cpp CodeGen/src/IrAnalysis.cpp
CodeGen/src/IrBuilder.cpp CodeGen/src/IrBuilder.cpp
CodeGen/src/IrDump.cpp CodeGen/src/IrDump.cpp
CodeGen/src/IrLoweringA64.cpp
CodeGen/src/IrLoweringX64.cpp CodeGen/src/IrLoweringX64.cpp
CodeGen/src/IrRegAllocX64.cpp CodeGen/src/IrRegAllocX64.cpp
CodeGen/src/IrTranslateBuiltins.cpp CodeGen/src/IrTranslateBuiltins.cpp
@ -106,13 +110,17 @@ target_sources(Luau.CodeGen PRIVATE
CodeGen/src/ByteUtils.h CodeGen/src/ByteUtils.h
CodeGen/src/CustomExecUtils.h CodeGen/src/CustomExecUtils.h
CodeGen/src/CodeGenUtils.h CodeGen/src/CodeGenUtils.h
CodeGen/src/CodeGenA64.h
CodeGen/src/CodeGenX64.h CodeGen/src/CodeGenX64.h
CodeGen/src/EmitBuiltinsX64.h CodeGen/src/EmitBuiltinsX64.h
CodeGen/src/EmitCommon.h CodeGen/src/EmitCommon.h
CodeGen/src/EmitCommonA64.h
CodeGen/src/EmitCommonX64.h CodeGen/src/EmitCommonX64.h
CodeGen/src/EmitInstructionA64.h
CodeGen/src/EmitInstructionX64.h CodeGen/src/EmitInstructionX64.h
CodeGen/src/Fallbacks.h CodeGen/src/Fallbacks.h
CodeGen/src/FallbacksProlog.h CodeGen/src/FallbacksProlog.h
CodeGen/src/IrLoweringA64.h
CodeGen/src/IrLoweringX64.h CodeGen/src/IrLoweringX64.h
CodeGen/src/IrRegAllocX64.h CodeGen/src/IrRegAllocX64.h
CodeGen/src/IrTranslateBuiltins.h CodeGen/src/IrTranslateBuiltins.h

View file

@ -208,14 +208,14 @@ typedef struct global_State
uint64_t rngstate; // PCG random number generator state uint64_t rngstate; // PCG random number generator state
uint64_t ptrenckey[4]; // pointer encoding key for display uint64_t ptrenckey[4]; // pointer encoding key for display
void (*udatagc[LUA_UTAG_LIMIT])(lua_State*, void*); // for each userdata tag, a gc callback to be called immediately before freeing memory
lua_Callbacks cb; lua_Callbacks cb;
#if LUA_CUSTOM_EXECUTION #if LUA_CUSTOM_EXECUTION
lua_ExecutionCallbacks ecb; lua_ExecutionCallbacks ecb;
#endif #endif
void (*udatagc[LUA_UTAG_LIMIT])(lua_State*, void*); // for each userdata tag, a gc callback to be called immediately before freeing memory
GCStats gcstats; GCStats gcstats;
#ifdef LUAI_GCMETRICS #ifdef LUAI_GCMETRICS

View file

@ -120,6 +120,10 @@ TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "Loads")
SINGLE_COMPARE(ldrsh(x0, x1), 0x79800020); SINGLE_COMPARE(ldrsh(x0, x1), 0x79800020);
SINGLE_COMPARE(ldrsh(w0, x1), 0x79C00020); SINGLE_COMPARE(ldrsh(w0, x1), 0x79C00020);
SINGLE_COMPARE(ldrsw(x0, x1), 0xB9800020); SINGLE_COMPARE(ldrsw(x0, x1), 0xB9800020);
// paired loads
SINGLE_COMPARE(ldp(x0, x1, mem(x2, 8)), 0xA9408440);
SINGLE_COMPARE(ldp(w0, w1, mem(x2, -8)), 0x297F0440);
} }
TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "Stores") TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "Stores")
@ -135,15 +139,58 @@ TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "Stores")
SINGLE_COMPARE(str(w0, x1), 0xB9000020); SINGLE_COMPARE(str(w0, x1), 0xB9000020);
SINGLE_COMPARE(strb(w0, x1), 0x39000020); SINGLE_COMPARE(strb(w0, x1), 0x39000020);
SINGLE_COMPARE(strh(w0, x1), 0x79000020); SINGLE_COMPARE(strh(w0, x1), 0x79000020);
// paired stores
SINGLE_COMPARE(stp(x0, x1, mem(x2, 8)), 0xA9008440);
SINGLE_COMPARE(stp(w0, w1, mem(x2, -8)), 0x293F0440);
} }
TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "Moves") TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "Moves")
{ {
SINGLE_COMPARE(mov(x0, x1), 0xAA0103E0); SINGLE_COMPARE(mov(x0, x1), 0xAA0103E0);
SINGLE_COMPARE(mov(w0, w1), 0x2A0103E0); SINGLE_COMPARE(mov(w0, w1), 0x2A0103E0);
SINGLE_COMPARE(mov(x0, 42), 0xD2800540);
SINGLE_COMPARE(mov(w0, 42), 0x52800540); SINGLE_COMPARE(movz(x0, 42), 0xD2800540);
SINGLE_COMPARE(movz(w0, 42), 0x52800540);
SINGLE_COMPARE(movn(x0, 42), 0x92800540);
SINGLE_COMPARE(movn(w0, 42), 0x12800540);
SINGLE_COMPARE(movk(x0, 42, 16), 0xF2A00540); SINGLE_COMPARE(movk(x0, 42, 16), 0xF2A00540);
CHECK(check(
[](AssemblyBuilderA64& build) {
build.mov(x0, 42);
},
{0xD2800540}));
CHECK(check(
[](AssemblyBuilderA64& build) {
build.mov(x0, 424242);
},
{0xD28F2640, 0xF2A000C0}));
CHECK(check(
[](AssemblyBuilderA64& build) {
build.mov(x0, -42);
},
{0x92800520}));
CHECK(check(
[](AssemblyBuilderA64& build) {
build.mov(x0, -424242);
},
{0x928F2620, 0xF2BFFF20}));
CHECK(check(
[](AssemblyBuilderA64& build) {
build.mov(x0, -65536);
},
{0x929FFFE0}));
CHECK(check(
[](AssemblyBuilderA64& build) {
build.mov(x0, -65537);
},
{0x92800000, 0xF2BFFFC0}));
} }
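
The expected encodings above pin down how mov with an immediate is now synthesized from movz/movn/movk: non-negative values start from movz of the low halfword, negative values from movn of the complemented low halfword, and movk patches bits 16..31 only when they differ from what the first instruction already produced. The sketch below reproduces all six vectors; it is an illustration limited to these 32-bit cases, not the actual AssemblyBuilderA64::mov, which presumably handles the full 64-bit range:

// Sketch only: immediate synthesis matching the test vectors above.
void movImm(AssemblyBuilderA64& build, RegisterA64 dst, int64_t value)
{
    if (value >= 0)
    {
        build.movz(dst, uint16_t(value & 0xffff));                 // e.g. 42 -> 0xD2800540
        if (value > 0xffff)
            build.movk(dst, uint16_t((value >> 16) & 0xffff), 16); // 424242 adds 0xF2A000C0
    }
    else
    {
        build.movn(dst, uint16_t(~value & 0xffff));                // e.g. -42 -> 0x92800520
        if (~value > 0xffff)
            build.movk(dst, uint16_t((value >> 16) & 0xffff), 16); // -424242 adds 0xF2BFFF20
    }
}

For example, -65536 fits in a single movn (0x929FFFE0), while -65537 needs movn plus a movk of 0xfffe into bits 16..31 (0x92800000, 0xF2BFFFC0), exactly as checked above.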
TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "ControlFlow") TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "ControlFlow")
@ -222,6 +269,22 @@ TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "Constants")
// clang-format on // clang-format on
} }
TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "AddressOfLabel")
{
// clang-format off
CHECK(check(
[](AssemblyBuilderA64& build) {
Label label;
build.adr(x0, label);
build.add(x0, x0, x0);
build.setLabel(label);
},
{
0x10000040, 0x8b000000,
}));
// clang-format on
}
TEST_CASE("LogTest") TEST_CASE("LogTest")
{ {
AssemblyBuilderA64 build(/* logText= */ true); AssemblyBuilderA64 build(/* logText= */ true);
@ -243,6 +306,9 @@ TEST_CASE("LogTest")
build.b(ConditionA64::Plus, l); build.b(ConditionA64::Plus, l);
build.cbz(x7, l); build.cbz(x7, l);
build.ldp(x0, x1, mem(x8, 8));
build.adr(x0, l);
build.setLabel(l); build.setLabel(l);
build.ret(); build.ret();
@ -263,6 +329,8 @@ TEST_CASE("LogTest")
blr x0 blr x0
b.pl .L1 b.pl .L1
cbz x7,.L1 cbz x7,.L1
ldp x0,x1,[x8,#8]
adr x0,.L1
.L1: .L1:
ret ret
)"; )";

View file

@ -465,6 +465,7 @@ TEST_CASE("GeneratedCodeExecutionA64")
build.add(x1, x1, 2); build.add(x1, x1, 2);
build.add(x0, x0, x1, /* LSL */ 1); build.add(x0, x0, x1, /* LSL */ 1);
build.ret(); build.ret();
build.finalize(); build.finalize();

View file

@ -1250,7 +1250,9 @@ TEST_CASE("Interrupt")
13, 13,
13, 13,
16, 16,
20, 23,
21,
25,
}; };
static int index; static int index;

View file

@ -764,12 +764,14 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "ConcatInvalidation")
build.inst(IrCmd::STORE_TAG, build.vmReg(0), build.constTag(tnumber)); build.inst(IrCmd::STORE_TAG, build.vmReg(0), build.constTag(tnumber));
build.inst(IrCmd::STORE_INT, build.vmReg(1), build.constInt(10)); build.inst(IrCmd::STORE_INT, build.vmReg(1), build.constInt(10));
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(2), build.constDouble(0.5)); build.inst(IrCmd::STORE_DOUBLE, build.vmReg(2), build.constDouble(0.5));
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(3), build.constDouble(2.0));
build.inst(IrCmd::CONCAT, build.vmReg(0), build.vmReg(3)); // Concat invalidates more than the target register build.inst(IrCmd::CONCAT, build.vmReg(0), build.constUint(3));
build.inst(IrCmd::STORE_TAG, build.vmReg(3), build.inst(IrCmd::LOAD_TAG, build.vmReg(0))); build.inst(IrCmd::STORE_TAG, build.vmReg(4), build.inst(IrCmd::LOAD_TAG, build.vmReg(0)));
build.inst(IrCmd::STORE_INT, build.vmReg(4), build.inst(IrCmd::LOAD_INT, build.vmReg(1))); build.inst(IrCmd::STORE_INT, build.vmReg(5), build.inst(IrCmd::LOAD_INT, build.vmReg(1)));
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(5), build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(2))); build.inst(IrCmd::STORE_DOUBLE, build.vmReg(6), build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(2)));
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(7), build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(3)));
build.inst(IrCmd::LOP_RETURN, build.constUint(0)); build.inst(IrCmd::LOP_RETURN, build.constUint(0));
@ -781,13 +783,15 @@ bb_0:
STORE_TAG R0, tnumber STORE_TAG R0, tnumber
STORE_INT R1, 10i STORE_INT R1, 10i
STORE_DOUBLE R2, 0.5 STORE_DOUBLE R2, 0.5
CONCAT R0, R3 STORE_DOUBLE R3, 2
%4 = LOAD_TAG R0 CONCAT R0, 3u
STORE_TAG R3, %4 %5 = LOAD_TAG R0
%6 = LOAD_INT R1 STORE_TAG R4, %5
STORE_INT R4, %6 %7 = LOAD_INT R1
%8 = LOAD_DOUBLE R2 STORE_INT R5, %7
STORE_DOUBLE R5, %8 %9 = LOAD_DOUBLE R2
STORE_DOUBLE R6, %9
STORE_DOUBLE R7, 2
LOP_RETURN 0u LOP_RETURN 0u
)"); )");
@ -1179,7 +1183,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "EntryBlockUseRemoval")
build.inst(IrCmd::JUMP_IF_TRUTHY, build.vmReg(0), exit, repeat); build.inst(IrCmd::JUMP_IF_TRUTHY, build.vmReg(0), exit, repeat);
build.beginBlock(exit); build.beginBlock(exit);
build.inst(IrCmd::LOP_RETURN, build.constUint(0), build.vmReg(0), build.constInt(0)); build.inst(IrCmd::LOP_RETURN, build.vmReg(0), build.constInt(0));
build.beginBlock(repeat); build.beginBlock(repeat);
build.inst(IrCmd::INTERRUPT, build.constUint(0)); build.inst(IrCmd::INTERRUPT, build.constUint(0));
@ -1194,7 +1198,7 @@ bb_0:
JUMP bb_1 JUMP bb_1
bb_1: bb_1:
LOP_RETURN 0u, R0, 0i LOP_RETURN R0, 0i
)"); )");
} }
@ -1207,14 +1211,14 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "RecursiveSccUseRemoval1")
IrOp repeat = build.block(IrBlockKind::Internal); IrOp repeat = build.block(IrBlockKind::Internal);
build.beginBlock(entry); build.beginBlock(entry);
build.inst(IrCmd::LOP_RETURN, build.constUint(0), build.vmReg(0), build.constInt(0)); build.inst(IrCmd::LOP_RETURN, build.vmReg(0), build.constInt(0));
build.beginBlock(block); build.beginBlock(block);
build.inst(IrCmd::STORE_TAG, build.vmReg(0), build.constTag(tnumber)); build.inst(IrCmd::STORE_TAG, build.vmReg(0), build.constTag(tnumber));
build.inst(IrCmd::JUMP_IF_TRUTHY, build.vmReg(0), exit, repeat); build.inst(IrCmd::JUMP_IF_TRUTHY, build.vmReg(0), exit, repeat);
build.beginBlock(exit); build.beginBlock(exit);
build.inst(IrCmd::LOP_RETURN, build.constUint(0), build.vmReg(0), build.constInt(0)); build.inst(IrCmd::LOP_RETURN, build.vmReg(0), build.constInt(0));
build.beginBlock(repeat); build.beginBlock(repeat);
build.inst(IrCmd::INTERRUPT, build.constUint(0)); build.inst(IrCmd::INTERRUPT, build.constUint(0));
@ -1225,14 +1229,14 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "RecursiveSccUseRemoval1")
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"( CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
bb_0: bb_0:
LOP_RETURN 0u, R0, 0i LOP_RETURN R0, 0i
bb_1: bb_1:
STORE_TAG R0, tnumber STORE_TAG R0, tnumber
JUMP bb_2 JUMP bb_2
bb_2: bb_2:
LOP_RETURN 0u, R0, 0i LOP_RETURN R0, 0i
)"); )");
} }
@ -1249,14 +1253,14 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "RecursiveSccUseRemoval2")
build.inst(IrCmd::JUMP_EQ_INT, build.constInt(0), build.constInt(1), block, exit1); build.inst(IrCmd::JUMP_EQ_INT, build.constInt(0), build.constInt(1), block, exit1);
build.beginBlock(exit1); build.beginBlock(exit1);
build.inst(IrCmd::LOP_RETURN, build.constUint(0), build.vmReg(0), build.constInt(0)); build.inst(IrCmd::LOP_RETURN, build.vmReg(0), build.constInt(0));
build.beginBlock(block); build.beginBlock(block);
build.inst(IrCmd::STORE_TAG, build.vmReg(0), build.constTag(tnumber)); build.inst(IrCmd::STORE_TAG, build.vmReg(0), build.constTag(tnumber));
build.inst(IrCmd::JUMP_IF_TRUTHY, build.vmReg(0), exit2, repeat); build.inst(IrCmd::JUMP_IF_TRUTHY, build.vmReg(0), exit2, repeat);
build.beginBlock(exit2); build.beginBlock(exit2);
build.inst(IrCmd::LOP_RETURN, build.constUint(0), build.vmReg(0), build.constInt(0)); build.inst(IrCmd::LOP_RETURN, build.vmReg(0), build.constInt(0));
build.beginBlock(repeat); build.beginBlock(repeat);
build.inst(IrCmd::INTERRUPT, build.constUint(0)); build.inst(IrCmd::INTERRUPT, build.constUint(0));
@ -1270,14 +1274,14 @@ bb_0:
JUMP bb_1 JUMP bb_1
bb_1: bb_1:
LOP_RETURN 0u, R0, 0i LOP_RETURN R0, 0i
bb_2: bb_2:
STORE_TAG R0, tnumber STORE_TAG R0, tnumber
JUMP bb_3 JUMP bb_3
bb_3: bb_3:
LOP_RETURN 0u, R0, 0i LOP_RETURN R0, 0i
)"); )");
} }
@ -1318,7 +1322,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "SimplePathExtraction")
build.inst(IrCmd::JUMP, block4); build.inst(IrCmd::JUMP, block4);
build.beginBlock(block4); build.beginBlock(block4);
build.inst(IrCmd::LOP_RETURN, build.constUint(0), build.vmReg(0), build.constInt(0)); build.inst(IrCmd::LOP_RETURN, build.vmReg(0), build.constInt(0));
updateUseCounts(build.function); updateUseCounts(build.function);
constPropInBlockChains(build); constPropInBlockChains(build);
@ -1346,10 +1350,10 @@ bb_4:
JUMP bb_5 JUMP bb_5
bb_5: bb_5:
LOP_RETURN 0u, R0, 0i LOP_RETURN R0, 0i
bb_linear_6: bb_linear_6:
LOP_RETURN 0u, R0, 0i LOP_RETURN R0, 0i
)"); )");
} }
@ -1389,11 +1393,11 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "NoPathExtractionForBlocksWithLiveOutValues"
build.beginBlock(block4a); build.beginBlock(block4a);
build.inst(IrCmd::STORE_TAG, build.vmReg(0), tag3a); build.inst(IrCmd::STORE_TAG, build.vmReg(0), tag3a);
build.inst(IrCmd::LOP_RETURN, build.constUint(0), build.vmReg(0), build.constInt(0)); build.inst(IrCmd::LOP_RETURN, build.vmReg(0), build.constInt(0));
build.beginBlock(block4b); build.beginBlock(block4b);
build.inst(IrCmd::STORE_TAG, build.vmReg(0), tag3a); build.inst(IrCmd::STORE_TAG, build.vmReg(0), tag3a);
build.inst(IrCmd::LOP_RETURN, build.constUint(0), build.vmReg(0), build.constInt(0)); build.inst(IrCmd::LOP_RETURN, build.vmReg(0), build.constInt(0));
updateUseCounts(build.function); updateUseCounts(build.function);
constPropInBlockChains(build); constPropInBlockChains(build);
@ -1423,11 +1427,11 @@ bb_4:
bb_5: bb_5:
STORE_TAG R0, %10 STORE_TAG R0, %10
LOP_RETURN 0u, R0, 0i LOP_RETURN R0, 0i
bb_6: bb_6:
STORE_TAG R0, %10 STORE_TAG R0, %10
LOP_RETURN 0u, R0, 0i LOP_RETURN R0, 0i
)"); )");
} }
@ -1484,7 +1488,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "SimpleDiamond")
build.inst(IrCmd::JUMP, exit); build.inst(IrCmd::JUMP, exit);
build.beginBlock(exit); build.beginBlock(exit);
build.inst(IrCmd::LOP_RETURN, build.constUint(0), build.vmReg(2), build.constInt(2)); build.inst(IrCmd::LOP_RETURN, build.vmReg(2), build.constInt(2));
updateUseCounts(build.function); updateUseCounts(build.function);
computeCfgInfo(build.function); computeCfgInfo(build.function);
@ -1518,7 +1522,7 @@ bb_2:
bb_3: bb_3:
; predecessors: bb_1, bb_2 ; predecessors: bb_1, bb_2
; in regs: R2, R3 ; in regs: R2, R3
LOP_RETURN 0u, R2, 2i LOP_RETURN R2, 2i
)"); )");
} }
@ -1530,11 +1534,11 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "ImplicitFixedRegistersInVarargCall")
build.beginBlock(entry); build.beginBlock(entry);
build.inst(IrCmd::FALLBACK_GETVARARGS, build.constUint(0), build.vmReg(3), build.constInt(-1)); build.inst(IrCmd::FALLBACK_GETVARARGS, build.constUint(0), build.vmReg(3), build.constInt(-1));
build.inst(IrCmd::LOP_CALL, build.constUint(0), build.vmReg(0), build.constInt(-1), build.constInt(5)); build.inst(IrCmd::LOP_CALL, build.vmReg(0), build.constInt(-1), build.constInt(5));
build.inst(IrCmd::JUMP, exit); build.inst(IrCmd::JUMP, exit);
build.beginBlock(exit); build.beginBlock(exit);
build.inst(IrCmd::LOP_RETURN, build.constUint(0), build.vmReg(0), build.constInt(5)); build.inst(IrCmd::LOP_RETURN, build.vmReg(0), build.constInt(5));
updateUseCounts(build.function); updateUseCounts(build.function);
computeCfgInfo(build.function); computeCfgInfo(build.function);
@ -1545,13 +1549,13 @@ bb_0:
; in regs: R0, R1, R2 ; in regs: R0, R1, R2
; out regs: R0, R1, R2, R3, R4 ; out regs: R0, R1, R2, R3, R4
FALLBACK_GETVARARGS 0u, R3, -1i FALLBACK_GETVARARGS 0u, R3, -1i
LOP_CALL 0u, R0, -1i, 5i LOP_CALL R0, -1i, 5i
JUMP bb_1 JUMP bb_1
bb_1: bb_1:
; predecessors: bb_0 ; predecessors: bb_0
; in regs: R0, R1, R2, R3, R4 ; in regs: R0, R1, R2, R3, R4
LOP_RETURN 0u, R0, 5i LOP_RETURN R0, 5i
)"); )");
} }
@@ -1563,11 +1567,13 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "ExplicitUseOfRegisterInVarargSequence")
 build.beginBlock(entry);
 build.inst(IrCmd::FALLBACK_GETVARARGS, build.constUint(0), build.vmReg(1), build.constInt(-1));
-build.inst(IrCmd::INVOKE_FASTCALL, build.constUint(0), build.vmReg(0), build.vmReg(1), build.vmReg(2), build.constInt(-1), build.constInt(-1));
+IrOp results = build.inst(
+    IrCmd::INVOKE_FASTCALL, build.constUint(0), build.vmReg(0), build.vmReg(1), build.vmReg(2), build.constInt(-1), build.constInt(-1));
+build.inst(IrCmd::ADJUST_STACK_TO_REG, build.vmReg(0), results);
 build.inst(IrCmd::JUMP, exit);
 build.beginBlock(exit);
-build.inst(IrCmd::LOP_RETURN, build.constUint(0), build.vmReg(0), build.constInt(-1));
+build.inst(IrCmd::LOP_RETURN, build.vmReg(0), build.constInt(-1));
 updateUseCounts(build.function);
 computeCfgInfo(build.function);
@@ -1578,12 +1584,13 @@ bb_0:
 ; out regs: R0...
 FALLBACK_GETVARARGS 0u, R1, -1i
 %1 = INVOKE_FASTCALL 0u, R0, R1, R2, -1i, -1i
+ADJUST_STACK_TO_REG R0, %1
 JUMP bb_1
 bb_1:
 ; predecessors: bb_0
 ; in regs: R0...
-LOP_RETURN 0u, R0, -1i
+LOP_RETURN R0, -1i
 )");
 }
@@ -1594,12 +1601,12 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "VariadicSequenceRestart")
 IrOp exit = build.block(IrBlockKind::Internal);
 build.beginBlock(entry);
-build.inst(IrCmd::LOP_CALL, build.constUint(0), build.vmReg(1), build.constInt(0), build.constInt(-1));
-build.inst(IrCmd::LOP_CALL, build.constUint(0), build.vmReg(0), build.constInt(-1), build.constInt(-1));
+build.inst(IrCmd::LOP_CALL, build.vmReg(1), build.constInt(0), build.constInt(-1));
+build.inst(IrCmd::LOP_CALL, build.vmReg(0), build.constInt(-1), build.constInt(-1));
 build.inst(IrCmd::JUMP, exit);
 build.beginBlock(exit);
-build.inst(IrCmd::LOP_RETURN, build.constUint(0), build.vmReg(0), build.constInt(-1));
+build.inst(IrCmd::LOP_RETURN, build.vmReg(0), build.constInt(-1));
 updateUseCounts(build.function);
 computeCfgInfo(build.function);
@@ -1609,14 +1616,14 @@ bb_0:
 ; successors: bb_1
 ; in regs: R0, R1
 ; out regs: R0...
-LOP_CALL 0u, R1, 0i, -1i
-LOP_CALL 0u, R0, -1i, -1i
+LOP_CALL R1, 0i, -1i
+LOP_CALL R0, -1i, -1i
 JUMP bb_1
 bb_1:
 ; predecessors: bb_0
 ; in regs: R0...
-LOP_RETURN 0u, R0, -1i
+LOP_RETURN R0, -1i
 )");
 }
@@ -1630,15 +1637,15 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "FallbackDoesNotFlowUp")
 build.beginBlock(entry);
 build.inst(IrCmd::FALLBACK_GETVARARGS, build.constUint(0), build.vmReg(1), build.constInt(-1));
 build.inst(IrCmd::CHECK_TAG, build.inst(IrCmd::LOAD_TAG, build.vmReg(0)), build.constTag(tnumber), fallback);
-build.inst(IrCmd::LOP_CALL, build.constUint(0), build.vmReg(0), build.constInt(-1), build.constInt(-1));
+build.inst(IrCmd::LOP_CALL, build.vmReg(0), build.constInt(-1), build.constInt(-1));
 build.inst(IrCmd::JUMP, exit);
 build.beginBlock(fallback);
-build.inst(IrCmd::LOP_CALL, build.constUint(0), build.vmReg(0), build.constInt(-1), build.constInt(-1));
+build.inst(IrCmd::LOP_CALL, build.vmReg(0), build.constInt(-1), build.constInt(-1));
 build.inst(IrCmd::JUMP, exit);
 build.beginBlock(exit);
-build.inst(IrCmd::LOP_RETURN, build.constUint(0), build.vmReg(0), build.constInt(-1));
+build.inst(IrCmd::LOP_RETURN, build.vmReg(0), build.constInt(-1));
 updateUseCounts(build.function);
 computeCfgInfo(build.function);
@@ -1651,7 +1658,7 @@ bb_0:
 FALLBACK_GETVARARGS 0u, R1, -1i
 %1 = LOAD_TAG R0
 CHECK_TAG %1, tnumber, bb_fallback_1
-LOP_CALL 0u, R0, -1i, -1i
+LOP_CALL R0, -1i, -1i
 JUMP bb_2
 bb_fallback_1:
@@ -1659,13 +1666,13 @@ bb_fallback_1:
 ; successors: bb_2
 ; in regs: R0, R1...
 ; out regs: R0...
-LOP_CALL 0u, R0, -1i, -1i
+LOP_CALL R0, -1i, -1i
 JUMP bb_2
 bb_2:
 ; predecessors: bb_0, bb_fallback_1
 ; in regs: R0...
-LOP_RETURN 0u, R0, -1i
+LOP_RETURN R0, -1i
 )");
 }
@@ -1690,7 +1697,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "VariadicSequencePeeling")
 build.inst(IrCmd::JUMP, exit);
 build.beginBlock(exit);
-build.inst(IrCmd::LOP_RETURN, build.constUint(0), build.vmReg(2), build.constInt(-1));
+build.inst(IrCmd::LOP_RETURN, build.vmReg(2), build.constInt(-1));
 updateUseCounts(build.function);
 computeCfgInfo(build.function);
@@ -1725,7 +1732,65 @@ bb_2:
 bb_3:
 ; predecessors: bb_1, bb_2
 ; in regs: R2...
-LOP_RETURN 0u, R2, -1i
+LOP_RETURN R2, -1i
+)");
+}
+
+TEST_CASE_FIXTURE(IrBuilderFixture, "BuiltinVariadicStart")
+{
+IrOp entry = build.block(IrBlockKind::Internal);
+IrOp exit = build.block(IrBlockKind::Internal);
+
+build.beginBlock(entry);
+build.inst(IrCmd::STORE_DOUBLE, build.vmReg(1), build.constDouble(1.0));
+build.inst(IrCmd::STORE_DOUBLE, build.vmReg(2), build.constDouble(2.0));
+build.inst(IrCmd::ADJUST_STACK_TO_REG, build.vmReg(2), build.constInt(1));
+build.inst(IrCmd::LOP_CALL, build.vmReg(1), build.constInt(-1), build.constInt(1));
+build.inst(IrCmd::JUMP, exit);
+
+build.beginBlock(exit);
+build.inst(IrCmd::LOP_RETURN, build.vmReg(0), build.constInt(2));
+
+updateUseCounts(build.function);
+computeCfgInfo(build.function);
+
+CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
+bb_0:
+; successors: bb_1
+; in regs: R0
+; out regs: R0, R1
+STORE_DOUBLE R1, 1
+STORE_DOUBLE R2, 2
+ADJUST_STACK_TO_REG R2, 1i
+LOP_CALL R1, -1i, 1i
+JUMP bb_1
+
+bb_1:
+; predecessors: bb_0
+; in regs: R0, R1
+LOP_RETURN R0, 2i
+)");
+}
+
+TEST_CASE_FIXTURE(IrBuilderFixture, "SetTable")
+{
+IrOp entry = build.block(IrBlockKind::Internal);
+
+build.beginBlock(entry);
+build.inst(IrCmd::SET_TABLE, build.vmReg(0), build.vmReg(1), build.constUint(1));
+build.inst(IrCmd::LOP_RETURN, build.vmReg(0), build.constInt(1));
+
+updateUseCounts(build.function);
+computeCfgInfo(build.function);
+
+CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
+bb_0:
+; in regs: R0, R1
+SET_TABLE R0, R1, 1u
+LOP_RETURN R0, 1i
 )");
 }
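The ADJUST_STACK_TO_REG instructions added above make the start of a variadic sequence explicit, so register liveness can be computed past a multret fastcall. A sketch assembled only from the fixture calls shown in these tests:

    // Close the variadic sequence produced by a multret fastcall before using it:
    IrOp results = build.inst(
        IrCmd::INVOKE_FASTCALL, build.constUint(0), build.vmReg(0), build.vmReg(1), build.vmReg(2), build.constInt(-1), build.constInt(-1));
    build.inst(IrCmd::ADJUST_STACK_TO_REG, build.vmReg(0), results); // stack now ends after R0's results
    build.inst(IrCmd::LOP_RETURN, build.vmReg(0), build.constInt(-1)); // -1i: return the whole variadic tail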
View file
@@ -338,7 +338,7 @@ type B = A
 TEST_CASE_FIXTURE(BuiltinsFixture, "do_not_clone_reexports")
 {
 ScopedFastFlag flags[] = {
-{"LuauClonePublicInterfaceLess", true},
+{"LuauClonePublicInterfaceLess2", true},
 {"LuauSubstitutionReentrant", true},
 {"LuauClassTypeVarsInSubstitution", true},
 {"LuauSubstitutionFixMissingFields", true},
@@ -376,7 +376,7 @@ return {}
 TEST_CASE_FIXTURE(BuiltinsFixture, "do_not_clone_types_of_reexported_values")
 {
 ScopedFastFlag flags[] = {
-{"LuauClonePublicInterfaceLess", true},
+{"LuauClonePublicInterfaceLess2", true},
 {"LuauSubstitutionReentrant", true},
 {"LuauClassTypeVarsInSubstitution", true},
 {"LuauSubstitutionFixMissingFields", true},
View file
@@ -802,7 +802,7 @@ TEST_CASE_FIXTURE(NormalizeFixture, "negations_of_tables")
 TEST_CASE_FIXTURE(NormalizeFixture, "normalize_blocked_types")
 {
-ScopedFastFlag sff[] {
+ScopedFastFlag sff[]{
 {"LuauNormalizeBlockedTypes", true},
 };
@@ -813,4 +813,14 @@ TEST_CASE_FIXTURE(NormalizeFixture, "normalize_blocked_types")
 CHECK_EQ(normalizer.typeFromNormal(*norm), &blocked);
 }
+
+TEST_CASE_FIXTURE(NormalizeFixture, "normalize_pending_expansion_types")
+{
+AstName name;
+Type pending{PendingExpansionType{std::nullopt, name, {}, {}}};
+const NormalizedType* norm = normalizer.normalize(&pending);
+CHECK_EQ(normalizer.typeFromNormal(*norm), &pending);
+}
+
 TEST_SUITE_END();
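Like the blocked types covered just above, a PendingExpansionType stands for alias-expansion work the solver has not performed yet, and the new test pins down that normalization passes it through untouched. A commented restatement of the assertion, using the same NormalizeFixture API as the test:

    AstName name;
    Type pending{PendingExpansionType{std::nullopt, name, {}, {}}};

    // Normalization must treat the not-yet-expanded alias application as opaque:
    // converting the normal form back yields the very same type, not never/unknown.
    const NormalizedType* norm = normalizer.normalize(&pending);
    CHECK_EQ(normalizer.typeFromNormal(*norm), &pending);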
View file
@@ -947,4 +947,71 @@ TEST_CASE_FIXTURE(Fixture, "type_alias_locations")
 CHECK(mod->scopes[3].second->typeAliasNameLocations["X"] == Location(Position(5, 17), 1));
 }
+
+/*
+ * We had a bug in DCR where substitution would improperly clone a
+ * PendingExpansionType.
+ *
+ * This cloned type did not have a matching constraint to expand it, so it was
+ * left dangling and unexpanded forever.
+ *
+ * We must also delay the dispatch of a constraint if doing so would require
+ * unifying a PendingExpansionType.
+ */
+TEST_CASE_FIXTURE(BuiltinsFixture, "dont_lose_track_of_PendingExpansionTypes_after_substitution")
+{
+fileResolver.source["game/ReactCurrentDispatcher"] = R"(
+export type BasicStateAction<S> = ((S) -> S) | S
+export type Dispatch<A> = (A) -> ()
+
+export type Dispatcher = {
+useState: <S>(initialState: (() -> S) | S) -> (S, Dispatch<BasicStateAction<S>>),
+}
+
+return {}
+)";
+
+// Note: This script path is actually as short as it can be.  Any shorter
+// and we somehow fail to surface the bug.
+fileResolver.source["game/React/React/ReactHooks"] = R"(
+local RCD = require(script.Parent.Parent.Parent.ReactCurrentDispatcher)
+
+local function resolveDispatcher(): RCD.Dispatcher
+return (nil :: any) :: RCD.Dispatcher
+end
+
+function useState<S>(
+initialState: (() -> S) | S
+): (S, RCD.Dispatch<RCD.BasicStateAction<S>>)
+local dispatcher = resolveDispatcher()
+return dispatcher.useState(initialState)
+end
+)";
+
+CheckResult result = frontend.check("game/React/React/ReactHooks");
+LUAU_REQUIRE_NO_ERRORS(result);
+}
+
+TEST_CASE_FIXTURE(Fixture, "another_thing_from_roact")
+{
+CheckResult result = check(R"(
+type Map<K, V> = { [K]: V }
+type Set<T> = { [T]: boolean }
+
+type FiberRoot = {
+pingCache: Map<Wakeable, (Set<any> | Map<Wakeable, Set<any>>)> | nil,
+}
+
+type Wakeable = {
+andThen: (self: Wakeable) -> nil | Wakeable,
+}
+
+local function attachPingListener(root: FiberRoot, wakeable: Wakeable, lanes: number)
+local pingCache: Map<Wakeable, (Set<any> | Map<Wakeable, Set<any>>)> | nil = root.pingCache
+end
+)");
+
+LUAU_REQUIRE_NO_ERRORS(result);
+}
+
 TEST_SUITE_END();
View file
@@ -9,7 +9,6 @@
 using namespace Luau;
 LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution);
-LUAU_FASTFLAG(LuauMatchReturnsOptionalString);
 TEST_SUITE_BEGIN("BuiltinTests");
@@ -1064,10 +1063,7 @@ TEST_CASE_FIXTURE(Fixture, "string_match")
 )");
 LUAU_REQUIRE_NO_ERRORS(result);
-if (FFlag::LuauMatchReturnsOptionalString)
 CHECK_EQ(toString(requireType("p")), "string?");
-else
-CHECK_EQ(toString(requireType("p")), "string");
 }
 TEST_CASE_FIXTURE(BuiltinsFixture, "gmatch_capture_types")
@@ -1078,18 +1074,9 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "gmatch_capture_types")
 LUAU_REQUIRE_NO_ERRORS(result);
-if (FFlag::LuauMatchReturnsOptionalString)
-{
 CHECK_EQ(toString(requireType("a")), "string?");
 CHECK_EQ(toString(requireType("b")), "number?");
 CHECK_EQ(toString(requireType("c")), "string?");
-}
-else
-{
-CHECK_EQ(toString(requireType("a")), "string");
-CHECK_EQ(toString(requireType("b")), "number");
-CHECK_EQ(toString(requireType("c")), "string");
-}
 }
 TEST_CASE_FIXTURE(Fixture, "gmatch_capture_types2")
@@ -1100,18 +1087,9 @@ TEST_CASE_FIXTURE(Fixture, "gmatch_capture_types2")
 LUAU_REQUIRE_NO_ERRORS(result);
-if (FFlag::LuauMatchReturnsOptionalString)
-{
 CHECK_EQ(toString(requireType("a")), "string?");
 CHECK_EQ(toString(requireType("b")), "number?");
 CHECK_EQ(toString(requireType("c")), "string?");
-}
-else
-{
-CHECK_EQ(toString(requireType("a")), "string");
-CHECK_EQ(toString(requireType("b")), "number");
-CHECK_EQ(toString(requireType("c")), "string");
-}
 }
 TEST_CASE_FIXTURE(BuiltinsFixture, "gmatch_capture_types_default_capture")
@@ -1128,10 +1106,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "gmatch_capture_types_default_capture")
 CHECK_EQ(acm->expected, 1);
 CHECK_EQ(acm->actual, 4);
-if (FFlag::LuauMatchReturnsOptionalString)
 CHECK_EQ(toString(requireType("a")), "string?");
-else
-CHECK_EQ(toString(requireType("a")), "string");
 }
 TEST_CASE_FIXTURE(BuiltinsFixture, "gmatch_capture_types_balanced_escaped_parens")
@@ -1148,18 +1123,9 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "gmatch_capture_types_balanced_escaped_parens
 CHECK_EQ(acm->expected, 3);
 CHECK_EQ(acm->actual, 4);
-if (FFlag::LuauMatchReturnsOptionalString)
-{
 CHECK_EQ(toString(requireType("a")), "string?");
 CHECK_EQ(toString(requireType("b")), "string?");
 CHECK_EQ(toString(requireType("c")), "number?");
-}
-else
-{
-CHECK_EQ(toString(requireType("a")), "string");
-CHECK_EQ(toString(requireType("b")), "string");
-CHECK_EQ(toString(requireType("c")), "number");
-}
 }
 TEST_CASE_FIXTURE(BuiltinsFixture, "gmatch_capture_types_parens_in_sets_are_ignored")
@@ -1176,16 +1142,8 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "gmatch_capture_types_parens_in_sets_are_igno
 CHECK_EQ(acm->expected, 2);
 CHECK_EQ(acm->actual, 3);
-if (FFlag::LuauMatchReturnsOptionalString)
-{
 CHECK_EQ(toString(requireType("a")), "string?");
 CHECK_EQ(toString(requireType("b")), "number?");
-}
-else
-{
-CHECK_EQ(toString(requireType("a")), "string");
-CHECK_EQ(toString(requireType("b")), "number");
-}
 }
 TEST_CASE_FIXTURE(BuiltinsFixture, "gmatch_capture_types_set_containing_lbracket")
@@ -1196,16 +1154,8 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "gmatch_capture_types_set_containing_lbracket
 LUAU_REQUIRE_NO_ERRORS(result);
-if (FFlag::LuauMatchReturnsOptionalString)
-{
 CHECK_EQ(toString(requireType("a")), "number?");
 CHECK_EQ(toString(requireType("b")), "string?");
-}
-else
-{
-CHECK_EQ(toString(requireType("a")), "number");
-CHECK_EQ(toString(requireType("b")), "string");
-}
 }
 TEST_CASE_FIXTURE(BuiltinsFixture, "gmatch_capture_types_leading_end_bracket_is_part_of_set")
@@ -1253,18 +1203,9 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "match_capture_types")
 LUAU_REQUIRE_NO_ERRORS(result);
-if (FFlag::LuauMatchReturnsOptionalString)
-{
 CHECK_EQ(toString(requireType("a")), "string?");
 CHECK_EQ(toString(requireType("b")), "number?");
 CHECK_EQ(toString(requireType("c")), "string?");
-}
-else
-{
-CHECK_EQ(toString(requireType("a")), "string");
-CHECK_EQ(toString(requireType("b")), "number");
-CHECK_EQ(toString(requireType("c")), "string");
-}
 }
 TEST_CASE_FIXTURE(BuiltinsFixture, "match_capture_types2")
@@ -1280,18 +1221,9 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "match_capture_types2")
 CHECK_EQ(toString(tm->wantedType), "number?");
 CHECK_EQ(toString(tm->givenType), "string");
-if (FFlag::LuauMatchReturnsOptionalString)
-{
 CHECK_EQ(toString(requireType("a")), "string?");
 CHECK_EQ(toString(requireType("b")), "number?");
 CHECK_EQ(toString(requireType("c")), "string?");
-}
-else
-{
-CHECK_EQ(toString(requireType("a")), "string");
-CHECK_EQ(toString(requireType("b")), "number");
-CHECK_EQ(toString(requireType("c")), "string");
-}
 }
 TEST_CASE_FIXTURE(BuiltinsFixture, "find_capture_types")
@@ -1302,18 +1234,9 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "find_capture_types")
 LUAU_REQUIRE_NO_ERRORS(result);
-if (FFlag::LuauMatchReturnsOptionalString)
-{
 CHECK_EQ(toString(requireType("a")), "string?");
 CHECK_EQ(toString(requireType("b")), "number?");
 CHECK_EQ(toString(requireType("c")), "string?");
-}
-else
-{
-CHECK_EQ(toString(requireType("a")), "string");
-CHECK_EQ(toString(requireType("b")), "number");
-CHECK_EQ(toString(requireType("c")), "string");
-}
 CHECK_EQ(toString(requireType("d")), "number?");
 CHECK_EQ(toString(requireType("e")), "number?");
 }
@@ -1331,18 +1254,9 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "find_capture_types2")
 CHECK_EQ(toString(tm->wantedType), "number?");
 CHECK_EQ(toString(tm->givenType), "string");
-if (FFlag::LuauMatchReturnsOptionalString)
-{
 CHECK_EQ(toString(requireType("a")), "string?");
 CHECK_EQ(toString(requireType("b")), "number?");
 CHECK_EQ(toString(requireType("c")), "string?");
-}
-else
-{
-CHECK_EQ(toString(requireType("a")), "string");
-CHECK_EQ(toString(requireType("b")), "number");
-CHECK_EQ(toString(requireType("c")), "string");
-}
 CHECK_EQ(toString(requireType("d")), "number?");
 CHECK_EQ(toString(requireType("e")), "number?");
 }
@@ -1360,18 +1274,9 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "find_capture_types3")
 CHECK_EQ(toString(tm->wantedType), "boolean?");
 CHECK_EQ(toString(tm->givenType), "string");
-if (FFlag::LuauMatchReturnsOptionalString)
-{
 CHECK_EQ(toString(requireType("a")), "string?");
 CHECK_EQ(toString(requireType("b")), "number?");
 CHECK_EQ(toString(requireType("c")), "string?");
-}
-else
-{
-CHECK_EQ(toString(requireType("a")), "string");
-CHECK_EQ(toString(requireType("b")), "number");
-CHECK_EQ(toString(requireType("c")), "string");
-}
 CHECK_EQ(toString(requireType("d")), "number?");
 CHECK_EQ(toString(requireType("e")), "number?");
 }
View file
@@ -1860,7 +1860,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "dont_assert_when_the_tarjan_limit_is_exceede
 ScopedFastInt sfi{"LuauTarjanChildLimit", 2};
 ScopedFastFlag sff[] = {
 {"DebugLuauDeferredConstraintResolution", true},
-{"LuauClonePublicInterfaceLess", true},
+{"LuauClonePublicInterfaceLess2", true},
 {"LuauSubstitutionReentrant", true},
 {"LuauSubstitutionFixMissingFields", true},
 };
@@ -1880,4 +1880,33 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "dont_assert_when_the_tarjan_limit_is_exceede
 CHECK(Location({0, 0}, {4, 4}) == result.errors[1].location);
 }
+
+/* We had a bug under DCR where instantiated type packs had a nullptr scope.
+ *
+ * This caused an issue with promotion.
+ */
+TEST_CASE_FIXTURE(Fixture, "instantiated_type_packs_must_have_a_non_null_scope")
+{
+CheckResult result = check(R"(
+function pcall<A..., R...>(...: A...): R...
+end
+
+type Dispatch<A> = (A) -> ()
+
+function mountReducer()
+dispatchAction()
+return nil :: any
+end
+
+function dispatchAction()
+end
+
+function useReducer(): Dispatch<any>
+local result, setResult = pcall(mountReducer)
+return setResult
+end
+)");
+
+LUAU_REQUIRE_NO_ERRORS(result);
+}
+
 TEST_SUITE_END();
View file
@@ -327,7 +327,12 @@ TEST_CASE_FIXTURE(Fixture, "table_intersection_write_sealed")
 )");
 LUAU_REQUIRE_ERROR_COUNT(1, result);
-CHECK_EQ(toString(result.errors[0]), "Cannot add property 'z' to table 'X & Y'");
+auto e = toString(result.errors[0]);
+// In DCR, because of type normalization, we print a different error message
+if (FFlag::DebugLuauDeferredConstraintResolution)
+    CHECK_EQ("Cannot add property 'z' to table '{| x: number, y: number |}'", e);
+else
+    CHECK_EQ("Cannot add property 'z' to table 'X & Y'", e);
 }
 TEST_CASE_FIXTURE(Fixture, "table_intersection_write_sealed_indirect")
View file
@@ -326,4 +326,59 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "flag_when_index_metamethod_returns_0_values"
 CHECK("nil" == toString(requireType("p")));
 }
+
+TEST_CASE_FIXTURE(BuiltinsFixture, "augmenting_an_unsealed_table_with_a_metatable")
+{
+CheckResult result = check(R"(
+local A = {number = 8}
+
+local B = setmetatable({}, A)
+
+function B:method()
+return "hello!!"
+end
+)");
+
+CHECK("{ @metatable { number: number }, { method: <a>(a) -> string } }" == toString(requireType("B"), {true}));
+}
+
+TEST_CASE_FIXTURE(BuiltinsFixture, "react_style_oo")
+{
+CheckResult result = check(R"(
+local Prototype = {}
+
+local ClassMetatable = {
+__index = Prototype
+}
+
+local BaseClass = (setmetatable({}, ClassMetatable))
+
+function BaseClass:extend(name)
+local class = {
+name=name
+}
+class.__index = class
+
+function class.ctor(props)
+return setmetatable({props=props}, class)
+end
+
+return setmetatable(class, getmetatable(self))
+end
+
+local C = BaseClass:extend('C')
+local i = C.ctor({hello='world'})
+
+local iName = i.name
+local cName = C.name
+local hello = i.props.hello
+)");
+
+LUAU_REQUIRE_NO_ERRORS(result);
+
+CHECK("string" == toString(requireType("iName")));
+CHECK("string" == toString(requireType("cName")));
+CHECK("string" == toString(requireType("hello")));
+}
+
 TEST_SUITE_END();
View file
@@ -526,7 +526,17 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "typecheck_unary_minus_error")
 LUAU_REQUIRE_ERROR_COUNT(1, result);
+
+if (FFlag::DebugLuauDeferredConstraintResolution)
+{
+// Under DCR, this currently functions as a failed overload resolution, and so we can't say
+// anything about the result type of the unary minus.
+CHECK_EQ("any", toString(requireType("a")));
+}
+else
+{
 CHECK_EQ("string", toString(requireType("a")));
+}
 TypeMismatch* tm = get<TypeMismatch>(result.errors[0]);
 REQUIRE_EQ(*tm->wantedType, *builtinTypes->booleanType);
View file
@@ -196,7 +196,6 @@ TEST_CASE_FIXTURE(Fixture, "index_on_a_union_type_with_missing_property")
 REQUIRE(bTy);
 CHECK_EQ(mup->missing[0], *bTy);
 CHECK_EQ(mup->key, "x");
 CHECK_EQ("*error-type*", toString(requireType("r")));
 }
@@ -354,7 +353,11 @@ a.x = 2
 )");
 LUAU_REQUIRE_ERROR_COUNT(1, result);
-CHECK_EQ("Value of type '({| x: number |} & {| y: number |})?' could be nil", toString(result.errors[0]));
+auto s = toString(result.errors[0]);
+if (FFlag::DebugLuauDeferredConstraintResolution)
+    CHECK_EQ("Value of type '{| x: number, y: number |}?' could be nil", s);
+else
+    CHECK_EQ("Value of type '({| x: number |} & {| y: number |})?' could be nil", s);
 }
 TEST_CASE_FIXTURE(Fixture, "optional_length_error")
View file
@@ -17,4 +17,9 @@ end
 bar()
+
+function baz()
+end
+
+baz()
 return "OK"
View file
@@ -345,5 +345,7 @@ assert(math.round("1.8") == 2)
 assert(select('#', math.floor(1.4)) == 1)
 assert(select('#', math.ceil(1.6)) == 1)
 assert(select('#', math.sqrt(9)) == 1)
+assert(select('#', math.deg(9)) == 1)
+assert(select('#', math.rad(9)) == 1)
 return('OK')
View file
@@ -59,6 +59,25 @@ static bool debuggerPresent()
 int ret = sysctl(mib, sizeof(mib) / sizeof(*mib), &info, &size, nullptr, 0);
 // debugger is attached if the P_TRACED flag is set
 return ret == 0 && (info.kp_proc.p_flag & P_TRACED) != 0;
+#elif defined(__linux__)
+FILE* st = fopen("/proc/self/status", "r");
+if (!st)
+    return false; // assume no debugger is attached.
+
+int tpid = 0;
+char buf[256];
+
+while (fgets(buf, sizeof(buf), st))
+{
+    if (strncmp(buf, "TracerPid:\t", 11) == 0)
+    {
+        tpid = atoi(buf + 11);
+        break;
+    }
+}
+
+fclose(st);
+
+return tpid != 0;
 #else
 return false; // assume no debugger is attached.
 #endif
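For context on the new Linux branch: /proc/self/status holds one "Key:\tValue" pair per line, and TracerPid is the PID of whatever ptrace-based tracer (typically a debugger) is attached, or 0 when none is. A standalone sketch of the same check so it can be tried in isolation; the field format follows proc(5):

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    static bool tracerAttached()
    {
        FILE* st = fopen("/proc/self/status", "r");
        if (!st)
            return false;

        int tpid = 0;
        char buf[256];
        while (fgets(buf, sizeof(buf), st))
        {
            // Matching line looks like "TracerPid:\t12345"; the value is 0 when untraced.
            if (strncmp(buf, "TracerPid:\t", 11) == 0)
            {
                tpid = atoi(buf + 11);
                break;
            }
        }
        fclose(st);
        return tpid != 0;
    }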
@@ -67,7 +86,7 @@ static bool debuggerPresent()
 static int testAssertionHandler(const char* expr, const char* file, int line, const char* function)
 {
 if (debuggerPresent())
-    LUAU_DEBUGBREAK();
+    return 1; // LUAU_ASSERT will trigger LUAU_DEBUGBREAK for a more convenient debugging experience
 ADD_FAIL_AT(file, line, "Assertion failed: ", std::string(expr));
 return 1;
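The replacement line implies a contract where returning nonzero from the handler asks the assert macro itself to break, so the debugger stops on the failing assertion instead of inside the handler. A hedged sketch of that shape; the macro below is an illustration of the assumed contract, not Luau's actual LUAU_ASSERT definition:

    #if defined(_MSC_VER)
    #define SKETCH_DEBUGBREAK() __debugbreak()
    #else
    #define SKETCH_DEBUGBREAK() __builtin_trap()
    #endif

    // Hypothetical assert macro: the handler's nonzero return requests a break
    // at the call site, where the faulting expression is visible in the debugger.
    #define SKETCH_ASSERT(expr) \
        do \
        { \
            if (!(expr) && testAssertionHandler(#expr, __FILE__, __LINE__, __func__)) \
                SKETCH_DEBUGBREAK(); \
        } while (false)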
View file
@@ -25,7 +25,6 @@ BuiltinTests.string_format_correctly_ordered_types
 BuiltinTests.string_format_report_all_type_errors_at_correct_positions
 BuiltinTests.string_format_tostring_specifier_type_constraint
 BuiltinTests.string_format_use_correct_argument2
-BuiltinTests.table_insert_correctly_infers_type_of_array_3_args_overload
 BuiltinTests.table_pack
 BuiltinTests.table_pack_reduce
 BuiltinTests.table_pack_variadic
@@ -49,9 +48,9 @@ GenericsTests.infer_generic_lib_function_function_argument
 GenericsTests.instantiated_function_argument_names
 GenericsTests.no_stack_overflow_from_quantifying
 GenericsTests.self_recursive_instantiated_param
-IntersectionTypes.table_intersection_write_sealed
 IntersectionTypes.table_intersection_write_sealed_indirect
 IntersectionTypes.table_write_sealed_indirect
+isSubtype.any_is_unknown_union_error
 ProvisionalTests.assign_table_with_refined_property_with_a_similar_type_is_illegal
 ProvisionalTests.bail_early_if_unification_is_too_complicated
 ProvisionalTests.do_not_ice_when_trying_to_pick_first_of_generic_type_pack
@@ -70,7 +69,6 @@ RefinementTest.typeguard_in_assert_position
 RefinementTest.x_as_any_if_x_is_instance_elseif_x_is_table
 RuntimeLimits.typescript_port_of_Result_type
 TableTests.a_free_shape_cannot_turn_into_a_scalar_if_it_is_not_compatible
-TableTests.casting_tables_with_props_into_table_with_indexer3
 TableTests.checked_prop_too_early
 TableTests.disallow_indexing_into_an_unsealed_table_with_no_indexer_in_strict_mode
 TableTests.dont_crash_when_setmetatable_does_not_produce_a_metatabletypevar
@@ -123,6 +121,7 @@ ToString.toStringNamedFunction_generic_pack
 TryUnifyTests.members_of_failed_typepack_unification_are_unified_with_errorType
 TryUnifyTests.result_of_failed_typepack_unification_is_constrained
 TryUnifyTests.typepack_unification_should_trim_free_tails
+TypeAliases.dont_lose_track_of_PendingExpansionTypes_after_substitution
 TypeAliases.generic_param_remap
 TypeAliases.mismatched_generic_type_param
 TypeAliases.mutually_recursive_types_restriction_not_ok_1
@@ -218,10 +217,5 @@ TypeSingletons.widen_the_supertype_if_it_is_free_and_subtype_has_singleton
 TypeSingletons.widening_happens_almost_everywhere
 UnionTypes.generic_function_with_optional_arg
 UnionTypes.index_on_a_union_type_with_missing_property
-UnionTypes.optional_assignment_errors
-UnionTypes.optional_call_error
-UnionTypes.optional_index_error
-UnionTypes.optional_iteration
-UnionTypes.optional_length_error
 UnionTypes.optional_union_follow
 UnionTypes.table_union_write_indirect