Merge remote-tracking branch 'upstream/master' into prototyping-function-overload-resolution

ajeffrey@roblox.com 2022-05-27 00:13:44 -05:00
commit 57759f33f6
145 changed files with 8239 additions and 4195 deletions

View file

@ -1,6 +1,7 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/TypeArena.h"
#include "Luau/TypeVar.h"
#include <unordered_map>
@ -18,7 +19,6 @@ struct CloneState
SeenTypePacks seenTypePacks;
int recursionCount = 0;
bool encounteredFreeType = false; // TODO: Remove with LuauLosslessClone.
};
TypePackId clone(TypePackId tp, TypeArena& dest, CloneState& cloneState);

View file

@ -5,6 +5,7 @@
#include "Luau/Location.h"
#include "Luau/TypeVar.h"
#include "Luau/Variant.h"
#include "Luau/TypeArena.h"
namespace Luau
{
@ -108,9 +109,6 @@ struct FunctionDoesNotTakeSelf
struct FunctionRequiresSelf
{
// TODO: Delete with LuauAnyInIsOptionalIsOptional
int requiredExtraNils = 0;
bool operator==(const FunctionRequiresSelf& rhs) const;
};

View file

@ -12,9 +12,6 @@
#include <vector>
#include <optional>
LUAU_FASTFLAG(LuauSeparateTypechecks)
LUAU_FASTFLAG(LuauDirtySourceModule)
namespace Luau
{
@ -60,17 +57,12 @@ struct SourceNode
{
bool hasDirtySourceModule() const
{
LUAU_ASSERT(FFlag::LuauDirtySourceModule);
return dirtySourceModule;
}
bool hasDirtyModule(bool forAutocomplete) const
{
if (FFlag::LuauSeparateTypechecks)
return forAutocomplete ? dirtyModuleForAutocomplete : dirtyModule;
else
return dirtyModule;
return forAutocomplete ? dirtyModuleForAutocomplete : dirtyModule;
}
ModuleName name;
@ -90,10 +82,6 @@ struct FrontendOptions
// is complete.
bool retainFullTypeGraphs = false;
// When true, we run typechecking twice, once in the regular mode, and once in strict mode
// in order to get more precise type information (e.g. for autocomplete).
bool typecheckTwice_DEPRECATED = false;
// Run typechecking only in mode required for autocomplete (strict mode in order to get more precise type information)
bool forAutocomplete = false;
};
@ -145,7 +133,6 @@ struct Frontend
*/
std::pair<SourceModule, LintResult> lintFragment(std::string_view source, std::optional<LintOptions> enabledLintWarnings = {});
CheckResult check(const SourceModule& module); // OLD. TODO KILL
LintResult lint(const SourceModule& module, std::optional<LintOptions> enabledLintWarnings = {});
bool isDirty(const ModuleName& name, bool forAutocomplete = false) const;
@ -172,7 +159,7 @@ struct Frontend
void applyBuiltinDefinitionToEnvironment(const std::string& environmentName, const std::string& definitionName);
private:
std::pair<SourceNode*, SourceModule*> getSourceNode(CheckResult& checkResult, const ModuleName& name, bool forAutocomplete_DEPRECATED);
std::pair<SourceNode*, SourceModule*> getSourceNode(CheckResult& checkResult, const ModuleName& name);
SourceModule parse(const ModuleName& name, std::string_view src, const ParseOptions& parseOptions);
bool parseGraph(std::vector<ModuleName>& buildQueue, CheckResult& checkResult, const ModuleName& root, bool forAutocomplete);

View file

@ -0,0 +1,53 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Substitution.h"
#include "Luau/TypeVar.h"
#include "Luau/Unifiable.h"
namespace Luau
{
struct TypeArena;
struct TxnLog;
// A substitution which replaces generic types in a given set by free types.
struct ReplaceGenerics : Substitution
{
ReplaceGenerics(
const TxnLog* log, TypeArena* arena, TypeLevel level, const std::vector<TypeId>& generics, const std::vector<TypePackId>& genericPacks)
: Substitution(log, arena)
, level(level)
, generics(generics)
, genericPacks(genericPacks)
{
}
TypeLevel level;
std::vector<TypeId> generics;
std::vector<TypePackId> genericPacks;
bool ignoreChildren(TypeId ty) override;
bool isDirty(TypeId ty) override;
bool isDirty(TypePackId tp) override;
TypeId clean(TypeId ty) override;
TypePackId clean(TypePackId tp) override;
};
// A substitution which replaces generic functions by monomorphic functions
struct Instantiation : Substitution
{
Instantiation(const TxnLog* log, TypeArena* arena, TypeLevel level)
: Substitution(log, arena)
, level(level)
{
}
TypeLevel level;
bool ignoreChildren(TypeId ty) override;
bool isDirty(TypeId ty) override;
bool isDirty(TypePackId tp) override;
TypeId clean(TypeId ty) override;
TypePackId clean(TypePackId tp) override;
};
} // namespace Luau

View file

@ -34,10 +34,6 @@ const LValue* baseof(const LValue& lvalue);
std::optional<LValue> tryGetLValue(const class AstExpr& expr);
// Utility function: breaks down an LValue to get at the Symbol, and reverses the vector of keys.
// TODO: remove with FFlagLuauTypecheckOptPass
std::pair<Symbol, std::vector<std::string>> getFullName(const LValue& lvalue);
// Utility function: breaks down an LValue to get at the Symbol
Symbol getBaseSymbol(const LValue& lvalue);

View file

@ -2,11 +2,10 @@
#pragma once
#include "Luau/FileResolver.h"
#include "Luau/TypePack.h"
#include "Luau/TypedAllocator.h"
#include "Luau/ParseOptions.h"
#include "Luau/Error.h"
#include "Luau/ParseResult.h"
#include "Luau/TypeArena.h"
#include <memory>
#include <vector>
@ -54,35 +53,6 @@ struct RequireCycle
std::vector<ModuleName> path; // one of the paths for a require() to go all the way back to the originating module
};
struct TypeArena
{
TypedAllocator<TypeVar> typeVars;
TypedAllocator<TypePackVar> typePacks;
void clear();
template<typename T>
TypeId addType(T tv)
{
if constexpr (std::is_same_v<T, UnionTypeVar>)
LUAU_ASSERT(tv.options.size() >= 2);
return addTV(TypeVar(std::move(tv)));
}
TypeId addTV(TypeVar&& tv);
TypeId freshType(TypeLevel level);
TypePackId addTypePack(std::initializer_list<TypeId> types);
TypePackId addTypePack(std::vector<TypeId> types);
TypePackId addTypePack(TypePack pack);
TypePackId addTypePack(TypePackVar pack);
};
void freeze(TypeArena& arena);
void unfreeze(TypeArena& arena);
struct Module
{
~Module();
@ -111,9 +81,7 @@ struct Module
// Once a module has been typechecked, we clone its public interface into a separate arena.
// This helps us to force TypeVar ownership into a DAG rather than a DCG.
// Returns true if there were any free types encountered in the public interface. This
// indicates a bug in the type checker that we want to surface.
bool clonePublicInterface(InternalErrorReporter& ice);
void clonePublicInterface(InternalErrorReporter& ice);
};
} // namespace Luau

View file

@ -1,8 +1,7 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Module.h"
#include "Luau/ModuleResolver.h"
#include "Luau/TypeArena.h"
#include "Luau/TypePack.h"
#include "Luau/TypeVar.h"
#include "Luau/DenseHash.h"

View file

@ -28,6 +28,7 @@ struct ToStringOptions
bool functionTypeArguments = false; // If true, output function type argument names when they are available
bool hideTableKind = false; // If true, all tables will be surrounded with plain '{}'
bool hideNamedFunctionTypeParameters = false; // If true, type parameters of functions will be hidden at top-level.
bool hideFunctionSelfArgument = false; // If true, `self: X` will be omitted from the function signature if the function has self
bool indent = false;
size_t maxTableLength = size_t(FInt::LuauTableTypeMaximumStringifierLength); // Only applied to TableTypeVars
size_t maxTypeLength = size_t(FInt::LuauTypeMaximumStringifierLength);

View file

@ -7,8 +7,6 @@
#include "Luau/TypeVar.h"
#include "Luau/TypePack.h"
LUAU_FASTFLAG(LuauTypecheckOptPass)
namespace Luau
{
@ -93,15 +91,6 @@ struct TxnLog
{
}
TxnLog(TxnLog* parent, std::vector<std::pair<TypeOrPackId, TypeOrPackId>>* sharedSeen)
: typeVarChanges(nullptr)
, typePackChanges(nullptr)
, parent(parent)
, sharedSeen(sharedSeen)
{
LUAU_ASSERT(!FFlag::LuauTypecheckOptPass);
}
TxnLog(const TxnLog&) = delete;
TxnLog& operator=(const TxnLog&) = delete;

View file

@ -0,0 +1,42 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/TypedAllocator.h"
#include "Luau/TypeVar.h"
#include "Luau/TypePack.h"
#include <vector>
namespace Luau
{
struct TypeArena
{
TypedAllocator<TypeVar> typeVars;
TypedAllocator<TypePackVar> typePacks;
void clear();
template<typename T>
TypeId addType(T tv)
{
if constexpr (std::is_same_v<T, UnionTypeVar>)
LUAU_ASSERT(tv.options.size() >= 2);
return addTV(TypeVar(std::move(tv)));
}
TypeId addTV(TypeVar&& tv);
TypeId freshType(TypeLevel level);
TypePackId addTypePack(std::initializer_list<TypeId> types);
TypePackId addTypePack(std::vector<TypeId> types);
TypePackId addTypePack(TypePack pack);
TypePackId addTypePack(TypePackVar pack);
};
void freeze(TypeArena& arena);
void unfreeze(TypeArena& arena);
} // namespace Luau
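
A minimal usage sketch of the relocated arena API in this new header (editorial, not part of the commit): it uses only the declarations shown above, and the function name arenaExample is illustrative.

#include "Luau/TypeArena.h"

void arenaExample()
{
    Luau::TypeArena arena;

    // Two fresh (free) types at the default level.
    Luau::TypeId a = arena.freshType(Luau::TypeLevel{});
    Luau::TypeId b = arena.freshType(Luau::TypeLevel{});

    // The initializer_list overload of addTypePack groups them into a pack.
    Luau::TypePackId pack = arena.addTypePack({a, b});
    (void)pack;

    // freeze/unfreeze toggle read-only mode (a no-op unless DebugLuauFreezeArena is set).
    Luau::freeze(arena);
    Luau::unfreeze(arena);
}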

View file

@ -34,45 +34,6 @@ const AstStat* getFallthrough(const AstStat* node);
struct UnifierOptions;
struct Unifier;
// A substitution which replaces generic types in a given set by free types.
struct ReplaceGenerics : Substitution
{
ReplaceGenerics(
const TxnLog* log, TypeArena* arena, TypeLevel level, const std::vector<TypeId>& generics, const std::vector<TypePackId>& genericPacks)
: Substitution(log, arena)
, level(level)
, generics(generics)
, genericPacks(genericPacks)
{
}
TypeLevel level;
std::vector<TypeId> generics;
std::vector<TypePackId> genericPacks;
bool ignoreChildren(TypeId ty) override;
bool isDirty(TypeId ty) override;
bool isDirty(TypePackId tp) override;
TypeId clean(TypeId ty) override;
TypePackId clean(TypePackId tp) override;
};
// A substitution which replaces generic functions by monomorphic functions
struct Instantiation : Substitution
{
Instantiation(const TxnLog* log, TypeArena* arena, TypeLevel level)
: Substitution(log, arena)
, level(level)
{
}
TypeLevel level;
bool ignoreChildren(TypeId ty) override;
bool isDirty(TypeId ty) override;
bool isDirty(TypePackId tp) override;
TypeId clean(TypeId ty) override;
TypePackId clean(TypePackId tp) override;
};
// A substitution which replaces free types by any
struct Anyification : Substitution
{
@ -187,7 +148,6 @@ struct TypeChecker
ExprResult<TypeId> checkExpr(const ScopePtr& scope, const AstExprIndexExpr& expr);
ExprResult<TypeId> checkExpr(const ScopePtr& scope, const AstExprFunction& expr, std::optional<TypeId> expectedType = std::nullopt);
ExprResult<TypeId> checkExpr(const ScopePtr& scope, const AstExprTable& expr, std::optional<TypeId> expectedType = std::nullopt);
ExprResult<TypeId> checkExpr_(const ScopePtr& scope, const AstExprTable& expr, std::optional<TypeId> expectedType = std::nullopt);
ExprResult<TypeId> checkExpr(const ScopePtr& scope, const AstExprUnary& expr);
TypeId checkRelationalOperation(
const ScopePtr& scope, const AstExprBinary& expr, TypeId lhsType, TypeId rhsType, const PredicateVec& predicates = {});
@ -395,7 +355,7 @@ private:
const AstArray<AstGenericType>& genericNames, const AstArray<AstGenericTypePack>& genericPackNames, bool useCache = false);
public:
ErrorVec resolve(const PredicateVec& predicates, const ScopePtr& scope, bool sense);
void resolve(const PredicateVec& predicates, const ScopePtr& scope, bool sense);
private:
void refineLValue(const LValue& lvalue, RefinementMap& refis, const ScopePtr& scope, TypeIdPredicate predicate);
@ -403,14 +363,14 @@ private:
std::optional<TypeId> resolveLValue(const ScopePtr& scope, const LValue& lvalue);
std::optional<TypeId> resolveLValue(const RefinementMap& refis, const ScopePtr& scope, const LValue& lvalue);
void resolve(const PredicateVec& predicates, ErrorVec& errVec, RefinementMap& refis, const ScopePtr& scope, bool sense, bool fromOr = false);
void resolve(const Predicate& predicate, ErrorVec& errVec, RefinementMap& refis, const ScopePtr& scope, bool sense, bool fromOr);
void resolve(const TruthyPredicate& truthyP, ErrorVec& errVec, RefinementMap& refis, const ScopePtr& scope, bool sense, bool fromOr);
void resolve(const AndPredicate& andP, ErrorVec& errVec, RefinementMap& refis, const ScopePtr& scope, bool sense);
void resolve(const OrPredicate& orP, ErrorVec& errVec, RefinementMap& refis, const ScopePtr& scope, bool sense);
void resolve(const IsAPredicate& isaP, ErrorVec& errVec, RefinementMap& refis, const ScopePtr& scope, bool sense);
void resolve(const TypeGuardPredicate& typeguardP, ErrorVec& errVec, RefinementMap& refis, const ScopePtr& scope, bool sense);
void resolve(const EqPredicate& eqP, ErrorVec& errVec, RefinementMap& refis, const ScopePtr& scope, bool sense);
void resolve(const PredicateVec& predicates, RefinementMap& refis, const ScopePtr& scope, bool sense, bool fromOr = false);
void resolve(const Predicate& predicate, RefinementMap& refis, const ScopePtr& scope, bool sense, bool fromOr);
void resolve(const TruthyPredicate& truthyP, RefinementMap& refis, const ScopePtr& scope, bool sense, bool fromOr);
void resolve(const AndPredicate& andP, RefinementMap& refis, const ScopePtr& scope, bool sense);
void resolve(const OrPredicate& orP, RefinementMap& refis, const ScopePtr& scope, bool sense);
void resolve(const IsAPredicate& isaP, RefinementMap& refis, const ScopePtr& scope, bool sense);
void resolve(const TypeGuardPredicate& typeguardP, RefinementMap& refis, const ScopePtr& scope, bool sense);
void resolve(const EqPredicate& eqP, RefinementMap& refis, const ScopePtr& scope, bool sense);
bool isNonstrictMode() const;
bool useConstrainedIntersections() const;

View file

@ -329,7 +329,7 @@ struct TableTypeVar
// We need to know which is which when we stringify types.
std::optional<std::string> syntheticName;
std::map<Name, Location> methodDefinitionLocations;
std::map<Name, Location> methodDefinitionLocations; // TODO: Remove with FFlag::LuauNoMethodLocations
std::vector<TypeId> instantiatedTypeParams;
std::vector<TypePackId> instantiatedTypePackParams;
ModuleName definitionModuleName;

View file

@ -5,7 +5,7 @@
#include "Luau/Location.h"
#include "Luau/TxnLog.h"
#include "Luau/TypeInfer.h"
#include "Luau/Module.h" // FIXME: For TypeArena. It merits breaking out into its own header.
#include "Luau/TypeArena.h"
#include "Luau/UnifierSharedState.h"
#include <unordered_set>
@ -32,6 +32,9 @@ struct Widen : Substitution
TypeId clean(TypeId ty) override;
TypePackId clean(TypePackId ty) override;
bool ignoreChildren(TypeId ty) override;
TypeId operator()(TypeId ty);
TypePackId operator()(TypePackId ty);
};
// TODO: Use this more widely.
@ -55,8 +58,6 @@ struct Unifier
UnifierSharedState& sharedState;
Unifier(TypeArena* types, Mode mode, const Location& location, Variance variance, UnifierSharedState& sharedState, TxnLog* parentLog = nullptr);
Unifier(TypeArena* types, Mode mode, std::vector<std::pair<TypeOrPackId, TypeOrPackId>>* sharedSeen, const Location& location, Variance variance,
UnifierSharedState& sharedState, TxnLog* parentLog = nullptr);
// Test whether the two type vars unify. Never commits the result.
ErrorVec canUnify(TypeId subTy, TypeId superTy);

View file

@ -42,7 +42,6 @@ struct UnifierSharedState
InternalErrorReporter* iceHandler;
DenseHashSet<void*> seenAny{nullptr};
DenseHashMap<TypeId, bool> skipCacheForType{nullptr};
DenseHashSet<std::pair<TypeId, TypeId>, TypeIdPairHash> cachedUnify{{nullptr, nullptr}};
DenseHashMap<std::pair<TypeId, TypeId>, TypeErrorData, TypeIdPairHash> cachedUnifyError{{nullptr, nullptr}};

View file

@ -1,9 +1,15 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include <unordered_set>
#include "Luau/DenseHash.h"
#include "Luau/TypeVar.h"
#include "Luau/RecursionCounter.h"
#include "Luau/TypePack.h"
#include "Luau/TypeVar.h"
LUAU_FASTINT(LuauVisitRecursionLimit)
LUAU_FASTFLAG(LuauNormalizeFlagIsConservative)
namespace Luau
{
@ -55,188 +61,293 @@ inline void unsee(DenseHashSet<void*>& seen, const void* tv)
// When DenseHashSet is used for 'visitTypeVarOnce', where don't forget visited elements
}
template<typename F, typename Set>
void visit(TypePackId tp, F& f, Set& seen);
} // namespace visit_detail
template<typename F, typename Set>
void visit(TypeId ty, F& f, Set& seen)
template<typename S>
struct GenericTypeVarVisitor
{
if (visit_detail::hasSeen(seen, ty))
using Set = S;
Set seen;
int recursionCounter = 0;
GenericTypeVarVisitor() = default;
explicit GenericTypeVarVisitor(Set seen)
: seen(std::move(seen))
{
f.cycle(ty);
return;
}
if (auto btv = get<BoundTypeVar>(ty))
virtual void cycle(TypeId) {}
virtual void cycle(TypePackId) {}
virtual bool visit(TypeId ty)
{
if (apply(ty, *btv, seen, f))
visit(btv->boundTo, f, seen);
return true;
}
virtual bool visit(TypeId ty, const BoundTypeVar& btv)
{
return visit(ty);
}
virtual bool visit(TypeId ty, const FreeTypeVar& ftv)
{
return visit(ty);
}
virtual bool visit(TypeId ty, const GenericTypeVar& gtv)
{
return visit(ty);
}
virtual bool visit(TypeId ty, const ErrorTypeVar& etv)
{
return visit(ty);
}
virtual bool visit(TypeId ty, const ConstrainedTypeVar& ctv)
{
return visit(ty);
}
virtual bool visit(TypeId ty, const PrimitiveTypeVar& ptv)
{
return visit(ty);
}
virtual bool visit(TypeId ty, const FunctionTypeVar& ftv)
{
return visit(ty);
}
virtual bool visit(TypeId ty, const TableTypeVar& ttv)
{
return visit(ty);
}
virtual bool visit(TypeId ty, const MetatableTypeVar& mtv)
{
return visit(ty);
}
virtual bool visit(TypeId ty, const ClassTypeVar& ctv)
{
return visit(ty);
}
virtual bool visit(TypeId ty, const AnyTypeVar& atv)
{
return visit(ty);
}
virtual bool visit(TypeId ty, const UnionTypeVar& utv)
{
return visit(ty);
}
virtual bool visit(TypeId ty, const IntersectionTypeVar& itv)
{
return visit(ty);
}
else if (auto ftv = get<FreeTypeVar>(ty))
apply(ty, *ftv, seen, f);
else if (auto gtv = get<GenericTypeVar>(ty))
apply(ty, *gtv, seen, f);
else if (auto etv = get<ErrorTypeVar>(ty))
apply(ty, *etv, seen, f);
else if (auto ctv = get<ConstrainedTypeVar>(ty))
virtual bool visit(TypePackId tp)
{
if (apply(ty, *ctv, seen, f))
return true;
}
virtual bool visit(TypePackId tp, const BoundTypePack& btp)
{
return visit(tp);
}
virtual bool visit(TypePackId tp, const FreeTypePack& ftp)
{
return visit(tp);
}
virtual bool visit(TypePackId tp, const GenericTypePack& gtp)
{
return visit(tp);
}
virtual bool visit(TypePackId tp, const Unifiable::Error& etp)
{
return visit(tp);
}
virtual bool visit(TypePackId tp, const TypePack& pack)
{
return visit(tp);
}
virtual bool visit(TypePackId tp, const VariadicTypePack& vtp)
{
return visit(tp);
}
void traverse(TypeId ty)
{
RecursionLimiter limiter{&recursionCounter, FInt::LuauVisitRecursionLimit, "TypeVarVisitor"};
if (visit_detail::hasSeen(seen, ty))
{
for (TypeId part : ctv->parts)
visit(part, f, seen);
cycle(ty);
return;
}
}
else if (auto ptv = get<PrimitiveTypeVar>(ty))
apply(ty, *ptv, seen, f);
else if (auto ftv = get<FunctionTypeVar>(ty))
{
if (apply(ty, *ftv, seen, f))
if (auto btv = get<BoundTypeVar>(ty))
{
visit(ftv->argTypes, f, seen);
visit(ftv->retType, f, seen);
if (visit(ty, *btv))
traverse(btv->boundTo);
}
}
else if (auto ttv = get<TableTypeVar>(ty))
{
// Some visitors want to see bound tables, that's why we visit the original type
if (apply(ty, *ttv, seen, f))
else if (auto ftv = get<FreeTypeVar>(ty))
visit(ty, *ftv);
else if (auto gtv = get<GenericTypeVar>(ty))
visit(ty, *gtv);
else if (auto etv = get<ErrorTypeVar>(ty))
visit(ty, *etv);
else if (auto ctv = get<ConstrainedTypeVar>(ty))
{
if (ttv->boundTo)
if (visit(ty, *ctv))
{
visit(*ttv->boundTo, f, seen);
for (TypeId part : ctv->parts)
traverse(part);
}
else
{
for (auto& [_name, prop] : ttv->props)
visit(prop.type, f, seen);
}
if (ttv->indexer)
else if (auto ptv = get<PrimitiveTypeVar>(ty))
visit(ty, *ptv);
else if (auto ftv = get<FunctionTypeVar>(ty))
{
if (visit(ty, *ftv))
{
traverse(ftv->argTypes);
traverse(ftv->retType);
}
}
else if (auto ttv = get<TableTypeVar>(ty))
{
// Some visitors want to see bound tables, that's why we traverse the original type
if (visit(ty, *ttv))
{
if (ttv->boundTo)
{
visit(ttv->indexer->indexType, f, seen);
visit(ttv->indexer->indexResultType, f, seen);
traverse(*ttv->boundTo);
}
else
{
for (auto& [_name, prop] : ttv->props)
traverse(prop.type);
if (ttv->indexer)
{
traverse(ttv->indexer->indexType);
traverse(ttv->indexer->indexResultType);
}
}
}
}
}
else if (auto mtv = get<MetatableTypeVar>(ty))
{
if (apply(ty, *mtv, seen, f))
else if (auto mtv = get<MetatableTypeVar>(ty))
{
visit(mtv->table, f, seen);
visit(mtv->metatable, f, seen);
if (visit(ty, *mtv))
{
traverse(mtv->table);
traverse(mtv->metatable);
}
}
}
else if (auto ctv = get<ClassTypeVar>(ty))
{
if (apply(ty, *ctv, seen, f))
else if (auto ctv = get<ClassTypeVar>(ty))
{
for (const auto& [name, prop] : ctv->props)
visit(prop.type, f, seen);
if (visit(ty, *ctv))
{
for (const auto& [name, prop] : ctv->props)
traverse(prop.type);
if (ctv->parent)
visit(*ctv->parent, f, seen);
if (ctv->parent)
traverse(*ctv->parent);
if (ctv->metatable)
visit(*ctv->metatable, f, seen);
if (ctv->metatable)
traverse(*ctv->metatable);
}
}
}
else if (auto atv = get<AnyTypeVar>(ty))
apply(ty, *atv, seen, f);
else if (auto atv = get<AnyTypeVar>(ty))
visit(ty, *atv);
else if (auto utv = get<UnionTypeVar>(ty))
{
if (apply(ty, *utv, seen, f))
else if (auto utv = get<UnionTypeVar>(ty))
{
for (TypeId optTy : utv->options)
visit(optTy, f, seen);
if (visit(ty, *utv))
{
for (TypeId optTy : utv->options)
traverse(optTy);
}
}
}
else if (auto itv = get<IntersectionTypeVar>(ty))
{
if (apply(ty, *itv, seen, f))
else if (auto itv = get<IntersectionTypeVar>(ty))
{
for (TypeId partTy : itv->parts)
visit(partTy, f, seen);
if (visit(ty, *itv))
{
for (TypeId partTy : itv->parts)
traverse(partTy);
}
}
visit_detail::unsee(seen, ty);
}
visit_detail::unsee(seen, ty);
}
template<typename F, typename Set>
void visit(TypePackId tp, F& f, Set& seen)
{
if (visit_detail::hasSeen(seen, tp))
void traverse(TypePackId tp)
{
f.cycle(tp);
return;
if (visit_detail::hasSeen(seen, tp))
{
cycle(tp);
return;
}
if (auto btv = get<BoundTypePack>(tp))
{
if (visit(tp, *btv))
traverse(btv->boundTo);
}
else if (auto ftv = get<Unifiable::Free>(tp))
visit(tp, *ftv);
else if (auto gtv = get<Unifiable::Generic>(tp))
visit(tp, *gtv);
else if (auto etv = get<Unifiable::Error>(tp))
visit(tp, *etv);
else if (auto pack = get<TypePack>(tp))
{
bool res = visit(tp, *pack);
if (!FFlag::LuauNormalizeFlagIsConservative || res)
{
for (TypeId ty : pack->head)
traverse(ty);
if (pack->tail)
traverse(*pack->tail);
}
}
else if (auto pack = get<VariadicTypePack>(tp))
{
bool res = visit(tp, *pack);
if (!FFlag::LuauNormalizeFlagIsConservative || res)
traverse(pack->ty);
}
else
LUAU_ASSERT(!"GenericTypeVarVisitor::traverse(TypePackId) is not exhaustive!");
visit_detail::unsee(seen, tp);
}
};
if (auto btv = get<BoundTypePack>(tp))
{
if (apply(tp, *btv, seen, f))
visit(btv->boundTo, f, seen);
}
else if (auto ftv = get<Unifiable::Free>(tp))
apply(tp, *ftv, seen, f);
else if (auto gtv = get<Unifiable::Generic>(tp))
apply(tp, *gtv, seen, f);
else if (auto etv = get<Unifiable::Error>(tp))
apply(tp, *etv, seen, f);
else if (auto pack = get<TypePack>(tp))
{
apply(tp, *pack, seen, f);
for (TypeId ty : pack->head)
visit(ty, f, seen);
if (pack->tail)
visit(*pack->tail, f, seen);
}
else if (auto pack = get<VariadicTypePack>(tp))
{
apply(tp, *pack, seen, f);
visit(pack->ty, f, seen);
}
visit_detail::unsee(seen, tp);
}
} // namespace visit_detail
template<typename TID, typename F>
void visitTypeVar(TID ty, F& f, std::unordered_set<void*>& seen)
/** Visit each type under a given type. Skips over cycles and keeps recursion depth under control.
*
* The same type may be visited multiple times if there are multiple distinct paths to it. If this is undesirable, use
* TypeVarOnceVisitor.
*/
struct TypeVarVisitor : GenericTypeVarVisitor<std::unordered_set<void*>>
{
visit_detail::visit(ty, f, seen);
}
};
template<typename TID, typename F>
void visitTypeVar(TID ty, F& f)
/// Visit each type under a given type. Each type will only be checked once even if there are multiple paths to it.
struct TypeVarOnceVisitor : GenericTypeVarVisitor<DenseHashSet<void*>>
{
std::unordered_set<void*> seen;
visit_detail::visit(ty, f, seen);
}
template<typename TID, typename F>
void visitTypeVarOnce(TID ty, F& f, DenseHashSet<void*>& seen)
{
seen.clear();
visit_detail::visit(ty, f, seen);
}
TypeVarOnceVisitor()
: GenericTypeVarVisitor{DenseHashSet<void*>{nullptr}}
{
}
};
} // namespace Luau
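
A sketch of the new subclass-based visitor API defined above, which replaces the old visitTypeVar free functions (editorial, not part of the commit): CountFreeTypes is an illustrative name, and the include assumes this is the VisitTypeVar header.

#include "Luau/VisitTypeVar.h" // assumed: the header shown above

// Counts free types reachable from a root type, visiting each type only once.
struct CountFreeTypes : Luau::TypeVarOnceVisitor
{
    int count = 0;

    bool visit(Luau::TypeId ty, const Luau::FreeTypeVar&) override
    {
        ++count;
        return true; // keep traversing (free types have no children, so this is harmless)
    }
};

int countFreeTypes(Luau::TypeId root)
{
    CountFreeTypes counter;
    counter.traverse(root);
    return counter.count;
}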

View file

@ -71,9 +71,11 @@ struct FindFullAncestry final : public AstVisitor
{
std::vector<AstNode*> nodes;
Position pos;
Position documentEnd;
explicit FindFullAncestry(Position pos)
explicit FindFullAncestry(Position pos, Position documentEnd)
: pos(pos)
, documentEnd(documentEnd)
{
}
@ -84,6 +86,16 @@ struct FindFullAncestry final : public AstVisitor
nodes.push_back(node);
return true;
}
// Edge case: If we ask for the node at the position that is the very end of the document
// return the innermost AST element that ends at that position.
if (node->location.end == documentEnd && pos >= documentEnd)
{
nodes.push_back(node);
return true;
}
return false;
}
};
@ -92,7 +104,11 @@ struct FindFullAncestry final : public AstVisitor
std::vector<AstNode*> findAstAncestryOfPosition(const SourceModule& source, Position pos)
{
FindFullAncestry finder(pos);
const Position end = source.root->location.end;
if (pos > end)
pos = end;
FindFullAncestry finder(pos, end);
source.root->visit(&finder);
return std::move(finder.nodes);
}
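
A sketch of the end-of-document behaviour introduced above (editorial, not part of the commit): querying at the end position, or past it, now returns the innermost nodes that end there. The helper name is illustrative and the declaration is assumed to live in Luau/AstQuery.h.

#include "Luau/AstQuery.h" // assumed location of the declaration

#include <vector>

std::vector<Luau::AstNode*> ancestryAtEndOfFile(const Luau::SourceModule& source)
{
    // Positions past the end are clamped by findAstAncestryOfPosition itself,
    // so asking for the document end (or anything beyond it) is well-defined.
    const Luau::Position eof = source.root->location.end;
    return Luau::findAstAncestryOfPosition(source, eof);
}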

View file

@ -14,7 +14,6 @@
#include <utility>
LUAU_FASTFLAGVARIABLE(LuauIfElseExprFixCompletionIssue, false);
LUAU_FASTFLAGVARIABLE(LuauAutocompleteSingletonTypes, false);
LUAU_FASTFLAGVARIABLE(LuauFixAutocompleteClassSecurityLevel, false);
LUAU_FASTFLAG(LuauSelfCallAutocompleteFix)
@ -1341,38 +1340,21 @@ static void autocompleteExpression(const SourceModule& sourceModule, const Modul
scope = scope->parent;
}
if (FFlag::LuauAutocompleteSingletonTypes)
{
TypeCorrectKind correctForNil = checkTypeCorrectKind(module, typeArena, node, position, typeChecker.nilType);
TypeCorrectKind correctForTrue = checkTypeCorrectKind(module, typeArena, node, position, getSingletonTypes().trueType);
TypeCorrectKind correctForFalse = checkTypeCorrectKind(module, typeArena, node, position, getSingletonTypes().falseType);
TypeCorrectKind correctForFunction =
functionIsExpectedAt(module, node, position).value_or(false) ? TypeCorrectKind::Correct : TypeCorrectKind::None;
TypeCorrectKind correctForNil = checkTypeCorrectKind(module, typeArena, node, position, typeChecker.nilType);
TypeCorrectKind correctForTrue = checkTypeCorrectKind(module, typeArena, node, position, getSingletonTypes().trueType);
TypeCorrectKind correctForFalse = checkTypeCorrectKind(module, typeArena, node, position, getSingletonTypes().falseType);
TypeCorrectKind correctForFunction =
functionIsExpectedAt(module, node, position).value_or(false) ? TypeCorrectKind::Correct : TypeCorrectKind::None;
result["if"] = {AutocompleteEntryKind::Keyword, std::nullopt, false, false};
result["true"] = {AutocompleteEntryKind::Keyword, typeChecker.booleanType, false, false, correctForTrue};
result["false"] = {AutocompleteEntryKind::Keyword, typeChecker.booleanType, false, false, correctForFalse};
result["nil"] = {AutocompleteEntryKind::Keyword, typeChecker.nilType, false, false, correctForNil};
result["not"] = {AutocompleteEntryKind::Keyword};
result["function"] = {AutocompleteEntryKind::Keyword, std::nullopt, false, false, correctForFunction};
result["if"] = {AutocompleteEntryKind::Keyword, std::nullopt, false, false};
result["true"] = {AutocompleteEntryKind::Keyword, typeChecker.booleanType, false, false, correctForTrue};
result["false"] = {AutocompleteEntryKind::Keyword, typeChecker.booleanType, false, false, correctForFalse};
result["nil"] = {AutocompleteEntryKind::Keyword, typeChecker.nilType, false, false, correctForNil};
result["not"] = {AutocompleteEntryKind::Keyword};
result["function"] = {AutocompleteEntryKind::Keyword, std::nullopt, false, false, correctForFunction};
if (auto ty = findExpectedTypeAt(module, node, position))
autocompleteStringSingleton(*ty, true, result);
}
else
{
TypeCorrectKind correctForNil = checkTypeCorrectKind(module, typeArena, node, position, typeChecker.nilType);
TypeCorrectKind correctForBoolean = checkTypeCorrectKind(module, typeArena, node, position, typeChecker.booleanType);
TypeCorrectKind correctForFunction =
functionIsExpectedAt(module, node, position).value_or(false) ? TypeCorrectKind::Correct : TypeCorrectKind::None;
result["if"] = {AutocompleteEntryKind::Keyword, std::nullopt, false, false};
result["true"] = {AutocompleteEntryKind::Keyword, typeChecker.booleanType, false, false, correctForBoolean};
result["false"] = {AutocompleteEntryKind::Keyword, typeChecker.booleanType, false, false, correctForBoolean};
result["nil"] = {AutocompleteEntryKind::Keyword, typeChecker.nilType, false, false, correctForNil};
result["not"] = {AutocompleteEntryKind::Keyword};
result["function"] = {AutocompleteEntryKind::Keyword, std::nullopt, false, false, correctForFunction};
}
if (auto ty = findExpectedTypeAt(module, node, position))
autocompleteStringSingleton(*ty, true, result);
}
}
@ -1680,11 +1662,8 @@ static AutocompleteResult autocomplete(const SourceModule& sourceModule, const M
{
AutocompleteEntryMap result;
if (FFlag::LuauAutocompleteSingletonTypes)
{
if (auto it = module->astExpectedTypes.find(node->asExpr()))
autocompleteStringSingleton(*it, false, result);
}
if (auto it = module->astExpectedTypes.find(node->asExpr()))
autocompleteStringSingleton(*it, false, result);
if (finder.ancestry.size() >= 2)
{
@ -1693,8 +1672,7 @@ static AutocompleteResult autocomplete(const SourceModule& sourceModule, const M
if (auto it = module->astTypes.find(idxExpr->expr))
autocompleteProps(*module, typeArena, follow(*it), PropIndexType::Point, finder.ancestry, result);
}
else if (auto binExpr = finder.ancestry.at(finder.ancestry.size() - 2)->as<AstExprBinary>();
binExpr && FFlag::LuauAutocompleteSingletonTypes)
else if (auto binExpr = finder.ancestry.at(finder.ancestry.size() - 2)->as<AstExprBinary>())
{
if (binExpr->op == AstExprBinary::CompareEq || binExpr->op == AstExprBinary::CompareNe)
{
@ -1722,31 +1700,18 @@ static AutocompleteResult autocomplete(const SourceModule& sourceModule, const M
AutocompleteResult autocomplete(Frontend& frontend, const ModuleName& moduleName, Position position, StringCompletionCallback callback)
{
if (FFlag::LuauSeparateTypechecks)
{
// FIXME: We can improve performance here by parsing without checking.
// The old type graph is probably fine. (famous last words!)
FrontendOptions opts;
opts.forAutocomplete = true;
frontend.check(moduleName, opts);
}
else
{
// FIXME: We can improve performance here by parsing without checking.
// The old type graph is probably fine. (famous last words!)
// FIXME: We don't need to typecheck for script analysis here, just for autocomplete.
frontend.check(moduleName);
}
// FIXME: We can improve performance here by parsing without checking.
// The old type graph is probably fine. (famous last words!)
FrontendOptions opts;
opts.forAutocomplete = true;
frontend.check(moduleName, opts);
const SourceModule* sourceModule = frontend.getSourceModule(moduleName);
if (!sourceModule)
return {};
TypeChecker& typeChecker =
(frontend.options.typecheckTwice_DEPRECATED || FFlag::LuauSeparateTypechecks ? frontend.typeCheckerForAutocomplete : frontend.typeChecker);
ModulePtr module =
(frontend.options.typecheckTwice_DEPRECATED || FFlag::LuauSeparateTypechecks ? frontend.moduleResolverForAutocomplete.getModule(moduleName)
: frontend.moduleResolver.getModule(moduleName));
TypeChecker& typeChecker = frontend.typeCheckerForAutocomplete;
ModulePtr module = frontend.moduleResolverForAutocomplete.getModule(moduleName);
if (!module)
return {};
@ -1774,9 +1739,7 @@ OwningAutocompleteResult autocompleteSource(Frontend& frontend, std::string_view
sourceModule->mode = Mode::Strict;
sourceModule->commentLocations = std::move(result.commentLocations);
TypeChecker& typeChecker =
(frontend.options.typecheckTwice_DEPRECATED || FFlag::LuauSeparateTypechecks ? frontend.typeCheckerForAutocomplete : frontend.typeChecker);
TypeChecker& typeChecker = frontend.typeCheckerForAutocomplete;
ModulePtr module = typeChecker.check(*sourceModule, Mode::Strict);
OwningAutocompleteResult autocompleteResult = {

View file

@ -8,7 +8,6 @@
#include <algorithm>
LUAU_FASTFLAG(LuauAssertStripsFalsyTypes)
LUAU_FASTFLAGVARIABLE(LuauSetMetaTableArgsCheck, false)
/** FIXME: Many of these type definitions are not quite completely accurate.
@ -408,41 +407,29 @@ static std::optional<ExprResult<TypePackId>> magicFunctionAssert(
{
auto [paramPack, predicates] = exprResult;
if (FFlag::LuauAssertStripsFalsyTypes)
TypeArena& arena = typechecker.currentModule->internalTypes;
auto [head, tail] = flatten(paramPack);
if (head.empty() && tail)
{
TypeArena& arena = typechecker.currentModule->internalTypes;
auto [head, tail] = flatten(paramPack);
if (head.empty() && tail)
{
std::optional<TypeId> fst = first(*tail);
if (!fst)
return ExprResult<TypePackId>{paramPack};
head.push_back(*fst);
}
typechecker.reportErrors(typechecker.resolve(predicates, scope, true));
if (head.size() > 0)
{
std::optional<TypeId> newhead = typechecker.pickTypesFromSense(head[0], true);
if (!newhead)
head = {typechecker.nilType};
else
head[0] = *newhead;
}
return ExprResult<TypePackId>{arena.addTypePack(TypePack{std::move(head), tail})};
}
else
{
if (expr.args.size < 1)
std::optional<TypeId> fst = first(*tail);
if (!fst)
return ExprResult<TypePackId>{paramPack};
typechecker.reportErrors(typechecker.resolve(predicates, scope, true));
return ExprResult<TypePackId>{paramPack};
head.push_back(*fst);
}
typechecker.resolve(predicates, scope, true);
if (head.size() > 0)
{
std::optional<TypeId> newhead = typechecker.pickTypesFromSense(head[0], true);
if (!newhead)
head = {typechecker.nilType};
else
head[0] = *newhead;
}
return ExprResult<TypePackId>{arena.addTypePack(TypePack{std::move(head), tail})};
}
static std::optional<ExprResult<TypePackId>> magicFunctionPack(

View file

@ -1,7 +1,6 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/Clone.h"
#include "Luau/Module.h"
#include "Luau/RecursionCounter.h"
#include "Luau/TypePack.h"
#include "Luau/Unifiable.h"
@ -9,8 +8,7 @@
LUAU_FASTFLAG(DebugLuauCopyBeforeNormalizing)
LUAU_FASTINTVARIABLE(LuauTypeCloneRecursionLimit, 300)
LUAU_FASTFLAG(LuauTypecheckOptPass)
LUAU_FASTFLAGVARIABLE(LuauLosslessClone, false)
LUAU_FASTFLAG(LuauNoMethodLocations)
namespace Luau
{
@ -88,20 +86,8 @@ struct TypePackCloner
void operator()(const Unifiable::Free& t)
{
if (FFlag::LuauLosslessClone)
{
defaultClone(t);
}
else
{
cloneState.encounteredFreeType = true;
TypePackId err = getSingletonTypes().errorRecoveryTypePack(getSingletonTypes().anyTypePack);
TypePackId cloned = dest.addTypePack(*err);
seenTypePacks[typePackId] = cloned;
}
defaultClone(t);
}
void operator()(const Unifiable::Generic& t)
{
defaultClone(t);
@ -151,18 +137,7 @@ void TypeCloner::defaultClone(const T& t)
void TypeCloner::operator()(const Unifiable::Free& t)
{
if (FFlag::LuauLosslessClone)
{
defaultClone(t);
}
else
{
cloneState.encounteredFreeType = true;
TypeId err = getSingletonTypes().errorRecoveryType(getSingletonTypes().anyType);
TypeId cloned = dest.addType(*err);
seenTypes[typeId] = cloned;
}
defaultClone(t);
}
void TypeCloner::operator()(const Unifiable::Generic& t)
@ -190,9 +165,6 @@ void TypeCloner::operator()(const PrimitiveTypeVar& t)
void TypeCloner::operator()(const ConstrainedTypeVar& t)
{
if (!FFlag::LuauLosslessClone)
cloneState.encounteredFreeType = true;
TypeId res = dest.addType(ConstrainedTypeVar{t.level});
ConstrainedTypeVar* ctv = getMutable<ConstrainedTypeVar>(res);
LUAU_ASSERT(ctv);
@ -229,9 +201,7 @@ void TypeCloner::operator()(const FunctionTypeVar& t)
ftv->argTypes = clone(t.argTypes, dest, cloneState);
ftv->argNames = t.argNames;
ftv->retType = clone(t.retType, dest, cloneState);
if (FFlag::LuauTypecheckOptPass)
ftv->hasNoGenerics = t.hasNoGenerics;
ftv->hasNoGenerics = t.hasNoGenerics;
}
void TypeCloner::operator()(const TableTypeVar& t)
@ -269,15 +239,9 @@ void TypeCloner::operator()(const TableTypeVar& t)
for (TypePackId& arg : ttv->instantiatedTypePackParams)
arg = clone(arg, dest, cloneState);
if (!FFlag::LuauLosslessClone && ttv->state == TableState::Free)
{
cloneState.encounteredFreeType = true;
ttv->state = TableState::Sealed;
}
ttv->definitionModuleName = t.definitionModuleName;
ttv->methodDefinitionLocations = t.methodDefinitionLocations;
if (!FFlag::LuauNoMethodLocations)
ttv->methodDefinitionLocations = t.methodDefinitionLocations;
ttv->tags = t.tags;
}

View file

@ -143,7 +143,7 @@ declare coroutine: {
create: <A..., R...>((A...) -> R...) -> thread,
resume: <A..., R...>(thread, A...) -> (boolean, R...),
running: () -> thread,
status: (thread) -> string,
status: (thread) -> "dead" | "running" | "normal" | "suspended",
-- FIXME: This technically returns a function, but we can't represent this yet.
wrap: <A..., R...>((A...) -> R...) -> any,
yield: <A..., R...>(A...) -> R...,

View file

@ -2,7 +2,6 @@
#include "Luau/Error.h"
#include "Luau/Clone.h"
#include "Luau/Module.h"
#include "Luau/StringUtils.h"
#include "Luau/ToString.h"
@ -178,15 +177,7 @@ struct ErrorConverter
std::string operator()(const Luau::FunctionRequiresSelf& e) const
{
if (e.requiredExtraNils)
{
const char* plural = e.requiredExtraNils == 1 ? "" : "s";
return format("This function was declared to accept self, but you did not pass enough arguments. Use a colon instead of a dot or "
"pass %i extra nil%s to suppress this warning",
e.requiredExtraNils, plural);
}
else
return "This function must be called with self. Did you mean to use a colon instead of a dot?";
return "This function must be called with self. Did you mean to use a colon instead of a dot?";
}
std::string operator()(const Luau::OccursCheckFailed&) const
@ -539,7 +530,7 @@ bool FunctionDoesNotTakeSelf::operator==(const FunctionDoesNotTakeSelf&) const
bool FunctionRequiresSelf::operator==(const FunctionRequiresSelf& e) const
{
return requiredExtraNils == e.requiredExtraNils;
return true;
}
bool OccursCheckFailed::operator==(const OccursCheckFailed&) const

View file

@ -18,13 +18,10 @@
LUAU_FASTINT(LuauTypeInferIterationLimit)
LUAU_FASTINT(LuauTarjanChildLimit)
LUAU_FASTFLAG(LuauCyclicModuleTypeSurface)
LUAU_FASTFLAG(LuauInferInNoCheckMode)
LUAU_FASTFLAGVARIABLE(LuauKnowsTheDataModel3, false)
LUAU_FASTFLAGVARIABLE(LuauSeparateTypechecks, false)
LUAU_FASTFLAGVARIABLE(LuauAutocompleteDynamicLimits, false)
LUAU_FASTFLAGVARIABLE(LuauDirtySourceModule, false)
LUAU_FASTINTVARIABLE(LuauAutocompleteCheckTimeoutMs, 0)
LUAU_FASTINTVARIABLE(LuauAutocompleteCheckTimeoutMs, 100)
namespace Luau
{
@ -362,32 +359,21 @@ CheckResult Frontend::check(const ModuleName& name, std::optional<FrontendOption
if (it != sourceNodes.end() && !it->second.hasDirtyModule(frontendOptions.forAutocomplete))
{
// No recheck required.
if (FFlag::LuauSeparateTypechecks)
if (frontendOptions.forAutocomplete)
{
if (frontendOptions.forAutocomplete)
{
auto it2 = moduleResolverForAutocomplete.modules.find(name);
if (it2 == moduleResolverForAutocomplete.modules.end() || it2->second == nullptr)
throw std::runtime_error("Frontend::modules does not have data for " + name);
}
else
{
auto it2 = moduleResolver.modules.find(name);
if (it2 == moduleResolver.modules.end() || it2->second == nullptr)
throw std::runtime_error("Frontend::modules does not have data for " + name);
}
return CheckResult{accumulateErrors(
sourceNodes, frontendOptions.forAutocomplete ? moduleResolverForAutocomplete.modules : moduleResolver.modules, name)};
auto it2 = moduleResolverForAutocomplete.modules.find(name);
if (it2 == moduleResolverForAutocomplete.modules.end() || it2->second == nullptr)
throw std::runtime_error("Frontend::modules does not have data for " + name);
}
else
{
auto it2 = moduleResolver.modules.find(name);
if (it2 == moduleResolver.modules.end() || it2->second == nullptr)
throw std::runtime_error("Frontend::modules does not have data for " + name);
return CheckResult{accumulateErrors(sourceNodes, moduleResolver.modules, name)};
}
return CheckResult{
accumulateErrors(sourceNodes, frontendOptions.forAutocomplete ? moduleResolverForAutocomplete.modules : moduleResolver.modules, name)};
}
std::vector<ModuleName> buildQueue;
@ -429,12 +415,11 @@ CheckResult Frontend::check(const ModuleName& name, std::optional<FrontendOption
// This is used by the type checker to replace the resulting type of cyclic modules with any
sourceModule.cyclic = !requireCycles.empty();
if (FFlag::LuauSeparateTypechecks && frontendOptions.forAutocomplete)
if (frontendOptions.forAutocomplete)
{
// The autocomplete typecheck is always in strict mode with DM awareness
// to provide better type information for IDE features
if (FFlag::LuauCyclicModuleTypeSurface)
typeCheckerForAutocomplete.requireCycles = requireCycles;
typeCheckerForAutocomplete.requireCycles = requireCycles;
if (autocompleteTimeLimit != 0.0)
typeCheckerForAutocomplete.finishTime = TimeTrace::getClock() + autocompleteTimeLimit;
@ -483,23 +468,10 @@ CheckResult Frontend::check(const ModuleName& name, std::optional<FrontendOption
continue;
}
if (FFlag::LuauCyclicModuleTypeSurface)
typeChecker.requireCycles = requireCycles;
typeChecker.requireCycles = requireCycles;
ModulePtr module = typeChecker.check(sourceModule, mode, environmentScope);
// If we're typechecking twice, we do so.
// The second typecheck is always in strict mode with DM awareness
// to provide better typen information for IDE features.
if (!FFlag::LuauSeparateTypechecks && frontendOptions.typecheckTwice_DEPRECATED)
{
if (FFlag::LuauCyclicModuleTypeSurface)
typeCheckerForAutocomplete.requireCycles = requireCycles;
ModulePtr moduleForAutocomplete = typeCheckerForAutocomplete.check(sourceModule, Mode::Strict);
moduleResolverForAutocomplete.modules[moduleName] = moduleForAutocomplete;
}
stats.timeCheck += getTimestamp() - timestamp;
stats.filesStrict += mode == Mode::Strict;
stats.filesNonstrict += mode == Mode::Nonstrict;
@ -567,7 +539,7 @@ bool Frontend::parseGraph(std::vector<ModuleName>& buildQueue, CheckResult& chec
bool cyclic = false;
{
auto [sourceNode, _] = getSourceNode(checkResult, root, forAutocomplete);
auto [sourceNode, _] = getSourceNode(checkResult, root);
if (sourceNode)
stack.push_back(sourceNode);
}
@ -631,7 +603,7 @@ bool Frontend::parseGraph(std::vector<ModuleName>& buildQueue, CheckResult& chec
}
}
auto [sourceNode, _] = getSourceNode(checkResult, dep, forAutocomplete);
auto [sourceNode, _] = getSourceNode(checkResult, dep);
if (sourceNode)
{
stack.push_back(sourceNode);
@ -675,7 +647,7 @@ LintResult Frontend::lint(const ModuleName& name, std::optional<Luau::LintOption
LUAU_TIMETRACE_ARGUMENT("name", name.c_str());
CheckResult checkResult;
auto [_sourceNode, sourceModule] = getSourceNode(checkResult, name, false);
auto [_sourceNode, sourceModule] = getSourceNode(checkResult, name);
if (!sourceModule)
return LintResult{}; // FIXME: We really should do something a bit more obvious when a file is too broken to lint.
@ -706,30 +678,6 @@ std::pair<SourceModule, LintResult> Frontend::lintFragment(std::string_view sour
return {std::move(sourceModule), classifyLints(warnings, config)};
}
CheckResult Frontend::check(const SourceModule& module)
{
LUAU_TIMETRACE_SCOPE("Frontend::check", "Frontend");
LUAU_TIMETRACE_ARGUMENT("module", module.name.c_str());
const Config& config = configResolver->getConfig(module.name);
Mode mode = module.mode.value_or(config.mode);
double timestamp = getTimestamp();
ModulePtr checkedModule = typeChecker.check(module, mode);
stats.timeCheck += getTimestamp() - timestamp;
stats.filesStrict += mode == Mode::Strict;
stats.filesNonstrict += mode == Mode::Nonstrict;
if (checkedModule == nullptr)
throw std::runtime_error("Frontend::check produced a nullptr module for module " + module.name);
moduleResolver.modules[module.name] = checkedModule;
return CheckResult{checkedModule->errors};
}
LintResult Frontend::lint(const SourceModule& module, std::optional<Luau::LintOptions> enabledLintWarnings)
{
LUAU_TIMETRACE_SCOPE("Frontend::lint", "Frontend");
@ -780,16 +728,8 @@ bool Frontend::isDirty(const ModuleName& name, bool forAutocomplete) const
*/
void Frontend::markDirty(const ModuleName& name, std::vector<ModuleName>* markedDirty)
{
if (FFlag::LuauSeparateTypechecks)
{
if (!moduleResolver.modules.count(name) && !moduleResolverForAutocomplete.modules.count(name))
return;
}
else
{
if (!moduleResolver.modules.count(name))
return;
}
if (!moduleResolver.modules.count(name) && !moduleResolverForAutocomplete.modules.count(name))
return;
std::unordered_map<ModuleName, std::vector<ModuleName>> reverseDeps;
for (const auto& module : sourceNodes)
@ -811,32 +751,12 @@ void Frontend::markDirty(const ModuleName& name, std::vector<ModuleName>* marked
if (markedDirty)
markedDirty->push_back(next);
if (FFlag::LuauDirtySourceModule)
{
LUAU_ASSERT(FFlag::LuauSeparateTypechecks);
if (sourceNode.dirtySourceModule && sourceNode.dirtyModule && sourceNode.dirtyModuleForAutocomplete)
continue;
if (sourceNode.dirtySourceModule && sourceNode.dirtyModule && sourceNode.dirtyModuleForAutocomplete)
continue;
sourceNode.dirtySourceModule = true;
sourceNode.dirtyModule = true;
sourceNode.dirtyModuleForAutocomplete = true;
}
else if (FFlag::LuauSeparateTypechecks)
{
if (sourceNode.dirtyModule && sourceNode.dirtyModuleForAutocomplete)
continue;
sourceNode.dirtyModule = true;
sourceNode.dirtyModuleForAutocomplete = true;
}
else
{
if (sourceNode.dirtyModule)
continue;
sourceNode.dirtyModule = true;
}
sourceNode.dirtySourceModule = true;
sourceNode.dirtyModule = true;
sourceNode.dirtyModuleForAutocomplete = true;
if (0 == reverseDeps.count(name))
continue;
@ -863,14 +783,13 @@ const SourceModule* Frontend::getSourceModule(const ModuleName& moduleName) cons
}
// Read AST into sourceModules if necessary. Trace require()s. Report parse errors.
std::pair<SourceNode*, SourceModule*> Frontend::getSourceNode(CheckResult& checkResult, const ModuleName& name, bool forAutocomplete_DEPRECATED)
std::pair<SourceNode*, SourceModule*> Frontend::getSourceNode(CheckResult& checkResult, const ModuleName& name)
{
LUAU_TIMETRACE_SCOPE("Frontend::getSourceNode", "Frontend");
LUAU_TIMETRACE_ARGUMENT("name", name.c_str());
auto it = sourceNodes.find(name);
if (it != sourceNodes.end() &&
(FFlag::LuauDirtySourceModule ? !it->second.hasDirtySourceModule() : !it->second.hasDirtyModule(forAutocomplete_DEPRECATED)))
if (it != sourceNodes.end() && !it->second.hasDirtySourceModule())
{
auto moduleIt = sourceModules.find(name);
if (moduleIt != sourceModules.end())
@ -913,21 +832,12 @@ std::pair<SourceNode*, SourceModule*> Frontend::getSourceNode(CheckResult& check
sourceNode.name = name;
sourceNode.requires.clear();
sourceNode.requireLocations.clear();
sourceNode.dirtySourceModule = false;
if (FFlag::LuauDirtySourceModule)
sourceNode.dirtySourceModule = false;
if (FFlag::LuauSeparateTypechecks)
{
if (it == sourceNodes.end())
{
sourceNode.dirtyModule = true;
sourceNode.dirtyModuleForAutocomplete = true;
}
}
else
if (it == sourceNodes.end())
{
sourceNode.dirtyModule = true;
sourceNode.dirtyModuleForAutocomplete = true;
}
for (const auto& [moduleName, location] : requireTrace.requires)

View file

@ -0,0 +1,128 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/Common.h"
#include "Luau/Instantiation.h"
#include "Luau/TxnLog.h"
#include "Luau/TypeArena.h"
LUAU_FASTFLAG(LuauNoMethodLocations)
namespace Luau
{
bool Instantiation::isDirty(TypeId ty)
{
if (const FunctionTypeVar* ftv = log->getMutable<FunctionTypeVar>(ty))
{
if (ftv->hasNoGenerics)
return false;
return true;
}
else
{
return false;
}
}
bool Instantiation::isDirty(TypePackId tp)
{
return false;
}
bool Instantiation::ignoreChildren(TypeId ty)
{
if (log->getMutable<FunctionTypeVar>(ty))
return true;
else
return false;
}
TypeId Instantiation::clean(TypeId ty)
{
const FunctionTypeVar* ftv = log->getMutable<FunctionTypeVar>(ty);
LUAU_ASSERT(ftv);
FunctionTypeVar clone = FunctionTypeVar{level, ftv->argTypes, ftv->retType, ftv->definition, ftv->hasSelf};
clone.magicFunction = ftv->magicFunction;
clone.tags = ftv->tags;
clone.argNames = ftv->argNames;
TypeId result = addType(std::move(clone));
// Annoyingly, we have to do this even if there are no generics,
// to replace any generic tables.
ReplaceGenerics replaceGenerics{log, arena, level, ftv->generics, ftv->genericPacks};
// TODO: What to do if this returns nullopt?
// We don't have access to the error-reporting machinery
result = replaceGenerics.substitute(result).value_or(result);
asMutable(result)->documentationSymbol = ty->documentationSymbol;
return result;
}
TypePackId Instantiation::clean(TypePackId tp)
{
LUAU_ASSERT(false);
return tp;
}
bool ReplaceGenerics::ignoreChildren(TypeId ty)
{
if (const FunctionTypeVar* ftv = log->getMutable<FunctionTypeVar>(ty))
{
if (ftv->hasNoGenerics)
return true;
// We aren't recursing in the case of a generic function which
// binds the same generics. This can happen if, for example, there's recursive types.
// If T = <a>(a,T)->T then instantiating T should produce T' = (X,T)->T not T' = (X,T')->T'.
// It's OK to use vector equality here, since we always generate fresh generics
// whenever we quantify, so the vectors overlap if and only if they are equal.
return (!generics.empty() || !genericPacks.empty()) && (ftv->generics == generics) && (ftv->genericPacks == genericPacks);
}
else
{
return false;
}
}
bool ReplaceGenerics::isDirty(TypeId ty)
{
if (const TableTypeVar* ttv = log->getMutable<TableTypeVar>(ty))
return ttv->state == TableState::Generic;
else if (log->getMutable<GenericTypeVar>(ty))
return std::find(generics.begin(), generics.end(), ty) != generics.end();
else
return false;
}
bool ReplaceGenerics::isDirty(TypePackId tp)
{
if (log->getMutable<GenericTypePack>(tp))
return std::find(genericPacks.begin(), genericPacks.end(), tp) != genericPacks.end();
else
return false;
}
TypeId ReplaceGenerics::clean(TypeId ty)
{
LUAU_ASSERT(isDirty(ty));
if (const TableTypeVar* ttv = log->getMutable<TableTypeVar>(ty))
{
TableTypeVar clone = TableTypeVar{ttv->props, ttv->indexer, level, TableState::Free};
if (!FFlag::LuauNoMethodLocations)
clone.methodDefinitionLocations = ttv->methodDefinitionLocations;
clone.definitionModuleName = ttv->definitionModuleName;
return addType(std::move(clone));
}
else
return addType(FreeTypeVar{level});
}
TypePackId ReplaceGenerics::clean(TypePackId tp)
{
LUAU_ASSERT(isDirty(tp));
return addTypePack(TypePackVar(FreeTypePack{level}));
}
} // namespace Luau
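
A sketch of how the Instantiation substitution implemented above might be driven by a caller (editorial, not part of the commit): log, arena, level, and polyFn are assumed to be supplied by the type checker, and the value_or fallback mirrors the pattern used in Instantiation::clean.

#include "Luau/Instantiation.h"

// Replaces the generics of a polymorphic function type with fresh free types
// at the given level, returning the original type if substitution bails out.
Luau::TypeId instantiateSketch(const Luau::TxnLog* log, Luau::TypeArena* arena, Luau::TypeLevel level, Luau::TypeId polyFn)
{
    Luau::Instantiation instantiation{log, arena, level};
    return instantiation.substitute(polyFn).value_or(polyFn);
}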

View file

@ -48,7 +48,7 @@ static void errorToString(std::ostream& stream, const T& err)
else if constexpr (std::is_same_v<T, FunctionDoesNotTakeSelf>)
stream << "FunctionDoesNotTakeSelf { }";
else if constexpr (std::is_same_v<T, FunctionRequiresSelf>)
stream << "FunctionRequiresSelf { extraNils " << err.requiredExtraNils << " }";
stream << "FunctionRequiresSelf { }";
else if constexpr (std::is_same_v<T, OccursCheckFailed>)
stream << "OccursCheckFailed { }";
else if constexpr (std::is_same_v<T, UnknownRequire>)

View file

@ -5,8 +5,6 @@
#include <vector>
LUAU_FASTFLAG(LuauTypecheckOptPass)
namespace Luau
{
@ -79,27 +77,8 @@ std::optional<LValue> tryGetLValue(const AstExpr& node)
return std::nullopt;
}
std::pair<Symbol, std::vector<std::string>> getFullName(const LValue& lvalue)
{
LUAU_ASSERT(!FFlag::LuauTypecheckOptPass);
const LValue* current = &lvalue;
std::vector<std::string> keys;
while (auto field = get<Field>(*current))
{
keys.push_back(field->key);
current = baseof(*current);
}
const Symbol* symbol = get<Symbol>(*current);
LUAU_ASSERT(symbol);
return {*symbol, std::vector<std::string>(keys.rbegin(), keys.rend())};
}
Symbol getBaseSymbol(const LValue& lvalue)
{
LUAU_ASSERT(FFlag::LuauTypecheckOptPass);
const LValue* current = &lvalue;
while (auto field = get<Field>(*current))
current = baseof(*current);

View file

@ -2653,12 +2653,12 @@ static void lintComments(LintContext& context, const std::vector<HotComment>& ho
}
else
{
std::string::size_type space = hc.content.find_first_of(" \t");
size_t space = hc.content.find_first_of(" \t");
std::string_view first = std::string_view(hc.content).substr(0, space);
if (first == "nolint")
{
std::string::size_type notspace = hc.content.find_first_not_of(" \t", space);
size_t notspace = hc.content.find_first_not_of(" \t", space);
if (space == std::string::npos || notspace == std::string::npos)
{
@ -2827,7 +2827,7 @@ uint64_t LintWarning::parseMask(const std::vector<HotComment>& hotcomments)
if (hc.content.compare(0, 6, "nolint") != 0)
continue;
std::string::size_type name = hc.content.find_first_not_of(" \t", 6);
size_t name = hc.content.find_first_not_of(" \t", 6);
// --!nolint disables everything
if (name == std::string::npos)

View file

@ -13,9 +13,8 @@
#include <algorithm>
LUAU_FASTFLAGVARIABLE(DebugLuauFreezeArena, false)
LUAU_FASTFLAG(LuauLowerBoundsCalculation)
LUAU_FASTFLAG(LuauLosslessClone)
LUAU_FASTFLAG(LuauLowerBoundsCalculation);
LUAU_FASTFLAG(LuauNormalizeFlagIsConservative);
namespace Luau
{
@ -55,89 +54,35 @@ bool isWithinComment(const SourceModule& sourceModule, Position pos)
return contains(pos, *iter);
}
void TypeArena::clear()
struct ForceNormal : TypeVarOnceVisitor
{
typeVars.clear();
typePacks.clear();
}
const TypeArena* typeArena = nullptr;
TypeId TypeArena::addTV(TypeVar&& tv)
{
TypeId allocated = typeVars.allocate(std::move(tv));
ForceNormal(const TypeArena* typeArena)
: typeArena(typeArena)
{
}
asMutable(allocated)->owningArena = this;
bool visit(TypeId ty) override
{
if (ty->owningArena != typeArena)
return false;
return allocated;
}
asMutable(ty)->normal = true;
return true;
}
TypeId TypeArena::freshType(TypeLevel level)
{
TypeId allocated = typeVars.allocate(FreeTypeVar{level});
bool visit(TypeId ty, const FreeTypeVar& ftv) override
{
visit(ty);
return true;
}
asMutable(allocated)->owningArena = this;
return allocated;
}
TypePackId TypeArena::addTypePack(std::initializer_list<TypeId> types)
{
TypePackId allocated = typePacks.allocate(TypePack{std::move(types)});
asMutable(allocated)->owningArena = this;
return allocated;
}
TypePackId TypeArena::addTypePack(std::vector<TypeId> types)
{
TypePackId allocated = typePacks.allocate(TypePack{std::move(types)});
asMutable(allocated)->owningArena = this;
return allocated;
}
TypePackId TypeArena::addTypePack(TypePack tp)
{
TypePackId allocated = typePacks.allocate(std::move(tp));
asMutable(allocated)->owningArena = this;
return allocated;
}
TypePackId TypeArena::addTypePack(TypePackVar tp)
{
TypePackId allocated = typePacks.allocate(std::move(tp));
asMutable(allocated)->owningArena = this;
return allocated;
}
ScopePtr Module::getModuleScope() const
{
LUAU_ASSERT(!scopes.empty());
return scopes.front().second;
}
void freeze(TypeArena& arena)
{
if (!FFlag::DebugLuauFreezeArena)
return;
arena.typeVars.freeze();
arena.typePacks.freeze();
}
void unfreeze(TypeArena& arena)
{
if (!FFlag::DebugLuauFreezeArena)
return;
arena.typeVars.unfreeze();
arena.typePacks.unfreeze();
}
bool visit(TypePackId tp, const FreeTypePack& ftp) override
{
return true;
}
};
Module::~Module()
{
@ -145,7 +90,7 @@ Module::~Module()
unfreeze(internalTypes);
}
bool Module::clonePublicInterface(InternalErrorReporter& ice)
void Module::clonePublicInterface(InternalErrorReporter& ice)
{
LUAU_ASSERT(interfaceTypes.typeVars.empty());
LUAU_ASSERT(interfaceTypes.typePacks.empty());
@ -165,11 +110,22 @@ bool Module::clonePublicInterface(InternalErrorReporter& ice)
normalize(*moduleScope->varargPack, interfaceTypes, ice);
}
ForceNormal forceNormal{&interfaceTypes};
for (auto& [name, tf] : moduleScope->exportedTypeBindings)
{
tf = clone(tf, interfaceTypes, cloneState);
if (FFlag::LuauLowerBoundsCalculation)
{
normalize(tf.type, interfaceTypes, ice);
if (FFlag::LuauNormalizeFlagIsConservative)
{
// We're about to freeze the memory. We know that the flag is conservative by design. Cyclic tables
// won't be marked normal. If the types aren't normal by now, they never will be.
forceNormal.traverse(tf.type);
}
}
}
for (TypeId ty : moduleScope->returnType)
@ -191,11 +147,12 @@ bool Module::clonePublicInterface(InternalErrorReporter& ice)
freeze(internalTypes);
freeze(interfaceTypes);
}
if (FFlag::LuauLosslessClone)
return false; // TODO: make function return void.
else
return cloneState.encounteredFreeType;
ScopePtr Module::getModuleScope() const
{
LUAU_ASSERT(!scopes.empty());
return scopes.front().second;
}
} // namespace Luau

View file

@ -14,7 +14,8 @@ LUAU_FASTFLAGVARIABLE(DebugLuauCopyBeforeNormalizing, false)
// This could theoretically be 2000 on amd64, but x86 requires this.
LUAU_FASTINTVARIABLE(LuauNormalizeIterationLimit, 1200);
LUAU_FASTFLAGVARIABLE(LuauNormalizeCombineTableFix, false);
LUAU_FASTFLAGVARIABLE(LuauNormalizeCombineIntersectionFix, false);
LUAU_FASTFLAGVARIABLE(LuauNormalizeFlagIsConservative, false);
LUAU_FASTFLAGVARIABLE(LuauNormalizeCombineEqFix, false);
namespace Luau
{
@ -261,8 +262,13 @@ static bool areNormal_(const T& t, const std::unordered_set<void*>& seen, Intern
if (count >= FInt::LuauNormalizeIterationLimit)
ice.ice("Luau::areNormal hit iteration limit");
// The follow is here because a bound type may not be flagged normal even though the type it is bound to is.
return ty->normal || follow(ty)->normal || seen.find(asMutable(ty)) != seen.end();
if (FFlag::LuauNormalizeFlagIsConservative)
return ty->normal;
else
{
// The follow is here because a bound type may not be flagged normal even though the type it is bound to is.
return ty->normal || follow(ty)->normal || seen.find(asMutable(ty)) != seen.end();
}
};
return std::all_of(begin(t), end(t), isNormal);
@ -304,38 +310,29 @@ static bool areNormal(TypePackId tp, const std::unordered_set<void*>& seen, Inte
++iterationLimit; \
} while (false)
struct Normalize
struct Normalize final : TypeVarVisitor
{
using TypeVarVisitor::Set;
Normalize(TypeArena& arena, InternalErrorReporter& ice)
: arena(arena)
, ice(ice)
{
}
TypeArena& arena;
InternalErrorReporter& ice;
// Debug data. Types being normalized are invalidated but trying to see what's going on is painful.
// To actually see the original type, read it by using the pointer of the type being normalized.
// e.g. in lldb, `e dump(originalTys[ty])`.
SeenTypes originalTys;
SeenTypePacks originalTps;
int iterationLimit = 0;
bool limitExceeded = false;
template<typename T>
bool operator()(TypePackId, const T&)
{
return true;
}
template<typename TID>
void cycle(TID)
{
}
bool operator()(TypeId ty, const FreeTypeVar&)
bool visit(TypeId ty, const FreeTypeVar&) override
{
LUAU_ASSERT(!ty->normal);
return false;
}
bool operator()(TypeId ty, const BoundTypeVar& btv, std::unordered_set<void*>& seen)
bool visit(TypeId ty, const BoundTypeVar& btv) override
{
// A type could be considered normal when it is in the stack, but we will eventually find out it is not normal as normalization progresses.
// So we need to avoid eagerly saying that this bound type is normal if the thing it is bound to is in the stack.
@ -349,13 +346,13 @@ struct Normalize
return !ty->normal;
}
bool operator()(TypeId ty, const PrimitiveTypeVar&)
bool visit(TypeId ty, const PrimitiveTypeVar&) override
{
LUAU_ASSERT(ty->normal);
return false;
}
bool operator()(TypeId ty, const GenericTypeVar&)
bool visit(TypeId ty, const GenericTypeVar&) override
{
if (!ty->normal)
asMutable(ty)->normal = true;
@ -363,14 +360,14 @@ struct Normalize
return false;
}
bool operator()(TypeId ty, const ErrorTypeVar&)
bool visit(TypeId ty, const ErrorTypeVar&) override
{
if (!ty->normal)
asMutable(ty)->normal = true;
return false;
}
bool operator()(TypeId ty, const ConstrainedTypeVar& ctvRef, std::unordered_set<void*>& seen)
bool visit(TypeId ty, const ConstrainedTypeVar& ctvRef) override
{
CHECK_ITERATION_LIMIT(false);
@ -380,7 +377,7 @@ struct Normalize
// We might transmute, so it's not safe to rely on the builtin traversal logic of visitTypeVar
for (TypeId part : parts)
visit_detail::visit(part, *this, seen);
traverse(part);
std::vector<TypeId> newParts = normalizeUnion(parts);
@ -396,22 +393,22 @@ struct Normalize
return false;
}
bool operator()(TypeId ty, const FunctionTypeVar& ftv, std::unordered_set<void*>& seen)
bool visit(TypeId ty, const FunctionTypeVar& ftv) override
{
CHECK_ITERATION_LIMIT(false);
if (ty->normal)
return false;
visit_detail::visit(ftv.argTypes, *this, seen);
visit_detail::visit(ftv.retType, *this, seen);
traverse(ftv.argTypes);
traverse(ftv.retType);
asMutable(ty)->normal = areNormal(ftv.argTypes, seen, ice) && areNormal(ftv.retType, seen, ice);
return false;
}
bool operator()(TypeId ty, const TableTypeVar& ttv, std::unordered_set<void*>& seen)
bool visit(TypeId ty, const TableTypeVar& ttv) override
{
CHECK_ITERATION_LIMIT(false);
@ -429,22 +426,22 @@ struct Normalize
if (ttv.boundTo)
{
visit_detail::visit(*ttv.boundTo, *this, seen);
traverse(*ttv.boundTo);
asMutable(ty)->normal = (*ttv.boundTo)->normal;
return false;
}
for (const auto& [_name, prop] : ttv.props)
{
visit_detail::visit(prop.type, *this, seen);
traverse(prop.type);
checkNormal(prop.type);
}
if (ttv.indexer)
{
visit_detail::visit(ttv.indexer->indexType, *this, seen);
traverse(ttv.indexer->indexType);
checkNormal(ttv.indexer->indexType);
visit_detail::visit(ttv.indexer->indexResultType, *this, seen);
traverse(ttv.indexer->indexResultType);
checkNormal(ttv.indexer->indexResultType);
}
@ -453,35 +450,35 @@ struct Normalize
return false;
}
bool operator()(TypeId ty, const MetatableTypeVar& mtv, std::unordered_set<void*>& seen)
bool visit(TypeId ty, const MetatableTypeVar& mtv) override
{
CHECK_ITERATION_LIMIT(false);
if (ty->normal)
return false;
visit_detail::visit(mtv.table, *this, seen);
visit_detail::visit(mtv.metatable, *this, seen);
traverse(mtv.table);
traverse(mtv.metatable);
asMutable(ty)->normal = mtv.table->normal && mtv.metatable->normal;
return false;
}
bool operator()(TypeId ty, const ClassTypeVar& ctv)
bool visit(TypeId ty, const ClassTypeVar& ctv) override
{
if (!ty->normal)
asMutable(ty)->normal = true;
return false;
}
bool operator()(TypeId ty, const AnyTypeVar&)
bool visit(TypeId ty, const AnyTypeVar&) override
{
LUAU_ASSERT(ty->normal);
return false;
}
bool operator()(TypeId ty, const UnionTypeVar& utvRef, std::unordered_set<void*>& seen)
bool visit(TypeId ty, const UnionTypeVar& utvRef) override
{
CHECK_ITERATION_LIMIT(false);
@ -493,7 +490,7 @@ struct Normalize
// We might transmute, so it's not safe to rely on the builtin traversal logic of visitTypeVar
for (TypeId option : options)
visit_detail::visit(option, *this, seen);
traverse(option);
std::vector<TypeId> newOptions = normalizeUnion(options);
@ -511,7 +508,7 @@ struct Normalize
return false;
}
bool operator()(TypeId ty, const IntersectionTypeVar& itvRef, std::unordered_set<void*>& seen)
bool visit(TypeId ty, const IntersectionTypeVar& itvRef) override
{
CHECK_ITERATION_LIMIT(false);
@ -523,7 +520,7 @@ struct Normalize
std::vector<TypeId> oldParts = std::move(itv->parts);
for (TypeId part : oldParts)
visit_detail::visit(part, *this, seen);
traverse(part);
std::vector<TypeId> tables;
for (TypeId part : oldParts)
@ -570,11 +567,6 @@ struct Normalize
return false;
}
bool operator()(TypeId ty, const LazyTypeVar&)
{
return false;
}
std::vector<TypeId> normalizeUnion(const std::vector<TypeId>& options)
{
if (options.size() == 1)
@ -638,7 +630,7 @@ struct Normalize
TypeId theTable = result->parts.back();
if (!get<TableTypeVar>(FFlag::LuauNormalizeCombineIntersectionFix ? follow(theTable) : theTable))
if (!get<TableTypeVar>(follow(theTable)))
{
result->parts.push_back(arena.addType(TableTypeVar{TableState::Sealed, TypeLevel{}}));
theTable = result->parts.back();
@ -738,6 +730,9 @@ struct Normalize
*/
TypeId combine(Replacer& replacer, TypeId a, TypeId b)
{
if (FFlag::LuauNormalizeCombineEqFix)
b = follow(b);
if (FFlag::LuauNormalizeCombineTableFix && a == b)
return a;
@ -756,7 +751,7 @@ struct Normalize
}
else if (auto ttv = getMutable<TableTypeVar>(a))
{
if (FFlag::LuauNormalizeCombineTableFix && !get<TableTypeVar>(follow(b)))
if (FFlag::LuauNormalizeCombineTableFix && !get<TableTypeVar>(FFlag::LuauNormalizeCombineEqFix ? b : follow(b)))
return arena.addType(IntersectionTypeVar{{a, b}});
combineIntoTable(replacer, ttv, b);
return a;
@ -778,9 +773,8 @@ std::pair<TypeId, bool> normalize(TypeId ty, TypeArena& arena, InternalErrorRepo
if (FFlag::DebugLuauCopyBeforeNormalizing)
(void)clone(ty, arena, state);
Normalize n{arena, ice, std::move(state.seenTypes), std::move(state.seenTypePacks)};
std::unordered_set<void*> seen;
visitTypeVar(ty, n, seen);
Normalize n{arena, ice};
n.traverse(ty);
return {ty, !n.limitExceeded};
}
@ -803,9 +797,8 @@ std::pair<TypePackId, bool> normalize(TypePackId tp, TypeArena& arena, InternalE
if (FFlag::DebugLuauCopyBeforeNormalizing)
(void)clone(tp, arena, state);
Normalize n{arena, ice, std::move(state.seenTypes), std::move(state.seenTypePacks)};
std::unordered_set<void*> seen;
visitTypeVar(tp, n, seen);
Normalize n{arena, ice};
n.traverse(tp);
return {tp, !n.limitExceeded};
}
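Both entry points keep the same contract after this change: they mutate the type graph in place and report whether traversal ran to completion. A usage sketch, with the arena and error-reporter wiring assumed:

// Sketch: `completed` is false only when FInt::LuauNormalizeIterationLimit was hit,
// in which case the `normal` flags are best-effort rather than exhaustive.
auto [normalizedTy, completed] = normalize(someType, arena, iceReporter);
if (!completed)
{
    // Treat the result conservatively; parts of the type may still be unnormalized.
}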

View file

@ -4,12 +4,12 @@
#include "Luau/VisitTypeVar.h"
LUAU_FASTFLAG(LuauTypecheckOptPass)
LUAU_FASTFLAG(LuauAlwaysQuantify)
namespace Luau
{
struct Quantifier
struct Quantifier final : TypeVarOnceVisitor
{
TypeLevel level;
std::vector<TypeId> generics;
@ -17,26 +17,17 @@ struct Quantifier
bool seenGenericType = false;
bool seenMutableType = false;
Quantifier(TypeLevel level)
explicit Quantifier(TypeLevel level)
: level(level)
{
}
void cycle(TypeId) {}
void cycle(TypePackId) {}
void cycle(TypeId) override {}
void cycle(TypePackId) override {}
bool operator()(TypeId ty, const FreeTypeVar& ftv)
{
if (FFlag::LuauTypecheckOptPass)
seenMutableType = true;
if (!level.subsumes(ftv.level))
return false;
*asMutable(ty) = GenericTypeVar{level};
generics.push_back(ty);
return false;
return visit(ty, ftv);
}
template<typename T>
@ -56,24 +47,45 @@ struct Quantifier
return true;
}
bool operator()(TypeId ty, const TableTypeVar&)
bool operator()(TypeId ty, const TableTypeVar& ttv)
{
return visit(ty, ttv);
}
bool operator()(TypePackId tp, const FreeTypePack& ftp)
{
return visit(tp, ftp);
}
bool visit(TypeId ty, const FreeTypeVar& ftv) override
{
seenMutableType = true;
if (!level.subsumes(ftv.level))
return false;
*asMutable(ty) = GenericTypeVar{level};
generics.push_back(ty);
return false;
}
bool visit(TypeId ty, const TableTypeVar&) override
{
LUAU_ASSERT(getMutable<TableTypeVar>(ty));
TableTypeVar& ttv = *getMutable<TableTypeVar>(ty);
if (FFlag::LuauTypecheckOptPass)
{
if (ttv.state == TableState::Generic)
seenGenericType = true;
if (ttv.state == TableState::Generic)
seenGenericType = true;
if (ttv.state == TableState::Free)
seenMutableType = true;
}
if (ttv.state == TableState::Free)
seenMutableType = true;
if (ttv.state == TableState::Sealed || ttv.state == TableState::Generic)
return false;
if (!level.subsumes(ttv.level))
{
if (FFlag::LuauTypecheckOptPass && ttv.state == TableState::Unsealed)
if (ttv.state == TableState::Unsealed)
seenMutableType = true;
return false;
}
@ -81,9 +93,7 @@ struct Quantifier
if (ttv.state == TableState::Free)
{
ttv.state = TableState::Generic;
if (FFlag::LuauTypecheckOptPass)
seenGenericType = true;
seenGenericType = true;
}
else if (ttv.state == TableState::Unsealed)
ttv.state = TableState::Sealed;
@ -93,10 +103,9 @@ struct Quantifier
return true;
}
bool operator()(TypePackId tp, const FreeTypePack& ftp)
bool visit(TypePackId tp, const FreeTypePack& ftp) override
{
if (FFlag::LuauTypecheckOptPass)
seenMutableType = true;
seenMutableType = true;
if (!level.subsumes(ftp.level))
return false;
@ -110,15 +119,22 @@ struct Quantifier
void quantify(TypeId ty, TypeLevel level)
{
Quantifier q{level};
DenseHashSet<void*> seen{nullptr};
visitTypeVarOnce(ty, q, seen);
q.traverse(ty);
FunctionTypeVar* ftv = getMutable<FunctionTypeVar>(ty);
LUAU_ASSERT(ftv);
ftv->generics = q.generics;
ftv->genericPacks = q.genericPacks;
if (FFlag::LuauAlwaysQuantify)
{
ftv->generics.insert(ftv->generics.end(), q.generics.begin(), q.generics.end());
ftv->genericPacks.insert(ftv->genericPacks.end(), q.genericPacks.begin(), q.genericPacks.end());
}
else
{
ftv->generics = q.generics;
ftv->genericPacks = q.genericPacks;
}
if (FFlag::LuauTypecheckOptPass && ftv->generics.empty() && ftv->genericPacks.empty() && !q.seenMutableType && !q.seenGenericType)
if (ftv->generics.empty() && ftv->genericPacks.empty() && !q.seenMutableType && !q.seenGenericType)
ftv->hasNoGenerics = true;
}
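Under LuauAlwaysQuantify the collected generics are appended rather than assigned, and hasNoGenerics records a cheap fast-path fact about the quantified function. A sketch of the expected call pattern; funTy and scopeLevel are placeholders:

// Sketch: quantify() asserts that `funTy` holds a FunctionTypeVar, so the get() below
// cannot fail after a successful call.
quantify(funTy, scopeLevel);

if (const FunctionTypeVar* ftv = get<FunctionTypeVar>(funTy); ftv && ftv->hasNoGenerics)
{
    // No generics, no generic packs, and no mutable or generic types were seen during
    // traversal, so later substitution over this function can presumably be skipped.
}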

View file

@ -8,36 +8,28 @@
#include <stdexcept>
LUAU_FASTFLAG(LuauLowerBoundsCalculation)
LUAU_FASTINTVARIABLE(LuauTarjanChildLimit, 1000)
LUAU_FASTFLAG(LuauTypecheckOptPass)
LUAU_FASTFLAGVARIABLE(LuauSubstituteFollowNewTypes, false)
LUAU_FASTFLAGVARIABLE(LuauSubstituteFollowPossibleMutations, false)
LUAU_FASTINTVARIABLE(LuauTarjanChildLimit, 10000)
LUAU_FASTFLAG(LuauNoMethodLocations)
namespace Luau
{
void Tarjan::visitChildren(TypeId ty, int index)
{
if (FFlag::LuauTypecheckOptPass)
LUAU_ASSERT(ty == log->follow(ty));
else
ty = log->follow(ty);
LUAU_ASSERT(ty == log->follow(ty));
if (ignoreChildren(ty))
return;
if (FFlag::LuauTypecheckOptPass)
{
if (auto pty = log->pending(ty))
ty = &pty->pending;
}
if (auto pty = log->pending(ty))
ty = &pty->pending;
if (const FunctionTypeVar* ftv = FFlag::LuauTypecheckOptPass ? get<FunctionTypeVar>(ty) : log->getMutable<FunctionTypeVar>(ty))
if (const FunctionTypeVar* ftv = get<FunctionTypeVar>(ty))
{
visitChild(ftv->argTypes);
visitChild(ftv->retType);
}
else if (const TableTypeVar* ttv = FFlag::LuauTypecheckOptPass ? get<TableTypeVar>(ty) : log->getMutable<TableTypeVar>(ty))
else if (const TableTypeVar* ttv = get<TableTypeVar>(ty))
{
LUAU_ASSERT(!ttv->boundTo);
for (const auto& [name, prop] : ttv->props)
@ -54,17 +46,17 @@ void Tarjan::visitChildren(TypeId ty, int index)
for (TypePackId itp : ttv->instantiatedTypePackParams)
visitChild(itp);
}
else if (const MetatableTypeVar* mtv = FFlag::LuauTypecheckOptPass ? get<MetatableTypeVar>(ty) : log->getMutable<MetatableTypeVar>(ty))
else if (const MetatableTypeVar* mtv = get<MetatableTypeVar>(ty))
{
visitChild(mtv->table);
visitChild(mtv->metatable);
}
else if (const UnionTypeVar* utv = FFlag::LuauTypecheckOptPass ? get<UnionTypeVar>(ty) : log->getMutable<UnionTypeVar>(ty))
else if (const UnionTypeVar* utv = get<UnionTypeVar>(ty))
{
for (TypeId opt : utv->options)
visitChild(opt);
}
else if (const IntersectionTypeVar* itv = FFlag::LuauTypecheckOptPass ? get<IntersectionTypeVar>(ty) : log->getMutable<IntersectionTypeVar>(ty))
else if (const IntersectionTypeVar* itv = get<IntersectionTypeVar>(ty))
{
for (TypeId part : itv->parts)
visitChild(part);
@ -78,28 +70,22 @@ void Tarjan::visitChildren(TypeId ty, int index)
void Tarjan::visitChildren(TypePackId tp, int index)
{
if (FFlag::LuauTypecheckOptPass)
LUAU_ASSERT(tp == log->follow(tp));
else
tp = log->follow(tp);
LUAU_ASSERT(tp == log->follow(tp));
if (ignoreChildren(tp))
return;
if (FFlag::LuauTypecheckOptPass)
{
if (auto ptp = log->pending(tp))
tp = &ptp->pending;
}
if (auto ptp = log->pending(tp))
tp = &ptp->pending;
if (const TypePack* tpp = FFlag::LuauTypecheckOptPass ? get<TypePack>(tp) : log->getMutable<TypePack>(tp))
if (const TypePack* tpp = get<TypePack>(tp))
{
for (TypeId tv : tpp->head)
visitChild(tv);
if (tpp->tail)
visitChild(*tpp->tail);
}
else if (const VariadicTypePack* vtp = FFlag::LuauTypecheckOptPass ? get<VariadicTypePack>(tp) : log->getMutable<VariadicTypePack>(tp))
else if (const VariadicTypePack* vtp = get<VariadicTypePack>(tp))
{
visitChild(vtp->ty);
}
@ -107,10 +93,7 @@ void Tarjan::visitChildren(TypePackId tp, int index)
std::pair<int, bool> Tarjan::indexify(TypeId ty)
{
if (FFlag::LuauTypecheckOptPass && !FFlag::LuauSubstituteFollowPossibleMutations)
LUAU_ASSERT(ty == log->follow(ty));
else
ty = log->follow(ty);
ty = log->follow(ty);
bool fresh = !typeToIndex.contains(ty);
int& index = typeToIndex[ty];
@ -128,10 +111,7 @@ std::pair<int, bool> Tarjan::indexify(TypeId ty)
std::pair<int, bool> Tarjan::indexify(TypePackId tp)
{
if (FFlag::LuauTypecheckOptPass && !FFlag::LuauSubstituteFollowPossibleMutations)
LUAU_ASSERT(tp == log->follow(tp));
else
tp = log->follow(tp);
tp = log->follow(tp);
bool fresh = !packToIndex.contains(tp);
int& index = packToIndex[tp];
@ -149,8 +129,7 @@ std::pair<int, bool> Tarjan::indexify(TypePackId tp)
void Tarjan::visitChild(TypeId ty)
{
if (!FFlag::LuauSubstituteFollowPossibleMutations)
ty = log->follow(ty);
ty = log->follow(ty);
edgesTy.push_back(ty);
edgesTp.push_back(nullptr);
@ -158,8 +137,7 @@ void Tarjan::visitChild(TypeId ty)
void Tarjan::visitChild(TypePackId tp)
{
if (!FFlag::LuauSubstituteFollowPossibleMutations)
tp = log->follow(tp);
tp = log->follow(tp);
edgesTy.push_back(nullptr);
edgesTp.push_back(tp);
@ -388,13 +366,10 @@ TypeId Substitution::clone(TypeId ty)
TypeId result = ty;
if (FFlag::LuauTypecheckOptPass)
{
if (auto pty = log->pending(ty))
ty = &pty->pending;
}
if (auto pty = log->pending(ty))
ty = &pty->pending;
if (const FunctionTypeVar* ftv = FFlag::LuauTypecheckOptPass ? get<FunctionTypeVar>(ty) : log->getMutable<FunctionTypeVar>(ty))
if (const FunctionTypeVar* ftv = get<FunctionTypeVar>(ty))
{
FunctionTypeVar clone = FunctionTypeVar{ftv->level, ftv->argTypes, ftv->retType, ftv->definition, ftv->hasSelf};
clone.generics = ftv->generics;
@ -404,11 +379,12 @@ TypeId Substitution::clone(TypeId ty)
clone.argNames = ftv->argNames;
result = addType(std::move(clone));
}
else if (const TableTypeVar* ttv = FFlag::LuauTypecheckOptPass ? get<TableTypeVar>(ty) : log->getMutable<TableTypeVar>(ty))
else if (const TableTypeVar* ttv = get<TableTypeVar>(ty))
{
LUAU_ASSERT(!ttv->boundTo);
TableTypeVar clone = TableTypeVar{ttv->props, ttv->indexer, ttv->level, ttv->state};
clone.methodDefinitionLocations = ttv->methodDefinitionLocations;
if (!FFlag::LuauNoMethodLocations)
clone.methodDefinitionLocations = ttv->methodDefinitionLocations;
clone.definitionModuleName = ttv->definitionModuleName;
clone.name = ttv->name;
clone.syntheticName = ttv->syntheticName;
@ -417,19 +393,19 @@ TypeId Substitution::clone(TypeId ty)
clone.tags = ttv->tags;
result = addType(std::move(clone));
}
else if (const MetatableTypeVar* mtv = FFlag::LuauTypecheckOptPass ? get<MetatableTypeVar>(ty) : log->getMutable<MetatableTypeVar>(ty))
else if (const MetatableTypeVar* mtv = get<MetatableTypeVar>(ty))
{
MetatableTypeVar clone = MetatableTypeVar{mtv->table, mtv->metatable};
clone.syntheticName = mtv->syntheticName;
result = addType(std::move(clone));
}
else if (const UnionTypeVar* utv = FFlag::LuauTypecheckOptPass ? get<UnionTypeVar>(ty) : log->getMutable<UnionTypeVar>(ty))
else if (const UnionTypeVar* utv = get<UnionTypeVar>(ty))
{
UnionTypeVar clone;
clone.options = utv->options;
result = addType(std::move(clone));
}
else if (const IntersectionTypeVar* itv = FFlag::LuauTypecheckOptPass ? get<IntersectionTypeVar>(ty) : log->getMutable<IntersectionTypeVar>(ty))
else if (const IntersectionTypeVar* itv = get<IntersectionTypeVar>(ty))
{
IntersectionTypeVar clone;
clone.parts = itv->parts;
@ -449,20 +425,17 @@ TypePackId Substitution::clone(TypePackId tp)
{
tp = log->follow(tp);
if (FFlag::LuauTypecheckOptPass)
{
if (auto ptp = log->pending(tp))
tp = &ptp->pending;
}
if (auto ptp = log->pending(tp))
tp = &ptp->pending;
if (const TypePack* tpp = FFlag::LuauTypecheckOptPass ? get<TypePack>(tp) : log->getMutable<TypePack>(tp))
if (const TypePack* tpp = get<TypePack>(tp))
{
TypePack clone;
clone.head = tpp->head;
clone.tail = tpp->tail;
return addTypePack(std::move(clone));
}
else if (const VariadicTypePack* vtp = FFlag::LuauTypecheckOptPass ? get<VariadicTypePack>(tp) : log->getMutable<VariadicTypePack>(tp))
else if (const VariadicTypePack* vtp = get<VariadicTypePack>(tp))
{
VariadicTypePack clone;
clone.ty = vtp->ty;
@ -474,28 +447,22 @@ TypePackId Substitution::clone(TypePackId tp)
void Substitution::foundDirty(TypeId ty)
{
if (FFlag::LuauTypecheckOptPass && !FFlag::LuauSubstituteFollowPossibleMutations)
LUAU_ASSERT(ty == log->follow(ty));
else
ty = log->follow(ty);
ty = log->follow(ty);
if (isDirty(ty))
newTypes[ty] = FFlag::LuauSubstituteFollowNewTypes ? follow(clean(ty)) : clean(ty);
newTypes[ty] = follow(clean(ty));
else
newTypes[ty] = FFlag::LuauSubstituteFollowNewTypes ? follow(clone(ty)) : clone(ty);
newTypes[ty] = follow(clone(ty));
}
void Substitution::foundDirty(TypePackId tp)
{
if (FFlag::LuauTypecheckOptPass && !FFlag::LuauSubstituteFollowPossibleMutations)
LUAU_ASSERT(tp == log->follow(tp));
else
tp = log->follow(tp);
tp = log->follow(tp);
if (isDirty(tp))
newPacks[tp] = FFlag::LuauSubstituteFollowNewTypes ? follow(clean(tp)) : clean(tp);
newPacks[tp] = follow(clean(tp));
else
newPacks[tp] = FFlag::LuauSubstituteFollowNewTypes ? follow(clone(tp)) : clone(tp);
newPacks[tp] = follow(clone(tp));
}
TypeId Substitution::replace(TypeId ty)
@ -523,10 +490,7 @@ void Substitution::replaceChildren(TypeId ty)
if (BoundTypeVar* btv = log->getMutable<BoundTypeVar>(ty); FFlag::LuauLowerBoundsCalculation && btv)
btv->boundTo = replace(btv->boundTo);
if (FFlag::LuauTypecheckOptPass)
LUAU_ASSERT(ty == log->follow(ty));
else
ty = log->follow(ty);
LUAU_ASSERT(ty == log->follow(ty));
if (ignoreChildren(ty))
return;
@ -577,10 +541,7 @@ void Substitution::replaceChildren(TypeId ty)
void Substitution::replaceChildren(TypePackId tp)
{
if (FFlag::LuauTypecheckOptPass)
LUAU_ASSERT(tp == log->follow(tp));
else
tp = log->follow(tp);
LUAU_ASSERT(tp == log->follow(tp));
if (ignoreChildren(tp))
return;

View file

@ -26,7 +26,7 @@ namespace Luau
namespace
{
struct FindCyclicTypes
struct FindCyclicTypes final : TypeVarVisitor
{
FindCyclicTypes() = default;
FindCyclicTypes(const FindCyclicTypes&) = delete;
@ -38,25 +38,27 @@ struct FindCyclicTypes
std::set<TypeId> cycles;
std::set<TypePackId> cycleTPs;
void cycle(TypeId ty)
void cycle(TypeId ty) override
{
cycles.insert(ty);
}
void cycle(TypePackId tp)
void cycle(TypePackId tp) override
{
cycleTPs.insert(tp);
}
template<typename T>
bool operator()(TypeId ty, const T&)
bool visit(TypeId ty) override
{
return visited.insert(ty).second;
}
bool operator()(TypeId ty, const TableTypeVar& ttv) = delete;
bool visit(TypePackId tp) override
{
return visitedPacks.insert(tp).second;
}
bool operator()(TypeId ty, const TableTypeVar& ttv, std::unordered_set<void*>& seen)
bool visit(TypeId ty, const TableTypeVar& ttv) override
{
if (!visited.insert(ty).second)
return false;
@ -64,10 +66,10 @@ struct FindCyclicTypes
if (ttv.name || ttv.syntheticName)
{
for (TypeId itp : ttv.instantiatedTypeParams)
visitTypeVar(itp, *this, seen);
traverse(itp);
for (TypePackId itp : ttv.instantiatedTypePackParams)
visitTypeVar(itp, *this, seen);
traverse(itp);
return exhaustive;
}
@ -75,16 +77,10 @@ struct FindCyclicTypes
return true;
}
bool operator()(TypeId, const ClassTypeVar&)
bool visit(TypeId ty, const ClassTypeVar&) override
{
return false;
}
template<typename T>
bool operator()(TypePackId tp, const T&)
{
return visitedPacks.insert(tp).second;
}
};
template<typename TID>
@ -92,7 +88,7 @@ void findCyclicTypes(std::set<TypeId>& cycles, std::set<TypePackId>& cycleTPs, T
{
FindCyclicTypes fct;
fct.exhaustive = exhaustive;
visitTypeVar(ty, fct);
fct.traverse(ty);
cycles = std::move(fct.cycles);
cycleTPs = std::move(fct.cycleTPs);
@ -183,6 +179,8 @@ struct StringifierState
return generateName(s);
}
int previousNameIndex = 0;
std::string getName(TypePackId ty)
{
const size_t s = result.nameMap.typePacks.size();
@ -192,9 +190,10 @@ struct StringifierState
for (int count = 0; count < 256; ++count)
{
std::string candidate = generateName(usedNames.size() + count);
std::string candidate = generateName(previousNameIndex + count);
if (!usedNames.count(candidate))
{
previousNameIndex += count;
usedNames.insert(candidate);
n = candidate;
return candidate;
@ -212,6 +211,13 @@ struct StringifierState
result.name += s;
}
void emit(TypeLevel level)
{
emit(std::to_string(level.level));
emit("-");
emit(std::to_string(level.subLevel));
}
void emit(const char* s)
{
if (opts.maxTypeLength > 0 && result.name.length() > opts.maxTypeLength)
@ -343,7 +349,7 @@ struct TypeVarStringifier
if (FFlag::DebugLuauVerboseTypeNames)
{
state.emit("-");
state.emit(std::to_string(ftv.level.level));
state.emit(ftv.level);
}
}
@ -356,6 +362,7 @@ struct TypeVarStringifier
{
if (gtv.explicitName)
{
state.usedNames.insert(gtv.name);
state.result.nameMap.typeVars[ty] = gtv.name;
state.emit(gtv.name);
}
@ -367,7 +374,10 @@ struct TypeVarStringifier
{
state.result.invalid = true;
state.emit("[[");
state.emit("[");
if (FFlag::DebugLuauVerboseTypeNames)
state.emit(ctv.level);
state.emit("[");
bool first = true;
for (TypeId ty : ctv.parts)
@ -699,7 +709,10 @@ struct TypeVarStringifier
for (std::string& ss : results)
{
if (!first)
state.emit(" | ");
{
state.newline();
state.emit("| ");
}
state.emit(ss);
first = false;
}
@ -752,7 +765,10 @@ struct TypeVarStringifier
for (std::string& ss : results)
{
if (!first)
state.emit(" & ");
{
state.newline();
state.emit("& ");
}
state.emit(ss);
first = false;
}
@ -891,6 +907,7 @@ struct TypePackStringifier
state.emit("gen-");
if (pack.explicitName)
{
state.usedNames.insert(pack.name);
state.result.nameMap.typePacks[tp] = pack.name;
state.emit(pack.name);
}
@ -911,7 +928,7 @@ struct TypePackStringifier
if (FFlag::DebugLuauVerboseTypeNames)
{
state.emit("-");
state.emit(std::to_string(pack.level.level));
state.emit(pack.level);
}
state.emit("...");
@ -1184,6 +1201,14 @@ std::string toStringNamedFunction(const std::string& funcName, const FunctionTyp
size_t idx = 0;
while (argPackIter != end(ftv.argTypes))
{
// ftv takes a self parameter as its first argument; skip it when the option asks for it to be hidden
if (idx == 0 && ftv.hasSelf && opts.hideFunctionSelfArgument)
{
++argPackIter;
++idx;
continue;
}
if (!first)
state.emit(", ");
first = false;
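With this check in place, the implicit self parameter of a method can be left out of the printed signature. A hypothetical usage, assuming hideFunctionSelfArgument lives on the same ToStringOptions struct that toStringNamedFunction already receives, and that methodTy names a FunctionTypeVar with hasSelf set:

// Sketch: print a method's signature without exposing its leading `self` argument.
ToStringOptions opts;
opts.hideFunctionSelfArgument = true;
std::string signature = toStringNamedFunction("Account.deposit", *get<FunctionTypeVar>(methodTy), opts);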

View file

@ -7,9 +7,6 @@
#include <algorithm>
#include <stdexcept>
LUAU_FASTFLAGVARIABLE(LuauTxnLogPreserveOwner, false)
LUAU_FASTFLAGVARIABLE(LuauJustOneCallFrameForHaveSeen, false)
namespace Luau
{
@ -81,31 +78,20 @@ void TxnLog::concat(TxnLog rhs)
void TxnLog::commit()
{
if (FFlag::LuauTxnLogPreserveOwner)
for (auto& [ty, rep] : typeVarChanges)
{
for (auto& [ty, rep] : typeVarChanges)
{
TypeArena* owningArena = ty->owningArena;
TypeVar* mtv = asMutable(ty);
*mtv = rep.get()->pending;
mtv->owningArena = owningArena;
}
for (auto& [tp, rep] : typePackChanges)
{
TypeArena* owningArena = tp->owningArena;
TypePackVar* mpv = asMutable(tp);
*mpv = rep.get()->pending;
mpv->owningArena = owningArena;
}
TypeArena* owningArena = ty->owningArena;
TypeVar* mtv = asMutable(ty);
*mtv = rep.get()->pending;
mtv->owningArena = owningArena;
}
else
{
for (auto& [ty, rep] : typeVarChanges)
*asMutable(ty) = rep.get()->pending;
for (auto& [tp, rep] : typePackChanges)
*asMutable(tp) = rep.get()->pending;
for (auto& [tp, rep] : typePackChanges)
{
TypeArena* owningArena = tp->owningArena;
TypePackVar* mpv = asMutable(tp);
*mpv = rep.get()->pending;
mpv->owningArena = owningArena;
}
clear();
@ -162,37 +148,13 @@ void TxnLog::popSeen(TypePackId lhs, TypePackId rhs)
bool TxnLog::haveSeen(TypeOrPackId lhs, TypeOrPackId rhs) const
{
if (FFlag::LuauJustOneCallFrameForHaveSeen && !FFlag::LuauTypecheckOptPass)
const std::pair<TypeOrPackId, TypeOrPackId> sortedPair = (lhs > rhs) ? std::make_pair(lhs, rhs) : std::make_pair(rhs, lhs);
if (sharedSeen->end() != std::find(sharedSeen->begin(), sharedSeen->end(), sortedPair))
{
// This function will technically work if `this` is nullptr, but this
// indicates a bug, so we explicitly assert.
LUAU_ASSERT(static_cast<const void*>(this) != nullptr);
const std::pair<TypeOrPackId, TypeOrPackId> sortedPair = (lhs > rhs) ? std::make_pair(lhs, rhs) : std::make_pair(rhs, lhs);
for (const TxnLog* current = this; current; current = current->parent)
{
if (current->sharedSeen->end() != std::find(current->sharedSeen->begin(), current->sharedSeen->end(), sortedPair))
return true;
}
return false;
return true;
}
else
{
const std::pair<TypeOrPackId, TypeOrPackId> sortedPair = (lhs > rhs) ? std::make_pair(lhs, rhs) : std::make_pair(rhs, lhs);
if (sharedSeen->end() != std::find(sharedSeen->begin(), sharedSeen->end(), sortedPair))
{
return true;
}
if (!FFlag::LuauTypecheckOptPass && parent)
{
return parent->haveSeen(lhs, rhs);
}
return false;
}
return false;
}
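The flattened haveSeen is now a single linear scan of the shared seen set; the lookup stays symmetric because pairs are kept in a canonical order. Restated as a standalone helper for clarity:

// Sketch: (lhs, rhs) and (rhs, lhs) canonicalize to the same key, so haveSeen(a, b) and
// haveSeen(b, a) agree as long as pushSeen stores pairs the same way.
static std::pair<TypeOrPackId, TypeOrPackId> canonicalPair(TypeOrPackId lhs, TypeOrPackId rhs)
{
    return (lhs > rhs) ? std::make_pair(lhs, rhs) : std::make_pair(rhs, lhs);
}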
void TxnLog::pushSeen(TypeOrPackId lhs, TypeOrPackId rhs)

View file

@ -0,0 +1,88 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/TypeArena.h"
LUAU_FASTFLAGVARIABLE(DebugLuauFreezeArena, false);
namespace Luau
{
void TypeArena::clear()
{
typeVars.clear();
typePacks.clear();
}
TypeId TypeArena::addTV(TypeVar&& tv)
{
TypeId allocated = typeVars.allocate(std::move(tv));
asMutable(allocated)->owningArena = this;
return allocated;
}
TypeId TypeArena::freshType(TypeLevel level)
{
TypeId allocated = typeVars.allocate(FreeTypeVar{level});
asMutable(allocated)->owningArena = this;
return allocated;
}
TypePackId TypeArena::addTypePack(std::initializer_list<TypeId> types)
{
TypePackId allocated = typePacks.allocate(TypePack{std::move(types)});
asMutable(allocated)->owningArena = this;
return allocated;
}
TypePackId TypeArena::addTypePack(std::vector<TypeId> types)
{
TypePackId allocated = typePacks.allocate(TypePack{std::move(types)});
asMutable(allocated)->owningArena = this;
return allocated;
}
TypePackId TypeArena::addTypePack(TypePack tp)
{
TypePackId allocated = typePacks.allocate(std::move(tp));
asMutable(allocated)->owningArena = this;
return allocated;
}
TypePackId TypeArena::addTypePack(TypePackVar tp)
{
TypePackId allocated = typePacks.allocate(std::move(tp));
asMutable(allocated)->owningArena = this;
return allocated;
}
void freeze(TypeArena& arena)
{
if (!FFlag::DebugLuauFreezeArena)
return;
arena.typeVars.freeze();
arena.typePacks.freeze();
}
void unfreeze(TypeArena& arena)
{
if (!FFlag::DebugLuauFreezeArena)
return;
arena.typeVars.unfreeze();
arena.typePacks.unfreeze();
}
} // namespace Luau
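Since this file is newly split out, a short usage sketch of the arena API defined above; addType is assumed to be the header-side wrapper over addTV, and the table construction mirrors Normalize.cpp:

// Sketch: allocate a few objects, then exercise the debug freeze guard.
TypeArena arena;

TypeId freeTy = arena.freshType(TypeLevel{});                                   // a FreeTypeVar owned by `arena`
TypeId tableTy = arena.addType(TableTypeVar{TableState::Sealed, TypeLevel{}});  // assumed wrapper over addTV
TypePackId pack = arena.addTypePack({freeTy, tableTy});

freeze(arena);   // no-op unless DebugLuauFreezeArena is set; afterwards the pools reject mutation
unfreeze(arena); // required before allocating into the arena again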

File diff suppressed because it is too large

View file

@ -5,8 +5,6 @@
#include "Luau/ToString.h"
#include "Luau/TypeInfer.h"
LUAU_FASTFLAGVARIABLE(LuauTerminateCyclicMetatableIndexLookup, false)
namespace Luau
{
@ -55,13 +53,10 @@ std::optional<TypeId> findTablePropertyRespectingMeta(ErrorVec& errors, TypeId t
{
TypeId index = follow(*mtIndex);
if (FFlag::LuauTerminateCyclicMetatableIndexLookup)
{
if (count >= 100)
return std::nullopt;
if (count >= 100)
return std::nullopt;
++count;
}
++count;
if (const auto& itt = getTableType(index))
{

View file

@ -23,10 +23,7 @@ LUAU_FASTFLAG(DebugLuauFreezeArena)
LUAU_FASTINTVARIABLE(LuauTypeMaximumStringifierLength, 500)
LUAU_FASTINTVARIABLE(LuauTableTypeMaximumStringifierLength, 0)
LUAU_FASTINT(LuauTypeInferRecursionLimit)
LUAU_FASTFLAG(LuauErrorRecoveryType)
LUAU_FASTFLAG(LuauSubtypingAddOptPropsToUnsealedTables)
LUAU_FASTFLAG(LuauDiscriminableUnions2)
LUAU_FASTFLAGVARIABLE(LuauAnyInIsOptionalIsOptional, false)
LUAU_FASTFLAGVARIABLE(LuauClassDefinitionModuleInError, false)
namespace Luau
@ -205,14 +202,14 @@ bool isOptional(TypeId ty)
ty = follow(ty);
if (FFlag::LuauAnyInIsOptionalIsOptional && get<AnyTypeVar>(ty))
if (get<AnyTypeVar>(ty))
return true;
auto utv = get<UnionTypeVar>(ty);
if (!utv)
return false;
return std::any_of(begin(utv), end(utv), FFlag::LuauAnyInIsOptionalIsOptional ? isOptional : isNil);
return std::any_of(begin(utv), end(utv), isOptional);
}
bool isTableIntersection(TypeId ty)
@ -379,8 +376,7 @@ bool hasLength(TypeId ty, DenseHashSet<TypeId>& seen, int* recursionCount)
if (seen.contains(ty))
return true;
bool isStr = FFlag::LuauDiscriminableUnions2 ? isString(ty) : isPrim(ty, PrimitiveTypeVar::String);
if (isStr || get<AnyTypeVar>(ty) || get<TableTypeVar>(ty) || get<MetatableTypeVar>(ty))
if (isString(ty) || get<AnyTypeVar>(ty) || get<TableTypeVar>(ty) || get<MetatableTypeVar>(ty))
return true;
if (auto uty = get<UnionTypeVar>(ty))
@ -775,18 +771,12 @@ TypePackId SingletonTypes::errorRecoveryTypePack()
TypeId SingletonTypes::errorRecoveryType(TypeId guess)
{
if (FFlag::LuauErrorRecoveryType)
return guess;
else
return &errorType_;
return guess;
}
TypePackId SingletonTypes::errorRecoveryTypePack(TypePackId guess)
{
if (FFlag::LuauErrorRecoveryType)
return guess;
else
return &errorTypePack_;
return guess;
}
SingletonTypes& getSingletonTypes()

View file

@ -21,19 +21,12 @@ LUAU_FASTFLAGVARIABLE(LuauTableSubtypingVariance2, false);
LUAU_FASTFLAG(LuauLowerBoundsCalculation);
LUAU_FASTFLAG(LuauErrorRecoveryType);
LUAU_FASTFLAGVARIABLE(LuauSubtypingAddOptPropsToUnsealedTables, false)
LUAU_FASTFLAGVARIABLE(LuauWidenIfSupertypeIsFree2, false)
LUAU_FASTFLAGVARIABLE(LuauDifferentOrderOfUnificationDoesntMatter, false)
LUAU_FASTFLAGVARIABLE(LuauTxnLogSeesTypePacks2, false)
LUAU_FASTFLAGVARIABLE(LuauTxnLogCheckForInvalidation, false)
LUAU_FASTFLAGVARIABLE(LuauTxnLogRefreshFunctionPointers, false)
LUAU_FASTFLAGVARIABLE(LuauTxnLogDontRetryForIndexers, false)
LUAU_FASTFLAG(LuauAnyInIsOptionalIsOptional)
LUAU_FASTFLAG(LuauTypecheckOptPass)
namespace Luau
{
struct PromoteTypeLevels
struct PromoteTypeLevels final : TypeVarOnceVisitor
{
TxnLog& log;
const TypeArena* typeArena = nullptr;
@ -56,13 +49,34 @@ struct PromoteTypeLevels
}
}
// TODO cycle and operator() need to be clipped when FFlagLuauUseVisitRecursionLimit is clipped
template<typename TID>
void cycle(TID)
{
}
template<typename TID, typename T>
bool operator()(TID ty, const T&)
{
return visit(ty);
}
bool operator()(TypeId ty, const FreeTypeVar& ftv)
{
return visit(ty, ftv);
}
bool operator()(TypeId ty, const FunctionTypeVar& ftv)
{
return visit(ty, ftv);
}
bool operator()(TypeId ty, const TableTypeVar& ttv)
{
return visit(ty, ttv);
}
bool operator()(TypePackId tp, const FreeTypePack& ftp)
{
return visit(tp, ftp);
}
bool visit(TypeId ty) override
{
// Type levels of types from other modules are already global, so we don't need to promote anything inside
if (ty->owningArena != typeArena)
@ -71,7 +85,16 @@ struct PromoteTypeLevels
return true;
}
bool operator()(TypeId ty, const FreeTypeVar&)
bool visit(TypePackId tp) override
{
// Type levels of types from other modules are already global, so we don't need to promote anything inside
if (tp->owningArena != typeArena)
return false;
return true;
}
bool visit(TypeId ty, const FreeTypeVar&) override
{
// Surprise, it's actually a BoundTypeVar that hasn't been committed yet.
// Calling getMutable on this will trigger an assertion.
@ -82,7 +105,7 @@ struct PromoteTypeLevels
return true;
}
bool operator()(TypeId ty, const FunctionTypeVar&)
bool visit(TypeId ty, const FunctionTypeVar&) override
{
// Type levels of types from other modules are already global, so we don't need to promote anything inside
if (ty->owningArena != typeArena)
@ -92,7 +115,7 @@ struct PromoteTypeLevels
return true;
}
bool operator()(TypeId ty, const TableTypeVar& ttv)
bool visit(TypeId ty, const TableTypeVar& ttv) override
{
// Type levels of types from other modules are already global, so we don't need to promote anything inside
if (ty->owningArena != typeArena)
@ -105,7 +128,7 @@ struct PromoteTypeLevels
return true;
}
bool operator()(TypePackId tp, const FreeTypePack&)
bool visit(TypePackId tp, const FreeTypePack&) override
{
// Surprise, it's actually a BoundTypePack that hasn't been committed yet.
// Calling getMutable on this will trigger an assertion.
@ -124,8 +147,7 @@ static void promoteTypeLevels(TxnLog& log, const TypeArena* typeArena, TypeLevel
return;
PromoteTypeLevels ptl{log, typeArena, minLevel};
DenseHashSet<void*> seen{nullptr};
visitTypeVarOnce(ty, ptl, seen);
ptl.traverse(ty);
}
void promoteTypeLevels(TxnLog& log, const TypeArena* typeArena, TypeLevel minLevel, TypePackId tp)
@ -135,11 +157,10 @@ void promoteTypeLevels(TxnLog& log, const TypeArena* typeArena, TypeLevel minLev
return;
PromoteTypeLevels ptl{log, typeArena, minLevel};
DenseHashSet<void*> seen{nullptr};
visitTypeVarOnce(tp, ptl, seen);
ptl.traverse(tp);
}
struct SkipCacheForType
struct SkipCacheForType final : TypeVarOnceVisitor
{
SkipCacheForType(const DenseHashMap<TypeId, bool>& skipCacheForType, const TypeArena* typeArena)
: skipCacheForType(skipCacheForType)
@ -147,28 +168,25 @@ struct SkipCacheForType
{
}
void cycle(TypeId) {}
void cycle(TypePackId) {}
bool operator()(TypeId ty, const FreeTypeVar& ftv)
bool visit(TypeId, const FreeTypeVar&) override
{
result = true;
return false;
}
bool operator()(TypeId ty, const BoundTypeVar& btv)
bool visit(TypeId, const BoundTypeVar&) override
{
result = true;
return false;
}
bool operator()(TypeId ty, const GenericTypeVar& btv)
bool visit(TypeId, const GenericTypeVar&) override
{
result = true;
return false;
}
bool operator()(TypeId ty, const TableTypeVar&)
bool visit(TypeId ty, const TableTypeVar&) override
{
// Types from other modules don't contain mutable elements and are ok to cache
if (ty->owningArena != typeArena)
@ -191,8 +209,7 @@ struct SkipCacheForType
return true;
}
template<typename T>
bool operator()(TypeId ty, const T& t)
bool visit(TypeId ty) override
{
// Types from other modules don't contain mutable elements and are ok to cache
if (ty->owningArena != typeArena)
@ -209,8 +226,7 @@ struct SkipCacheForType
return true;
}
template<typename T>
bool operator()(TypePackId tp, const T&)
bool visit(TypePackId tp) override
{
// Types from other modules don't contain mutable elements and are ok to cache
if (tp->owningArena != typeArena)
@ -219,19 +235,19 @@ struct SkipCacheForType
return true;
}
bool operator()(TypePackId tp, const FreeTypePack& ftp)
bool visit(TypePackId tp, const FreeTypePack&) override
{
result = true;
return false;
}
bool operator()(TypePackId tp, const BoundTypePack& ftp)
bool visit(TypePackId tp, const BoundTypePack&) override
{
result = true;
return false;
}
bool operator()(TypePackId tp, const GenericTypePack& ftp)
bool visit(TypePackId tp, const GenericTypePack&) override
{
result = true;
return false;
@ -278,6 +294,16 @@ bool Widen::ignoreChildren(TypeId ty)
return !log->is<UnionTypeVar>(ty);
}
TypeId Widen::operator()(TypeId ty)
{
return substitute(ty).value_or(ty);
}
TypePackId Widen::operator()(TypePackId tp)
{
return substitute(tp).value_or(tp);
}
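Making Widen callable keeps the binding call sites terse; the two lines below mirror the tryUnify_ changes later in this file, where a free supertype is bound to the widened subtype:

// Sketch: substitution failure inside operator() simply falls back to the original type.
Widen widen{types};
log.replace(superTy, BoundTypeVar(widen(subTy)));                  // type case
log.replace(superTp, Unifiable::Bound<TypePackId>(widen(subTp)));  // type-pack case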
static std::optional<TypeError> hasUnificationTooComplex(const ErrorVec& errors)
{
auto isUnificationTooComplex = [](const TypeError& te) {
@ -317,19 +343,6 @@ Unifier::Unifier(TypeArena* types, Mode mode, const Location& location, Variance
LUAU_ASSERT(sharedState.iceHandler);
}
Unifier::Unifier(TypeArena* types, Mode mode, std::vector<std::pair<TypeOrPackId, TypeOrPackId>>* sharedSeen, const Location& location,
Variance variance, UnifierSharedState& sharedState, TxnLog* parentLog)
: types(types)
, mode(mode)
, log(parentLog, sharedSeen)
, location(location)
, variance(variance)
, sharedState(sharedState)
{
LUAU_ASSERT(!FFlag::LuauTypecheckOptPass);
LUAU_ASSERT(sharedState.iceHandler);
}
void Unifier::tryUnify(TypeId subTy, TypeId superTy, bool isFunctionCall, bool isIntersection)
{
sharedState.counters.iterationCount = 0;
@ -425,6 +438,8 @@ void Unifier::tryUnify_(TypeId subTy, TypeId superTy, bool isFunctionCall, bool
if (!occursFailed)
{
promoteTypeLevels(log, types, superLevel, subTy);
Widen widen{types};
log.replace(superTy, BoundTypeVar(widen(subTy)));
}
@ -562,9 +577,6 @@ void Unifier::tryUnifyUnionWithType(TypeId subTy, const UnionTypeVar* uv, TypeId
std::optional<TypeError> unificationTooComplex;
std::optional<TypeError> firstFailedOption;
size_t count = uv->options.size();
size_t i = 0;
for (TypeId type : uv->options)
{
Unifier innerState = makeChildUnifier();
@ -580,52 +592,44 @@ void Unifier::tryUnifyUnionWithType(TypeId subTy, const UnionTypeVar* uv, TypeId
failed = true;
}
if (FFlag::LuauDifferentOrderOfUnificationDoesntMatter)
{
}
else
{
if (i == count - 1)
{
log.concat(std::move(innerState.log));
}
++i;
}
}
// Even if A | B <: T fails, we still want to bind an option of T to A | B whenever A | B was a subtype of that option.
if (FFlag::LuauDifferentOrderOfUnificationDoesntMatter)
{
auto tryBind = [this, subTy](TypeId superOption) {
superOption = log.follow(superOption);
auto tryBind = [this, subTy](TypeId superOption) {
superOption = log.follow(superOption);
// just skip if the superOption is not free-ish.
auto ttv = log.getMutable<TableTypeVar>(superOption);
if (!log.is<FreeTypeVar>(superOption) && (!ttv || ttv->state != TableState::Free))
return;
// just skip if the superOption is not free-ish.
auto ttv = log.getMutable<TableTypeVar>(superOption);
if (!log.is<FreeTypeVar>(superOption) && (!ttv || ttv->state != TableState::Free))
return;
// Since we have already checked if S <: T, checking it again will not queue up the type for replacement.
// So we'll have to do it ourselves. We assume they unified cleanly if they are still in the seen set.
if (log.haveSeen(subTy, superOption))
{
// TODO: would it be nice for TxnLog::replace to do this?
if (log.is<TableTypeVar>(superOption))
log.bindTable(superOption, subTy);
else
log.replace(superOption, *subTy);
}
};
if (auto utv = log.getMutable<UnionTypeVar>(superTy))
// If superOption is already present in subTy, do nothing. Nothing new has been learned, but the subtype
// test is successful.
if (auto subUnion = get<UnionTypeVar>(subTy))
{
for (TypeId ty : utv)
tryBind(ty);
if (end(subUnion) != std::find(begin(subUnion), end(subUnion), superOption))
return;
}
else
tryBind(superTy);
// Since we have already checked if S <: T, checking it again will not queue up the type for replacement.
// So we'll have to do it ourselves. We assume they unified cleanly if they are still in the seen set.
if (log.haveSeen(subTy, superOption))
{
// TODO: would it be nice for TxnLog::replace to do this?
if (log.is<TableTypeVar>(superOption))
log.bindTable(superOption, subTy);
else
log.replace(superOption, *subTy);
}
};
if (auto utv = log.getMutable<UnionTypeVar>(superTy))
{
for (TypeId ty : utv)
tryBind(ty);
}
else
tryBind(superTy);
if (unificationTooComplex)
reportError(*unificationTooComplex);
@ -825,7 +829,7 @@ bool Unifier::canCacheResult(TypeId subTy, TypeId superTy)
auto skipCacheFor = [this](TypeId ty) {
SkipCacheForType visitor{sharedState.skipCacheForType, types};
visitTypeVarOnce(ty, visitor, sharedState.seenAny);
visitor.traverse(ty);
sharedState.skipCacheForType[ty] = visitor.result;
@ -1021,7 +1025,7 @@ void Unifier::tryUnify_(TypePackId subTp, TypePackId superTp, bool isFunctionCal
if (superTp == subTp)
return;
if (FFlag::LuauTxnLogSeesTypePacks2 && log.haveSeen(superTp, subTp))
if (log.haveSeen(superTp, subTp))
return;
if (log.getMutable<Unifiable::Free>(superTp))
@ -1030,6 +1034,7 @@ void Unifier::tryUnify_(TypePackId subTp, TypePackId superTp, bool isFunctionCal
if (!log.getMutable<ErrorTypeVar>(superTp))
{
Widen widen{types};
log.replace(superTp, Unifiable::Bound<TypePackId>(widen(subTp)));
}
}
@ -1146,14 +1151,6 @@ void Unifier::tryUnify_(TypePackId subTp, TypePackId superTp, bool isFunctionCal
continue;
}
// In nonstrict mode, any also marks an optional argument.
else if (!FFlag::LuauAnyInIsOptionalIsOptional && superIter.good() && isNonstrictMode() &&
log.getMutable<AnyTypeVar>(log.follow(*superIter)))
{
superIter.advance();
continue;
}
if (log.getMutable<VariadicTypePack>(superIter.packId))
{
tryUnifyVariadics(subIter.packId, superIter.packId, false, int(subIter.index));
@ -1265,12 +1262,9 @@ void Unifier::tryUnifyFunctions(TypeId subTy, TypeId superTy, bool isFunctionCal
log.pushSeen(superFunction->generics[i], subFunction->generics[i]);
}
if (FFlag::LuauTxnLogSeesTypePacks2)
for (size_t i = 0; i < numGenericPacks; i++)
{
for (size_t i = 0; i < numGenericPacks; i++)
{
log.pushSeen(superFunction->genericPacks[i], subFunction->genericPacks[i]);
}
log.pushSeen(superFunction->genericPacks[i], subFunction->genericPacks[i]);
}
CountMismatch::Context context = ctx;
@ -1330,12 +1324,9 @@ void Unifier::tryUnifyFunctions(TypeId subTy, TypeId superTy, bool isFunctionCal
ctx = context;
if (FFlag::LuauTxnLogSeesTypePacks2)
for (int i = int(numGenericPacks) - 1; 0 <= i; i--)
{
for (int i = int(numGenericPacks) - 1; 0 <= i; i--)
{
log.popSeen(superFunction->genericPacks[i], subFunction->genericPacks[i]);
}
log.popSeen(superFunction->genericPacks[i], subFunction->genericPacks[i]);
}
for (int i = int(numGenerics) - 1; 0 <= i; i--)
@ -1387,21 +1378,9 @@ void Unifier::tryUnifyTables(TypeId subTy, TypeId superTy, bool isIntersection)
{
auto subIter = subTable->props.find(propName);
if (FFlag::LuauAnyInIsOptionalIsOptional)
{
if (subIter == subTable->props.end() &&
(!FFlag::LuauSubtypingAddOptPropsToUnsealedTables || subTable->state == TableState::Unsealed) && !isOptional(superProp.type))
missingProperties.push_back(propName);
}
else
{
bool isAny = log.getMutable<AnyTypeVar>(log.follow(superProp.type));
if (subIter == subTable->props.end() &&
(!FFlag::LuauSubtypingAddOptPropsToUnsealedTables || subTable->state == TableState::Unsealed) && !isOptional(superProp.type) &&
!isAny)
missingProperties.push_back(propName);
}
if (subIter == subTable->props.end() && (!FFlag::LuauSubtypingAddOptPropsToUnsealedTables || subTable->state == TableState::Unsealed) &&
!isOptional(superProp.type))
missingProperties.push_back(propName);
}
if (!missingProperties.empty())
@ -1418,18 +1397,8 @@ void Unifier::tryUnifyTables(TypeId subTy, TypeId superTy, bool isIntersection)
{
auto superIter = superTable->props.find(propName);
if (FFlag::LuauAnyInIsOptionalIsOptional)
{
if (superIter == superTable->props.end() && (FFlag::LuauSubtypingAddOptPropsToUnsealedTables || !isOptional(subProp.type)))
extraProperties.push_back(propName);
}
else
{
bool isAny = log.is<AnyTypeVar>(log.follow(subProp.type));
if (superIter == superTable->props.end() &&
(FFlag::LuauSubtypingAddOptPropsToUnsealedTables || (!isOptional(subProp.type) && !isAny)))
extraProperties.push_back(propName);
}
if (superIter == superTable->props.end() && (FFlag::LuauSubtypingAddOptPropsToUnsealedTables || !isOptional(subProp.type)))
extraProperties.push_back(propName);
}
if (!extraProperties.empty())
@ -1473,21 +1442,12 @@ void Unifier::tryUnifyTables(TypeId subTy, TypeId superTy, bool isIntersection)
if (innerState.errors.empty())
log.concat(std::move(innerState.log));
}
else if (FFlag::LuauAnyInIsOptionalIsOptional &&
(!FFlag::LuauSubtypingAddOptPropsToUnsealedTables || subTable->state == TableState::Unsealed) && isOptional(prop.type))
else if ((!FFlag::LuauSubtypingAddOptPropsToUnsealedTables || subTable->state == TableState::Unsealed) && isOptional(prop.type))
// This is sound because unsealed table types are precise, so `{ p : T } <: { p : T, q : U? }`
// since if `t : { p : T }` then we are guaranteed that `t.q` is `nil`.
// TODO: if the supertype is written to, the subtype may no longer be precise (alias analysis?)
{
}
else if ((!FFlag::LuauSubtypingAddOptPropsToUnsealedTables || subTable->state == TableState::Unsealed) &&
(isOptional(prop.type) || get<AnyTypeVar>(follow(prop.type))))
// This is sound because unsealed table types are precise, so `{ p : T } <: { p : T, q : U? }`
// since if `t : { p : T }` then we are guaranteed that `t.q` is `nil`.
// TODO: should isOptional(anyType) be true?
// TODO: if the supertype is written to, the subtype may no longer be precise (alias analysis?)
{
}
else if (subTable->state == TableState::Free)
{
PendingType* pendingSub = log.queue(subTy);
@ -1499,20 +1459,17 @@ void Unifier::tryUnifyTables(TypeId subTy, TypeId superTy, bool isIntersection)
else
missingProperties.push_back(name);
if (FFlag::LuauTxnLogCheckForInvalidation)
// Recursive unification can change the txn log, and invalidate the old
// table. If we detect that this has happened, we start over, with the updated
// txn log.
TableTypeVar* newSuperTable = log.getMutable<TableTypeVar>(superTy);
TableTypeVar* newSubTable = log.getMutable<TableTypeVar>(subTy);
if (superTable != newSuperTable || subTable != newSubTable)
{
// Recursive unification can change the txn log, and invalidate the old
// table. If we detect that this has happened, we start over, with the updated
// txn log.
TableTypeVar* newSuperTable = log.getMutable<TableTypeVar>(superTy);
TableTypeVar* newSubTable = log.getMutable<TableTypeVar>(subTy);
if (superTable != newSuperTable || subTable != newSubTable)
{
if (errors.empty())
return tryUnifyTables(subTy, superTy, isIntersection);
else
return;
}
if (errors.empty())
return tryUnifyTables(subTy, superTy, isIntersection);
else
return;
}
}
@ -1554,10 +1511,7 @@ void Unifier::tryUnifyTables(TypeId subTy, TypeId superTy, bool isIntersection)
else if (variance == Covariant)
{
}
else if (FFlag::LuauAnyInIsOptionalIsOptional && !FFlag::LuauSubtypingAddOptPropsToUnsealedTables && isOptional(prop.type))
{
}
else if (!FFlag::LuauSubtypingAddOptPropsToUnsealedTables && (isOptional(prop.type) || get<AnyTypeVar>(follow(prop.type))))
else if (!FFlag::LuauSubtypingAddOptPropsToUnsealedTables && isOptional(prop.type))
{
}
else if (superTable->state == TableState::Free)
@ -1570,20 +1524,17 @@ void Unifier::tryUnifyTables(TypeId subTy, TypeId superTy, bool isIntersection)
else
extraProperties.push_back(name);
if (FFlag::LuauTxnLogCheckForInvalidation)
// Recursive unification can change the txn log, and invalidate the old
// table. If we detect that this has happened, we start over, with the updated
// txn log.
TableTypeVar* newSuperTable = log.getMutable<TableTypeVar>(superTy);
TableTypeVar* newSubTable = log.getMutable<TableTypeVar>(subTy);
if (superTable != newSuperTable || subTable != newSubTable)
{
// Recursive unification can change the txn log, and invalidate the old
// table. If we detect that this has happened, we start over, with the updated
// txn log.
TableTypeVar* newSuperTable = log.getMutable<TableTypeVar>(superTy);
TableTypeVar* newSubTable = log.getMutable<TableTypeVar>(subTy);
if (superTable != newSuperTable || subTable != newSubTable)
{
if (errors.empty())
return tryUnifyTables(subTy, superTy, isIntersection);
else
return;
}
if (errors.empty())
return tryUnifyTables(subTy, superTy, isIntersection);
else
return;
}
}
@ -1630,27 +1581,9 @@ void Unifier::tryUnifyTables(TypeId subTy, TypeId superTy, bool isIntersection)
}
}
if (FFlag::LuauTxnLogDontRetryForIndexers)
{
// Changing the indexer can invalidate the table pointers.
superTable = log.getMutable<TableTypeVar>(superTy);
subTable = log.getMutable<TableTypeVar>(subTy);
}
else if (FFlag::LuauTxnLogCheckForInvalidation)
{
// Recursive unification can change the txn log, and invalidate the old
// table. If we detect that this has happened, we start over, with the updated
// txn log.
TableTypeVar* newSuperTable = log.getMutable<TableTypeVar>(superTy);
TableTypeVar* newSubTable = log.getMutable<TableTypeVar>(subTy);
if (superTable != newSuperTable || subTable != newSubTable)
{
if (errors.empty())
return tryUnifyTables(subTy, superTy, isIntersection);
else
return;
}
}
// Changing the indexer can invalidate the table pointers.
superTable = log.getMutable<TableTypeVar>(superTy);
subTable = log.getMutable<TableTypeVar>(subTy);
if (!missingProperties.empty())
{
@ -1685,34 +1618,10 @@ void Unifier::tryUnifyTables(TypeId subTy, TypeId superTy, bool isIntersection)
}
}
TypeId Unifier::widen(TypeId ty)
{
if (!FFlag::LuauWidenIfSupertypeIsFree2)
return ty;
Widen widen{types};
std::optional<TypeId> result = widen.substitute(ty);
// TODO: what does it mean for substitution to fail to widen?
return result.value_or(ty);
}
TypePackId Unifier::widen(TypePackId tp)
{
if (!FFlag::LuauWidenIfSupertypeIsFree2)
return tp;
Widen widen{types};
std::optional<TypePackId> result = widen.substitute(tp);
// TODO: what does it mean for substitution to fail to widen?
return result.value_or(tp);
}
TypeId Unifier::deeplyOptional(TypeId ty, std::unordered_map<TypeId, TypeId> seen)
{
ty = follow(ty);
if (!FFlag::LuauAnyInIsOptionalIsOptional && get<AnyTypeVar>(ty))
return ty;
else if (isOptional(ty))
if (isOptional(ty))
return ty;
else if (const TableTypeVar* ttv = get<TableTypeVar>(ty))
{
@ -1825,10 +1734,7 @@ void Unifier::tryUnifyFreeTable(TypeId subTy, TypeId superTy)
{
if (auto subProp = findTablePropertyRespectingMeta(subTy, freeName))
{
if (FFlag::LuauWidenIfSupertypeIsFree2)
tryUnify_(*subProp, freeProp.type);
else
tryUnify_(freeProp.type, *subProp);
tryUnify_(*subProp, freeProp.type);
/*
* TypeVars are commonly cyclic, so it is entirely possible
@ -2623,14 +2529,7 @@ void Unifier::occursCheck(DenseHashSet<TypePackId>& seen, TypePackId needle, Typ
Unifier Unifier::makeChildUnifier()
{
if (FFlag::LuauTypecheckOptPass)
{
Unifier u = Unifier{types, mode, location, variance, sharedState, &log};
u.anyIsTop = anyIsTop;
return u;
}
Unifier u = Unifier{types, mode, log.sharedSeen, location, variance, sharedState, &log};
Unifier u = Unifier{types, mode, location, variance, sharedState, &log};
u.anyIsTop = anyIsTop;
return u;
}

View file

@ -313,7 +313,7 @@ template<typename T>
struct AstArray
{
T* data;
std::size_t size;
size_t size;
const T* begin() const
{

View file

@ -1,7 +1,7 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Common.h"
#include "Luau/Common.h"
#include <vector>

View file

@ -6,8 +6,6 @@
#include <limits.h>
LUAU_FASTFLAGVARIABLE(LuauParseLocationIgnoreCommentSkip, false)
namespace Luau
{
@ -361,7 +359,7 @@ const Lexeme& Lexer::next(bool skipComments, bool updatePrevLocation)
while (isSpace(peekch()))
consume();
if (!FFlag::LuauParseLocationIgnoreCommentSkip || updatePrevLocation)
if (updatePrevLocation)
prevLocation = lexeme.location;
lexeme = readNext();

View file

@ -10,8 +10,8 @@
// See docs/SyntaxChanges.md for an explanation.
LUAU_FASTINTVARIABLE(LuauRecursionLimit, 1000)
LUAU_FASTINTVARIABLE(LuauParseErrorLimit, 100)
LUAU_FASTFLAGVARIABLE(LuauParseRecoverUnexpectedPack, false)
LUAU_FASTFLAGVARIABLE(LuauParseLocationIgnoreCommentSkipInCapture, false)
LUAU_FASTFLAGVARIABLE(LuauParserFunctionKeywordAsTypeHelp, false)
namespace Luau
{
@ -1430,7 +1430,7 @@ AstType* Parser::parseTypeAnnotation(TempVector<AstType*>& parts, const Location
parts.push_back(parseSimpleTypeAnnotation(/* allowPack= */ false).type);
isIntersection = true;
}
else if (FFlag::LuauParseRecoverUnexpectedPack && c == Lexeme::Dot3)
else if (c == Lexeme::Dot3)
{
report(lexer.current().location, "Unexpected '...' after type annotation");
nextLexeme();
@ -1551,7 +1551,7 @@ AstTypeOrPack Parser::parseSimpleTypeAnnotation(bool allowPack)
prefix = name.name;
name = parseIndexName("field name", pointPosition);
}
else if (FFlag::LuauParseRecoverUnexpectedPack && lexer.current().type == Lexeme::Dot3)
else if (lexer.current().type == Lexeme::Dot3)
{
report(lexer.current().location, "Unexpected '...' after type name; type pack is not allowed in this context");
nextLexeme();
@ -1591,6 +1591,17 @@ AstTypeOrPack Parser::parseSimpleTypeAnnotation(bool allowPack)
{
return parseFunctionTypeAnnotation(allowPack);
}
else if (FFlag::LuauParserFunctionKeywordAsTypeHelp && lexer.current().type == Lexeme::ReservedFunction)
{
Location location = lexer.current().location;
nextLexeme();
return {reportTypeAnnotationError(location, {}, /*isMissing*/ false,
"Using 'function' as a type annotation is not supported, consider replacing with a function type annotation e.g. '(...any) -> "
"...any'"),
{}};
}
else
{
Location location = lexer.current().location;
@ -2822,7 +2833,7 @@ void Parser::nextLexeme()
hotcomments.push_back({hotcommentHeader, lexeme.location, std::string(text + 1, text + end)});
}
type = lexer.next(/* skipComments= */ false, !FFlag::LuauParseLocationIgnoreCommentSkipInCapture).type;
type = lexer.next(/* skipComments= */ false, /* updatePrevLocation= */ false).type;
}
}
else

View file

@ -240,7 +240,7 @@ std::optional<std::string> getParentPath(const std::string& path)
return std::nullopt;
#endif
std::string::size_type slash = path.find_last_of("\\/", path.size() - 1);
size_t slash = path.find_last_of("\\/", path.size() - 1);
if (slash == 0)
return "/";
@ -253,7 +253,7 @@ std::optional<std::string> getParentPath(const std::string& path)
static std::string getExtension(const std::string& path)
{
std::string::size_type dot = path.find_last_of(".\\/");
size_t dot = path.find_last_of(".\\/");
if (dot == std::string::npos || path[dot] != '.')
return "";

View file

@ -21,6 +21,8 @@
#include <fcntl.h>
#endif
#include <locale.h>
LUAU_FASTFLAG(DebugLuauTimeTracing)
enum class CliMode
@ -34,7 +36,8 @@ enum class CliMode
enum class CompileFormat
{
Text,
Binary
Binary,
Null
};
constexpr int MaxTraversalLimit = 50;
@ -434,6 +437,9 @@ static void runReplImpl(lua_State* L)
{
ic_set_default_completer(completeRepl, L);
// Reset the locale to C
setlocale(LC_ALL, "C");
// Make brace matching easier to see
ic_style_def("ic-bracematch", "teal");
@ -594,6 +600,8 @@ static bool compileFile(const char* name, CompileFormat format)
case CompileFormat::Binary:
fwrite(bcb.getBytecode().data(), 1, bcb.getBytecode().size(), stdout);
break;
case CompileFormat::Null:
break;
}
return true;
@ -716,6 +724,10 @@ int replMain(int argc, char** argv)
{
compileFormat = CompileFormat::Text;
}
else if (strcmp(argv[1], "--compile=null") == 0)
{
compileFormat = CompileFormat::Null;
}
else
{
fprintf(stderr, "Error: Unrecognized value for '--compile' specified.\n");

View file

@ -19,9 +19,11 @@ if(LUAU_STATIC_CRT)
endif()
project(Luau LANGUAGES CXX C)
add_library(Luau.Common INTERFACE)
add_library(Luau.Ast STATIC)
add_library(Luau.Compiler STATIC)
add_library(Luau.Analysis STATIC)
add_library(Luau.CodeGen STATIC)
add_library(Luau.VM STATIC)
add_library(isocline STATIC)
@ -48,8 +50,11 @@ endif()
include(Sources.cmake)
target_include_directories(Luau.Common INTERFACE Common/include)
target_compile_features(Luau.Ast PUBLIC cxx_std_17)
target_include_directories(Luau.Ast PUBLIC Ast/include)
target_link_libraries(Luau.Ast PUBLIC Luau.Common)
target_compile_features(Luau.Compiler PUBLIC cxx_std_17)
target_include_directories(Luau.Compiler PUBLIC Compiler/include)
@ -59,8 +64,13 @@ target_compile_features(Luau.Analysis PUBLIC cxx_std_17)
target_include_directories(Luau.Analysis PUBLIC Analysis/include)
target_link_libraries(Luau.Analysis PUBLIC Luau.Ast)
target_compile_features(Luau.CodeGen PRIVATE cxx_std_17)
target_include_directories(Luau.CodeGen PUBLIC CodeGen/include)
target_link_libraries(Luau.CodeGen PUBLIC Luau.Common)
target_compile_features(Luau.VM PRIVATE cxx_std_11)
target_include_directories(Luau.VM PUBLIC VM/include)
target_link_libraries(Luau.VM PUBLIC Luau.Common)
target_include_directories(isocline PUBLIC extern/isocline/include)
@ -101,6 +111,7 @@ endif()
target_compile_options(Luau.Ast PRIVATE ${LUAU_OPTIONS})
target_compile_options(Luau.Analysis PRIVATE ${LUAU_OPTIONS})
target_compile_options(Luau.CodeGen PRIVATE ${LUAU_OPTIONS})
target_compile_options(Luau.VM PRIVATE ${LUAU_OPTIONS})
target_compile_options(isocline PRIVATE ${LUAU_OPTIONS} ${ISOCLINE_OPTIONS})
@ -110,10 +121,17 @@ if (MSVC AND MSVC_VERSION GREATER_EQUAL 1924)
set_source_files_properties(VM/src/lvmexecute.cpp PROPERTIES COMPILE_FLAGS /d2ssa-pre-)
endif()
if(MSVC AND LUAU_BUILD_CLI)
# the default stack size that MSVC linker uses is 1 MB; we need more stack space in Debug because stack frames are larger
set_target_properties(Luau.Analyze.CLI PROPERTIES LINK_FLAGS_DEBUG /STACK:2097152)
set_target_properties(Luau.Repl.CLI PROPERTIES LINK_FLAGS_DEBUG /STACK:2097152)
endif()
# embed .natvis inside the library debug information
if(MSVC)
target_link_options(Luau.Ast INTERFACE /NATVIS:${CMAKE_CURRENT_SOURCE_DIR}/tools/natvis/Ast.natvis)
target_link_options(Luau.Analysis INTERFACE /NATVIS:${CMAKE_CURRENT_SOURCE_DIR}/tools/natvis/Analysis.natvis)
target_link_options(Luau.CodeGen INTERFACE /NATVIS:${CMAKE_CURRENT_SOURCE_DIR}/tools/natvis/CodeGen.natvis)
target_link_options(Luau.VM INTERFACE /NATVIS:${CMAKE_CURRENT_SOURCE_DIR}/tools/natvis/VM.natvis)
endif()
@ -121,6 +139,7 @@ endif()
if(MSVC_IDE)
target_sources(Luau.Ast PRIVATE tools/natvis/Ast.natvis)
target_sources(Luau.Analysis PRIVATE tools/natvis/Analysis.natvis)
target_sources(Luau.CodeGen PRIVATE tools/natvis/CodeGen.natvis)
target_sources(Luau.VM PRIVATE tools/natvis/VM.natvis)
endif()
@ -148,7 +167,7 @@ endif()
if(LUAU_BUILD_TESTS)
target_compile_options(Luau.UnitTest PRIVATE ${LUAU_OPTIONS})
target_include_directories(Luau.UnitTest PRIVATE extern)
target_link_libraries(Luau.UnitTest PRIVATE Luau.Analysis Luau.Compiler)
target_link_libraries(Luau.UnitTest PRIVATE Luau.Analysis Luau.Compiler Luau.CodeGen)
target_compile_options(Luau.Conformance PRIVATE ${LUAU_OPTIONS})
target_include_directories(Luau.Conformance PRIVATE extern)

View file

@ -0,0 +1,169 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Common.h"
#include "Luau/Condition.h"
#include "Luau/Label.h"
#include "Luau/OperandX64.h"
#include "Luau/RegisterX64.h"
#include <string>
#include <vector>
namespace Luau
{
namespace CodeGen
{
class AssemblyBuilderX64
{
public:
explicit AssemblyBuilderX64(bool logText);
~AssemblyBuilderX64();
// Base two operand instructions with 9 opcode selection
void add(OperandX64 lhs, OperandX64 rhs);
void sub(OperandX64 lhs, OperandX64 rhs);
void cmp(OperandX64 lhs, OperandX64 rhs);
void and_(OperandX64 lhs, OperandX64 rhs);
void or_(OperandX64 lhs, OperandX64 rhs);
void xor_(OperandX64 lhs, OperandX64 rhs);
// Binary shift instructions with special rhs handling
void sal(OperandX64 lhs, OperandX64 rhs);
void sar(OperandX64 lhs, OperandX64 rhs);
void shl(OperandX64 lhs, OperandX64 rhs);
void shr(OperandX64 lhs, OperandX64 rhs);
// Two operand mov instruction has additional specialized encodings
void mov(OperandX64 lhs, OperandX64 rhs);
void mov64(RegisterX64 lhs, int64_t imm);
// Base one operand instruction with 2 opcode selection
void div(OperandX64 op);
void idiv(OperandX64 op);
void mul(OperandX64 op);
void neg(OperandX64 op);
void not_(OperandX64 op);
void test(OperandX64 lhs, OperandX64 rhs);
void lea(OperandX64 lhs, OperandX64 rhs);
void push(OperandX64 op);
void pop(OperandX64 op);
void ret();
// Control flow
void jcc(Condition cond, Label& label);
void jmp(Label& label);
void jmp(OperandX64 op);
// AVX
void vaddpd(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vaddps(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vaddsd(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vaddss(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vsqrtpd(OperandX64 dst, OperandX64 src);
void vsqrtps(OperandX64 dst, OperandX64 src);
void vsqrtsd(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vsqrtss(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vmovsd(OperandX64 dst, OperandX64 src);
void vmovsd(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vmovss(OperandX64 dst, OperandX64 src);
void vmovss(OperandX64 dst, OperandX64 src1, OperandX64 src2);
void vmovapd(OperandX64 dst, OperandX64 src);
void vmovaps(OperandX64 dst, OperandX64 src);
void vmovupd(OperandX64 dst, OperandX64 src);
void vmovups(OperandX64 dst, OperandX64 src);
// Run final checks
void finalize();
// Places a label at current location and returns it
Label setLabel();
// Assigns label position to the current location
void setLabel(Label& label);
// Constant allocation (uses rip-relative addressing)
OperandX64 i64(int64_t value);
OperandX64 f32(float value);
OperandX64 f64(double value);
OperandX64 f32x4(float x, float y, float z, float w);
// Resulting data and code that need to be copied over one after the other
// The *end* of 'data' has to be aligned to 16 bytes, this will also align 'code'
std::vector<uint8_t> data;
std::vector<uint8_t> code;
std::string text;
private:
// Instruction archetypes
void placeBinary(const char* name, OperandX64 lhs, OperandX64 rhs, uint8_t codeimm8, uint8_t codeimm, uint8_t codeimmImm8, uint8_t code8rev,
uint8_t coderev, uint8_t code8, uint8_t code, uint8_t opreg);
void placeBinaryRegMemAndImm(OperandX64 lhs, OperandX64 rhs, uint8_t code8, uint8_t code, uint8_t codeImm8, uint8_t opreg);
void placeBinaryRegAndRegMem(OperandX64 lhs, OperandX64 rhs, uint8_t code8, uint8_t code);
void placeBinaryRegMemAndReg(OperandX64 lhs, OperandX64 rhs, uint8_t code8, uint8_t code);
void placeUnaryModRegMem(const char* name, OperandX64 op, uint8_t code8, uint8_t code, uint8_t opreg);
void placeShift(const char* name, OperandX64 lhs, OperandX64 rhs, uint8_t opreg);
void placeJcc(const char* name, Label& label, uint8_t cc);
void placeAvx(const char* name, OperandX64 dst, OperandX64 src, uint8_t code, bool setW, uint8_t mode, uint8_t prefix);
void placeAvx(const char* name, OperandX64 dst, OperandX64 src, uint8_t code, uint8_t coderev, bool setW, uint8_t mode, uint8_t prefix);
void placeAvx(const char* name, OperandX64 dst, OperandX64 src1, OperandX64 src2, uint8_t code, bool setW, uint8_t mode, uint8_t prefix);
// Instruction components
void placeRegAndModRegMem(OperandX64 lhs, OperandX64 rhs);
void placeModRegMem(OperandX64 rhs, uint8_t regop);
void placeRex(RegisterX64 op);
void placeRex(OperandX64 op);
void placeRex(RegisterX64 lhs, OperandX64 rhs);
void placeVex(OperandX64 dst, OperandX64 src1, OperandX64 src2, bool setW, uint8_t mode, uint8_t prefix);
void placeImm8Or32(int32_t imm);
void placeImm8(int32_t imm);
void placeImm32(int32_t imm);
void placeImm64(int64_t imm);
void placeLabel(Label& label);
void place(uint8_t byte);
void commit();
LUAU_NOINLINE void extend();
uint32_t getCodeSize();
// Data
size_t allocateData(size_t size, size_t align);
// Logging of assembly in text form (Intel asm with VS disassembly formatting)
LUAU_NOINLINE void log(const char* opcode);
LUAU_NOINLINE void log(const char* opcode, OperandX64 op);
LUAU_NOINLINE void log(const char* opcode, OperandX64 op1, OperandX64 op2);
LUAU_NOINLINE void log(const char* opcode, OperandX64 op1, OperandX64 op2, OperandX64 op3);
LUAU_NOINLINE void log(Label label);
LUAU_NOINLINE void log(const char* opcode, Label label);
void log(OperandX64 op);
void logAppend(const char* fmt, ...);
const char* getSizeName(SizeX64 size);
const char* getRegisterName(RegisterX64 reg);
uint32_t nextLabel = 1;
std::vector<Label> pendingLabels;
std::vector<uint32_t> labelLocations;
bool logText = false;
bool finalized = false;
size_t dataPos = 0;
uint8_t* codePos = nullptr;
uint8_t* codeEnd = nullptr;
};
} // namespace CodeGen
} // namespace Luau
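To make the interface concrete, here is a minimal, hypothetical driver for this builder (not part of the commit); it assumes forward label references are patched when finalize() runs, as the pendingLabels member suggests, and it would link against the new Luau.CodeGen library:

#include "Luau/AssemblyBuilderX64.h"

using namespace Luau::CodeGen;

int main()
{
    AssemblyBuilderX64 build(/* logText= */ true);

    // rax = abs(rcx), using only instructions declared above
    Label positive;
    build.mov(rax, rcx);
    build.test(rcx, rcx);
    build.jcc(Condition::GreaterEqual, positive);
    build.neg(rax);
    build.setLabel(positive);
    build.ret();

    build.finalize();
    // build.code/build.data now hold the encoded bytes; build.text holds the listing since logText was true
    return 0;
}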

View file

@ -0,0 +1,46 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
namespace Luau
{
namespace CodeGen
{
enum class Condition
{
Overflow,
NoOverflow,
Carry,
NoCarry,
Below,
BelowEqual,
Above,
AboveEqual,
Equal,
Less,
LessEqual,
Greater,
GreaterEqual,
NotBelow,
NotBelowEqual,
NotAbove,
NotAboveEqual,
NotEqual,
NotLess,
NotLessEqual,
NotGreater,
NotGreaterEqual,
Zero,
NotZero,
// TODO: ordered and unordered floating-point conditions
Count
};
} // namespace CodeGen
} // namespace Luau

View file

@ -0,0 +1,18 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include <stdint.h>
namespace Luau
{
namespace CodeGen
{
struct Label
{
uint32_t id = 0;
uint32_t location = ~0u;
};
} // namespace CodeGen
} // namespace Luau

View file

@ -0,0 +1,136 @@
#pragma once
#include "Luau/Common.h"
#include "Luau/RegisterX64.h"
#include <stdint.h>
namespace Luau
{
namespace CodeGen
{
enum class CategoryX64 : uint8_t
{
reg,
mem,
imm,
};
struct OperandX64
{
constexpr OperandX64(RegisterX64 reg)
: cat(CategoryX64::reg)
, index(noreg)
, base(reg)
, memSize(SizeX64::none)
, scale(1)
, imm(0)
{
}
constexpr OperandX64(int32_t imm)
: cat(CategoryX64::imm)
, index(noreg)
, base(noreg)
, memSize(SizeX64::none)
, scale(1)
, imm(imm)
{
}
constexpr explicit OperandX64(SizeX64 size, RegisterX64 index, uint8_t scale, RegisterX64 base, int32_t disp)
: cat(CategoryX64::mem)
, index(index)
, base(base)
, memSize(size)
, scale(scale)
, imm(disp)
{
}
// Fields are carefully placed to make this struct fit into an 8 byte register
CategoryX64 cat;
RegisterX64 index;
RegisterX64 base;
SizeX64 memSize : 4;
uint8_t scale : 4;
int32_t imm;
constexpr OperandX64 operator[](OperandX64&& addr) const
{
LUAU_ASSERT(cat == CategoryX64::mem);
LUAU_ASSERT(memSize != SizeX64::none && index == noreg && scale == 1 && base == noreg && imm == 0);
LUAU_ASSERT(addr.memSize == SizeX64::none);
addr.cat = CategoryX64::mem;
addr.memSize = memSize;
return addr;
}
};
constexpr OperandX64 byte{SizeX64::byte, noreg, 1, noreg, 0};
constexpr OperandX64 word{SizeX64::word, noreg, 1, noreg, 0};
constexpr OperandX64 dword{SizeX64::dword, noreg, 1, noreg, 0};
constexpr OperandX64 qword{SizeX64::qword, noreg, 1, noreg, 0};
constexpr OperandX64 xmmword{SizeX64::xmmword, noreg, 1, noreg, 0};
constexpr OperandX64 ymmword{SizeX64::ymmword, noreg, 1, noreg, 0};
constexpr OperandX64 ptr{sizeof(void*) == 4 ? SizeX64::dword : SizeX64::qword, noreg, 1, noreg, 0};
constexpr OperandX64 operator*(RegisterX64 reg, uint8_t scale)
{
if (scale == 1)
return OperandX64(reg);
LUAU_ASSERT(scale == 1 || scale == 2 || scale == 4 || scale == 8);
LUAU_ASSERT(reg.index != 0b100 && "can't scale SP");
return OperandX64(SizeX64::none, reg, scale, noreg, 0);
}
constexpr OperandX64 operator+(RegisterX64 reg, int32_t disp)
{
return OperandX64(SizeX64::none, noreg, 1, reg, disp);
}
constexpr OperandX64 operator+(RegisterX64 base, RegisterX64 index)
{
LUAU_ASSERT(index.index != 4 && "sp cannot be used as index");
LUAU_ASSERT(base.size == index.size);
return OperandX64(SizeX64::none, index, 1, base, 0);
}
constexpr OperandX64 operator+(OperandX64 op, int32_t disp)
{
LUAU_ASSERT(op.cat == CategoryX64::mem);
LUAU_ASSERT(op.memSize == SizeX64::none);
op.imm += disp;
return op;
}
constexpr OperandX64 operator+(OperandX64 op, RegisterX64 base)
{
LUAU_ASSERT(op.cat == CategoryX64::mem);
LUAU_ASSERT(op.memSize == SizeX64::none);
LUAU_ASSERT(op.base == noreg);
LUAU_ASSERT(op.index == noreg || op.index.size == base.size);
op.base = base;
return op;
}
constexpr OperandX64 operator+(RegisterX64 base, OperandX64 op)
{
LUAU_ASSERT(op.cat == CategoryX64::mem);
LUAU_ASSERT(op.memSize == SizeX64::none);
LUAU_ASSERT(op.base == noreg);
LUAU_ASSERT(op.index == noreg || op.index.size == base.size);
op.base = base;
return op;
}
} // namespace CodeGen
} // namespace Luau
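The operator overloads above are what let call sites read like Intel-syntax addressing. A small illustrative sketch (hypothetical code, not from the commit):

#include "Luau/OperandX64.h"

using namespace Luau::CodeGen;

void addressingExamples()
{
    OperandX64 a = qword[rax];                  // qword ptr [rax]
    OperandX64 b = dword[rbx + 8];              // dword ptr [rbx+8]
    OperandX64 c = qword[rdx + rcx * 4 + 0x10]; // qword ptr [rdx+rcx*4+16]
    (void)a; (void)b; (void)c;
}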

View file

@ -0,0 +1,116 @@
#pragma once
#include "Luau/Common.h"
#include <stdint.h>
namespace Luau
{
namespace CodeGen
{
enum class SizeX64 : uint8_t
{
none,
byte,
word,
dword,
qword,
xmmword,
ymmword,
};
struct RegisterX64
{
SizeX64 size : 3;
uint8_t index : 5;
constexpr bool operator==(RegisterX64 rhs) const
{
return size == rhs.size && index == rhs.index;
}
constexpr bool operator!=(RegisterX64 rhs) const
{
return !(*this == rhs);
}
};
constexpr RegisterX64 noreg{SizeX64::none, 16};
constexpr RegisterX64 rip{SizeX64::none, 0};
constexpr RegisterX64 al{SizeX64::byte, 0};
constexpr RegisterX64 cl{SizeX64::byte, 1};
constexpr RegisterX64 dl{SizeX64::byte, 2};
constexpr RegisterX64 bl{SizeX64::byte, 3};
constexpr RegisterX64 eax{SizeX64::dword, 0};
constexpr RegisterX64 ecx{SizeX64::dword, 1};
constexpr RegisterX64 edx{SizeX64::dword, 2};
constexpr RegisterX64 ebx{SizeX64::dword, 3};
constexpr RegisterX64 esp{SizeX64::dword, 4};
constexpr RegisterX64 ebp{SizeX64::dword, 5};
constexpr RegisterX64 esi{SizeX64::dword, 6};
constexpr RegisterX64 edi{SizeX64::dword, 7};
constexpr RegisterX64 r8d{SizeX64::dword, 8};
constexpr RegisterX64 r9d{SizeX64::dword, 9};
constexpr RegisterX64 r10d{SizeX64::dword, 10};
constexpr RegisterX64 r11d{SizeX64::dword, 11};
constexpr RegisterX64 r12d{SizeX64::dword, 12};
constexpr RegisterX64 r13d{SizeX64::dword, 13};
constexpr RegisterX64 r14d{SizeX64::dword, 14};
constexpr RegisterX64 r15d{SizeX64::dword, 15};
constexpr RegisterX64 rax{SizeX64::qword, 0};
constexpr RegisterX64 rcx{SizeX64::qword, 1};
constexpr RegisterX64 rdx{SizeX64::qword, 2};
constexpr RegisterX64 rbx{SizeX64::qword, 3};
constexpr RegisterX64 rsp{SizeX64::qword, 4};
constexpr RegisterX64 rbp{SizeX64::qword, 5};
constexpr RegisterX64 rsi{SizeX64::qword, 6};
constexpr RegisterX64 rdi{SizeX64::qword, 7};
constexpr RegisterX64 r8{SizeX64::qword, 8};
constexpr RegisterX64 r9{SizeX64::qword, 9};
constexpr RegisterX64 r10{SizeX64::qword, 10};
constexpr RegisterX64 r11{SizeX64::qword, 11};
constexpr RegisterX64 r12{SizeX64::qword, 12};
constexpr RegisterX64 r13{SizeX64::qword, 13};
constexpr RegisterX64 r14{SizeX64::qword, 14};
constexpr RegisterX64 r15{SizeX64::qword, 15};
constexpr RegisterX64 xmm0{SizeX64::xmmword, 0};
constexpr RegisterX64 xmm1{SizeX64::xmmword, 1};
constexpr RegisterX64 xmm2{SizeX64::xmmword, 2};
constexpr RegisterX64 xmm3{SizeX64::xmmword, 3};
constexpr RegisterX64 xmm4{SizeX64::xmmword, 4};
constexpr RegisterX64 xmm5{SizeX64::xmmword, 5};
constexpr RegisterX64 xmm6{SizeX64::xmmword, 6};
constexpr RegisterX64 xmm7{SizeX64::xmmword, 7};
constexpr RegisterX64 xmm8{SizeX64::xmmword, 8};
constexpr RegisterX64 xmm9{SizeX64::xmmword, 9};
constexpr RegisterX64 xmm10{SizeX64::xmmword, 10};
constexpr RegisterX64 xmm11{SizeX64::xmmword, 11};
constexpr RegisterX64 xmm12{SizeX64::xmmword, 12};
constexpr RegisterX64 xmm13{SizeX64::xmmword, 13};
constexpr RegisterX64 xmm14{SizeX64::xmmword, 14};
constexpr RegisterX64 xmm15{SizeX64::xmmword, 15};
constexpr RegisterX64 ymm0{SizeX64::ymmword, 0};
constexpr RegisterX64 ymm1{SizeX64::ymmword, 1};
constexpr RegisterX64 ymm2{SizeX64::ymmword, 2};
constexpr RegisterX64 ymm3{SizeX64::ymmword, 3};
constexpr RegisterX64 ymm4{SizeX64::ymmword, 4};
constexpr RegisterX64 ymm5{SizeX64::ymmword, 5};
constexpr RegisterX64 ymm6{SizeX64::ymmword, 6};
constexpr RegisterX64 ymm7{SizeX64::ymmword, 7};
constexpr RegisterX64 ymm8{SizeX64::ymmword, 8};
constexpr RegisterX64 ymm9{SizeX64::ymmword, 9};
constexpr RegisterX64 ymm10{SizeX64::ymmword, 10};
constexpr RegisterX64 ymm11{SizeX64::ymmword, 11};
constexpr RegisterX64 ymm12{SizeX64::ymmword, 12};
constexpr RegisterX64 ymm13{SizeX64::ymmword, 13};
constexpr RegisterX64 ymm14{SizeX64::ymmword, 14};
constexpr RegisterX64 ymm15{SizeX64::ymmword, 15};
} // namespace CodeGen
} // namespace Luau
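Since the comparison operators are constexpr, register identity can be checked at compile time. For example (illustrative, not part of the commit), eax and rax share an encoding index but differ in operand size, so they compare unequal:

#include "Luau/RegisterX64.h"

using namespace Luau::CodeGen;

static_assert(rax.index == eax.index, "same encoding slot");
static_assert(rax != eax, "operand size participates in equality");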

File diff suppressed because it is too large.

View file

@ -353,6 +353,11 @@ enum LuauOpcode
// AUX: constant index
LOP_FASTCALL2K,
// FORGPREP: prepare loop variables for a generic for loop, jump to the loop backedge unconditionally
// A: target register; generic for loops assume a register layout [generator, state, index, variables...]
// D: jump offset (-32768..32767)
LOP_FORGPREP,
// Enum entry for number of opcodes, not a valid opcode by itself!
LOP__COUNT
};

View file

@ -224,6 +224,7 @@ private:
DenseHashMap<ConstantKey, int32_t, ConstantKeyHash> constantMap;
DenseHashMap<TableShape, int32_t, TableShapeHash> tableShapeMap;
DenseHashMap<uint32_t, int16_t> protoMap;
int debugLine = 0;
@ -232,7 +233,7 @@ private:
DenseHashMap<StringRef, unsigned int, StringRefHash> stringTable;
DenseHashMap<uint32_t, uint32_t> debugRemarks;
std::vector<std::pair<uint32_t, uint32_t>> debugRemarks;
std::string debugRemarkBuffer;
BytecodeEncoder* encoder = nullptr;
@ -246,7 +247,7 @@ private:
void validate() const;
std::string dumpCurrentFunction() const;
const uint32_t* dumpInstruction(const uint32_t* opcode, std::string& output) const;
void dumpInstruction(const uint32_t* opcode, std::string& output, int targetLabel) const;
void writeFunction(std::string& ss, uint32_t id) const;
void writeLineInfo(std::string& ss) const;

View file

@ -6,6 +6,8 @@
#include <algorithm>
#include <string.h>
LUAU_FASTFLAG(LuauCompileNestedClosureO2)
namespace Luau
{
@ -96,6 +98,7 @@ inline bool isJumpD(LuauOpcode op)
case LOP_JUMPIFNOTLT:
case LOP_FORNPREP:
case LOP_FORNLOOP:
case LOP_FORGPREP:
case LOP_FORGLOOP:
case LOP_FORGPREP_INEXT:
case LOP_FORGLOOP_INEXT:
@ -127,6 +130,20 @@ inline bool isSkipC(LuauOpcode op)
}
}
static int getJumpTarget(uint32_t insn, uint32_t pc)
{
LuauOpcode op = LuauOpcode(LUAU_INSN_OP(insn));
if (isJumpD(op))
return int(pc + LUAU_INSN_D(insn) + 1);
else if (isSkipC(op) && LUAU_INSN_C(insn))
return int(pc + LUAU_INSN_C(insn) + 1);
else if (op == LOP_JUMPX)
return int(pc + LUAU_INSN_E(insn) + 1);
else
return -1;
}
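For example, a JUMPBACK at pc 10 encoded with D = -4 resolves to target 10 + (-4) + 1 = 7; instructions that don't branch return -1, which the dump code below treats as "no label needed" (the concrete pc/offset values here are illustrative).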
bool BytecodeBuilder::StringRef::operator==(const StringRef& other) const
{
return (data && other.data) ? (length == other.length && memcmp(data, other.data, length) == 0) : (data == other.data);
@ -180,8 +197,8 @@ size_t BytecodeBuilder::TableShapeHash::operator()(const TableShape& v) const
BytecodeBuilder::BytecodeBuilder(BytecodeEncoder* encoder)
: constantMap({Constant::Type_Nil, ~0ull})
, tableShapeMap(TableShape())
, protoMap(~0u)
, stringTable({nullptr, 0})
, debugRemarks(~0u)
, encoder(encoder)
{
LUAU_ASSERT(stringTable.find(StringRef{"", 0}) == nullptr);
@ -250,6 +267,7 @@ void BytecodeBuilder::endFunction(uint8_t maxstacksize, uint8_t numupvalues)
constantMap.clear();
tableShapeMap.clear();
protoMap.clear();
debugRemarks.clear();
debugRemarkBuffer.clear();
@ -257,6 +275,8 @@ void BytecodeBuilder::endFunction(uint8_t maxstacksize, uint8_t numupvalues)
void BytecodeBuilder::setMainFunction(uint32_t fid)
{
LUAU_ASSERT(fid < functions.size());
mainFunction = fid;
}
@ -370,11 +390,17 @@ int32_t BytecodeBuilder::addConstantClosure(uint32_t fid)
int16_t BytecodeBuilder::addChildFunction(uint32_t fid)
{
if (FFlag::LuauCompileNestedClosureO2)
if (int16_t* cache = protoMap.find(fid))
return *cache;
uint32_t id = uint32_t(protos.size());
if (id >= kMaxClosureCount)
return -1;
if (FFlag::LuauCompileNestedClosureO2)
protoMap[fid] = int16_t(id);
protos.push_back(fid);
return int16_t(id);
@ -531,7 +557,7 @@ void BytecodeBuilder::addDebugRemark(const char* format, ...)
// we null-terminate all remarks to avoid storing remark length
debugRemarkBuffer += '\0';
debugRemarks[uint32_t(insns.size())] = uint32_t(offset);
debugRemarks.emplace_back(uint32_t(insns.size()), uint32_t(offset));
}
void BytecodeBuilder::finalize()
@ -1268,6 +1294,11 @@ void BytecodeBuilder::validate() const
VJUMP(LUAU_INSN_D(insn));
break;
case LOP_FORGPREP:
VREG(LUAU_INSN_A(insn) + 2 + 1); // forg loop protocol: A, A+1, A+2 are used for iteration protocol; A+3, ... are loop variables
VJUMP(LUAU_INSN_D(insn));
break;
case LOP_FORGLOOP:
VREG(
LUAU_INSN_A(insn) + 2 + insns[i + 1]); // forg loop protocol: A, A+1, A+2 are used for iteration protocol; A+3, ... are loop variables
@ -1391,7 +1422,7 @@ void BytecodeBuilder::validate() const
}
#endif
const uint32_t* BytecodeBuilder::dumpInstruction(const uint32_t* code, std::string& result) const
void BytecodeBuilder::dumpInstruction(const uint32_t* code, std::string& result, int targetLabel) const
{
uint32_t insn = *code++;
@ -1486,39 +1517,39 @@ const uint32_t* BytecodeBuilder::dumpInstruction(const uint32_t* code, std::stri
break;
case LOP_JUMP:
formatAppend(result, "JUMP %+d\n", LUAU_INSN_D(insn));
formatAppend(result, "JUMP L%d\n", targetLabel);
break;
case LOP_JUMPIF:
formatAppend(result, "JUMPIF R%d %+d\n", LUAU_INSN_A(insn), LUAU_INSN_D(insn));
formatAppend(result, "JUMPIF R%d L%d\n", LUAU_INSN_A(insn), targetLabel);
break;
case LOP_JUMPIFNOT:
formatAppend(result, "JUMPIFNOT R%d %+d\n", LUAU_INSN_A(insn), LUAU_INSN_D(insn));
formatAppend(result, "JUMPIFNOT R%d L%d\n", LUAU_INSN_A(insn), targetLabel);
break;
case LOP_JUMPIFEQ:
formatAppend(result, "JUMPIFEQ R%d R%d %+d\n", LUAU_INSN_A(insn), *code++, LUAU_INSN_D(insn));
formatAppend(result, "JUMPIFEQ R%d R%d L%d\n", LUAU_INSN_A(insn), *code++, targetLabel);
break;
case LOP_JUMPIFLE:
formatAppend(result, "JUMPIFLE R%d R%d %+d\n", LUAU_INSN_A(insn), *code++, LUAU_INSN_D(insn));
formatAppend(result, "JUMPIFLE R%d R%d L%d\n", LUAU_INSN_A(insn), *code++, targetLabel);
break;
case LOP_JUMPIFLT:
formatAppend(result, "JUMPIFLT R%d R%d %+d\n", LUAU_INSN_A(insn), *code++, LUAU_INSN_D(insn));
formatAppend(result, "JUMPIFLT R%d R%d L%d\n", LUAU_INSN_A(insn), *code++, targetLabel);
break;
case LOP_JUMPIFNOTEQ:
formatAppend(result, "JUMPIFNOTEQ R%d R%d %+d\n", LUAU_INSN_A(insn), *code++, LUAU_INSN_D(insn));
formatAppend(result, "JUMPIFNOTEQ R%d R%d L%d\n", LUAU_INSN_A(insn), *code++, targetLabel);
break;
case LOP_JUMPIFNOTLE:
formatAppend(result, "JUMPIFNOTLE R%d R%d %+d\n", LUAU_INSN_A(insn), *code++, LUAU_INSN_D(insn));
formatAppend(result, "JUMPIFNOTLE R%d R%d L%d\n", LUAU_INSN_A(insn), *code++, targetLabel);
break;
case LOP_JUMPIFNOTLT:
formatAppend(result, "JUMPIFNOTLT R%d R%d %+d\n", LUAU_INSN_A(insn), *code++, LUAU_INSN_D(insn));
formatAppend(result, "JUMPIFNOTLT R%d R%d L%d\n", LUAU_INSN_A(insn), *code++, targetLabel);
break;
case LOP_ADD:
@ -1614,31 +1645,35 @@ const uint32_t* BytecodeBuilder::dumpInstruction(const uint32_t* code, std::stri
break;
case LOP_FORNPREP:
formatAppend(result, "FORNPREP R%d %+d\n", LUAU_INSN_A(insn), LUAU_INSN_D(insn));
formatAppend(result, "FORNPREP R%d L%d\n", LUAU_INSN_A(insn), targetLabel);
break;
case LOP_FORNLOOP:
formatAppend(result, "FORNLOOP R%d %+d\n", LUAU_INSN_A(insn), LUAU_INSN_D(insn));
formatAppend(result, "FORNLOOP R%d L%d\n", LUAU_INSN_A(insn), targetLabel);
break;
case LOP_FORGPREP:
formatAppend(result, "FORGPREP R%d L%d\n", LUAU_INSN_A(insn), targetLabel);
break;
case LOP_FORGLOOP:
formatAppend(result, "FORGLOOP R%d %+d %d\n", LUAU_INSN_A(insn), LUAU_INSN_D(insn), *code++);
formatAppend(result, "FORGLOOP R%d L%d %d\n", LUAU_INSN_A(insn), targetLabel, *code++);
break;
case LOP_FORGPREP_INEXT:
formatAppend(result, "FORGPREP_INEXT R%d %+d\n", LUAU_INSN_A(insn), LUAU_INSN_D(insn));
formatAppend(result, "FORGPREP_INEXT R%d L%d\n", LUAU_INSN_A(insn), targetLabel);
break;
case LOP_FORGLOOP_INEXT:
formatAppend(result, "FORGLOOP_INEXT R%d %+d\n", LUAU_INSN_A(insn), LUAU_INSN_D(insn));
formatAppend(result, "FORGLOOP_INEXT R%d L%d\n", LUAU_INSN_A(insn), targetLabel);
break;
case LOP_FORGPREP_NEXT:
formatAppend(result, "FORGPREP_NEXT R%d %+d\n", LUAU_INSN_A(insn), LUAU_INSN_D(insn));
formatAppend(result, "FORGPREP_NEXT R%d L%d\n", LUAU_INSN_A(insn), targetLabel);
break;
case LOP_FORGLOOP_NEXT:
formatAppend(result, "FORGLOOP_NEXT R%d %+d\n", LUAU_INSN_A(insn), LUAU_INSN_D(insn));
formatAppend(result, "FORGLOOP_NEXT R%d L%d\n", LUAU_INSN_A(insn), targetLabel);
break;
case LOP_GETVARARGS:
@ -1654,7 +1689,7 @@ const uint32_t* BytecodeBuilder::dumpInstruction(const uint32_t* code, std::stri
break;
case LOP_JUMPBACK:
formatAppend(result, "JUMPBACK %+d\n", LUAU_INSN_D(insn));
formatAppend(result, "JUMPBACK L%d\n", targetLabel);
break;
case LOP_LOADKX:
@ -1662,26 +1697,26 @@ const uint32_t* BytecodeBuilder::dumpInstruction(const uint32_t* code, std::stri
break;
case LOP_JUMPX:
formatAppend(result, "JUMPX %+d\n", LUAU_INSN_E(insn));
formatAppend(result, "JUMPX L%d\n", targetLabel);
break;
case LOP_FASTCALL:
formatAppend(result, "FASTCALL %d %+d\n", LUAU_INSN_A(insn), LUAU_INSN_C(insn));
formatAppend(result, "FASTCALL %d L%d\n", LUAU_INSN_A(insn), targetLabel);
break;
case LOP_FASTCALL1:
formatAppend(result, "FASTCALL1 %d R%d %+d\n", LUAU_INSN_A(insn), LUAU_INSN_B(insn), LUAU_INSN_C(insn));
formatAppend(result, "FASTCALL1 %d R%d L%d\n", LUAU_INSN_A(insn), LUAU_INSN_B(insn), targetLabel);
break;
case LOP_FASTCALL2:
{
uint32_t aux = *code++;
formatAppend(result, "FASTCALL2 %d R%d R%d %+d\n", LUAU_INSN_A(insn), LUAU_INSN_B(insn), aux, LUAU_INSN_C(insn));
formatAppend(result, "FASTCALL2 %d R%d R%d L%d\n", LUAU_INSN_A(insn), LUAU_INSN_B(insn), aux, targetLabel);
break;
}
case LOP_FASTCALL2K:
{
uint32_t aux = *code++;
formatAppend(result, "FASTCALL2K %d R%d K%d %+d\n", LUAU_INSN_A(insn), LUAU_INSN_B(insn), aux, LUAU_INSN_C(insn));
formatAppend(result, "FASTCALL2K %d R%d K%d L%d\n", LUAU_INSN_A(insn), LUAU_INSN_B(insn), aux, targetLabel);
break;
}
@ -1691,23 +1726,24 @@ const uint32_t* BytecodeBuilder::dumpInstruction(const uint32_t* code, std::stri
case LOP_CAPTURE:
formatAppend(result, "CAPTURE %s %c%d\n",
LUAU_INSN_A(insn) == LCT_UPVAL ? "UPVAL" : LUAU_INSN_A(insn) == LCT_REF ? "REF" : LUAU_INSN_A(insn) == LCT_VAL ? "VAL" : "",
LUAU_INSN_A(insn) == LCT_UPVAL ? "UPVAL"
: LUAU_INSN_A(insn) == LCT_REF ? "REF"
: LUAU_INSN_A(insn) == LCT_VAL ? "VAL"
: "",
LUAU_INSN_A(insn) == LCT_UPVAL ? 'U' : 'R', LUAU_INSN_B(insn));
break;
case LOP_JUMPIFEQK:
formatAppend(result, "JUMPIFEQK R%d K%d %+d\n", LUAU_INSN_A(insn), *code++, LUAU_INSN_D(insn));
formatAppend(result, "JUMPIFEQK R%d K%d L%d\n", LUAU_INSN_A(insn), *code++, targetLabel);
break;
case LOP_JUMPIFNOTEQK:
formatAppend(result, "JUMPIFNOTEQK R%d K%d %+d\n", LUAU_INSN_A(insn), *code++, LUAU_INSN_D(insn));
formatAppend(result, "JUMPIFNOTEQK R%d K%d L%d\n", LUAU_INSN_A(insn), *code++, targetLabel);
break;
default:
LUAU_ASSERT(!"Unsupported opcode");
}
return code;
}
std::string BytecodeBuilder::dumpCurrentFunction() const
@ -1715,10 +1751,8 @@ std::string BytecodeBuilder::dumpCurrentFunction() const
if ((dumpFlags & Dump_Code) == 0)
return std::string();
const uint32_t* code = insns.data();
const uint32_t* codeEnd = insns.data() + insns.size();
int lastLine = -1;
size_t nextRemark = 0;
std::string result;
@ -1738,28 +1772,54 @@ std::string BytecodeBuilder::dumpCurrentFunction() const
}
}
while (code != codeEnd)
std::vector<int> labels(insns.size(), -1);
// annotate valid jump targets with 0
for (size_t i = 0; i < insns.size();)
{
int target = getJumpTarget(insns[i], uint32_t(i));
if (target >= 0)
{
LUAU_ASSERT(size_t(target) < insns.size());
labels[target] = 0;
}
i += getOpLength(LuauOpcode(LUAU_INSN_OP(insns[i])));
LUAU_ASSERT(i <= insns.size());
}
int nextLabel = 0;
// compute label ids (sequential integers for all jump targets)
for (size_t i = 0; i < labels.size(); ++i)
if (labels[i] == 0)
labels[i] = nextLabel++;
for (size_t i = 0; i < insns.size();)
{
const uint32_t* code = &insns[i];
uint8_t op = LUAU_INSN_OP(*code);
if (op == LOP_PREPVARARGS)
{
// Don't emit function header in bytecode - it's used for call dispatching and doesn't contain "interesting" information
code++;
i++;
continue;
}
if (dumpFlags & Dump_Remarks)
{
const uint32_t* remark = debugRemarks.find(uint32_t(code - insns.data()));
if (remark)
formatAppend(result, "REMARK %s\n", debugRemarkBuffer.c_str() + *remark);
while (nextRemark < debugRemarks.size() && debugRemarks[nextRemark].first == i)
{
formatAppend(result, "REMARK %s\n", debugRemarkBuffer.c_str() + debugRemarks[nextRemark].second);
nextRemark++;
}
}
if (dumpFlags & Dump_Source)
{
int line = lines[code - insns.data()];
int line = lines[i];
if (line > 0 && line != lastLine)
{
@ -1770,11 +1830,17 @@ std::string BytecodeBuilder::dumpCurrentFunction() const
}
if (dumpFlags & Dump_Lines)
{
formatAppend(result, "%d: ", lines[code - insns.data()]);
}
formatAppend(result, "%d: ", lines[i]);
code = dumpInstruction(code, result);
if (labels[i] != -1)
formatAppend(result, "L%d: ", labels[i]);
int target = getJumpTarget(*code, uint32_t(i));
dumpInstruction(code, result, target >= 0 ? labels[target] : -1);
i += getOpLength(LuauOpcode(op));
LUAU_ASSERT(i <= insns.size());
}
return result;
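The practical effect on dumps is that relative offsets become symbolic labels: where the old output printed, say, JUMPIFNOT R1 +3 or JUMPBACK -5, the new output prints JUMPIFNOT R1 L1 and JUMPBACK L0, with L0:/L1: prefixes emitted on the target lines (the concrete label numbers here are illustrative, not taken from this commit).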
@ -1784,11 +1850,11 @@ void BytecodeBuilder::setDumpSource(const std::string& source)
{
dumpSource.clear();
std::string::size_type pos = 0;
size_t pos = 0;
while (pos != std::string::npos)
{
std::string::size_type next = source.find('\n', pos);
size_t next = source.find('\n', pos);
if (next == std::string::npos)
{

File diff suppressed because it is too large.

View file

@ -193,12 +193,16 @@ struct ConstantVisitor : AstVisitor
DenseHashMap<AstLocal*, Variable>& variables;
DenseHashMap<AstLocal*, Constant>& locals;
bool wasEmpty = false;
ConstantVisitor(
DenseHashMap<AstExpr*, Constant>& constants, DenseHashMap<AstLocal*, Variable>& variables, DenseHashMap<AstLocal*, Constant>& locals)
: constants(constants)
, variables(variables)
, locals(locals)
{
// since we do a single pass over the tree, if the initial state was empty we don't need to clear out old entries
wasEmpty = constants.empty() && locals.empty();
}
Constant analyze(AstExpr* node)
@ -314,12 +318,35 @@ struct ConstantVisitor : AstVisitor
LUAU_ASSERT(!"Unknown expression type");
}
if (result.type != Constant::Type_Unknown)
constants[node] = result;
recordConstant(constants, node, result);
return result;
}
template<typename T>
void recordConstant(DenseHashMap<T, Constant>& map, T key, const Constant& value)
{
if (value.type != Constant::Type_Unknown)
map[key] = value;
else if (wasEmpty)
;
else if (Constant* old = map.find(key))
old->type = Constant::Type_Unknown;
}
void recordValue(AstLocal* local, const Constant& value)
{
// note: we rely on trackValues to have been run before us
Variable* v = variables.find(local);
LUAU_ASSERT(v);
if (!v->written)
{
v->constant = (value.type != Constant::Type_Unknown);
recordConstant(locals, local, value);
}
}
bool visit(AstExpr* node) override
{
// note: we short-circuit the visitor traversal through any expression trees by returning false
@ -336,18 +363,7 @@ struct ConstantVisitor : AstVisitor
{
Constant arg = analyze(node->values.data[i]);
if (arg.type != Constant::Type_Unknown)
{
// note: we rely on trackValues to have been run before us
Variable* v = variables.find(node->vars.data[i]);
LUAU_ASSERT(v);
if (!v->written)
{
locals[node->vars.data[i]] = arg;
v->constant = true;
}
}
recordValue(node->vars.data[i], arg);
}
if (node->vars.size > node->values.size)
@ -361,15 +377,8 @@ struct ConstantVisitor : AstVisitor
{
for (size_t i = node->values.size; i < node->vars.size; ++i)
{
// note: we rely on trackValues to have been run before us
Variable* v = variables.find(node->vars.data[i]);
LUAU_ASSERT(v);
if (!v->written)
{
locals[node->vars.data[i]].type = Constant::Type_Nil;
v->constant = true;
}
Constant nil = {Constant::Type_Nil};
recordValue(node->vars.data[i], nil);
}
}
}

View file

@ -4,6 +4,8 @@
#include "Luau/Common.h"
#include "Luau/DenseHash.h"
#include <limits.h>
namespace Luau
{
namespace Compile
@ -11,10 +13,49 @@ namespace Compile
inline uint64_t parallelAddSat(uint64_t x, uint64_t y)
{
uint64_t s = x + y;
uint64_t m = s & 0x8080808080808080ull; // saturation mask
uint64_t r = x + y;
uint64_t s = r & 0x8080808080808080ull; // saturation mask
return (s ^ m) | (m - (m >> 7));
return (r ^ s) | (s - (s >> 7));
}
static uint64_t parallelMulSat(uint64_t a, int b)
{
int bs = (b < 127) ? b : 127;
// multiply every other value by b, yielding 14-bit products
uint64_t l = bs * ((a >> 0) & 0x007f007f007f007full);
uint64_t h = bs * ((a >> 8) & 0x007f007f007f007full);
// each product is 14-bit, so adding 32768-128 sets high bit iff the sum is 128 or larger without an overflow
uint64_t ls = l + 0x7f807f807f807f80ull;
uint64_t hs = h + 0x7f807f807f807f80ull;
// we now merge saturation bits as well as low 7-bits of each product into one
uint64_t s = (hs & 0x8000800080008000ull) | ((ls & 0x8000800080008000ull) >> 8);
uint64_t r = ((h & 0x007f007f007f007full) << 8) | (l & 0x007f007f007f007full);
// the low bits are now correct for values that didn't saturate, and we simply need to mask them if high bit is 1
return r | (s - (s >> 7));
}
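As a sanity check on the lane arithmetic, here is a standalone mirror of the saturating add (illustrative test code, not part of the commit; the originals are internal to this translation unit):

#include <cassert>
#include <cstdint>

static uint64_t addSat(uint64_t x, uint64_t y) // same math as parallelAddSat above
{
    uint64_t r = x + y;
    uint64_t s = r & 0x8080808080808080ull;
    return (r ^ s) | (s - (s >> 7));
}

int main()
{
    assert(addSat(3, 4) == 7);                // no lane saturates
    assert(addSat(100, 50) == 127);           // 150 clamps to the 7-bit lane maximum
    assert(addSat(0x0102, 0x0203) == 0x0305); // lanes stay independent
    return 0;
}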
inline bool getNumber(AstExpr* node, double& result)
{
// since constant model doesn't use constant folding atm, we perform the basic extraction that's sufficient to handle positive/negative literals
if (AstExprConstantNumber* ne = node->as<AstExprConstantNumber>())
{
result = ne->value;
return true;
}
if (AstExprUnary* ue = node->as<AstExprUnary>(); ue && ue->op == AstExprUnary::Minus)
if (AstExprConstantNumber* ne = ue->expr->as<AstExprConstantNumber>())
{
result = -ne->value;
return true;
}
return false;
}
struct Cost
@ -46,6 +87,13 @@ struct Cost
return *this;
}
Cost operator*(int other) const
{
Cost result;
result.model = parallelMulSat(model, other);
return result;
}
static Cost fold(const Cost& x, const Cost& y)
{
uint64_t newmodel = parallelAddSat(x.model, y.model);
@ -173,6 +221,16 @@ struct CostVisitor : AstVisitor
*i = 0;
}
void loop(AstStatBlock* body, Cost iterCost, int factor = 3)
{
Cost before = result;
result = Cost();
body->visit(this);
result = before + (result + iterCost) * factor;
}
bool visit(AstExpr* node) override
{
// note: we short-circuit the visitor traversal through any expression trees by returning false
@ -182,12 +240,52 @@ struct CostVisitor : AstVisitor
return false;
}
bool visit(AstStatFor* node) override
{
result += model(node->from);
result += model(node->to);
if (node->step)
result += model(node->step);
int tripCount = -1;
double from, to, step = 1;
if (getNumber(node->from, from) && getNumber(node->to, to) && (!node->step || getNumber(node->step, step)))
tripCount = getTripCount(from, to, step);
loop(node->body, 1, tripCount < 0 ? 3 : tripCount);
return false;
}
bool visit(AstStatForIn* node) override
{
for (size_t i = 0; i < node->values.size; ++i)
result += model(node->values.data[i]);
loop(node->body, 1);
return false;
}
bool visit(AstStatWhile* node) override
{
Cost condition = model(node->condition);
loop(node->body, condition);
return false;
}
bool visit(AstStatRepeat* node) override
{
Cost condition = model(node->condition);
loop(node->body, condition);
return false;
}
bool visit(AstStat* node) override
{
if (node->is<AstStatIf>())
result += 2;
else if (node->is<AstStatWhile>() || node->is<AstStatRepeat>() || node->is<AstStatFor>() || node->is<AstStatForIn>())
result += 2;
else if (node->is<AstStatBreak>() || node->is<AstStatContinue>())
result += 1;
@ -249,10 +347,26 @@ int computeCost(uint64_t model, const bool* varsConst, size_t varCount)
return cost;
for (size_t i = 0; i < varCount && i < 7; ++i)
cost -= int((model >> (8 * i + 8)) & 0x7f) * varsConst[i];
cost -= int((model >> (i * 8 + 8)) & 0x7f) * varsConst[i];
return cost;
}
int getTripCount(double from, double to, double step)
{
// we compute trip count in integers because that way we know that the loop math (repeated addition) is precise
int fromi = (from >= -32767 && from <= 32767 && double(int(from)) == from) ? int(from) : INT_MIN;
int toi = (to >= -32767 && to <= 32767 && double(int(to)) == to) ? int(to) : INT_MIN;
int stepi = (step >= -32767 && step <= 32767 && double(int(step)) == step) ? int(step) : INT_MIN;
if (fromi == INT_MIN || toi == INT_MIN || stepi == INT_MIN || stepi == 0)
return -1;
if ((stepi < 0 && toi > fromi) || (stepi > 0 && toi < fromi))
return 0;
return (toi - fromi) / stepi + 1;
}
} // namespace Compile
} // namespace Luau
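getTripCount is declared in the CostModel header (see the next diff), so its behaviour can be pinned down with a few worked cases. A minimal sketch, assuming the Compiler library is linked in (the forward declaration mirrors the one added to the header):

#include <cassert>

namespace Luau { namespace Compile { int getTripCount(double from, double to, double step); } }

int main()
{
    using Luau::Compile::getTripCount;
    assert(getTripCount(1, 10, 1) == 10);  // for i=1,10 runs 10 times
    assert(getTripCount(10, 1, -2) == 5);  // for i=10,1,-2 visits 10,8,6,4,2
    assert(getTripCount(1, 10, 0) == -1);  // zero step: unknown
    assert(getTripCount(1, 1e6, 1) == -1); // bounds outside +/-32767: unknown
    return 0;
}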

View file

@ -14,5 +14,8 @@ uint64_t modelCost(AstNode* root, AstLocal* const* vars, size_t varCount);
// cost is computed as B - sum(Di * Ci), where B is baseline cost, Di is the discount for each variable and Ci is 1 when variable #i is constant
int computeCost(uint64_t model, const bool* varsConst, size_t varCount);
// get loop trip count or -1 if we can't compute it precisely
int getTripCount(double from, double to, double step);
} // namespace Compile
} // namespace Luau

View file

@ -19,6 +19,10 @@ ANALYSIS_SOURCES=$(wildcard Analysis/src/*.cpp)
ANALYSIS_OBJECTS=$(ANALYSIS_SOURCES:%=$(BUILD)/%.o)
ANALYSIS_TARGET=$(BUILD)/libluauanalysis.a
CODEGEN_SOURCES=$(wildcard CodeGen/src/*.cpp)
CODEGEN_OBJECTS=$(CODEGEN_SOURCES:%=$(BUILD)/%.o)
CODEGEN_TARGET=$(BUILD)/libluaucodegen.a
VM_SOURCES=$(wildcard VM/src/*.cpp)
VM_OBJECTS=$(VM_SOURCES:%=$(BUILD)/%.o)
VM_TARGET=$(BUILD)/libluauvm.a
@ -47,7 +51,7 @@ ifneq ($(flags),)
TESTS_ARGS+=--fflags=$(flags)
endif
OBJECTS=$(AST_OBJECTS) $(COMPILER_OBJECTS) $(ANALYSIS_OBJECTS) $(VM_OBJECTS) $(ISOCLINE_OBJECTS) $(TESTS_OBJECTS) $(CLI_OBJECTS) $(FUZZ_OBJECTS)
OBJECTS=$(AST_OBJECTS) $(COMPILER_OBJECTS) $(ANALYSIS_OBJECTS) $(CODEGEN_OBJECTS) $(VM_OBJECTS) $(ISOCLINE_OBJECTS) $(TESTS_OBJECTS) $(CLI_OBJECTS) $(FUZZ_OBJECTS)
# common flags
CXXFLAGS=-g -Wall
@ -90,15 +94,16 @@ ifeq ($(config),fuzz)
endif
# target-specific flags
$(AST_OBJECTS): CXXFLAGS+=-std=c++17 -IAst/include
$(COMPILER_OBJECTS): CXXFLAGS+=-std=c++17 -ICompiler/include -IAst/include
$(ANALYSIS_OBJECTS): CXXFLAGS+=-std=c++17 -IAst/include -IAnalysis/include
$(VM_OBJECTS): CXXFLAGS+=-std=c++11 -IVM/include
$(AST_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include
$(COMPILER_OBJECTS): CXXFLAGS+=-std=c++17 -ICompiler/include -ICommon/include -IAst/include
$(ANALYSIS_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include -IAnalysis/include
$(CODEGEN_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -ICodeGen/include
$(VM_OBJECTS): CXXFLAGS+=-std=c++11 -ICommon/include -IVM/include
$(ISOCLINE_OBJECTS): CXXFLAGS+=-Wno-unused-function -Iextern/isocline/include
$(TESTS_OBJECTS): CXXFLAGS+=-std=c++17 -IAst/include -ICompiler/include -IAnalysis/include -IVM/include -ICLI -Iextern
$(REPL_CLI_OBJECTS): CXXFLAGS+=-std=c++17 -IAst/include -ICompiler/include -IVM/include -Iextern -Iextern/isocline/include
$(ANALYZE_CLI_OBJECTS): CXXFLAGS+=-std=c++17 -IAst/include -IAnalysis/include -Iextern
$(FUZZ_OBJECTS): CXXFLAGS+=-std=c++17 -IAst/include -ICompiler/include -IAnalysis/include -IVM/include
$(TESTS_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include -ICompiler/include -IAnalysis/include -ICodeGen/include -IVM/include -ICLI -Iextern
$(REPL_CLI_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include -ICompiler/include -IVM/include -Iextern -Iextern/isocline/include
$(ANALYZE_CLI_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include -IAnalysis/include -Iextern
$(FUZZ_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include -ICompiler/include -IAnalysis/include -IVM/include
$(TESTS_TARGET): LDFLAGS+=-lpthread
$(REPL_CLI_TARGET): LDFLAGS+=-lpthread
@ -126,7 +131,7 @@ coverage: $(TESTS_TARGET)
llvm-cov export -ignore-filename-regex=\(tests\|extern\|CLI\)/.* -format lcov --instr-profile default.profdata build/coverage/luau-tests >coverage.info
format:
find . -name '*.h' -or -name '*.cpp' | xargs clang-format -i
find . -name '*.h' -or -name '*.cpp' | xargs clang-format-11 -i
luau-size: luau
nm --print-size --demangle luau | grep ' t void luau_execute<false>' | awk -F ' ' '{sum += strtonum("0x" $$2)} END {print sum " interpreter" }'
@ -140,7 +145,7 @@ luau-analyze: $(ANALYZE_CLI_TARGET)
ln -fs $^ $@
# executable targets
$(TESTS_TARGET): $(TESTS_OBJECTS) $(ANALYSIS_TARGET) $(COMPILER_TARGET) $(AST_TARGET) $(VM_TARGET) $(ISOCLINE_TARGET)
$(TESTS_TARGET): $(TESTS_OBJECTS) $(ANALYSIS_TARGET) $(COMPILER_TARGET) $(AST_TARGET) $(CODEGEN_TARGET) $(VM_TARGET) $(ISOCLINE_TARGET)
$(REPL_CLI_TARGET): $(REPL_CLI_OBJECTS) $(COMPILER_TARGET) $(AST_TARGET) $(VM_TARGET) $(ISOCLINE_TARGET)
$(ANALYZE_CLI_TARGET): $(ANALYZE_CLI_OBJECTS) $(ANALYSIS_TARGET) $(AST_TARGET)
@ -158,10 +163,11 @@ fuzz-prototest: $(BUILD)/fuzz/prototest.cpp.o $(BUILD)/fuzz/protoprint.cpp.o $(B
$(AST_TARGET): $(AST_OBJECTS)
$(COMPILER_TARGET): $(COMPILER_OBJECTS)
$(ANALYSIS_TARGET): $(ANALYSIS_OBJECTS)
$(CODEGEN_TARGET): $(CODEGEN_OBJECTS)
$(VM_TARGET): $(VM_OBJECTS)
$(ISOCLINE_TARGET): $(ISOCLINE_OBJECTS)
$(AST_TARGET) $(COMPILER_TARGET) $(ANALYSIS_TARGET) $(VM_TARGET) $(ISOCLINE_TARGET):
$(AST_TARGET) $(COMPILER_TARGET) $(ANALYSIS_TARGET) $(CODEGEN_TARGET) $(VM_TARGET) $(ISOCLINE_TARGET):
ar rcs $@ $^
# object file targets

View file

@ -1,7 +1,15 @@
# Luau.Common Sources
# Note: Until 3.19, INTERFACE targets couldn't have SOURCES property set
if(NOT ${CMAKE_VERSION} VERSION_LESS "3.19")
target_sources(Luau.Common PRIVATE
Common/include/Luau/Common.h
Common/include/Luau/Bytecode.h
)
endif()
# Luau.Ast Sources
target_sources(Luau.Ast PRIVATE
Ast/include/Luau/Ast.h
Ast/include/Luau/Common.h
Ast/include/Luau/Confusables.h
Ast/include/Luau/DenseHash.h
Ast/include/Luau/Lexer.h
@ -23,7 +31,6 @@ target_sources(Luau.Ast PRIVATE
# Luau.Compiler Sources
target_sources(Luau.Compiler PRIVATE
Compiler/include/Luau/Bytecode.h
Compiler/include/Luau/BytecodeBuilder.h
Compiler/include/Luau/Compiler.h
Compiler/include/luacode.h
@ -43,6 +50,17 @@ target_sources(Luau.Compiler PRIVATE
Compiler/src/ValueTracking.h
)
# Luau.CodeGen Sources
target_sources(Luau.CodeGen PRIVATE
CodeGen/include/Luau/AssemblyBuilderX64.h
CodeGen/include/Luau/Condition.h
CodeGen/include/Luau/Label.h
CodeGen/include/Luau/OperandX64.h
CodeGen/include/Luau/RegisterX64.h
CodeGen/src/AssemblyBuilderX64.cpp
)
# Luau.Analysis Sources
target_sources(Luau.Analysis PRIVATE
Analysis/include/Luau/AstQuery.h
@ -54,6 +72,7 @@ target_sources(Luau.Analysis PRIVATE
Analysis/include/Luau/Error.h
Analysis/include/Luau/FileResolver.h
Analysis/include/Luau/Frontend.h
Analysis/include/Luau/Instantiation.h
Analysis/include/Luau/IostreamHelpers.h
Analysis/include/Luau/JsonEncoder.h
Analysis/include/Luau/Linter.h
@ -73,6 +92,7 @@ target_sources(Luau.Analysis PRIVATE
Analysis/include/Luau/ToString.h
Analysis/include/Luau/Transpiler.h
Analysis/include/Luau/TxnLog.h
Analysis/include/Luau/TypeArena.h
Analysis/include/Luau/TypeAttach.h
Analysis/include/Luau/TypedAllocator.h
Analysis/include/Luau/TypeInfer.h
@ -92,6 +112,7 @@ target_sources(Luau.Analysis PRIVATE
Analysis/src/Clone.cpp
Analysis/src/Error.cpp
Analysis/src/Frontend.cpp
Analysis/src/Instantiation.cpp
Analysis/src/IostreamHelpers.cpp
Analysis/src/JsonEncoder.cpp
Analysis/src/Linter.cpp
@ -108,6 +129,7 @@ target_sources(Luau.Analysis PRIVATE
Analysis/src/ToString.cpp
Analysis/src/Transpiler.cpp
Analysis/src/TxnLog.cpp
Analysis/src/TypeArena.cpp
Analysis/src/TypeAttach.cpp
Analysis/src/TypedAllocator.cpp
Analysis/src/TypeInfer.cpp
@ -220,8 +242,8 @@ if(TARGET Luau.UnitTest)
tests/Autocomplete.test.cpp
tests/BuiltinDefinitions.test.cpp
tests/Compiler.test.cpp
tests/CostModel.test.cpp
tests/Config.test.cpp
tests/CostModel.test.cpp
tests/Error.test.cpp
tests/Frontend.test.cpp
tests/JsonEncoder.test.cpp
@ -232,6 +254,7 @@ if(TARGET Luau.UnitTest)
tests/Normalize.test.cpp
tests/Parser.test.cpp
tests/RequireTracer.test.cpp
tests/RuntimeLimits.test.cpp
tests/StringUtils.test.cpp
tests/Symbol.test.cpp
tests/ToDot.test.cpp
@ -263,6 +286,8 @@ if(TARGET Luau.UnitTest)
tests/TypePack.test.cpp
tests/TypeVar.test.cpp
tests/Variant.test.cpp
tests/VisitTypeVar.test.cpp
tests/AssemblyBuilderX64.test.cpp
tests/main.cpp)
endif()

View file

@ -148,6 +148,7 @@ LUA_API const char* lua_tostringatom(lua_State* L, int idx, int* atom);
LUA_API const char* lua_namecallatom(lua_State* L, int* atom);
LUA_API int lua_objlen(lua_State* L, int idx);
LUA_API lua_CFunction lua_tocfunction(lua_State* L, int idx);
LUA_API void* lua_tolightuserdata(lua_State* L, int idx);
LUA_API void* lua_touserdata(lua_State* L, int idx);
LUA_API void* lua_touserdatatagged(lua_State* L, int idx, int tag);
LUA_API int lua_userdatatag(lua_State* L, int idx);
@ -299,7 +300,7 @@ LUA_API uintptr_t lua_encodepointer(lua_State* L, uintptr_t p);
LUA_API double lua_clock();
LUA_API void lua_setuserdatadtor(lua_State* L, int tag, void (*dtor)(void*));
LUA_API void lua_setuserdatadtor(lua_State* L, int tag, void (*dtor)(lua_State*, void*));
LUA_API void lua_clonefunction(lua_State* L, int idx);
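Two C API adjustments surface here: lua_tolightuserdata only succeeds for light userdata (unlike lua_touserdata, which accepts both flavours), and per-tag userdata destructors now receive the lua_State. A minimal registration sketch (hypothetical tag value and callbacks, not from the commit):

#include "lua.h"

static void closeHandle(lua_State* L, void* data)
{
    // per-tag cleanup now has access to the lua_State as well as the payload
}

static void registerHandleTag(lua_State* L)
{
    lua_setuserdatadtor(L, /* tag */ 1, closeHandle); // tag must be < LUA_UTAG_LIMIT
}

static void* getLightPointer(lua_State* L)
{
    return lua_tolightuserdata(L, -1); // NULL unless the value at -1 is a light userdata
}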

View file

@ -478,18 +478,21 @@ lua_CFunction lua_tocfunction(lua_State* L, int idx)
return (!iscfunction(o)) ? NULL : cast_to(lua_CFunction, clvalue(o)->c.f);
}
void* lua_tolightuserdata(lua_State* L, int idx)
{
StkId o = index2addr(L, idx);
return (!ttislightuserdata(o)) ? NULL : pvalue(o);
}
void* lua_touserdata(lua_State* L, int idx)
{
StkId o = index2addr(L, idx);
switch (ttype(o))
{
case LUA_TUSERDATA:
return uvalue(o)->data;
case LUA_TLIGHTUSERDATA:
return pvalue(o);
default:
return NULL;
}
if (ttisuserdata(o))
return uvalue(o)->data;
else if (ttislightuserdata(o))
return pvalue(o);
else
return NULL;
}
void* lua_touserdatatagged(lua_State* L, int idx, int tag)
@ -524,8 +527,9 @@ const void* lua_topointer(lua_State* L, int idx)
case LUA_TTHREAD:
return thvalue(o);
case LUA_TUSERDATA:
return uvalue(o)->data;
case LUA_TLIGHTUSERDATA:
return lua_touserdata(L, idx);
return pvalue(o);
default:
return NULL;
}
@ -1270,7 +1274,7 @@ const char* lua_setupvalue(lua_State* L, int funcindex, int n)
L->top--;
setobj(L, val, L->top);
luaC_barrier(L, clvalue(fi), L->top);
luaC_upvalbarrier(L, NULL, val);
luaC_upvalbarrier(L, cast_to(UpVal*, NULL), val);
}
return name;
}
@ -1323,7 +1327,7 @@ void lua_unref(lua_State* L, int ref)
return;
}
void lua_setuserdatadtor(lua_State* L, int tag, void (*dtor)(void*))
void lua_setuserdatadtor(lua_State* L, int tag, void (*dtor)(lua_State*, void*))
{
api_check(L, unsigned(tag) < LUA_UTAG_LIMIT);
L->global->udatagc[tag] = dtor;

View file

@ -1003,7 +1003,7 @@ static int luauF_tunpack(lua_State* L, StkId res, TValue* arg0, int nresults, St
else if (nparams == 3 && ttisnumber(args) && ttisnumber(args + 1) && nvalue(args) == 1.0)
n = int(nvalue(args + 1));
if (n >= 0 && n <= t->sizearray && cast_int(L->stack_last - res) >= n)
if (n >= 0 && n <= t->sizearray && cast_int(L->stack_last - res) >= n && n + nparams <= LUAI_MAXCSTACK)
{
TValue* array = t->array;
for (int i = 0; i < n; ++i)

View file

@ -3,7 +3,4 @@
#pragma once
// This is a forwarding header for Luau bytecode definition
// Luau consists of several components, including compiler (Ast, Compiler) and VM (virtual machine)
// These components are fully independent, but they both need the bytecode format defined in this header
// so it needs to be shared.
#include "../../Compiler/include/Luau/Bytecode.h"
#include "Luau/Bytecode.h"

View file

@ -7,11 +7,7 @@
#include "luaconf.h"
// This is a forwarding header for Luau common definition (assertions, flags)
// Luau consists of several components, including compiler (Ast, Compiler) and VM (virtual machine)
// These components are fully independent, but they need a common set of utilities defined in this header
// so it needs to be shared.
#include "../../Ast/include/Luau/Common.h"
#include "Luau/Common.h"
typedef LUAI_USER_ALIGNMENT_T L_Umaxalign;

View file

@ -213,6 +213,14 @@ CallInfo* luaD_growCI(lua_State* L)
return ++L->ci;
}
void luaD_checkCstack(lua_State *L)
{
if (L->nCcalls == LUAI_MAXCCALLS)
luaG_runerror(L, "C stack overflow");
else if (L->nCcalls >= (LUAI_MAXCCALLS + (LUAI_MAXCCALLS >> 3)))
luaD_throw(L, LUA_ERRERR); /* error while handling stack error */
}
/*
** Call a function (C or Lua). The function to be called is at *func.
** The arguments are on the stack, right after the function.
@ -222,12 +230,8 @@ CallInfo* luaD_growCI(lua_State* L)
void luaD_call(lua_State* L, StkId func, int nResults)
{
if (++L->nCcalls >= LUAI_MAXCCALLS)
{
if (L->nCcalls == LUAI_MAXCCALLS)
luaG_runerror(L, "C stack overflow");
else if (L->nCcalls >= (LUAI_MAXCCALLS + (LUAI_MAXCCALLS >> 3)))
luaD_throw(L, LUA_ERRERR); /* error while handing stack error */
}
luaD_checkCstack(L);
if (luau_precall(L, func, nResults) == PCRLUA)
{ /* is a Lua function? */
L->ci->flags |= LUA_CALLINFO_RETURN; /* luau_execute will stop after returning from the stack frame */
@ -241,6 +245,7 @@ void luaD_call(lua_State* L, StkId func, int nResults)
if (!oldactive)
resetbit(L->stackstate, THREAD_ACTIVEBIT);
}
L->nCcalls--;
luaC_checkGC(L);
}

View file

@ -49,6 +49,7 @@ LUAI_FUNC int luaD_pcall(lua_State* L, Pfunc func, void* u, ptrdiff_t oldtop, pt
LUAI_FUNC void luaD_reallocCI(lua_State* L, int newsize);
LUAI_FUNC void luaD_reallocstack(lua_State* L, int newsize);
LUAI_FUNC void luaD_growstack(lua_State* L, int n);
LUAI_FUNC void luaD_checkCstack(lua_State* L);
LUAI_FUNC l_noret luaD_throw(lua_State* L, int errcode);
LUAI_FUNC int luaD_rawrunprotected(lua_State* L, Pfunc f, void* ud);

View file

@ -120,7 +120,7 @@
#define luaC_upvalbarrier(L, uv, tv) \
{ \
if (iscollectable(tv) && iswhite(gcvalue(tv)) && (!(uv) || ((UpVal*)uv)->v != &((UpVal*)uv)->u.value)) \
if (iscollectable(tv) && iswhite(gcvalue(tv)) && (!(uv) || (uv)->v != &(uv)->u.value)) \
luaC_barrierupval(L, gcvalue(tv)); \
}

View file

@ -200,7 +200,7 @@ typedef struct global_State
uint64_t rngstate; /* PCG random number generator state */
uint64_t ptrenckey[4]; /* pointer encoding key for display */
void (*udatagc[LUA_UTAG_LIMIT])(void*); /* for each userdata tag, a gc callback to be called immediately before freeing memory */
void (*udatagc[LUA_UTAG_LIMIT])(lua_State*, void*); /* for each userdata tag, a gc callback to be called immediately before freeing memory */
lua_Callbacks cb;

View file

@ -33,9 +33,6 @@
#include <string.h>
LUAU_FASTFLAGVARIABLE(LuauTableRehashRework, false)
LUAU_FASTFLAGVARIABLE(LuauTableNewBoundary2, false)
// max size of both array and hash part is 2^MAXBITS
#define MAXBITS 26
#define MAXSIZE (1 << MAXBITS)
@ -400,16 +397,9 @@ static void resize(lua_State* L, Table* t, int nasize, int nhsize)
{
if (!ttisnil(&t->array[i]))
{
if (FFlag::LuauTableRehashRework)
{
TValue ok;
setnvalue(&ok, cast_num(i + 1));
setobjt2t(L, newkey(L, t, &ok), &t->array[i]);
}
else
{
setobjt2t(L, luaH_setnum(L, t, i + 1), &t->array[i]);
}
TValue ok;
setnvalue(&ok, cast_num(i + 1));
setobjt2t(L, newkey(L, t, &ok), &t->array[i]);
}
}
/* shrink array */
@ -418,30 +408,14 @@ static void resize(lua_State* L, Table* t, int nasize, int nhsize)
/* used for the migration check at the end */
TValue* anew = t->array;
/* re-insert elements from hash part */
if (FFlag::LuauTableRehashRework)
for (int i = twoto(oldhsize) - 1; i >= 0; i--)
{
for (int i = twoto(oldhsize) - 1; i >= 0; i--)
LuaNode* old = nold + i;
if (!ttisnil(gval(old)))
{
LuaNode* old = nold + i;
if (!ttisnil(gval(old)))
{
TValue ok;
getnodekey(L, &ok, old);
setobjt2t(L, arrayornewkey(L, t, &ok), gval(old));
}
}
}
else
{
for (int i = twoto(oldhsize) - 1; i >= 0; i--)
{
LuaNode* old = nold + i;
if (!ttisnil(gval(old)))
{
TValue ok;
getnodekey(L, &ok, old);
setobjt2t(L, luaH_set(L, t, &ok), gval(old));
}
TValue ok;
getnodekey(L, &ok, old);
setobjt2t(L, arrayornewkey(L, t, &ok), gval(old));
}
}
@ -455,7 +429,6 @@ static void resize(lua_State* L, Table* t, int nasize, int nhsize)
static int adjustasize(Table* t, int size, const TValue* ek)
{
LUAU_ASSERT(FFlag::LuauTableNewBoundary2);
bool tbound = t->node != dummynode || size < t->sizearray;
int ekindex = ek && ttisnumber(ek) ? arrayindex(nvalue(ek)) : -1;
/* move the array size up until the boundary is guaranteed to be inside the array part */
@ -467,7 +440,7 @@ static int adjustasize(Table* t, int size, const TValue* ek)
void luaH_resizearray(lua_State* L, Table* t, int nasize)
{
int nsize = (t->node == dummynode) ? 0 : sizenode(t);
int asize = FFlag::LuauTableNewBoundary2 ? adjustasize(t, nasize, NULL) : nasize;
int asize = adjustasize(t, nasize, NULL);
resize(L, t, asize, nsize);
}
@ -492,8 +465,7 @@ static void rehash(lua_State* L, Table* t, const TValue* ek)
int na = computesizes(nums, &nasize);
int nh = totaluse - na;
/* enforce the boundary invariant; for performance, only do hash lookups if we must */
if (FFlag::LuauTableNewBoundary2)
nasize = adjustasize(t, nasize, ek);
nasize = adjustasize(t, nasize, ek);
/* resize the table to new computed sizes */
resize(L, t, nasize, nh);
}
@ -555,11 +527,11 @@ static LuaNode* getfreepos(Table* t)
static TValue* newkey(lua_State* L, Table* t, const TValue* key)
{
/* enforce boundary invariant */
if (FFlag::LuauTableNewBoundary2 && ttisnumber(key) && nvalue(key) == t->sizearray + 1)
if (ttisnumber(key) && nvalue(key) == t->sizearray + 1)
{
rehash(L, t, key); /* grow table */
// after rehash, numeric keys might be located in the new array part, but won't be found in the node part
/* after rehash, numeric keys might be located in the new array part, but won't be found in the node part */
return arrayornewkey(L, t, key);
}
@ -571,15 +543,8 @@ static TValue* newkey(lua_State* L, Table* t, const TValue* key)
{ /* cannot find a free place? */
rehash(L, t, key); /* grow table */
if (!FFlag::LuauTableRehashRework)
{
return luaH_set(L, t, key); /* re-insert key into grown table */
}
else
{
// after rehash, numeric keys might be located in the new array part, but won't be found in the node part
return arrayornewkey(L, t, key);
}
/* after rehash, numeric keys might be located in the new array part, but won't be found in the node part */
return arrayornewkey(L, t, key);
}
LUAU_ASSERT(n != dummynode);
TValue mk;
@ -744,37 +709,6 @@ TValue* luaH_setstr(lua_State* L, Table* t, TString* key)
}
}
static LUAU_NOINLINE int unbound_search(Table* t, unsigned int j)
{
LUAU_ASSERT(!FFlag::LuauTableNewBoundary2);
unsigned int i = j; /* i is zero or a present index */
j++;
/* find `i' and `j' such that i is present and j is not */
while (!ttisnil(luaH_getnum(t, j)))
{
i = j;
j *= 2;
if (j > cast_to(unsigned int, INT_MAX))
{ /* overflow? */
/* table was built with bad purposes: resort to linear search */
i = 1;
while (!ttisnil(luaH_getnum(t, i)))
i++;
return i - 1;
}
}
/* now do a binary search between them */
while (j - i > 1)
{
unsigned int m = (i + j) / 2;
if (ttisnil(luaH_getnum(t, m)))
j = m;
else
i = m;
}
return i;
}
static int updateaboundary(Table* t, int boundary)
{
if (boundary < t->sizearray && ttisnil(&t->array[boundary - 1]))
@ -831,17 +765,12 @@ int luaH_getn(Table* t)
maybesetaboundary(t, boundary);
return boundary;
}
else if (FFlag::LuauTableNewBoundary2)
else
{
/* validate boundary invariant */
LUAU_ASSERT(t->node == dummynode || ttisnil(luaH_getnum(t, j + 1)));
return j;
}
/* else must find a boundary in hash part */
else if (t->node == dummynode) /* hash part is empty? */
return j; /* that is easy... */
else
return unbound_search(t, j);
}
Table* luaH_clone(lua_State* L, Table* tt)

View file

@ -10,10 +10,6 @@
#include "ldebug.h"
#include "lvm.h"
LUAU_DYNAMIC_FASTFLAGVARIABLE(LuauTableMoveTelemetry2, false)
void (*lua_table_move_telemetry)(lua_State* L, int f, int e, int t, int nf, int nt);
static int foreachi(lua_State* L)
{
luaL_checktype(L, 1, LUA_TTABLE);
@ -199,29 +195,6 @@ static int tmove(lua_State* L)
int tt = !lua_isnoneornil(L, 5) ? 5 : 1; /* destination table */
luaL_checktype(L, tt, LUA_TTABLE);
void (*telemetrycb)(lua_State * L, int f, int e, int t, int nf, int nt) = lua_table_move_telemetry;
if (DFFlag::LuauTableMoveTelemetry2 && telemetrycb && e >= f)
{
int nf = lua_objlen(L, 1);
int nt = lua_objlen(L, tt);
bool report = false;
// source index range must be in bounds in source table unless the table is empty (permits 1..#t moves)
if (!(f == 1 || (f >= 1 && f <= nf)))
report = true;
if (!(e == nf || (e >= 1 && e <= nf)))
report = true;
// destination index must be in bounds in dest table or be exactly at the first empty element (permits concats)
if (!(t == nt + 1 || (t >= 1 && t <= nt)))
report = true;
if (report)
telemetrycb(L, f, e, t, nf, nt);
}
if (e >= f)
{ /* otherwise, nothing to move */
luaL_argcheck(L, f > 0 || e < INT_MAX + f, 3, "too many elements to move");

View file

@ -37,6 +37,8 @@ const char* const luaT_eventname[] = {
"__newindex",
"__mode",
"__namecall",
"__call",
"__iter",
"__eq",
@ -54,13 +56,13 @@ const char* const luaT_eventname[] = {
"__lt",
"__le",
"__concat",
"__call",
"__type",
};
// clang-format on
static_assert(sizeof(luaT_typenames) / sizeof(luaT_typenames[0]) == LUA_T_COUNT, "luaT_typenames size mismatch");
static_assert(sizeof(luaT_eventname) / sizeof(luaT_eventname[0]) == TM_N, "luaT_eventname size mismatch");
static_assert(TM_EQ < 8, "fasttm optimization stores a bitfield with metamethods in a byte");
void luaT_init(lua_State* L)
{

View file

@ -16,6 +16,8 @@ typedef enum
TM_NEWINDEX,
TM_MODE,
TM_NAMECALL,
TM_CALL,
TM_ITER,
TM_EQ, /* last tag method with `fast' access */
@ -33,7 +35,6 @@ typedef enum
TM_LT,
TM_LE,
TM_CONCAT,
TM_CALL,
TM_TYPE,
TM_N /* number of elements in the enum */

View file

@ -22,14 +22,21 @@ Udata* luaU_newudata(lua_State* L, size_t s, int tag)
void luaU_freeudata(lua_State* L, Udata* u, lua_Page* page)
{
void (*dtor)(void*) = nullptr;
if (u->tag < LUA_UTAG_LIMIT)
{
void (*dtor)(lua_State*, void*) = nullptr;
dtor = L->global->udatagc[u->tag];
if (dtor)
dtor(L, u->data);
}
else if (u->tag == UTAG_IDTOR)
{
void (*dtor)(void*) = nullptr;
memcpy(&dtor, &u->data + u->len - sizeof(dtor), sizeof(dtor));
if (dtor)
dtor(u->data);
}
if (dtor)
dtor(u->data);
luaM_freegco(L, u, sizeudata(u->len), u->memcat, page);
}

View file

@ -16,7 +16,7 @@
#include <string.h>
LUAU_FASTFLAG(LuauTableNewBoundary2)
LUAU_FASTFLAGVARIABLE(LuauIter, false)
// Disable c99-designator to avoid the warning in CGOTO dispatch table
#ifdef __clang__
@ -110,7 +110,7 @@ LUAU_FASTFLAG(LuauTableNewBoundary2)
VM_DISPATCH_OP(LOP_FORGLOOP_NEXT), VM_DISPATCH_OP(LOP_GETVARARGS), VM_DISPATCH_OP(LOP_DUPCLOSURE), VM_DISPATCH_OP(LOP_PREPVARARGS), \
VM_DISPATCH_OP(LOP_LOADKX), VM_DISPATCH_OP(LOP_JUMPX), VM_DISPATCH_OP(LOP_FASTCALL), VM_DISPATCH_OP(LOP_COVERAGE), \
VM_DISPATCH_OP(LOP_CAPTURE), VM_DISPATCH_OP(LOP_JUMPIFEQK), VM_DISPATCH_OP(LOP_JUMPIFNOTEQK), VM_DISPATCH_OP(LOP_FASTCALL1), \
VM_DISPATCH_OP(LOP_FASTCALL2), VM_DISPATCH_OP(LOP_FASTCALL2K),
VM_DISPATCH_OP(LOP_FASTCALL2), VM_DISPATCH_OP(LOP_FASTCALL2K), VM_DISPATCH_OP(LOP_FORGPREP),
#if defined(__GNUC__) || defined(__clang__)
#define VM_USE_CGOTO 1
@ -150,8 +150,9 @@ LUAU_NOINLINE static void luau_prepareFORN(lua_State* L, StkId plimit, StkId pst
LUAU_NOINLINE static bool luau_loopFORG(lua_State* L, int a, int c)
{
// note: it's safe to push arguments past top for complicated reasons (see top of the file)
StkId ra = &L->base[a];
LUAU_ASSERT(ra + 6 <= L->top);
LUAU_ASSERT(ra + 3 <= L->top);
setobjs2s(L, ra + 3 + 2, ra + 2);
setobjs2s(L, ra + 3 + 1, ra + 1);
@ -180,7 +181,7 @@ LUAU_NOINLINE static void luau_callTM(lua_State* L, int nparams, int res)
++L->nCcalls;
if (L->nCcalls >= LUAI_MAXCCALLS)
luaG_runerror(L, "C stack overflow");
luaD_checkCstack(L);
luaD_checkstack(L, LUA_MINSTACK);
@ -693,7 +694,7 @@ static void luau_execute(lua_State* L)
}
else
{
// slow-path, may invoke Lua calls via __index metamethod
// slow-path, may invoke Lua calls via __newindex metamethod
L->cachedslot = slot;
VM_PROTECT(luaV_settable(L, rb, kv, ra));
// save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
@ -703,7 +704,7 @@ static void luau_execute(lua_State* L)
}
else
{
// fast-path: user data with C __index TM
// fast-path: user data with C __newindex TM
const TValue* fn = 0;
if (ttisuserdata(rb) && (fn = fasttm(L, uvalue(rb)->metatable, TM_NEWINDEX)) && ttisfunction(fn) && clvalue(fn)->isC)
{
@ -724,7 +725,7 @@ static void luau_execute(lua_State* L)
}
else
{
// slow-path, may invoke Lua calls via __index metamethod
// slow-path, may invoke Lua calls via __newindex metamethod
VM_PROTECT(luaV_settable(L, rb, kv, ra));
VM_NEXT();
}
@ -2204,20 +2205,149 @@ static void luau_execute(lua_State* L)
}
}
VM_CASE(LOP_FORGPREP)
{
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
if (ttisfunction(ra))
{
/* will be called during FORGLOOP */
}
else if (FFlag::LuauIter)
{
Table* mt = ttistable(ra) ? hvalue(ra)->metatable : ttisuserdata(ra) ? uvalue(ra)->metatable : cast_to(Table*, NULL);
if (const TValue* fn = fasttm(L, mt, TM_ITER))
{
setobj2s(L, ra + 1, ra);
setobj2s(L, ra, fn);
L->top = ra + 2; /* func + self arg */
LUAU_ASSERT(L->top <= L->stack_last);
VM_PROTECT(luaD_call(L, ra, 3));
L->top = L->ci->top;
}
else if (fasttm(L, mt, TM_CALL))
{
/* table or userdata with __call, will be called during FORGLOOP */
/* TODO: we might be able to stop supporting this depending on whether it's used in practice */
}
else if (ttistable(ra))
{
/* set up registers for builtin iteration */
setobj2s(L, ra + 1, ra);
setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(0)));
setnilvalue(ra);
}
else
{
VM_PROTECT(luaG_typeerror(L, ra, "iterate over"));
}
}
pc += LUAU_INSN_D(insn);
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
VM_NEXT();
}
VM_CASE(LOP_FORGLOOP)
{
VM_INTERRUPT();
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
uint32_t aux = *pc;
// note: this is a slow generic path, fast-path is FORGLOOP_INEXT/NEXT
bool stop;
VM_PROTECT(stop = luau_loopFORG(L, LUAU_INSN_A(insn), aux));
if (!FFlag::LuauIter)
{
bool stop;
VM_PROTECT(stop = luau_loopFORG(L, LUAU_INSN_A(insn), aux));
// note that we need to increment pc by 1 to exit the loop since we need to skip over aux
pc += stop ? 1 : LUAU_INSN_D(insn);
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
VM_NEXT();
// note that we need to increment pc by 1 to exit the loop since we need to skip over aux
pc += stop ? 1 : LUAU_INSN_D(insn);
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
VM_NEXT();
}
// fast-path: builtin table iteration
if (ttisnil(ra) && ttistable(ra + 1) && ttislightuserdata(ra + 2))
{
Table* h = hvalue(ra + 1);
int index = int(reinterpret_cast<uintptr_t>(pvalue(ra + 2)));
int sizearray = h->sizearray;
int sizenode = 1 << h->lsizenode;
// clear extra variables since we might have more than two
if (LUAU_UNLIKELY(aux > 2))
for (int i = 2; i < int(aux); ++i)
setnilvalue(ra + 3 + i);
// first we advance index through the array portion
while (unsigned(index) < unsigned(sizearray))
{
if (!ttisnil(&h->array[index]))
{
setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(index + 1)));
setnvalue(ra + 3, double(index + 1));
setobj2s(L, ra + 4, &h->array[index]);
pc += LUAU_INSN_D(insn);
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
VM_NEXT();
}
index++;
}
// then we advance index through the hash portion
while (unsigned(index - sizearray) < unsigned(sizenode))
{
LuaNode* n = &h->node[index - sizearray];
if (!ttisnil(gval(n)))
{
setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(index + 1)));
getnodekey(L, ra + 3, n);
setobj2s(L, ra + 4, gval(n));
pc += LUAU_INSN_D(insn);
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
VM_NEXT();
}
index++;
}
// fallthrough to exit
pc++;
VM_NEXT();
}
else
{
// note: it's safe to push arguments past top for complicated reasons (see top of the file)
setobjs2s(L, ra + 3 + 2, ra + 2);
setobjs2s(L, ra + 3 + 1, ra + 1);
setobjs2s(L, ra + 3, ra);
L->top = ra + 3 + 3; /* func + 2 args (state and index) */
LUAU_ASSERT(L->top <= L->stack_last);
VM_PROTECT(luaD_call(L, ra + 3, aux));
L->top = L->ci->top;
// recompute ra since stack might have been reallocated
ra = VM_REG(LUAU_INSN_A(insn));
// copy first variable back into the iteration index
setobjs2s(L, ra + 2, ra + 3);
// note that we need to increment pc by 1 to exit the loop since we need to skip over aux
pc += ttisnil(ra + 3) ? 1 : LUAU_INSN_D(insn);
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
VM_NEXT();
}
}
VM_CASE(LOP_FORGPREP_INEXT)
@ -2228,8 +2358,14 @@ static void luau_execute(lua_State* L)
// fast-path: ipairs/inext
if (cl->env->safeenv && ttistable(ra + 1) && ttisnumber(ra + 2) && nvalue(ra + 2) == 0.0)
{
setnilvalue(ra);
/* ra+1 is already the table */
setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(0)));
}
else if (FFlag::LuauIter && !ttisfunction(ra))
{
VM_PROTECT(luaG_typeerror(L, ra, "iterate over"));
}
pc += LUAU_INSN_D(insn);
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
@ -2243,7 +2379,7 @@ static void luau_execute(lua_State* L)
StkId ra = VM_REG(LUAU_INSN_A(insn));
// fast-path: ipairs/inext
if (ttistable(ra + 1) && ttislightuserdata(ra + 2))
if (ttisnil(ra) && ttistable(ra + 1) && ttislightuserdata(ra + 2))
{
Table* h = hvalue(ra + 1);
int index = int(reinterpret_cast<uintptr_t>(pvalue(ra + 2)));
@ -2268,23 +2404,9 @@ static void luau_execute(lua_State* L)
VM_NEXT();
}
}
else if (FFlag::LuauTableNewBoundary2 || (h->lsizenode == 0 && ttisnil(gval(h->node))))
{
// fallthrough to exit
VM_NEXT();
}
else
{
// the table has a hash part; index + 1 may appear in it in which case we need to iterate through the hash portion as well
const TValue* val = luaH_getnum(h, index + 1);
setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(index + 1)));
setnvalue(ra + 3, double(index + 1));
setobj2s(L, ra + 4, val);
// note that nil elements inside the array terminate the traversal
pc += ttisnil(ra + 4) ? 0 : LUAU_INSN_D(insn);
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
// fallthrough to exit
VM_NEXT();
}
}
@ -2308,8 +2430,14 @@ static void luau_execute(lua_State* L)
// fast-path: pairs/next
if (cl->env->safeenv && ttistable(ra + 1) && ttisnil(ra + 2))
{
setnilvalue(ra);
/* ra+1 is already the table */
setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(0)));
}
else if (FFlag::LuauIter && !ttisfunction(ra))
{
VM_PROTECT(luaG_typeerror(L, ra, "iterate over"));
}
pc += LUAU_INSN_D(insn);
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
@ -2323,7 +2451,7 @@ static void luau_execute(lua_State* L)
StkId ra = VM_REG(LUAU_INSN_A(insn));
// fast-path: pairs/next
if (ttistable(ra + 1) && ttislightuserdata(ra + 2))
if (ttisnil(ra) && ttistable(ra + 1) && ttislightuserdata(ra + 2))
{
Table* h = hvalue(ra + 1);
int index = int(reinterpret_cast<uintptr_t>(pvalue(ra + 2)));
@ -2704,7 +2832,7 @@ static void luau_execute(lua_State* L)
{
VM_PROTECT_PC();
int n = f(L, ra, arg, nresults, nullptr, nparams);
int n = f(L, ra, arg, nresults, NULL, nparams);
if (n >= 0)
{

View file

@ -0,0 +1,17 @@
local bench = script and require(script.Parent.bench_support) or require("bench_support")
function test()
local t = {}
for i=1,1000000 do t[i] = i end
local ts0 = os.clock()
local sum = 0
for k,v in t do sum = sum + v end
local ts1 = os.clock()
return ts1-ts0
end
bench.runCode(test, "LargeTableSum: for k,v in {}")

View file

@ -25,7 +25,7 @@ local DisplArea = {}
DisplArea.Width = 300;
DisplArea.Height = 300;
function DrawLine(From, To)
local function DrawLine(From, To)
local x1 = From.V[1];
local x2 = To.V[1];
local y1 = From.V[2];
@ -81,7 +81,7 @@ function DrawLine(From, To)
Q.LastPx = NumPix;
end
function CalcCross(V0, V1)
local function CalcCross(V0, V1)
local Cross = {};
Cross[1] = V0[2]*V1[3] - V0[3]*V1[2];
Cross[2] = V0[3]*V1[1] - V0[1]*V1[3];
@ -89,7 +89,7 @@ function CalcCross(V0, V1)
return Cross;
end
function CalcNormal(V0, V1, V2)
local function CalcNormal(V0, V1, V2)
local A = {}; local B = {};
for i = 1,3 do
A[i] = V0[i] - V1[i];
@ -102,14 +102,14 @@ function CalcNormal(V0, V1, V2)
return A;
end
function CreateP(X,Y,Z)
local function CreateP(X,Y,Z)
local result = {}
result.V = {X,Y,Z,1};
return result
end
-- multiplies two matrices
function MMulti(M1, M2)
local function MMulti(M1, M2)
local M = {{},{},{},{}};
for i = 1,4 do
for j = 1,4 do
@ -120,7 +120,7 @@ function MMulti(M1, M2)
end
-- multiplies matrix with vector
function VMulti(M, V)
local function VMulti(M, V)
local Vect = {};
for i = 1,4 do
Vect[i] = M[i][1] * V[1] + M[i][2] * V[2] + M[i][3] * V[3] + M[i][4] * V[4];
@ -128,7 +128,7 @@ function VMulti(M, V)
return Vect;
end
function VMulti2(M, V)
local function VMulti2(M, V)
local Vect = {};
for i = 1,3 do
Vect[i] = M[i][1] * V[1] + M[i][2] * V[2] + M[i][3] * V[3];
@ -137,7 +137,7 @@ function VMulti2(M, V)
end
-- add to matrices
function MAdd(M1, M2)
local function MAdd(M1, M2)
local M = {{},{},{},{}};
for i = 1,4 do
for j = 1,4 do
@ -147,7 +147,7 @@ function MAdd(M1, M2)
return M;
end
function Translate(M, Dx, Dy, Dz)
local function Translate(M, Dx, Dy, Dz)
local T = {
{1,0,0,Dx},
{0,1,0,Dy},
@ -157,7 +157,7 @@ function Translate(M, Dx, Dy, Dz)
return MMulti(T, M);
end
function RotateX(M, Phi)
local function RotateX(M, Phi)
local a = Phi;
a = a * math.pi / 180;
local Cos = math.cos(a);
@ -171,7 +171,7 @@ function RotateX(M, Phi)
return MMulti(R, M);
end
function RotateY(M, Phi)
local function RotateY(M, Phi)
local a = Phi;
a = a * math.pi / 180;
local Cos = math.cos(a);
@ -185,7 +185,7 @@ function RotateY(M, Phi)
return MMulti(R, M);
end
function RotateZ(M, Phi)
local function RotateZ(M, Phi)
local a = Phi;
a = a * math.pi / 180;
local Cos = math.cos(a);
@ -199,7 +199,7 @@ function RotateZ(M, Phi)
return MMulti(R, M);
end
function DrawQube()
local function DrawQube()
-- calc current normals
local CurN = {};
local i = 5;
@ -245,7 +245,7 @@ function DrawQube()
Q.LastPx = 0;
end
function Loop()
local function Loop()
if (Testing.LoopCount > Testing.LoopMax) then return; end
local TestingStr = tostring(Testing.LoopCount);
while (#TestingStr < 3) do TestingStr = "0" .. TestingStr; end
@ -265,7 +265,7 @@ function Loop()
Loop();
end
function Init(CubeSize)
local function Init(CubeSize)
-- init/reset vars
Origin.V = {150,150,20,1};
Testing.LoopCount = 0;

View file

@ -31,7 +31,7 @@ local loops = 15
local nx = 120
local nz = 120
function morph(a, f)
local function morph(a, f)
local PI2nx = math.pi * 8/nx
local sin = math.sin
local f30 = -(50 * sin(f*math.pi*2))

View file

@ -28,40 +28,40 @@ function test()
local size = 30
function createVector(x,y,z)
local function createVector(x,y,z)
return { x,y,z };
end
function sqrLengthVector(self)
local function sqrLengthVector(self)
return self[1] * self[1] + self[2] * self[2] + self[3] * self[3];
end
function lengthVector(self)
local function lengthVector(self)
return math.sqrt(self[1] * self[1] + self[2] * self[2] + self[3] * self[3]);
end
function addVector(self, v)
local function addVector(self, v)
self[1] = self[1] + v[1];
self[2] = self[2] + v[2];
self[3] = self[3] + v[3];
return self;
end
function subVector(self, v)
local function subVector(self, v)
self[1] = self[1] - v[1];
self[2] = self[2] - v[2];
self[3] = self[3] - v[3];
return self;
end
function scaleVector(self, scale)
local function scaleVector(self, scale)
self[1] = self[1] * scale;
self[2] = self[2] * scale;
self[3] = self[3] * scale;
return self;
end
function normaliseVector(self)
local function normaliseVector(self)
local len = math.sqrt(self[1] * self[1] + self[2] * self[2] + self[3] * self[3]);
self[1] = self[1] / len;
self[2] = self[2] / len;
@ -69,39 +69,39 @@ function normaliseVector(self)
return self;
end
function add(v1, v2)
local function add(v1, v2)
return { v1[1] + v2[1], v1[2] + v2[2], v1[3] + v2[3] };
end
function sub(v1, v2)
local function sub(v1, v2)
return { v1[1] - v2[1], v1[2] - v2[2], v1[3] - v2[3] };
end
function scalev(v1, v2)
local function scalev(v1, v2)
return { v1[1] * v2[1], v1[2] * v2[2], v1[3] * v2[3] };
end
function dot(v1, v2)
local function dot(v1, v2)
return v1[1] * v2[1] + v1[2] * v2[2] + v1[3] * v2[3];
end
function scale(v, scale)
local function scale(v, scale)
return { v[1] * scale, v[2] * scale, v[3] * scale };
end
function cross(v1, v2)
local function cross(v1, v2)
return { v1[2] * v2[3] - v1[3] * v2[2],
v1[3] * v2[1] - v1[1] * v2[3],
v1[1] * v2[2] - v1[2] * v2[1] };
end
function normalise(v)
local function normalise(v)
local len = lengthVector(v);
return { v[1] / len, v[2] / len, v[3] / len };
end
function transformMatrix(self, v)
local function transformMatrix(self, v)
local vals = self;
local x = vals[1] * v[1] + vals[2] * v[2] + vals[3] * v[3] + vals[4];
local y = vals[5] * v[1] + vals[6] * v[2] + vals[7] * v[3] + vals[8];
@ -109,7 +109,7 @@ function transformMatrix(self, v)
return { x, y, z };
end
function invertMatrix(self)
local function invertMatrix(self)
local temp = {}
local tx = -self[4];
local ty = -self[8];
@ -131,7 +131,7 @@ function invertMatrix(self)
end
-- Triangle intersection using barycentric coord method
function Triangle(p1, p2, p3)
local function Triangle(p1, p2, p3)
local this = {}
local edge1 = sub(p3, p1);
@ -205,7 +205,7 @@ function Triangle(p1, p2, p3)
return this
end
function Scene(a_triangles)
local function Scene(a_triangles)
local this = {}
this.triangles = a_triangles;
this.lights = {};
@ -302,7 +302,7 @@ local zero = { 0,0,0 };
-- this camera code is from notes i made ages ago, it is from *somewhere* -- i cannot remember where
-- that somewhere is
function Camera(origin, lookat, up)
local function Camera(origin, lookat, up)
local this = {}
local zaxis = normaliseVector(subVector(lookat, origin));
@ -357,7 +357,7 @@ function Camera(origin, lookat, up)
return this
end
function raytraceScene()
local function raytraceScene()
local startDate = 13154863;
local numTriangles = 2 * 6;
local triangles = {}; -- numTriangles);
@ -450,7 +450,7 @@ function raytraceScene()
return pixels;
end
function arrayToCanvasCommands(pixels)
local function arrayToCanvasCommands(pixels)
local s = {};
table.insert(s, '<!DOCTYPE html><html><head><title>Test</title></head><body><canvas id="renderCanvas" width="' .. size .. 'px" height="' .. size .. 'px"></canvas><scr' .. 'ipt>\nvar pixels = [');
for y = 0,size-1 do
@ -485,7 +485,7 @@ for (var y = 0; y < size; y++) {\n\
return table.concat(s);
end
testOutput = arrayToCanvasCommands(raytraceScene());
local testOutput = arrayToCanvasCommands(raytraceScene());
--local f = io.output("output.html")
--f:write(testOutput)

View file

@ -1,69 +0,0 @@
--[[
The Great Computer Language Shootout
http://shootout.alioth.debian.org/
contributed by Isaac Gouy
]]
local bench = script and require(script.Parent.bench_support) or require("bench_support")
function test()
function TreeNode(left,right,item)
local this = {}
this.left = left;
this.right = right;
this.item = item;
this.itemCheck = function(self)
if (self.left==nil) then return self.item;
else return self.item + self.left:itemCheck() - self.right:itemCheck(); end
end
return this
end
function bottomUpTree(item,depth)
if (depth>0) then
return TreeNode(
bottomUpTree(2*item-1, depth-1)
,bottomUpTree(2*item, depth-1)
,item
);
else
return TreeNode(nil,nil,item);
end
end
local ret = 0;
for n = 4,7,1 do
local minDepth = 4;
local maxDepth = math.max(minDepth + 2, n);
local stretchDepth = maxDepth + 1;
local check = bottomUpTree(0,stretchDepth):itemCheck();
local longLivedTree = bottomUpTree(0,maxDepth);
for depth = minDepth,maxDepth,2 do
local iterations = 2.0 ^ (maxDepth - depth + minDepth - 1) -- 1 << (maxDepth - depth + minDepth);
check = 0;
for i = 1,iterations do
check = check + bottomUpTree(i,depth):itemCheck();
check = check + bottomUpTree(-i,depth):itemCheck();
end
end
ret = ret + longLivedTree:itemCheck();
end
local expected = -4;
if (ret ~= expected) then
assert(false, "ERROR: bad result: expected " .. expected .. " but got " .. ret);
end
end
bench.runCode(test, "access-binary-trees")

View file

@ -7,18 +7,18 @@ local bench = script and require(script.Parent.bench_support) or require("bench_
function test()
function ack(m,n)
local function ack(m,n)
if (m==0) then return n+1; end
if (n==0) then return ack(m-1,1); end
return ack(m-1, ack(m,n-1) );
end
function fib(n)
local function fib(n)
if (n < 2) then return 1; end
return fib(n-2) + fib(n-1);
end
function tak(x,y,z)
local function tak(x,y,z)
if (y >= x) then return z; end
return tak(tak(x-1,y,z), tak(y-1,z,x), tak(z-1,x,y));
end
@ -27,7 +27,7 @@ local result = 0;
for i = 3,5 do
result = result + ack(3,i);
result = result + fib(17.0+i);
result = result + fib(17+i);
result = result + tak(3*i+3,2*i+2,i+1);
end

View file

@ -42,7 +42,68 @@ local Rcon = { { 0x00, 0x00, 0x00, 0x00 },
{0x1b, 0x00, 0x00, 0x00},
{0x36, 0x00, 0x00, 0x00} };
function Cipher(input, w) -- main Cipher function [§5.1]
local function SubBytes(s, Nb) -- apply SBox to state S [§5.1.1]
for r = 0,3 do
for c = 0,Nb-1 do s[r + 1][c + 1] = Sbox[s[r + 1][c + 1] + 1]; end
end
return s;
end
local function ShiftRows(s, Nb) -- shift row r of state S left by r bytes [§5.1.2]
local t = {};
for r = 1,3 do
for c = 0,3 do t[c + 1] = s[r + 1][((c + r) % Nb) + 1] end; -- shift into temp copy
for c = 0,3 do s[r + 1][c + 1] = t[c + 1]; end -- and copy back
end -- note that this will work for Nb=4,5,6, but not 7,8 (always 4 for AES):
return s; -- see fp.gladman.plus.com/cryptography_technology/rijndael/aes.spec.311.pdf
end
local function MixColumns(s, Nb) -- combine bytes of each col of state S [§5.1.3]
for c = 0,3 do
local a = {}; -- 'a' is a copy of the current column from 's'
local b = {}; -- 'b' is a•{02} in GF(2^8)
for i = 0,3 do
a[i + 1] = s[i + 1][c + 1];
if bit32.band(s[i + 1][c + 1], 0x80) ~= 0 then
b[i + 1] = bit32.bxor(bit32.lshift(s[i + 1][c + 1], 1), 0x011b);
else
b[i + 1] = bit32.lshift(s[i + 1][c + 1], 1);
end
end
-- a[n] ^ b[n] is a•{03} in GF(2^8)
s[1][c + 1] = bit32.bxor(b[1], a[2], b[2], a[3], a[4]); -- 2*a0 + 3*a1 + a2 + a3
s[2][c + 1] = bit32.bxor(a[1], b[2], a[3], b[3], a[4]); -- a0 * 2*a1 + 3*a2 + a3
s[3][c + 1] = bit32.bxor(a[1], a[2], b[3], a[4], b[4]); -- a0 + a1 + 2*a2 + 3*a3
s[4][c + 1] = bit32.bxor(a[1], b[1], a[2], a[3], b[4]); -- 3*a0 + a1 + a2 + 2*a3
end
return s;
end
local function SubWord(w) -- apply SBox to 4-byte word w
for i = 0,3 do w[i + 1] = Sbox[w[i + 1] + 1]; end
return w;
end
local function RotWord(w) -- rotate 4-byte word w left by one byte
w[5] = w[1];
for i = 0,3 do w[i + 1] = w[i + 2]; end
return w;
end
local function AddRoundKey(state, w, rnd, Nb) -- xor Round Key into state S [§5.1.4]
for r = 0,3 do
for c = 0,Nb-1 do state[r + 1][c + 1] = bit32.bxor(state[r + 1][c + 1], w[rnd*4+c + 1][r + 1]); end
end
return state;
end
local function Cipher(input, w) -- main Cipher function [§5.1]
local Nb = 4; -- block size (in words): no of columns in state (fixed at 4 for AES)
local Nr = #w / Nb - 1; -- no of rounds: 10/12/14 for 128/192/256-bit keys
@ -69,56 +130,7 @@ function Cipher(input, w) -- main Cipher function [§5.1]
end
function SubBytes(s, Nb) -- apply SBox to state S [§5.1.1]
for r = 0,3 do
for c = 0,Nb-1 do s[r + 1][c + 1] = Sbox[s[r + 1][c + 1] + 1]; end
end
return s;
end
function ShiftRows(s, Nb) -- shift row r of state S left by r bytes [§5.1.2]
local t = {};
for r = 1,3 do
for c = 0,3 do t[c + 1] = s[r + 1][((c + r) % Nb) + 1] end; -- shift into temp copy
for c = 0,3 do s[r + 1][c + 1] = t[c + 1]; end -- and copy back
end -- note that this will work for Nb=4,5,6, but not 7,8 (always 4 for AES):
return s; -- see fp.gladman.plus.com/cryptography_technology/rijndael/aes.spec.311.pdf
end
function MixColumns(s, Nb) -- combine bytes of each col of state S [§5.1.3]
for c = 0,3 do
local a = {}; -- 'a' is a copy of the current column from 's'
local b = {}; -- 'b' is a•{02} in GF(2^8)
for i = 0,3 do
a[i + 1] = s[i + 1][c + 1];
if bit32.band(s[i + 1][c + 1], 0x80) ~= 0 then
b[i + 1] = bit32.bxor(bit32.lshift(s[i + 1][c + 1], 1), 0x011b);
else
b[i + 1] = bit32.lshift(s[i + 1][c + 1], 1);
end
end
-- a[n] ^ b[n] is a•{03} in GF(2^8)
s[1][c + 1] = bit32.bxor(b[1], a[2], b[2], a[3], a[4]); -- 2*a0 + 3*a1 + a2 + a3
s[2][c + 1] = bit32.bxor(a[1], b[2], a[3], b[3], a[4]); -- a0 * 2*a1 + 3*a2 + a3
s[3][c + 1] = bit32.bxor(a[1], a[2], b[3], a[4], b[4]); -- a0 + a1 + 2*a2 + 3*a3
s[4][c + 1] = bit32.bxor(a[1], b[1], a[2], a[3], b[4]); -- 3*a0 + a1 + a2 + 2*a3
end
return s;
end
function AddRoundKey(state, w, rnd, Nb) -- xor Round Key into state S [§5.1.4]
for r = 0,3 do
for c = 0,Nb-1 do state[r + 1][c + 1] = bit32.bxor(state[r + 1][c + 1], w[rnd*4+c + 1][r + 1]); end
end
return state;
end
function KeyExpansion(key) -- generate Key Schedule (byte-array Nr+1 x Nb) from Key [§5.2]
local function KeyExpansion(key) -- generate Key Schedule (byte-array Nr+1 x Nb) from Key [§5.2]
local Nb = 4; -- block size (in words): no of columns in state (fixed at 4 for AES)
local Nk = #key / 4 -- key length (in words): 4/6/8 for 128/192/256-bit keys
local Nr = Nk + 6; -- no of rounds: 10/12/14 for 128/192/256-bit keys
@ -146,17 +158,17 @@ function KeyExpansion(key) -- generate Key Schedule (byte-array Nr+1 x Nb) from
return w;
end
function SubWord(w) -- apply SBox to 4-byte word w
for i = 0,3 do w[i + 1] = Sbox[w[i + 1] + 1]; end
return w;
local function escCtrlChars(str) -- escape control chars which might cause problems handling ciphertext
return string.gsub(str, "[\0\t\n\v\f\r\'\"!-]", function(c) return '!' .. string.byte(c, 1) .. '!'; end);
end
function RotWord(w) -- rotate 4-byte word w left by one byte
w[5] = w[1];
for i = 0,3 do w[i + 1] = w[i + 2]; end
return w;
end
local function unescCtrlChars(str) -- unescape potentially problematic control characters
return string.gsub(str, "!%d%d?%d?!", function(c)
local sc = string.sub(c, 2,-2)
return string.char(tonumber(sc));
end);
end
--[[
* Use AES to encrypt 'plaintext' with 'password' using 'nBits' key, in 'Counter' mode of operation
@ -166,7 +178,7 @@ end
* - cipherblock = plaintext xor outputblock
]]
function AESEncryptCtr(plaintext, password, nBits)
local function AESEncryptCtr(plaintext, password, nBits)
if (not (nBits==128 or nBits==192 or nBits==256)) then return ''; end -- standard allows 128/192/256 bit keys
-- for this example script, generate the key by applying Cipher to 1st 16/24/32 chars of password;
@ -243,7 +255,7 @@ end
* - cipherblock = plaintext xor outputblock
]]
function AESDecryptCtr(ciphertext, password, nBits)
local function AESDecryptCtr(ciphertext, password, nBits)
if (not (nBits==128 or nBits==192 or nBits==256)) then return ''; end -- standard allows 128/192/256 bit keys
local nBytes = nBits/8; -- no bytes in key
@ -300,19 +312,7 @@ function AESDecryptCtr(ciphertext, password, nBits)
return table.concat(plaintext)
end
function escCtrlChars(str) -- escape control chars which might cause problems handling ciphertext
return string.gsub(str, "[\0\t\n\v\f\r\'\"!-]", function(c) return '!' .. string.byte(c, 1) .. '!'; end);
end
function unescCtrlChars(str) -- unescape potentially problematic control characters
return string.gsub(str, "!%d%d?%d?!", function(c)
local sc = string.sub(c, 2,-2)
return string.char(tonumber(sc));
end);
end
function test()
local function test()
local plainText = "ROMEO: But, soft! what light through yonder window breaks?\n\
It is the east, and Juliet is the sun.\n\

View file

@ -31,15 +31,15 @@ function test()
local AG_CONST = 0.6072529350;
function FIXED(X)
local function FIXED(X)
return X * 65536.0;
end
function FLOAT(X)
local function FLOAT(X)
return X / 65536.0;
end
function DEG2RAD(X)
local function DEG2RAD(X)
return 0.017453 * (X);
end
@ -52,7 +52,7 @@ local Angles = {
local Target = 28.027;
function cordicsincos(Target)
local function cordicsincos(Target)
local X;
local Y;
local TargetAngle;
@ -85,7 +85,7 @@ end
local total = 0;
function cordic( runs )
local function cordic( runs )
for i = 1,runs do
total = total + cordicsincos(Target);
end

View file

@ -7,7 +7,7 @@ local bench = script and require(script.Parent.bench_support) or require("bench_
function test()
function partial(n)
local function partial(n)
local a1, a2, a3, a4, a5, a6, a7, a8, a9 = 0, 0, 0, 0, 0, 0, 0, 0, 0;
local twothirds = 2.0/3.0;
local alt = -1.0;

View file

@ -1,72 +0,0 @@
--[[
The Great Computer Language Shootout
http://shootout.alioth.debian.org/
contributed by Ian Osgood
]]
local bench = script and require(script.Parent.bench_support) or require("bench_support")
function test()
function A(i,j)
return 1/((i+j)*(i+j+1)/2+i+1);
end
function Au(u,v)
for i = 0,#u-1 do
local t = 0;
for j = 0,#u-1 do
t = t + A(i,j) * u[j + 1];
end
v[i + 1] = t;
end
end
function Atu(u,v)
for i = 0,#u-1 do
local t = 0;
for j = 0,#u-1 do
t = t + A(j,i) * u[j + 1];
end
v[i + 1] = t;
end
end
function AtAu(u,v,w)
Au(u,w);
Atu(w,v);
end
function spectralnorm(n)
local u, v, w, vv, vBv = {}, {}, {}, 0, 0;
for i = 1,n do
u[i] = 1; v[i] = 0; w[i] = 0;
end
for i = 0,9 do
AtAu(u,v,w);
AtAu(v,u,w);
end
for i = 1,n do
vBv = vBv + u[i]*v[i];
vv = vv + v[i]*v[i];
end
return math.sqrt(vBv/vv);
end
local total = 0;
local i = 6
while i <= 48 do
total = total + spectralnorm(i);
i = i * 2
end
local expected = 5.086694231303284;
if (total ~= expected) then
assert(false, "ERROR: bad result: expected " .. expected .. " but got " .. total)
end
end
bench.runCode(test, "math-spectral-norm")

View file

@ -10,7 +10,7 @@ logo: /assets/images/luau-88.png
plugins: ["jekyll-include-cache", "jekyll-feed"]
include: ["_pages"]
atom_feed:
path: feed.xml
path: "/feed.xml"
defaults:
# _docs

View file

@ -54,7 +54,7 @@ Sandboxing challenges are [covered in the dedicated section](sandbox).
| goto statement | ❌ | this complicates the compiler, makes control flow unstructured and doesn't address a significant need |
| finalizers for tables | ❌ | no `__gc` support due to sandboxing and performance/complexity |
| no more fenv for threads or functions | 😞 | we love this, but it breaks compatibility |
| tables honor the `__len` metamethod | | performance implications, no strong use cases
| tables honor the `__len` metamethod | 🤷‍♀️ | performance implications, no strong use cases
| hex and `\z` escapes in strings | ✔️ | |
| support for hexadecimal floats | 🤷‍♀️ | no strong use cases |
| order metamethods work for different types | ❌ | no strong use cases and more complicated semantics + compat |
@ -63,7 +63,7 @@ Sandboxing challenges are [covered in the dedicated section](sandbox).
| arguments for function called through `xpcall` | ✔️ | |
| optional base in `math.log` | ✔️ | |
| optional separator in `string.rep` | 🤷‍♀️ | no real use cases |
| new metamethods `__pairs` and `__ipairs` | ❌ | would like to reevaluate iteration design long term |
| new metamethods `__pairs` and `__ipairs` | ❌ | superseded by `__iter` |
| frontier patterns | ✔️ | |
| `%g` in patterns | ✔️ | |
| `\0` in patterns | ✔️ | |
@ -72,7 +72,7 @@ Sandboxing challenges are [covered in the dedicated section](sandbox).
Two things that are important to call out here are various new metamethods for tables and yielding in metamethods. In both cases, there are performance implications to supporting this - our implementation is *very* highly tuned for performance, so any changes that affect the core fundamentals of how Lua works have a price. To support yielding in metamethods we'd need to make the core of the VM more involved, since almost every single "interesting" opcode would need to learn how to be resumable - which also complicates the future JIT/AOT story. Metamethods in general are important for extensibility, but very challenging to deal with in implementation, so we err on the side of not supporting any new metamethods unless a strong need arises.
For `__pairs`/`__ipairs`, we aren't sure that this is the right design choice - self-iterating tables via `__iter` are very appealing, and if we can resolve some challenges with array iteration order, that would make the language more accessible so we may go that route instead.
For `__pairs`/`__ipairs`, we felt that extending library functions to enable custom containers wasn't the right choice. Instead we revisited the iteration design to allow for self-iterating objects via the `__iter` metamethod, which results in a cleaner design that also makes it easier to iterate over tables. As such, we have no plans to support `__pairs`/`__ipairs`, as all of their use cases can now be solved by `__iter`.
Ephemeron tables may be implemented at some point since they do have valid uses and they make weak tables semantically cleaner, however the cleanup mechanism for these is expensive and complicated, and as such this can only be considered after the pending GC rework is complete.

View file

@ -92,12 +92,22 @@ As a result, builtin calls are very fast in Luau - they are still slightly slowe
## Optimized table iteration
Luau implements a fully generic iteration protocol; however, for iteration through tables it recognizes three common idioms (`for .. in ipairs(t)`, `for .. in pairs(t)` and `for .. in next, t`) and emits specialized bytecode that is carefully optimized using custom internal iterators.
Luau implements a fully generic iteration protocol; however, for iteration through tables in addition to generalized iteration (`for .. in t`) it recognizes three common idioms (`for .. in ipairs(t)`, `for .. in pairs(t)` and `for .. in next, t`) and emits specialized bytecode that is carefully optimized using custom internal iterators.
As a result, iteration through tables typically doesn't result in function calls for every iteration; the performance of iteration using `pairs` and `ipairs` is comparable, so it's recommended to pick the iteration style based on readability instead of performance.
As a result, iteration through tables typically doesn't result in function calls for every iteration; the performance of generalized iteration, `pairs`, and `ipairs` is comparable, so generalized iteration (without `pairs`/`ipairs`) is recommended unless the code needs to be compatible with vanilla Lua or requires the specific semantics of `ipairs` (which stops at the first `nil` element). Additionally, generalized iteration avoids calling `pairs` when the loop starts, which can be noticeable when the table is very short.
Iterating through array-like tables using `for i=1,#t` tends to be slightly slower because of the extra cost incurred when reading elements from the table.
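As a rough illustration of the three styles (a minimal sketch; relative timings depend on the workload and table contents):
```lua
local t = table.create(1000)
for i = 1, 1000 do
    t[i] = i
end

local sum1 = 0
for _, v in t do -- generalized iteration: no pairs/ipairs call when the loop starts
    sum1 = sum1 + v
end

local sum2 = 0
for _, v in ipairs(t) do -- ipairs: stops at the first nil element
    sum2 = sum2 + v
end

local sum3 = 0
for i = 1, #t do -- numeric loop: pays the table read cost on every iteration
    sum3 = sum3 + t[i]
end

assert(sum1 == sum2 and sum2 == sum3)
```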
## Optimized table length
Luau tables use a hybrid array/hash storage, like in Lua; in some sense "arrays" don't truly exist and are an internal optimization, but some operations, notably `#t` and functions that depend on it, like `table.insert`, are defined by the Luau/Lua language to allow internal optimizations. Luau takes advantage of that fact.
Unlike Lua, Luau guarantees that the element at index `#t` is stored in the array part of the table. This can accelerate various table operations that use indices limited by `#t`, and it makes the worst-case complexity of `#t` O(log N), unlike Lua where the worst case is O(N). This also accelerates computation of this value for small tables like `{ [1] = 1 }`, since we never need to look at the hash part.
The "default" implementation of `#t` in both Lua and Luau is a binary search. Luau uses a special branch-free (depending on the compiler...) implementation of the binary search which results in 50+% faster computation of table length when it needs to be computed from scratch.
Additionally, Luau can cache the length of the table and adjust it following operations like `table.insert`/`table.remove`; this means that in practice, `#t` is almost always a constant time operation.
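For example (a small sketch of the observable behavior rather than of the internals):
```lua
local t = {}
for i = 1, 5 do
    table.insert(t, i * i) -- append; the cached length can be adjusted here
end
assert(#t == 5) -- typically answered from the cached boundary in constant time

table.remove(t) -- removal adjusts the cached length as well
assert(#t == 4)
```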
## Creating and modifying tables
Luau implements several optimizations for table creation. When creating object-like tables, it's recommended to use table literals (`{ ... }`) and to specify all table fields in the literal in one go instead of assigning fields later; this triggers an optimization inspired by LuaJIT's "table templates" and results in higher performance when creating objects. When creating array-like tables, if the maximum size of the table is known up front, it's recommended to use the `table.create` function, which can create an empty table with preallocated storage, and optionally fill it with a given value.
@ -112,7 +122,7 @@ v.z = 3
return v
```
When appending elements to tables, it's recommended to use `table.insert` (which is the fastest method to append an element to a table if the table size is not known). In cases when a table is filled sequentially, however, it's much more efficient to use a known index for insertion - together with preallocating tables using `table.create` this can result in much faster code, for example this is the fastest way to build a table of squares:
When appending elements to tables, it's recommended to use `table.insert` (which is the fastest method to append an element to a table if the table size is not known). In cases when a table is filled sequentially, however, it can be more efficient to use a known index for insertion; together with preallocating tables using `table.create`, this can result in much faster code. For example, this is the fastest way to build a table of squares:
```lua
local t = table.create(N)

View file

@ -4,7 +4,7 @@ title: Sandboxing
toc: true
---
Luau is safe to embed. Broadly speaking, this means that even in the face of untrusted (and in Roblox case, actively malicious) code, the language and the standard library don't allow any unsafe access to the underlying system, and don't have any bugs that allow escaping out of the sandbox (e.g. to gain native code execution through ROP gadgets et al). Additionally, the VM provides extra features to implement isolation of privileged code from unprivileged code and protect one from the other; this is important if the embedding environment (Roblox) decides to expose some APIs that may not be safe to call from untrusted code, for example because they do provide controlled access to the underlying system or risk PII exposure through fingerprinting etc.
Luau is safe to embed. Broadly speaking, this means that even in the face of untrusted (and in Roblox's case, actively malicious) code, the language and the standard library don't allow any unsafe access to the underlying system, and don't have any bugs that allow escaping out of the sandbox (e.g. to gain native code execution through ROP gadgets et al). Additionally, the VM provides extra features to implement isolation of privileged code from unprivileged code and protect one from the other; this is important if the embedding environment decides to expose some APIs that may not be safe to call from untrusted code, for example because they do provide controlled access to the underlying system or risk PII exposure through fingerprinting etc.
This safety is achieved through a combination of removing features from the standard library that are unsafe, adding features to the VM that make it possible to implement sandboxing and isolation, and making sure the implementation is safe from memory safety issues using fuzzing.
@ -19,7 +19,7 @@ The following libraries and global functions have been removed as a result:
- `io.` library has been removed entirely, as it gives access to files and allows running processes
- `package.` library has been removed entirely, as it gives access to files and allows loading native modules
- `os.` library has been cleaned up from file and environment access functions (`execute`, `exit`, etc.). The only supported functions in the library are `clock`, `date`, `difftime` and `time`.
- `debug.` library has been removed to a large extent, as it has functions that aren't memory safe and other functions break isolation; the only supported functions are `traceback` ~~and `getinfo` (with reduced functionality)~~.
- `debug.` library has been removed to a large extent, as it has functions that aren't memory safe and other functions break isolation; the only supported functions are `traceback` and `info` (which is similar to `debug.getinfo` but has a slightly different interface; see the sketch after this list).
- `dofile` and `loadfile` allowed access to file system and have been removed.
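A minimal sketch of how `info` reads, assuming the current `debug.info` interface where each letter in the options string selects a returned value ("s" = source, "l" = line, "n" = name):
```lua
local function where()
    -- level 1 refers to the function that called debug.info, i.e. `where` itself
    local source, line = debug.info(1, "sl")
    return string.format("%s:%d", source, line)
end

print(where())
```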
To achieve memory safety, access to function bytecode has been removed. Bytecode is hard to validate and using untrusted bytecode may lead to exploits. Thus, `loadstring` doesn't work with bytecode inputs, and `string.dump`/`load` have been removed as they aren't necessary anymore. When embedding Luau, bytecode should be encrypted/signed to prevent MITM attacks as well, as the VM assumes that the bytecode was generated by the Luau compiler (which never produces invalid/unsafe bytecode).
@ -54,7 +54,7 @@ This mechanism is bad for performance, memory safety and isolation:
- In Lua 5.1, `__gc` support requires traversing userdata lists redundantly during garbage collection to filter out finalizable objects
- In later versions of Lua, userdata that implement `__gc` are split into separate lists; however, finalization prolongs the lifetime of the finalized objects which results in less prompt memory reclamation, and two-step destruction results in extra cache misses for userdata
- `__gc` runs during garbage collection in context of an arbitrary thread which makes the thread identity mechanism described above invalid
- `__gc` runs during garbage collection in the context of an arbitrary thread, which makes the thread identity mechanism used in Roblox to support trusted Luau code invalid
- Objects can be removed from weak tables *after* being finalized, which means that accessing these objects can result in memory safety bugs, unless all exposed userdata methods guard against use-after-gc.
- If `__gc` method ever leaks to scripts, they can call it directly on an object and use any method exposed by that object after that. This means that `__gc` and all other exposed methods must support memory safety when called on a destroyed object.

View file

@ -196,3 +196,26 @@ local sign = if x < 0 then -1 elseif x > 0 then 1 else 0
```
**Note:** In Luau, the `if-then-else` expression is preferred over the standard Lua idiom of writing `a and b or c` (which roughly simulates a ternary operator). However, the Lua idiom may return an unexpected result if `b` evaluates to false, whereas the `if-then-else` expression behaves as expected in all situations.
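For instance (a small sketch; any `b` that evaluates to `false` or `nil` triggers the surprise):
```lua
local function pick(cond)
    -- Lua idiom: when the selected value is false (or nil), it falls through to the fallback
    local viaAndOr = cond and false or "fallback"
    -- if-then-else expression: returns exactly the selected branch
    local viaIfElse = if cond then false else "fallback"
    return viaAndOr, viaIfElse
end

print(pick(true)) --> fallback   false
```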
## Generalized iteration
Luau uses the standard Lua syntax for iterating through containers, `for vars in values`, but extends the semantics with support for generalized iteration. In Lua, to iterate over a table you need to use an iterator like `next` or a function that returns one like `pairs` or `ipairs`. In Luau, you can simply iterate over a table:
```lua
for k, v in {1, 4, 9} do
assert(k * k == v)
end
```
This works for plain tables, but iteration can also be customized for tables or userdata by implementing the `__iter` metamethod, which is called before the iteration begins and should return an iterator function like `next` (or a custom one):
```lua
local obj = { items = {1, 4, 9} }
setmetatable(obj, { __iter = function(o) return next, o.items end })
for k, v in obj do
assert(k * k == v)
end
```
The default iteration order for tables is specified to be consecutive for elements `1..#t` and unordered after that, visiting every element; similarly to iteration using `pairs`, modifying the table entries for keys other than the current one results in unspecified behavior.
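The `__iter` metamethod can also return a closure-based iterator; here is a hypothetical `range` helper (a sketch, not part of any library):
```lua
local function range(n)
    return setmetatable({}, {
        __iter = function()
            local i = 0
            return function() -- called as iterator(state, control); both are ignored here
                i = i + 1
                if i <= n then
                    return i, i * i
                end
            end
        end,
    })
end

for i, sq in range(3) do
    print(i, sq) --> 1 1, 2 4, 3 9
end
```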

View file

@ -0,0 +1,51 @@
---
layout: single
title: "Luau Recap: April 2022"
---
Luau is our new language that you can read more about at [https://luau-lang.org](https://luau-lang.org).
[Cross-posted to the [Roblox Developer Forum](https://devforum.roblox.com/t/luau-recap-april-2022/).]
It's been a bit of a quiet month. We mostly have small optimizations and bugfixes for you.
It is now allowed to define functions on sealed tables that have string indexers. These functions will be typechecked against the indexer type. For example, the following is now valid:
```lua
local a : {[string]: () -> number} = {}
function a.y() return 4 end -- OK
```
Autocomplete will now provide string literal suggestions for singleton types, e.g.
```lua
local function f(x: "a" | "b") end
f("_") -- suggest "a" and "b"
```
Improve error recovery in the case where we encounter a type pack variable in a place where one is not allowed, e.g. `type Foo<A...> = { value: A... }`.
When code does not pass enough arguments to a variadic function, the error feedback is now better.
For example, the following script now produces a much nicer error message:
```lua
type A = { [number]: number }
type B = { [number]: string }
local a: A = { 1, 2, 3 }
-- ERROR: Type 'A' could not be converted into 'B'
-- caused by:
-- Property '[indexer value]' is not compatible. Type 'number' could not be converted into 'string'
local b: B = a
```
If the following code were to error because `Hello` was undefined, we would erroneously include the comment in the span of the error. This is now fixed.
```lua
type Foo = Hello -- some comment over here
```
Fix a crash that could occur when strict scripts have cyclic require() dependencies.
Add an option to autocomplete to cause it to abort processing after a certain amount of time has elapsed.

View file

@ -575,6 +575,7 @@ ic_private const char* parse_tag_value( tag_t* tag, char* idbuf, const char* s,
}
// limit name and attr to 128 bytes
char valbuf[128];
valbuf[0] = 0; // fixes gcc uninitialized warning
ic_strncpy( idbuf, 128, id, idend - id);
ic_strncpy( valbuf, 128, val, valend - val);
ic_str_tolower(idbuf);

View file

@ -137,6 +137,21 @@ int registerTypes(Luau::TypeChecker& env)
return 0;
}
static void setupFrontend(Luau::Frontend& frontend)
{
registerTypes(frontend.typeChecker);
Luau::freeze(frontend.typeChecker.globalTypes);
registerTypes(frontend.typeCheckerForAutocomplete);
Luau::freeze(frontend.typeCheckerForAutocomplete.globalTypes);
frontend.iceHandler.onInternalError = [](const char* error) {
printf("ICE: %s\n", error);
LUAU_ASSERT(!"ICE");
};
}
struct FuzzFileResolver : Luau::FileResolver
{
std::optional<Luau::SourceCode> readSource(const Luau::ModuleName& name) override
@ -238,19 +253,11 @@ DEFINE_PROTO_FUZZER(const luau::ModuleSet& message)
if (kFuzzTypeck)
{
static FuzzFileResolver fileResolver;
static Luau::NullConfigResolver configResolver;
static FuzzConfigResolver configResolver;
static Luau::FrontendOptions options{true, true};
static Luau::Frontend frontend(&fileResolver, &configResolver, options);
static int once = registerTypes(frontend.typeChecker);
(void)once;
static int once2 = (Luau::freeze(frontend.typeChecker.globalTypes), 0);
(void)once2;
frontend.iceHandler.onInternalError = [](const char* error) {
printf("ICE: %s\n", error);
LUAU_ASSERT(!"ICE");
};
static int once = (setupFrontend(frontend), 0);
// restart
frontend.clear();

View file

@ -441,7 +441,7 @@ not-quite-set-theoretic-only-if : ∀ {S₁ T₁ S₂ T₂} →
s₂ Language S₂ s₂
-- This is the "only if" part of being a set-theoretic model
( Q Q Comp(Language S₁ Comp(Lift(Language T₁))) Q Comp(Language S₂ Comp(Lift(Language T₂))))
( Q Q Comp((Language S₁) Comp(Lift(Language T₁))) Q Comp((Language S₂) Comp(Lift(Language T₂))))
(Language (S₁ T₁) Language (S₂ T₂))
not-quite-set-theoretic-only-if {S₁} {T₁} {S₂} {T₂} s₂ S₂s₂ p = r where
@ -474,7 +474,7 @@ not-quite-set-theoretic-only-if {S₁} {T₁} {S₂} {T₂} s₂ S₂s₂ p = r
-- A counterexample when the argument type is empty.
set-theoretic-counterexample-one : ( Q Q Comp((Language never) Comp(Language number)) Q Comp((Language never) Comp(Language string)))
set-theoretic-counterexample-one : ( Q Q Comp((Language never) Comp(Lift(Language number))) Q Comp((Language never) Comp(Lift(Language string))))
set-theoretic-counterexample-one Q q ((scalar s) , u) Qtu (scalar () , p)
set-theoretic-counterexample-two : (never number) ≮: (never string)

View file

@ -15,12 +15,6 @@ This document tracks unimplemented RFCs.
**Status**: Needs implementation
## Sealed/unsealed typing changes
[RFC: Only strip optional properties from unsealed tables during subtyping](https://github.com/Roblox/luau/blob/master/rfcs/unsealed-table-subtyping-strips-optional-properties.md)
**Status**: Implemented but not fully rolled out yet.
## Safe navigation operator
[RFC: Safe navigation postfix operator (?)](https://github.com/Roblox/luau/blob/master/rfcs/syntax-safe-navigation-operator.md)
@ -39,10 +33,10 @@ This document tracks unimplemented RFCs.
[RFC: Generalized iteration](https://github.com/Roblox/luau/blob/master/rfcs/generalized-iteration.md)
**Status**: Needs implementation
**Status**: Implemented but not fully rolled out yet.
## Lower Bounds Calculation
[RFC: Lower bounds calculation](https://github.com/Roblox/luau/blob/master/rfcs/lower-bounds-calculation.md)
**Status**: Needs implementation
**Status**: Implemented but not fully rolled out yet.

View file

@ -1,5 +1,7 @@
# Only strip optional properties from unsealed tables during subtyping
**Status**: Implemented
## Summary
Currently subtyping allows optional properties to be stripped from table types during subtyping.

View file

@ -0,0 +1,410 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/AssemblyBuilderX64.h"
#include "Luau/StringUtils.h"
#include "doctest.h"
#include <functional>
#include <string.h>
using namespace Luau::CodeGen;
std::string bytecodeAsArray(const std::vector<uint8_t>& bytecode)
{
std::string result = "{";
for (size_t i = 0; i < bytecode.size(); i++)
Luau::formatAppend(result, "%s0x%02x", i == 0 ? "" : ", ", bytecode[i]);
return result.append("}");
}
class AssemblyBuilderX64Fixture
{
public:
void check(std::function<void(AssemblyBuilderX64& build)> f, std::vector<uint8_t> result)
{
AssemblyBuilderX64 build(/* logText= */ false);
f(build);
build.finalize();
if (build.code != result)
{
printf("Expected: %s\nReceived: %s\n", bytecodeAsArray(result).c_str(), bytecodeAsArray(build.code).c_str());
CHECK(false);
}
}
};
TEST_SUITE_BEGIN("x64Assembly");
#define SINGLE_COMPARE(inst, ...) \
check( \
[](AssemblyBuilderX64& build) { \
build.inst; \
}, \
{__VA_ARGS__})
TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "BaseBinaryInstructionForms")
{
// reg, reg
SINGLE_COMPARE(add(rax, rcx), 0x48, 0x03, 0xc1);
SINGLE_COMPARE(add(rsp, r12), 0x49, 0x03, 0xe4);
SINGLE_COMPARE(add(r14, r10), 0x4d, 0x03, 0xf2);
// reg, imm
SINGLE_COMPARE(add(rax, 0), 0x48, 0x83, 0xc0, 0x00);
SINGLE_COMPARE(add(rax, 0x7f), 0x48, 0x83, 0xc0, 0x7f);
SINGLE_COMPARE(add(rax, 0x80), 0x48, 0x81, 0xc0, 0x80, 0x00, 0x00, 0x00);
SINGLE_COMPARE(add(r10, 0x7fffffff), 0x49, 0x81, 0xc2, 0xff, 0xff, 0xff, 0x7f);
// reg, [reg]
SINGLE_COMPARE(add(rax, qword[rax]), 0x48, 0x03, 0x00);
SINGLE_COMPARE(add(rax, qword[rbx]), 0x48, 0x03, 0x03);
SINGLE_COMPARE(add(rax, qword[rsp]), 0x48, 0x03, 0x04, 0x24);
SINGLE_COMPARE(add(rax, qword[rbp]), 0x48, 0x03, 0x45, 0x00);
SINGLE_COMPARE(add(rax, qword[r10]), 0x49, 0x03, 0x02);
SINGLE_COMPARE(add(rax, qword[r12]), 0x49, 0x03, 0x04, 0x24);
SINGLE_COMPARE(add(rax, qword[r13]), 0x49, 0x03, 0x45, 0x00);
SINGLE_COMPARE(add(r12, qword[rax]), 0x4c, 0x03, 0x20);
SINGLE_COMPARE(add(r12, qword[rbx]), 0x4c, 0x03, 0x23);
SINGLE_COMPARE(add(r12, qword[rsp]), 0x4c, 0x03, 0x24, 0x24);
SINGLE_COMPARE(add(r12, qword[rbp]), 0x4c, 0x03, 0x65, 0x00);
SINGLE_COMPARE(add(r12, qword[r10]), 0x4d, 0x03, 0x22);
SINGLE_COMPARE(add(r12, qword[r12]), 0x4d, 0x03, 0x24, 0x24);
SINGLE_COMPARE(add(r12, qword[r13]), 0x4d, 0x03, 0x65, 0x00);
// reg, [base+imm8]
SINGLE_COMPARE(add(rax, qword[rax + 0x1b]), 0x48, 0x03, 0x40, 0x1b);
SINGLE_COMPARE(add(rax, qword[rbx + 0x1b]), 0x48, 0x03, 0x43, 0x1b);
SINGLE_COMPARE(add(rax, qword[rsp + 0x1b]), 0x48, 0x03, 0x44, 0x24, 0x1b);
SINGLE_COMPARE(add(rax, qword[rbp + 0x1b]), 0x48, 0x03, 0x45, 0x1b);
SINGLE_COMPARE(add(rax, qword[r10 + 0x1b]), 0x49, 0x03, 0x42, 0x1b);
SINGLE_COMPARE(add(rax, qword[r12 + 0x1b]), 0x49, 0x03, 0x44, 0x24, 0x1b);
SINGLE_COMPARE(add(rax, qword[r13 + 0x1b]), 0x49, 0x03, 0x45, 0x1b);
SINGLE_COMPARE(add(r12, qword[rax + 0x1b]), 0x4c, 0x03, 0x60, 0x1b);
SINGLE_COMPARE(add(r12, qword[rbx + 0x1b]), 0x4c, 0x03, 0x63, 0x1b);
SINGLE_COMPARE(add(r12, qword[rsp + 0x1b]), 0x4c, 0x03, 0x64, 0x24, 0x1b);
SINGLE_COMPARE(add(r12, qword[rbp + 0x1b]), 0x4c, 0x03, 0x65, 0x1b);
SINGLE_COMPARE(add(r12, qword[r10 + 0x1b]), 0x4d, 0x03, 0x62, 0x1b);
SINGLE_COMPARE(add(r12, qword[r12 + 0x1b]), 0x4d, 0x03, 0x64, 0x24, 0x1b);
SINGLE_COMPARE(add(r12, qword[r13 + 0x1b]), 0x4d, 0x03, 0x65, 0x1b);
// reg, [base+imm32]
SINGLE_COMPARE(add(rax, qword[rax + 0xabab]), 0x48, 0x03, 0x80, 0xab, 0xab, 0x00, 0x00);
SINGLE_COMPARE(add(rax, qword[rbx + 0xabab]), 0x48, 0x03, 0x83, 0xab, 0xab, 0x00, 0x00);
SINGLE_COMPARE(add(rax, qword[rsp + 0xabab]), 0x48, 0x03, 0x84, 0x24, 0xab, 0xab, 0x00, 0x00);
SINGLE_COMPARE(add(rax, qword[rbp + 0xabab]), 0x48, 0x03, 0x85, 0xab, 0xab, 0x00, 0x00);
SINGLE_COMPARE(add(rax, qword[r10 + 0xabab]), 0x49, 0x03, 0x82, 0xab, 0xab, 0x00, 0x00);
SINGLE_COMPARE(add(rax, qword[r12 + 0xabab]), 0x49, 0x03, 0x84, 0x24, 0xab, 0xab, 0x00, 0x00);
SINGLE_COMPARE(add(rax, qword[r13 + 0xabab]), 0x49, 0x03, 0x85, 0xab, 0xab, 0x00, 0x00);
SINGLE_COMPARE(add(r12, qword[rax + 0xabab]), 0x4c, 0x03, 0xa0, 0xab, 0xab, 0x00, 0x00);
SINGLE_COMPARE(add(r12, qword[rbx + 0xabab]), 0x4c, 0x03, 0xa3, 0xab, 0xab, 0x00, 0x00);
SINGLE_COMPARE(add(r12, qword[rsp + 0xabab]), 0x4c, 0x03, 0xa4, 0x24, 0xab, 0xab, 0x00, 0x00);
SINGLE_COMPARE(add(r12, qword[rbp + 0xabab]), 0x4c, 0x03, 0xa5, 0xab, 0xab, 0x00, 0x00);
SINGLE_COMPARE(add(r12, qword[r10 + 0xabab]), 0x4d, 0x03, 0xa2, 0xab, 0xab, 0x00, 0x00);
SINGLE_COMPARE(add(r12, qword[r12 + 0xabab]), 0x4d, 0x03, 0xa4, 0x24, 0xab, 0xab, 0x00, 0x00);
SINGLE_COMPARE(add(r12, qword[r13 + 0xabab]), 0x4d, 0x03, 0xa5, 0xab, 0xab, 0x00, 0x00);
// reg, [index*scale]
SINGLE_COMPARE(add(rax, qword[rax * 2]), 0x48, 0x03, 0x04, 0x45, 0x00, 0x00, 0x00, 0x00);
SINGLE_COMPARE(add(rax, qword[rbx * 2]), 0x48, 0x03, 0x04, 0x5d, 0x00, 0x00, 0x00, 0x00);
SINGLE_COMPARE(add(rax, qword[rbp * 2]), 0x48, 0x03, 0x04, 0x6d, 0x00, 0x00, 0x00, 0x00);
SINGLE_COMPARE(add(rax, qword[r10 * 2]), 0x4a, 0x03, 0x04, 0x55, 0x00, 0x00, 0x00, 0x00);
SINGLE_COMPARE(add(rax, qword[r12 * 2]), 0x4a, 0x03, 0x04, 0x65, 0x00, 0x00, 0x00, 0x00);
SINGLE_COMPARE(add(rax, qword[r13 * 2]), 0x4a, 0x03, 0x04, 0x6d, 0x00, 0x00, 0x00, 0x00);
SINGLE_COMPARE(add(r12, qword[rax * 2]), 0x4c, 0x03, 0x24, 0x45, 0x00, 0x00, 0x00, 0x00);
SINGLE_COMPARE(add(r12, qword[rbx * 2]), 0x4c, 0x03, 0x24, 0x5d, 0x00, 0x00, 0x00, 0x00);
SINGLE_COMPARE(add(r12, qword[rbp * 2]), 0x4c, 0x03, 0x24, 0x6d, 0x00, 0x00, 0x00, 0x00);
SINGLE_COMPARE(add(r12, qword[r10 * 2]), 0x4e, 0x03, 0x24, 0x55, 0x00, 0x00, 0x00, 0x00);
SINGLE_COMPARE(add(r12, qword[r12 * 2]), 0x4e, 0x03, 0x24, 0x65, 0x00, 0x00, 0x00, 0x00);
SINGLE_COMPARE(add(r12, qword[r13 * 2]), 0x4e, 0x03, 0x24, 0x6d, 0x00, 0x00, 0x00, 0x00);
// reg, [base+index*scale+imm]
SINGLE_COMPARE(add(rax, qword[rax + rax * 2]), 0x48, 0x03, 0x04, 0x40);
SINGLE_COMPARE(add(rax, qword[rax + rbx * 2 + 0x1b]), 0x48, 0x03, 0x44, 0x58, 0x1b);
SINGLE_COMPARE(add(rax, qword[rax + rbp * 2]), 0x48, 0x03, 0x04, 0x68);
SINGLE_COMPARE(add(rax, qword[rax + rbp + 0xabab]), 0x48, 0x03, 0x84, 0x28, 0xAB, 0xab, 0x00, 0x00);
SINGLE_COMPARE(add(rax, qword[rax + r12 + 0x1b]), 0x4a, 0x03, 0x44, 0x20, 0x1b);
SINGLE_COMPARE(add(rax, qword[rax + r12 * 4 + 0xabab]), 0x4a, 0x03, 0x84, 0xa0, 0xab, 0xab, 0x00, 0x00);
SINGLE_COMPARE(add(rax, qword[rax + r13 * 2 + 0x1b]), 0x4a, 0x03, 0x44, 0x68, 0x1b);
SINGLE_COMPARE(add(rax, qword[rax + r13 + 0xabab]), 0x4a, 0x03, 0x84, 0x28, 0xab, 0xab, 0x00, 0x00);
SINGLE_COMPARE(add(r12, qword[rax + r12 * 2]), 0x4e, 0x03, 0x24, 0x60);
SINGLE_COMPARE(add(r12, qword[rax + r13 + 0xabab]), 0x4e, 0x03, 0xA4, 0x28, 0xab, 0xab, 0x00, 0x00);
SINGLE_COMPARE(add(r12, qword[rax + rbp * 2 + 0x1b]), 0x4c, 0x03, 0x64, 0x68, 0x1b);
// reg, [imm32]
SINGLE_COMPARE(add(rax, qword[0]), 0x48, 0x03, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00);
SINGLE_COMPARE(add(rax, qword[0xabab]), 0x48, 0x03, 0x04, 0x25, 0xab, 0xab, 0x00, 0x00);
// [addr], reg
SINGLE_COMPARE(add(qword[rax], rax), 0x48, 0x01, 0x00);
SINGLE_COMPARE(add(qword[rax + rax * 4 + 0xabab], rax), 0x48, 0x01, 0x84, 0x80, 0xab, 0xab, 0x00, 0x00);
SINGLE_COMPARE(add(qword[rbx + rax * 2 + 0x1b], rax), 0x48, 0x01, 0x44, 0x43, 0x1b);
SINGLE_COMPARE(add(qword[rbx + rbp * 2 + 0x1b], rax), 0x48, 0x01, 0x44, 0x6b, 0x1b);
SINGLE_COMPARE(add(qword[rbp + rbp * 4 + 0xabab], rax), 0x48, 0x01, 0x84, 0xad, 0xab, 0xab, 0x00, 0x00);
SINGLE_COMPARE(add(qword[rbp + r12 + 0x1b], rax), 0x4a, 0x01, 0x44, 0x25, 0x1b);
SINGLE_COMPARE(add(qword[r12], rax), 0x49, 0x01, 0x04, 0x24);
SINGLE_COMPARE(add(qword[r13 + rbx + 0xabab], rax), 0x49, 0x01, 0x84, 0x1d, 0xab, 0xab, 0x00, 0x00);
SINGLE_COMPARE(add(qword[rax + r13 * 2 + 0x1b], rsi), 0x4a, 0x01, 0x74, 0x68, 0x1b);
SINGLE_COMPARE(add(qword[rbp + rbx * 2], rsi), 0x48, 0x01, 0x74, 0x5d, 0x00);
SINGLE_COMPARE(add(qword[rsp + r10 * 2 + 0x1b], r10), 0x4e, 0x01, 0x54, 0x54, 0x1b);
}
TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "BaseUnaryInstructionForms")
{
SINGLE_COMPARE(div(rcx), 0x48, 0xf7, 0xf1);
SINGLE_COMPARE(idiv(qword[rax]), 0x48, 0xf7, 0x38);
SINGLE_COMPARE(mul(qword[rax + rbx]), 0x48, 0xf7, 0x24, 0x18);
SINGLE_COMPARE(neg(r9), 0x49, 0xf7, 0xd9);
SINGLE_COMPARE(not_(r12), 0x49, 0xf7, 0xd4);
}
TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "FormsOfMov")
{
SINGLE_COMPARE(mov(rcx, 1), 0x48, 0xb9, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
SINGLE_COMPARE(mov64(rcx, 0x1234567812345678ll), 0x48, 0xb9, 0x78, 0x56, 0x34, 0x12, 0x78, 0x56, 0x34, 0x12);
SINGLE_COMPARE(mov(ecx, 2), 0xb9, 0x02, 0x00, 0x00, 0x00);
SINGLE_COMPARE(mov(cl, 2), 0xb1, 0x02);
SINGLE_COMPARE(mov(rcx, qword[rdi]), 0x48, 0x8b, 0x0f);
SINGLE_COMPARE(mov(dword[rax], 0xabcd), 0xc7, 0x00, 0xcd, 0xab, 0x00, 0x00);
SINGLE_COMPARE(mov(r13, 1), 0x49, 0xbd, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
SINGLE_COMPARE(mov64(r13, 0x1234567812345678ll), 0x49, 0xbd, 0x78, 0x56, 0x34, 0x12, 0x78, 0x56, 0x34, 0x12);
SINGLE_COMPARE(mov(r13d, 2), 0x41, 0xbd, 0x02, 0x00, 0x00, 0x00);
SINGLE_COMPARE(mov(r13, qword[r12]), 0x4d, 0x8b, 0x2c, 0x24);
SINGLE_COMPARE(mov(dword[r13], 0xabcd), 0x41, 0xc7, 0x45, 0x00, 0xcd, 0xab, 0x00, 0x00);
SINGLE_COMPARE(mov(qword[rdx], r9), 0x4c, 0x89, 0x0a);
SINGLE_COMPARE(mov(byte[rsi], 0x3), 0xc6, 0x06, 0x03);
SINGLE_COMPARE(mov(byte[rsi], al), 0x88, 0x06);
}
TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "FormsOfTest")
{
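// TEST with an immediate is the 0xF6/0xF7 /0 group; register and memory operands use the 0x85 form.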
SINGLE_COMPARE(test(al, 8), 0xf6, 0xc0, 0x08);
SINGLE_COMPARE(test(eax, 8), 0xf7, 0xc0, 0x08, 0x00, 0x00, 0x00);
SINGLE_COMPARE(test(rax, 8), 0x48, 0xf7, 0xc0, 0x08, 0x00, 0x00, 0x00);
SINGLE_COMPARE(test(rcx, 0xabab), 0x48, 0xf7, 0xc1, 0xab, 0xab, 0x00, 0x00);
SINGLE_COMPARE(test(rcx, rax), 0x48, 0x85, 0xc8);
SINGLE_COMPARE(test(rax, qword[rcx]), 0x48, 0x85, 0x01);
}
TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "FormsOfShift")
{
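// Shift-by-1 encodes as 0xD0/0xD1, shift-by-CL as 0xD2/0xD3, shift-by-imm8 as 0xC0/0xC1; the ModRM
// reg field picks the operation (/4 shl/sal, /5 shr, /7 sar).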
SINGLE_COMPARE(shl(al, 1), 0xd0, 0xe0);
SINGLE_COMPARE(shl(al, cl), 0xd2, 0xe0);
SINGLE_COMPARE(shr(al, 4), 0xc0, 0xe8, 0x04);
SINGLE_COMPARE(shr(eax, 1), 0xd1, 0xe8);
SINGLE_COMPARE(sal(eax, cl), 0xd3, 0xe0);
SINGLE_COMPARE(sal(eax, 4), 0xc1, 0xe0, 0x04);
SINGLE_COMPARE(sar(rax, 4), 0x48, 0xc1, 0xf8, 0x04);
SINGLE_COMPARE(sar(r11, 1), 0x49, 0xd1, 0xfb);
}
TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "FormsOfLea")
{
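// LEA (0x8D) computes the effective address of its memory operand without accessing memory; the SIB
// forms mirror the ones used for loads.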
SINGLE_COMPARE(lea(rax, qword[rdx + rcx]), 0x48, 0x8d, 0x04, 0x0a);
SINGLE_COMPARE(lea(rax, qword[rdx + rax * 4]), 0x48, 0x8d, 0x04, 0x82);
SINGLE_COMPARE(lea(rax, qword[r13 + r12 * 4 + 4]), 0x4b, 0x8d, 0x44, 0xa5, 0x04);
}
TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "ControlFlow")
{
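// Jumps are emitted with a rel32 displacement measured from the end of the jump instruction, which is
// why the backward jumps below encode negative offsets.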
// Jump back
check(
[](AssemblyBuilderX64& build) {
Label start = build.setLabel();
build.add(rsi, 1);
build.cmp(rsi, rdi);
build.jcc(Condition::Equal, start);
},
{0x48, 0x83, 0xc6, 0x01, 0x48, 0x3b, 0xf7, 0x0f, 0x84, 0xf3, 0xff, 0xff, 0xff});
// Jump back, but the label is set before use
check(
[](AssemblyBuilderX64& build) {
Label start;
build.add(rsi, 1);
build.setLabel(start);
build.cmp(rsi, rdi);
build.jcc(Condition::Equal, start);
},
{0x48, 0x83, 0xc6, 0x01, 0x48, 0x3b, 0xf7, 0x0f, 0x84, 0xf7, 0xff, 0xff, 0xff});
// Jump forward
check(
[](AssemblyBuilderX64& build) {
Label skip;
build.cmp(rsi, rdi);
build.jcc(Condition::Greater, skip);
build.or_(rdi, 0x3e);
build.setLabel(skip);
},
{0x48, 0x3b, 0xf7, 0x0f, 0x8f, 0x04, 0x00, 0x00, 0x00, 0x48, 0x83, 0xcf, 0x3e});
// Regular jump
check(
[](AssemblyBuilderX64& build) {
Label skip;
build.jmp(skip);
build.and_(rdi, 0x3e);
build.setLabel(skip);
},
{0xe9, 0x04, 0x00, 0x00, 0x00, 0x48, 0x83, 0xe7, 0x3e});
}
TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "AVXBinaryInstructionForms")
{
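// These forms are all emitted with the three-byte VEX prefix (0xC4); the second source may be a
// register or a memory operand of matching width (xmmword/ymmword for packed, dword/qword for scalar).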
SINGLE_COMPARE(vaddpd(xmm8, xmm10, xmm14), 0xc4, 0x41, 0xa9, 0x58, 0xc6);
SINGLE_COMPARE(vaddpd(xmm8, xmm10, xmmword[r9]), 0xc4, 0x41, 0xa9, 0x58, 0x01);
SINGLE_COMPARE(vaddpd(ymm8, ymm10, ymm14), 0xc4, 0x41, 0xad, 0x58, 0xc6);
SINGLE_COMPARE(vaddpd(ymm8, ymm10, ymmword[r9]), 0xc4, 0x41, 0xad, 0x58, 0x01);
SINGLE_COMPARE(vaddps(xmm8, xmm10, xmm14), 0xc4, 0x41, 0xa8, 0x58, 0xc6);
SINGLE_COMPARE(vaddps(xmm8, xmm10, xmmword[r9]), 0xc4, 0x41, 0xa8, 0x58, 0x01);
SINGLE_COMPARE(vaddsd(xmm8, xmm10, xmm14), 0xc4, 0x41, 0xab, 0x58, 0xc6);
SINGLE_COMPARE(vaddsd(xmm8, xmm10, qword[r9]), 0xc4, 0x41, 0xab, 0x58, 0x01);
SINGLE_COMPARE(vaddss(xmm8, xmm10, xmm14), 0xc4, 0x41, 0xaa, 0x58, 0xc6);
SINGLE_COMPARE(vaddss(xmm8, xmm10, dword[r9]), 0xc4, 0x41, 0xaa, 0x58, 0x01);
SINGLE_COMPARE(vaddps(xmm1, xmm2, xmm3), 0xc4, 0xe1, 0xe8, 0x58, 0xcb);
SINGLE_COMPARE(vaddps(xmm9, xmm12, xmmword[r9 + r14 * 2 + 0x1c]), 0xc4, 0x01, 0x98, 0x58, 0x4c, 0x71, 0x1c);
SINGLE_COMPARE(vaddps(ymm1, ymm2, ymm3), 0xc4, 0xe1, 0xec, 0x58, 0xcb);
SINGLE_COMPARE(vaddps(ymm9, ymm12, ymmword[r9 + r14 * 2 + 0x1c]), 0xc4, 0x01, 0x9c, 0x58, 0x4c, 0x71, 0x1c);
}
TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "AVXUnaryMergeInstructionForms")
{
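// The scalar forms (vsqrtsd/vsqrtss) take an extra source whose upper lanes are merged into the
// destination; the packed forms have no merge operand.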
SINGLE_COMPARE(vsqrtpd(xmm8, xmm10), 0xc4, 0x41, 0xf9, 0x51, 0xc2);
SINGLE_COMPARE(vsqrtpd(xmm8, xmmword[r9]), 0xc4, 0x41, 0xf9, 0x51, 0x01);
SINGLE_COMPARE(vsqrtpd(ymm8, ymm10), 0xc4, 0x41, 0xfd, 0x51, 0xc2);
SINGLE_COMPARE(vsqrtpd(ymm8, ymmword[r9]), 0xc4, 0x41, 0xfd, 0x51, 0x01);
SINGLE_COMPARE(vsqrtps(xmm8, xmm10), 0xc4, 0x41, 0xf8, 0x51, 0xc2);
SINGLE_COMPARE(vsqrtps(xmm8, xmmword[r9]), 0xc4, 0x41, 0xf8, 0x51, 0x01);
SINGLE_COMPARE(vsqrtsd(xmm8, xmm10, xmm14), 0xc4, 0x41, 0xab, 0x51, 0xc6);
SINGLE_COMPARE(vsqrtsd(xmm8, xmm10, qword[r9]), 0xc4, 0x41, 0xab, 0x51, 0x01);
SINGLE_COMPARE(vsqrtss(xmm8, xmm10, xmm14), 0xc4, 0x41, 0xaa, 0x51, 0xc6);
SINGLE_COMPARE(vsqrtss(xmm8, xmm10, dword[r9]), 0xc4, 0x41, 0xaa, 0x51, 0x01);
}
TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "AVXMoveInstructionForms")
{
SINGLE_COMPARE(vmovsd(qword[r9], xmm10), 0xc4, 0x41, 0xfb, 0x11, 0x11);
SINGLE_COMPARE(vmovsd(xmm8, qword[r9]), 0xc4, 0x41, 0xfb, 0x10, 0x01);
SINGLE_COMPARE(vmovsd(xmm8, xmm10, xmm14), 0xc4, 0x41, 0xab, 0x10, 0xc6);
SINGLE_COMPARE(vmovss(dword[r9], xmm10), 0xc4, 0x41, 0xfa, 0x11, 0x11);
SINGLE_COMPARE(vmovss(xmm8, dword[r9]), 0xc4, 0x41, 0xfa, 0x10, 0x01);
SINGLE_COMPARE(vmovss(xmm8, xmm10, xmm14), 0xc4, 0x41, 0xaa, 0x10, 0xc6);
SINGLE_COMPARE(vmovapd(xmm8, xmmword[r9]), 0xc4, 0x41, 0xf9, 0x28, 0x01);
SINGLE_COMPARE(vmovapd(xmmword[r9], xmm10), 0xc4, 0x41, 0xf9, 0x29, 0x11);
SINGLE_COMPARE(vmovapd(ymm8, ymmword[r9]), 0xc4, 0x41, 0xfd, 0x28, 0x01);
SINGLE_COMPARE(vmovaps(xmm8, xmmword[r9]), 0xc4, 0x41, 0xf8, 0x28, 0x01);
SINGLE_COMPARE(vmovaps(xmmword[r9], xmm10), 0xc4, 0x41, 0xf8, 0x29, 0x11);
SINGLE_COMPARE(vmovaps(ymm8, ymmword[r9]), 0xc4, 0x41, 0xfc, 0x28, 0x01);
SINGLE_COMPARE(vmovupd(xmm8, xmmword[r9]), 0xc4, 0x41, 0xf9, 0x10, 0x01);
SINGLE_COMPARE(vmovupd(xmmword[r9], xmm10), 0xc4, 0x41, 0xf9, 0x11, 0x11);
SINGLE_COMPARE(vmovupd(ymm8, ymmword[r9]), 0xc4, 0x41, 0xfd, 0x10, 0x01);
SINGLE_COMPARE(vmovups(xmm8, xmmword[r9]), 0xc4, 0x41, 0xf8, 0x10, 0x01);
SINGLE_COMPARE(vmovups(xmmword[r9], xmm10), 0xc4, 0x41, 0xf8, 0x11, 0x11);
SINGLE_COMPARE(vmovups(ymm8, ymmword[r9]), 0xc4, 0x41, 0xfc, 0x10, 0x01);
}
TEST_CASE("LogTest")
{
AssemblyBuilderX64 build(/* logText= */ true);
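// With logText enabled the builder also records a textual listing of the emitted instructions in
// build.text, which is compared against the expected disassembly below.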
build.push(r12);
build.add(rax, rdi);
build.add(rcx, 8);
build.sub(dword[rax], 0x1fdc);
build.and_(dword[rcx], 0x37);
build.mov(rdi, qword[rax + rsi * 2]);
build.vaddss(xmm0, xmm0, dword[rax + r14 * 2 + 0x1c]);
Label start = build.setLabel();
build.cmp(rsi, rdi);
build.jcc(Condition::Equal, start);
build.jmp(qword[rdx]);
build.vaddps(ymm9, ymm12, ymmword[rbp + 0xc]);
build.vaddpd(ymm2, ymm7, build.f64(2.5));
build.neg(qword[rbp + r12 * 2]);
build.mov64(r10, 0x1234567812345678ll);
build.vmovapd(xmmword[rax], xmm11);
build.pop(r12);
build.ret();
build.finalize();
bool same = "\n" + build.text == R"(
push r12
add rax,rdi
add rcx,8
sub dword ptr [rax],1FDCh
and dword ptr [rcx],37h
mov rdi,qword ptr [rax+rsi*2]
vaddss xmm0,xmm0,dword ptr [rax+r14*2+01Ch]
.L1:
cmp rsi,rdi
je .L1
jmp qword ptr [rdx]
vaddps ymm9,ymm12,ymmword ptr [rbp+0Ch]
vaddpd ymm2,ymm7,qword ptr [.start-8]
neg qword ptr [rbp+r12*2]
mov r10,1234567812345678h
vmovapd xmmword ptr [rax],xmm11
pop r12
ret
)";
CHECK(same);
}
TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "Constants")
{
// clang-format off
check(
[](AssemblyBuilderX64& build) {
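// i64/f32/f64/f32x4 place the literal in the constant data block; the instruction references it
// RIP-relative (ModRM 0x05 + disp32), hence the negative 32-bit displacements in the expected bytes.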
build.xor_(rax, rax);
build.add(rax, build.i64(0x1234567887654321));
build.vmovss(xmm2, build.f32(1.0f));
build.vmovsd(xmm3, build.f64(1.0));
build.vmovaps(xmm4, build.f32x4(1.0f, 2.0f, 4.0f, 8.0f));
build.ret();
},
{
0x48, 0x33, 0xc0,
0x48, 0x03, 0x05, 0xee, 0xff, 0xff, 0xff,
0xc4, 0xe1, 0xfa, 0x10, 0x15, 0xe1, 0xff, 0xff, 0xff,
0xc4, 0xe1, 0xfb, 0x10, 0x1d, 0xcc, 0xff, 0xff, 0xff,
0xc4, 0xe1, 0xf8, 0x28, 0x25, 0xab, 0xff, 0xff, 0xff,
0xc3
});
// clang-format on
}
TEST_CASE("ConstantStorage")
{
AssemblyBuilderX64 build(/* logText= */ false);
for (int i = 0; i <= 3000; i++)
build.vaddss(xmm0, xmm0, build.f32(float(i)));
build.finalize();
LUAU_ASSERT(build.data.size() == 12004);
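// 3001 distinct 4-byte constants were allocated; the checks below expect constant i at
// data.size() - (i + 1) * sizeof(float), i.e. the data buffer is filled from the end backwards.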
for (int i = 0; i <= 3000; i++)
{
float v;
memcpy(&v, &build.data[build.data.size() - (i + 1) * sizeof(float)], sizeof(v));
LUAU_ASSERT(v == float(i));
}
}
TEST_SUITE_END();

View file

@@ -7,7 +7,7 @@
using namespace Luau;
struct DocumentationSymbolFixture : Fixture
struct DocumentationSymbolFixture : BuiltinsFixture
{
std::optional<DocumentationSymbol> getDocSymbol(const std::string& source, Position position)
{
@@ -92,4 +92,17 @@ bar(foo())
CHECK_EQ("number", toString(*expectedOty));
}
TEST_CASE_FIXTURE(Fixture, "ast_ancestry_at_eof")
{
check(R"(
if true then
)");
std::vector<AstNode*> ancestry = findAstAncestryOfPosition(*getMainSourceModule(), Position(2, 4));
REQUIRE_GE(ancestry.size(), 2);
AstStat* parentStat = ancestry[ancestry.size() - 2]->asStat();
REQUIRE(bool(parentStat));
REQUIRE(parentStat->is<AstStatIf>());
}
TEST_SUITE_END();

View file

@@ -14,7 +14,6 @@
LUAU_FASTFLAG(LuauTraceTypesInNonstrictMode2)
LUAU_FASTFLAG(LuauSetMetatableDoesNotTimeTravel)
LUAU_FASTFLAG(LuauSeparateTypechecks)
using namespace Luau;
@@ -27,7 +26,7 @@ template<class BaseType>
struct ACFixtureImpl : BaseType
{
ACFixtureImpl()
: Fixture(true, true)
: BaseType(true, true)
{
}
@@ -83,20 +82,13 @@ struct ACFixtureImpl : BaseType
LoadDefinitionFileResult loadDefinition(const std::string& source)
{
if (FFlag::LuauSeparateTypechecks)
{
TypeChecker& typeChecker = this->frontend.typeCheckerForAutocomplete;
unfreeze(typeChecker.globalTypes);
LoadDefinitionFileResult result = loadDefinitionFile(typeChecker, typeChecker.globalScope, source, "@test");
freeze(typeChecker.globalTypes);
TypeChecker& typeChecker = this->frontend.typeCheckerForAutocomplete;
unfreeze(typeChecker.globalTypes);
LoadDefinitionFileResult result = loadDefinitionFile(typeChecker, typeChecker.globalScope, source, "@test");
freeze(typeChecker.globalTypes);
REQUIRE_MESSAGE(result.success, "loadDefinition: unable to load definition file");
return result;
}
else
{
return BaseType::loadDefinition(source);
}
REQUIRE_MESSAGE(result.success, "loadDefinition: unable to load definition file");
return result;
}
const Position& getPosition(char marker) const
@@ -111,6 +103,18 @@ struct ACFixtureImpl : BaseType
};
struct ACFixture : ACFixtureImpl<Fixture>
{
ACFixture()
: ACFixtureImpl<Fixture>()
{
addGlobalBinding(frontend.typeChecker, "table", Binding{typeChecker.anyType});
addGlobalBinding(frontend.typeChecker, "math", Binding{typeChecker.anyType});
addGlobalBinding(frontend.typeCheckerForAutocomplete, "table", Binding{typeChecker.anyType});
addGlobalBinding(frontend.typeCheckerForAutocomplete, "math", Binding{typeChecker.anyType});
}
};
struct ACBuiltinsFixture : ACFixtureImpl<BuiltinsFixture>
{
};
@ -277,7 +281,7 @@ TEST_CASE_FIXTURE(ACFixture, "function_parameters")
CHECK(ac.entryMap.count("test"));
}
TEST_CASE_FIXTURE(ACFixture, "get_member_completions")
TEST_CASE_FIXTURE(ACBuiltinsFixture, "get_member_completions")
{
check(R"(
local a = table.@1
@ -376,7 +380,7 @@ TEST_CASE_FIXTURE(ACFixture, "table_intersection")
CHECK(ac.entryMap.count("c3"));
}
TEST_CASE_FIXTURE(ACFixture, "get_string_completions")
TEST_CASE_FIXTURE(ACBuiltinsFixture, "get_string_completions")
{
check(R"(
local a = ("foo"):@1
@ -427,7 +431,7 @@ TEST_CASE_FIXTURE(ACFixture, "method_call_inside_function_body")
CHECK(!ac.entryMap.count("math"));
}
TEST_CASE_FIXTURE(ACFixture, "method_call_inside_if_conditional")
TEST_CASE_FIXTURE(ACBuiltinsFixture, "method_call_inside_if_conditional")
{
check(R"(
if table: @1
@@ -1884,7 +1888,7 @@ ex.b(function(x:
CHECK(!ac.entryMap.count("(done) -> number"));
}
TEST_CASE_FIXTURE(ACFixture, "suggest_external_module_type")
TEST_CASE_FIXTURE(ACBuiltinsFixture, "suggest_external_module_type")
{
fileResolver.source["Module/A"] = R"(
export type done = { x: number, y: number }
@@ -2235,7 +2239,7 @@ local a: aaa.do
CHECK(ac.entryMap.count("other"));
}
TEST_CASE_FIXTURE(ACFixture, "autocompleteSource")
TEST_CASE_FIXTURE(ACBuiltinsFixture, "autocompleteSource")
{
std::string_view source = R"(
local a = table. -- Line 1
@ -2269,7 +2273,7 @@ TEST_CASE_FIXTURE(ACFixture, "autocompleteSource_comments")
CHECK_EQ(0, ac.entryMap.size());
}
TEST_CASE_FIXTURE(ACFixture, "autocompleteProp_index_function_metamethod_is_variadic")
TEST_CASE_FIXTURE(ACBuiltinsFixture, "autocompleteProp_index_function_metamethod_is_variadic")
{
std::string_view source = R"(
type Foo = {x: number}
@@ -2720,7 +2724,7 @@ type A<T... = ...@1> = () -> T
CHECK(ac.entryMap.count("string"));
}
TEST_CASE_FIXTURE(ACFixture, "autocomplete_oop_implicit_self")
TEST_CASE_FIXTURE(ACBuiltinsFixture, "autocomplete_oop_implicit_self")
{
check(R"(
--!strict
@@ -2728,15 +2732,15 @@ local Class = {}
Class.__index = Class
type Class = typeof(setmetatable({} :: { x: number }, Class))
function Class.new(x: number): Class
return setmetatable({x = x}, Class)
return setmetatable({x = x}, Class)
end
function Class.getx(self: Class)
return self.x
return self.x
end
function test()
local c = Class.new(42)
local n = c:@1
print(n)
local c = Class.new(42)
local n = c:@1
print(n)
end
)");
@@ -2745,7 +2749,7 @@ end
CHECK(ac.entryMap.count("getx"));
}
TEST_CASE_FIXTURE(ACFixture, "autocomplete_on_string_singletons")
TEST_CASE_FIXTURE(ACBuiltinsFixture, "autocomplete_on_string_singletons")
{
check(R"(
--!strict
@ -2760,8 +2764,7 @@ TEST_CASE_FIXTURE(ACFixture, "autocomplete_on_string_singletons")
TEST_CASE_FIXTURE(ACFixture, "autocomplete_string_singletons")
{
ScopedFastFlag luauAutocompleteSingletonTypes{"LuauAutocompleteSingletonTypes", true};
ScopedFastFlag luauExpectedTypesOfProperties{"LuauExpectedTypesOfProperties", true};
ScopedFastFlag sff{"LuauTwoPassAliasDefinitionFix", true};
check(R"(
type tag = "cat" | "dog"
@ -2799,8 +2802,6 @@ TEST_CASE_FIXTURE(ACFixture, "autocomplete_string_singletons")
TEST_CASE_FIXTURE(ACFixture, "autocomplete_string_singleton_equality")
{
ScopedFastFlag luauAutocompleteSingletonTypes{"LuauAutocompleteSingletonTypes", true};
check(R"(
type tagged = {tag:"cat", fieldx:number} | {tag:"dog", fieldy:number}
local x: tagged = {tag="cat", fieldx=2}
@ -2822,8 +2823,6 @@ TEST_CASE_FIXTURE(ACFixture, "autocomplete_string_singleton_equality")
TEST_CASE_FIXTURE(ACFixture, "autocomplete_boolean_singleton")
{
ScopedFastFlag luauAutocompleteSingletonTypes{"LuauAutocompleteSingletonTypes", true};
check(R"(
local function f(x: true) end
f(@1)
@@ -2839,7 +2838,7 @@ f(@1)
TEST_CASE_FIXTURE(ACFixture, "autocomplete_string_singleton_escape")
{
ScopedFastFlag luauAutocompleteSingletonTypes{"LuauAutocompleteSingletonTypes", true};
ScopedFastFlag sff{"LuauTwoPassAliasDefinitionFix", true};
check(R"(
type tag = "strange\t\"cat\"" | 'nice\t"dog"'
@@ -2998,7 +2997,7 @@ s.@1
CHECK(ac.entryMap["sub"].wrongIndexType == true);
}
TEST_CASE_FIXTURE(ACFixture, "string_library_non_self_calls_are_fine")
TEST_CASE_FIXTURE(ACBuiltinsFixture, "string_library_non_self_calls_are_fine")
{
ScopedFastFlag selfCallAutocompleteFix{"LuauSelfCallAutocompleteFix", true};
@@ -3016,7 +3015,7 @@ string.@1
CHECK(ac.entryMap["sub"].wrongIndexType == false);
}
TEST_CASE_FIXTURE(ACFixture, "string_library_self_calls_are_invalid")
TEST_CASE_FIXTURE(ACBuiltinsFixture, "string_library_self_calls_are_invalid")
{
ScopedFastFlag selfCallAutocompleteFix{"LuauSelfCallAutocompleteFix", true};

View file

@@ -10,8 +10,10 @@ using namespace Luau;
TEST_SUITE_BEGIN("BuiltinDefinitionsTest");
TEST_CASE_FIXTURE(Fixture, "lib_documentation_symbols")
TEST_CASE_FIXTURE(BuiltinsFixture, "lib_documentation_symbols")
{
CHECK(!typeChecker.globalScope->bindings.empty());
for (const auto& [name, binding] : typeChecker.globalScope->bindings)
{
std::string nameString(name.c_str());

Some files were not shown because too many files have changed in this diff.